author	Andrea Bastoni <bastoni@cs.unc.edu>	2010-05-30 19:16:45 -0400
committer	Andrea Bastoni <bastoni@cs.unc.edu>	2010-05-30 19:16:45 -0400
commit	ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree	644b88f8a71896307d71438e9b3af49126ffb22b /drivers/scsi
parent	43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent	3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)
Merge branch 'wip-2.6.34' into old-private-master (archived-private-master)
Diffstat (limited to 'drivers/scsi')
-rw-r--r-- drivers/scsi/3w-9xxx.c | 20
-rw-r--r-- drivers/scsi/3w-sas.c | 1925
-rw-r--r-- drivers/scsi/3w-sas.h | 396
-rw-r--r-- drivers/scsi/3w-xxxx.c | 9
-rw-r--r-- drivers/scsi/53c700.c | 10
-rw-r--r-- drivers/scsi/BusLogic.c | 1
-rw-r--r-- drivers/scsi/FlashPoint.c | 2
-rw-r--r-- drivers/scsi/Kconfig | 45
-rw-r--r-- drivers/scsi/Makefile | 4
-rw-r--r-- drivers/scsi/NCR_D700.c | 1
-rw-r--r-- drivers/scsi/NCR_Q720.c | 1
-rw-r--r-- drivers/scsi/a100u2w.c | 3
-rw-r--r-- drivers/scsi/a2091.c | 1
-rw-r--r-- drivers/scsi/a3000.c | 1
-rw-r--r-- drivers/scsi/a4000t.c | 1
-rw-r--r-- drivers/scsi/aacraid/aachba.c | 52
-rw-r--r-- drivers/scsi/aacraid/aacraid.h | 11
-rw-r--r-- drivers/scsi/aacraid/commctrl.c | 28
-rw-r--r-- drivers/scsi/aacraid/comminit.c | 8
-rw-r--r-- drivers/scsi/aacraid/commsup.c | 72
-rw-r--r-- drivers/scsi/aacraid/dpcsup.c | 36
-rw-r--r-- drivers/scsi/aacraid/linit.c | 6
-rw-r--r-- drivers/scsi/aacraid/rx.c | 1
-rw-r--r-- drivers/scsi/aacraid/sa.c | 1
-rw-r--r-- drivers/scsi/advansys.c | 60
-rw-r--r-- drivers/scsi/aha152x.c | 1
-rw-r--r-- drivers/scsi/aha1542.c | 1
-rw-r--r-- drivers/scsi/aha1740.c | 1
-rw-r--r-- drivers/scsi/aic7xxx/aic79xx.seq | 4
-rw-r--r-- drivers/scsi/aic7xxx/aic79xx_core.c | 55
-rw-r--r-- drivers/scsi/aic7xxx/aic79xx_osm.c | 3
-rw-r--r-- drivers/scsi/aic7xxx/aic7xxx.seq | 2
-rw-r--r-- drivers/scsi/aic7xxx/aic7xxx_core.c | 2
-rw-r--r-- drivers/scsi/aic7xxx/aic7xxx_osm.c | 3
-rw-r--r-- drivers/scsi/aic94xx/aic94xx_hwi.c | 1
-rw-r--r-- drivers/scsi/aic94xx/aic94xx_init.c | 1
-rw-r--r-- drivers/scsi/aic94xx/aic94xx_reg_def.h | 2
-rw-r--r-- drivers/scsi/aic94xx/aic94xx_scb.c | 1
-rw-r--r-- drivers/scsi/aic94xx/aic94xx_sds.c | 1
-rw-r--r-- drivers/scsi/aic94xx/aic94xx_seq.c | 1
-rw-r--r-- drivers/scsi/aic94xx/aic94xx_tmf.c | 1
-rw-r--r-- drivers/scsi/arcmsr/arcmsr_hba.c | 6
-rw-r--r-- drivers/scsi/arm/fas216.c | 2
-rw-r--r-- drivers/scsi/atari_NCR5380.c | 1
-rw-r--r-- drivers/scsi/atp870u.c | 1
-rw-r--r-- drivers/scsi/be2iscsi/be.h | 45
-rw-r--r-- drivers/scsi/be2iscsi/be_cmds.c | 330
-rw-r--r-- drivers/scsi/be2iscsi/be_cmds.h | 49
-rw-r--r-- drivers/scsi/be2iscsi/be_iscsi.c | 155
-rw-r--r-- drivers/scsi/be2iscsi/be_iscsi.h | 2
-rw-r--r-- drivers/scsi/be2iscsi/be_main.c | 1297
-rw-r--r-- drivers/scsi/be2iscsi/be_main.h | 85
-rw-r--r-- drivers/scsi/be2iscsi/be_mgmt.c | 177
-rw-r--r-- drivers/scsi/be2iscsi/be_mgmt.h | 16
-rw-r--r-- drivers/scsi/bfa/Makefile | 8
-rw-r--r-- drivers/scsi/bfa/bfa_cb_ioim_macros.h | 10
-rw-r--r-- drivers/scsi/bfa/bfa_cee.c | 2
-rw-r--r-- drivers/scsi/bfa/bfa_core.c | 19
-rw-r--r-- drivers/scsi/bfa/bfa_csdebug.c | 4
-rw-r--r-- drivers/scsi/bfa/bfa_fcpim.c | 4
-rw-r--r-- drivers/scsi/bfa/bfa_fcpim_priv.h | 4
-rw-r--r-- drivers/scsi/bfa/bfa_fcport.c | 1742
-rw-r--r-- drivers/scsi/bfa/bfa_fcs.c | 63
-rw-r--r-- drivers/scsi/bfa/bfa_fcs_lport.c | 86
-rw-r--r-- drivers/scsi/bfa/bfa_fcs_port.c | 11
-rw-r--r-- drivers/scsi/bfa/bfa_fcs_uf.c | 8
-rw-r--r-- drivers/scsi/bfa/bfa_fcxp.c | 8
-rw-r--r-- drivers/scsi/bfa/bfa_hw_cb.c | 13
-rw-r--r-- drivers/scsi/bfa/bfa_hw_ct.c | 9
-rw-r--r-- drivers/scsi/bfa/bfa_intr.c | 113
-rw-r--r-- drivers/scsi/bfa/bfa_intr_priv.h | 18
-rw-r--r-- drivers/scsi/bfa/bfa_ioc.c | 770
-rw-r--r-- drivers/scsi/bfa/bfa_ioc.h | 69
-rw-r--r-- drivers/scsi/bfa/bfa_ioc_cb.c | 274
-rw-r--r-- drivers/scsi/bfa/bfa_ioc_ct.c | 423
-rw-r--r-- drivers/scsi/bfa/bfa_iocfc.c | 32
-rw-r--r-- drivers/scsi/bfa/bfa_iocfc.h | 11
-rw-r--r-- drivers/scsi/bfa/bfa_ioim.c | 26
-rw-r--r-- drivers/scsi/bfa/bfa_itnim.c | 36
-rw-r--r-- drivers/scsi/bfa/bfa_log.c | 4
-rw-r--r-- drivers/scsi/bfa/bfa_lps.c | 134
-rw-r--r-- drivers/scsi/bfa/bfa_module.c | 4
-rw-r--r-- drivers/scsi/bfa/bfa_modules_priv.h | 2
-rw-r--r-- drivers/scsi/bfa/bfa_port_priv.h | 57
-rw-r--r-- drivers/scsi/bfa/bfa_priv.h | 2
-rw-r--r-- drivers/scsi/bfa/bfa_rport.c | 32
-rw-r--r-- drivers/scsi/bfa/bfa_trcmod_priv.h | 62
-rw-r--r-- drivers/scsi/bfa/bfa_tskim.c | 23
-rw-r--r-- drivers/scsi/bfa/bfa_uf.c | 2
-rw-r--r-- drivers/scsi/bfa/bfad.c | 215
-rw-r--r-- drivers/scsi/bfa/bfad_attr.c | 77
-rw-r--r-- drivers/scsi/bfa/bfad_attr.h | 9
-rw-r--r-- drivers/scsi/bfa/bfad_drv.h | 35
-rw-r--r-- drivers/scsi/bfa/bfad_fwimg.c | 8
-rw-r--r-- drivers/scsi/bfa/bfad_im.c | 56
-rw-r--r-- drivers/scsi/bfa/bfad_im.h | 5
-rw-r--r-- drivers/scsi/bfa/bfad_im_compat.h | 2
-rw-r--r-- drivers/scsi/bfa/bfad_intr.c | 14
-rw-r--r-- drivers/scsi/bfa/fabric.c | 75
-rw-r--r-- drivers/scsi/bfa/fcbuild.c | 190
-rw-r--r-- drivers/scsi/bfa/fcbuild.h | 12
-rw-r--r-- drivers/scsi/bfa/fcpim.c | 58
-rw-r--r-- drivers/scsi/bfa/fcs.h | 2
-rw-r--r-- drivers/scsi/bfa/fcs_fabric.h | 2
-rw-r--r-- drivers/scsi/bfa/fcs_fcpim.h | 5
-rw-r--r-- drivers/scsi/bfa/fcs_lport.h | 7
-rw-r--r-- drivers/scsi/bfa/fcs_port.h | 3
-rw-r--r-- drivers/scsi/bfa/fcs_rport.h | 3
-rw-r--r-- drivers/scsi/bfa/fcs_uf.h | 3
-rw-r--r-- drivers/scsi/bfa/fcs_vport.h | 8
-rw-r--r-- drivers/scsi/bfa/fdmi.c | 87
-rw-r--r-- drivers/scsi/bfa/include/aen/bfa_aen.h | 52
-rw-r--r-- drivers/scsi/bfa/include/bfa.h | 32
-rw-r--r-- drivers/scsi/bfa/include/bfa_svc.h | 107
-rw-r--r-- drivers/scsi/bfa/include/bfa_timer.h | 2
-rw-r--r-- drivers/scsi/bfa/include/bfi/bfi.h | 16
-rw-r--r-- drivers/scsi/bfa/include/bfi/bfi_cbreg.h | 16
-rw-r--r-- drivers/scsi/bfa/include/bfi/bfi_ctreg.h | 26
-rw-r--r-- drivers/scsi/bfa/include/bfi/bfi_ioc.h | 4
-rw-r--r-- drivers/scsi/bfa/include/bfi/bfi_lps.h | 12
-rw-r--r-- drivers/scsi/bfa/include/bfi/bfi_pport.h | 172
-rw-r--r-- drivers/scsi/bfa/include/bfi/bfi_rport.h | 8
-rw-r--r-- drivers/scsi/bfa/include/cna/bfa_cna_trcmod.h | 4
-rw-r--r-- drivers/scsi/bfa/include/cs/bfa_checksum.h | 6
-rw-r--r-- drivers/scsi/bfa/include/cs/bfa_log.h | 2
-rw-r--r-- drivers/scsi/bfa/include/cs/bfa_plog.h | 9
-rw-r--r-- drivers/scsi/bfa/include/cs/bfa_sm.h | 14
-rw-r--r-- drivers/scsi/bfa/include/cs/bfa_trc.h | 2
-rw-r--r-- drivers/scsi/bfa/include/defs/bfa_defs_aen.h | 10
-rw-r--r-- drivers/scsi/bfa/include/defs/bfa_defs_auth.h | 22
-rw-r--r-- drivers/scsi/bfa/include/defs/bfa_defs_cee.h | 14
-rw-r--r-- drivers/scsi/bfa/include/defs/bfa_defs_driver.h | 3
-rw-r--r-- drivers/scsi/bfa/include/defs/bfa_defs_ethport.h | 1
-rw-r--r-- drivers/scsi/bfa/include/defs/bfa_defs_fcport.h | 94
-rw-r--r-- drivers/scsi/bfa/include/defs/bfa_defs_im_common.h | 32
-rw-r--r-- drivers/scsi/bfa/include/defs/bfa_defs_im_team.h | 72
-rw-r--r-- drivers/scsi/bfa/include/defs/bfa_defs_ioc.h | 3
-rw-r--r-- drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h | 12
-rw-r--r-- drivers/scsi/bfa/include/defs/bfa_defs_lport.h | 4
-rw-r--r-- drivers/scsi/bfa/include/defs/bfa_defs_mfg.h | 111
-rw-r--r-- drivers/scsi/bfa/include/defs/bfa_defs_port.h | 19
-rw-r--r-- drivers/scsi/bfa/include/defs/bfa_defs_pport.h | 153
-rw-r--r-- drivers/scsi/bfa/include/defs/bfa_defs_status.h | 17
-rw-r--r-- drivers/scsi/bfa/include/defs/bfa_defs_tsensor.h | 2
-rw-r--r-- drivers/scsi/bfa/include/fcb/bfa_fcb_fcpim.h | 1
-rw-r--r-- drivers/scsi/bfa/include/fcs/bfa_fcs.h | 5
-rw-r--r-- drivers/scsi/bfa/include/fcs/bfa_fcs_fabric.h | 2
-rw-r--r-- drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h | 25
-rw-r--r-- drivers/scsi/bfa/include/log/bfa_log_hal.h | 6
-rw-r--r-- drivers/scsi/bfa/include/log/bfa_log_linux.h | 16
-rw-r--r-- drivers/scsi/bfa/include/protocol/ct.h | 14
-rw-r--r-- drivers/scsi/bfa/include/protocol/fc.h | 27
-rw-r--r-- drivers/scsi/bfa/include/protocol/pcifw.h | 75
-rw-r--r-- drivers/scsi/bfa/loop.c | 235
-rw-r--r-- drivers/scsi/bfa/lport_api.c | 20
-rw-r--r-- drivers/scsi/bfa/ms.c | 29
-rw-r--r-- drivers/scsi/bfa/ns.c | 41
-rw-r--r-- drivers/scsi/bfa/plog.c | 2
-rw-r--r-- drivers/scsi/bfa/rport.c | 92
-rw-r--r-- drivers/scsi/bfa/rport_api.c | 2
-rw-r--r-- drivers/scsi/bfa/rport_ftrs.c | 40
-rw-r--r-- drivers/scsi/bfa/scn.c | 10
-rw-r--r-- drivers/scsi/bfa/vfapi.c | 2
-rw-r--r-- drivers/scsi/bfa/vport.c | 102
-rw-r--r-- drivers/scsi/bnx2i/bnx2i.h | 4
-rw-r--r-- drivers/scsi/bnx2i/bnx2i_hwi.c | 52
-rw-r--r-- drivers/scsi/bnx2i/bnx2i_init.c | 29
-rw-r--r-- drivers/scsi/bnx2i/bnx2i_iscsi.c | 35
-rw-r--r-- drivers/scsi/bvme6000_scsi.c | 1
-rw-r--r-- drivers/scsi/ch.c | 1
-rw-r--r-- drivers/scsi/constants.c | 21
-rw-r--r-- drivers/scsi/cxgb3i/cxgb3i_ddp.c | 1
-rw-r--r-- drivers/scsi/cxgb3i/cxgb3i_ddp.h | 1
-rw-r--r-- drivers/scsi/cxgb3i/cxgb3i_iscsi.c | 22
-rw-r--r-- drivers/scsi/cxgb3i/cxgb3i_offload.c | 66
-rw-r--r-- drivers/scsi/cxgb3i/cxgb3i_pdu.c | 11
-rw-r--r-- drivers/scsi/dc395x.c | 3
-rw-r--r-- drivers/scsi/device_handler/scsi_dh.c | 29
-rw-r--r-- drivers/scsi/device_handler/scsi_dh_alua.c | 142
-rw-r--r-- drivers/scsi/device_handler/scsi_dh_emc.c | 14
-rw-r--r-- drivers/scsi/device_handler/scsi_dh_hp_sw.c | 95
-rw-r--r-- drivers/scsi/device_handler/scsi_dh_rdac.c | 118
-rw-r--r-- drivers/scsi/dmx3191d.c | 2
-rw-r--r-- drivers/scsi/dpt_i2o.c | 15
-rw-r--r-- drivers/scsi/eata.c | 3
-rw-r--r-- drivers/scsi/eata_pio.c | 1
-rw-r--r-- drivers/scsi/esp_scsi.c | 14
-rw-r--r-- drivers/scsi/fcoe/fcoe.c | 1522
-rw-r--r-- drivers/scsi/fcoe/fcoe.h | 82
-rw-r--r-- drivers/scsi/fcoe/libfcoe.c | 436
-rw-r--r-- drivers/scsi/fd_mcs.c | 1
-rw-r--r-- drivers/scsi/fdomain.c | 1
-rw-r--r-- drivers/scsi/fnic/fnic.h | 27
-rw-r--r-- drivers/scsi/fnic/fnic_fcs.c | 500
-rw-r--r-- drivers/scsi/fnic/fnic_isr.c | 18
-rw-r--r-- drivers/scsi/fnic/fnic_main.c | 109
-rw-r--r-- drivers/scsi/fnic/fnic_res.c | 5
-rw-r--r-- drivers/scsi/fnic/fnic_res.h | 54
-rw-r--r-- drivers/scsi/fnic/fnic_scsi.c | 97
-rw-r--r-- drivers/scsi/fnic/vnic_dev.c | 1
-rw-r--r-- drivers/scsi/fnic/vnic_devcmd.h | 2
-rw-r--r-- drivers/scsi/fnic/vnic_rq.c | 1
-rw-r--r-- drivers/scsi/fnic/vnic_scsi.h | 1
-rw-r--r-- drivers/scsi/fnic/vnic_wq.c | 1
-rw-r--r-- drivers/scsi/gdth.c | 431
-rw-r--r-- drivers/scsi/gdth.h | 952
-rw-r--r-- drivers/scsi/gdth_ioctl.h | 366
-rw-r--r-- drivers/scsi/gdth_proc.c | 43
-rw-r--r-- drivers/scsi/gdth_proc.h | 4
-rw-r--r-- drivers/scsi/gvp11.c | 1
-rw-r--r-- drivers/scsi/hosts.c | 18
-rw-r--r-- drivers/scsi/hpsa.c | 3854
-rw-r--r-- drivers/scsi/hpsa.h | 362
-rw-r--r-- drivers/scsi/hpsa_cmd.h | 378
-rw-r--r-- drivers/scsi/hptiop.c | 8
-rw-r--r-- drivers/scsi/ibmmca.c | 2
-rw-r--r-- drivers/scsi/ibmvscsi/ibmvfc.c | 363
-rw-r--r-- drivers/scsi/ibmvscsi/ibmvfc.h | 12
-rw-r--r-- drivers/scsi/ibmvscsi/ibmvscsi.c | 58
-rw-r--r-- drivers/scsi/ibmvscsi/ibmvscsi.h | 1
-rw-r--r-- drivers/scsi/ibmvscsi/ibmvstgt.c | 1
-rw-r--r-- drivers/scsi/ibmvscsi/iseries_vscsi.c | 6
-rw-r--r-- drivers/scsi/ibmvscsi/rpa_vscsi.c | 14
-rw-r--r-- drivers/scsi/imm.c | 1
-rw-r--r-- drivers/scsi/initio.c | 2
-rw-r--r-- drivers/scsi/ipr.c | 1771
-rw-r--r-- drivers/scsi/ipr.h | 467
-rw-r--r-- drivers/scsi/iscsi_tcp.c | 13
-rw-r--r-- drivers/scsi/jazz_esp.c | 1
-rw-r--r-- drivers/scsi/lasi700.c | 1
-rw-r--r-- drivers/scsi/libfc/Makefile | 4
-rw-r--r-- drivers/scsi/libfc/fc_disc.c | 86
-rw-r--r-- drivers/scsi/libfc/fc_elsct.c | 79
-rw-r--r-- drivers/scsi/libfc/fc_exch.c | 934
-rw-r--r-- drivers/scsi/libfc/fc_fcp.c | 1071
-rw-r--r-- drivers/scsi/libfc/fc_frame.c | 14
-rw-r--r-- drivers/scsi/libfc/fc_libfc.c | 134
-rw-r--r-- drivers/scsi/libfc/fc_libfc.h | 112
-rw-r--r-- drivers/scsi/libfc/fc_lport.c | 864
-rw-r--r-- drivers/scsi/libfc/fc_npiv.c | 161
-rw-r--r-- drivers/scsi/libfc/fc_rport.c | 413
-rw-r--r-- drivers/scsi/libiscsi.c | 487
-rw-r--r-- drivers/scsi/libiscsi_tcp.c | 39
-rw-r--r-- drivers/scsi/libsas/sas_ata.c | 5
-rw-r--r-- drivers/scsi/libsas/sas_discover.c | 1
-rw-r--r-- drivers/scsi/libsas/sas_expander.c | 1
-rw-r--r-- drivers/scsi/libsas/sas_host_smp.c | 1
-rw-r--r-- drivers/scsi/libsas/sas_init.c | 1
-rw-r--r-- drivers/scsi/libsas/sas_scsi_host.c | 11
-rw-r--r-- drivers/scsi/libsrp.c | 26
-rw-r--r-- drivers/scsi/lpfc/lpfc.h | 41
-rw-r--r-- drivers/scsi/lpfc/lpfc_attr.c | 390
-rw-r--r-- drivers/scsi/lpfc/lpfc_bsg.c | 2811
-rw-r--r-- drivers/scsi/lpfc/lpfc_bsg.h | 110
-rw-r--r-- drivers/scsi/lpfc/lpfc_crtn.h | 43
-rw-r--r-- drivers/scsi/lpfc/lpfc_ct.c | 67
-rw-r--r-- drivers/scsi/lpfc/lpfc_debugfs.c | 11
-rw-r--r-- drivers/scsi/lpfc/lpfc_disc.h | 4
-rw-r--r-- drivers/scsi/lpfc/lpfc_els.c | 354
-rw-r--r-- drivers/scsi/lpfc/lpfc_hbadisc.c | 1321
-rw-r--r-- drivers/scsi/lpfc/lpfc_hw.h | 57
-rw-r--r-- drivers/scsi/lpfc/lpfc_hw4.h | 399
-rw-r--r-- drivers/scsi/lpfc/lpfc_init.c | 1348
-rw-r--r-- drivers/scsi/lpfc/lpfc_logmsg.h | 1
-rw-r--r-- drivers/scsi/lpfc/lpfc_mbox.c | 147
-rw-r--r-- drivers/scsi/lpfc/lpfc_mem.c | 1
-rw-r--r-- drivers/scsi/lpfc/lpfc_nl.h | 22
-rw-r--r-- drivers/scsi/lpfc/lpfc_nportdisc.c | 93
-rw-r--r-- drivers/scsi/lpfc/lpfc_scsi.c | 490
-rw-r--r-- drivers/scsi/lpfc/lpfc_scsi.h | 1
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli.c | 1911
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli.h | 34
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli4.h | 146
-rw-r--r-- drivers/scsi/lpfc/lpfc_version.h | 5
-rw-r--r-- drivers/scsi/lpfc/lpfc_vport.c | 15
-rw-r--r-- drivers/scsi/mac_esp.c | 153
-rw-r--r-- drivers/scsi/megaraid.c | 1
-rw-r--r-- drivers/scsi/megaraid.h | 2
-rw-r--r-- drivers/scsi/megaraid/mbox_defs.h | 2
-rw-r--r-- drivers/scsi/megaraid/megaraid_mbox.c | 10
-rw-r--r-- drivers/scsi/megaraid/megaraid_mm.c | 1
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas.c | 1017
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas.h | 179
-rw-r--r-- drivers/scsi/mesh.c | 1
-rw-r--r-- drivers/scsi/mpt2sas/Kconfig | 1
-rw-r--r-- drivers/scsi/mpt2sas/mpi/mpi2.h | 17
-rw-r--r-- drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h | 297
-rw-r--r-- drivers/scsi/mpt2sas/mpi/mpi2_history.txt | 93
-rw-r--r-- drivers/scsi/mpt2sas/mpi/mpi2_init.h | 24
-rw-r--r-- drivers/scsi/mpt2sas/mpi/mpi2_ioc.h | 91
-rw-r--r-- drivers/scsi/mpt2sas/mpi/mpi2_raid.h | 14
-rw-r--r-- drivers/scsi/mpt2sas/mpi/mpi2_sas.h | 6
-rw-r--r-- drivers/scsi/mpt2sas/mpi/mpi2_tool.h | 16
-rw-r--r-- drivers/scsi/mpt2sas/mpt2sas_base.c | 171
-rw-r--r-- drivers/scsi/mpt2sas/mpt2sas_base.h | 47
-rw-r--r-- drivers/scsi/mpt2sas/mpt2sas_config.c | 52
-rw-r--r-- drivers/scsi/mpt2sas/mpt2sas_ctl.c | 210
-rw-r--r-- drivers/scsi/mpt2sas/mpt2sas_ctl.h | 4
-rw-r--r-- drivers/scsi/mpt2sas/mpt2sas_scsih.c | 1078
-rw-r--r-- drivers/scsi/mpt2sas/mpt2sas_transport.c | 283
-rw-r--r-- drivers/scsi/mvme16x_scsi.c | 1
-rw-r--r-- drivers/scsi/mvsas/mv_init.c | 1
-rw-r--r-- drivers/scsi/mvsas/mv_sas.h | 1
-rw-r--r-- drivers/scsi/ncr53c8xx.c | 3
-rw-r--r-- drivers/scsi/nsp32.c | 3
-rw-r--r-- drivers/scsi/osd/osd_initiator.c | 181
-rw-r--r-- drivers/scsi/osd/osd_uld.c | 261
-rw-r--r-- drivers/scsi/osst.c | 1
-rw-r--r-- drivers/scsi/pcmcia/aha152x_stub.c | 42
-rw-r--r-- drivers/scsi/pcmcia/fdomain_stub.c | 44
-rw-r--r-- drivers/scsi/pcmcia/nsp_cs.c | 9
-rw-r--r-- drivers/scsi/pcmcia/nsp_cs.h | 2
-rw-r--r-- drivers/scsi/pcmcia/qlogic_stub.c | 45
-rw-r--r-- drivers/scsi/pcmcia/sym53c500_cs.c | 42
-rw-r--r-- drivers/scsi/pm8001/Makefile | 12
-rw-r--r-- drivers/scsi/pm8001/pm8001_chips.h | 89
-rw-r--r-- drivers/scsi/pm8001/pm8001_ctl.c | 574
-rw-r--r-- drivers/scsi/pm8001/pm8001_ctl.h | 57
-rw-r--r-- drivers/scsi/pm8001/pm8001_defs.h | 112
-rw-r--r-- drivers/scsi/pm8001/pm8001_hwi.c | 4494
-rw-r--r-- drivers/scsi/pm8001/pm8001_hwi.h | 1029
-rw-r--r-- drivers/scsi/pm8001/pm8001_init.c | 901
-rw-r--r-- drivers/scsi/pm8001/pm8001_sas.c | 1153
-rw-r--r-- drivers/scsi/pm8001/pm8001_sas.h | 497
-rw-r--r-- drivers/scsi/pmcraid.c | 58
-rw-r--r-- drivers/scsi/pmcraid.h | 13
-rw-r--r-- drivers/scsi/ppa.c | 1
-rw-r--r-- drivers/scsi/ps3rom.c | 1
-rw-r--r-- drivers/scsi/qla1280.c | 158
-rw-r--r-- drivers/scsi/qla2xxx/qla_attr.c | 773
-rw-r--r-- drivers/scsi/qla2xxx/qla_dbg.c | 78
-rw-r--r-- drivers/scsi/qla2xxx/qla_dbg.h | 9
-rw-r--r-- drivers/scsi/qla2xxx/qla_def.h | 174
-rw-r--r-- drivers/scsi/qla2xxx/qla_fw.h | 51
-rw-r--r-- drivers/scsi/qla2xxx/qla_gbl.h | 14
-rw-r--r-- drivers/scsi/qla2xxx/qla_init.c | 91
-rw-r--r-- drivers/scsi/qla2xxx/qla_iocb.c | 120
-rw-r--r-- drivers/scsi/qla2xxx/qla_isr.c | 444
-rw-r--r-- drivers/scsi/qla2xxx/qla_mbx.c | 274
-rw-r--r-- drivers/scsi/qla2xxx/qla_mid.c | 13
-rw-r--r-- drivers/scsi/qla2xxx/qla_os.c | 350
-rw-r--r-- drivers/scsi/qla2xxx/qla_sup.c | 4
-rw-r--r-- drivers/scsi/qla2xxx/qla_version.h | 6
-rw-r--r-- drivers/scsi/qla4xxx/ql4_init.c | 14
-rw-r--r-- drivers/scsi/qla4xxx/ql4_mbx.c | 2
-rw-r--r-- drivers/scsi/qla4xxx/ql4_os.c | 1
-rw-r--r-- drivers/scsi/qlogicpti.c | 4
-rw-r--r-- drivers/scsi/qlogicpti.h | 2
-rw-r--r-- drivers/scsi/raid_class.c | 3
-rw-r--r-- drivers/scsi/scsi.c | 52
-rw-r--r-- drivers/scsi/scsi_debug.c | 355
-rw-r--r-- drivers/scsi/scsi_devinfo.c | 14
-rw-r--r-- drivers/scsi/scsi_error.c | 81
-rw-r--r-- drivers/scsi/scsi_ioctl.c | 3
-rw-r--r-- drivers/scsi/scsi_lib.c | 30
-rw-r--r-- drivers/scsi/scsi_lib_dma.c | 4
-rw-r--r-- drivers/scsi/scsi_netlink.c | 3
-rw-r--r-- drivers/scsi/scsi_proc.c | 2
-rw-r--r-- drivers/scsi/scsi_sas_internal.h | 2
-rw-r--r-- drivers/scsi/scsi_scan.c | 15
-rw-r--r-- drivers/scsi/scsi_sysctl.c | 11
-rw-r--r-- drivers/scsi/scsi_sysfs.c | 67
-rw-r--r-- drivers/scsi/scsi_tgt_if.c | 1
-rw-r--r-- drivers/scsi/scsi_tgt_lib.c | 1
-rw-r--r-- drivers/scsi/scsi_transport_fc.c | 176
-rw-r--r-- drivers/scsi/scsi_transport_iscsi.c | 14
-rw-r--r-- drivers/scsi/scsi_transport_sas.c | 109
-rw-r--r-- drivers/scsi/scsi_transport_spi.c | 1
-rw-r--r-- drivers/scsi/scsicam.c | 1
-rw-r--r-- drivers/scsi/sd.c | 167
-rw-r--r-- drivers/scsi/sd.h | 2
-rw-r--r-- drivers/scsi/ses.c | 15
-rw-r--r-- drivers/scsi/sg.c | 7
-rw-r--r-- drivers/scsi/sgiwd93.c | 2
-rw-r--r-- drivers/scsi/sim710.c | 1
-rw-r--r-- drivers/scsi/sni_53c710.c | 3
-rw-r--r-- drivers/scsi/sr.c | 1
-rw-r--r-- drivers/scsi/sr_ioctl.c | 1
-rw-r--r-- drivers/scsi/sr_vendor.c | 1
-rw-r--r-- drivers/scsi/st.c | 30
-rw-r--r-- drivers/scsi/st.h | 1
-rw-r--r-- drivers/scsi/stex.c | 288
-rw-r--r-- drivers/scsi/sun3_NCR5380.c | 1
-rw-r--r-- drivers/scsi/sun3x_esp.c | 1
-rw-r--r-- drivers/scsi/sun_esp.c | 1
-rw-r--r-- drivers/scsi/sym53c8xx_2/sym_glue.c | 6
-rw-r--r-- drivers/scsi/sym53c8xx_2/sym_hipd.c | 2
-rw-r--r-- drivers/scsi/sym53c8xx_2/sym_hipd.h | 2
-rw-r--r-- drivers/scsi/tmscsim.c | 1
-rw-r--r-- drivers/scsi/u14-34f.c | 3
-rw-r--r-- drivers/scsi/vmw_pvscsi.c | 1409
-rw-r--r-- drivers/scsi/vmw_pvscsi.h | 397
-rw-r--r-- drivers/scsi/wd7000.c | 5
-rw-r--r-- drivers/scsi/zorro7xx.c | 1
394 files changed, 46243 insertions, 11664 deletions
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 36c21b19e5d7..e9788f55ab13 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -76,6 +76,7 @@
                 Fix bug in twa_get_param() on 4GB+.
                 Use pci_resource_len() for ioremap().
    2.26.02.012 - Add power management support.
+   2.26.02.013 - Fix bug in twa_load_sgl().
 */
 
 #include <linux/module.h>
@@ -90,6 +91,7 @@
 #include <linux/time.h>
 #include <linux/mutex.h>
 #include <linux/smp_lock.h>
+#include <linux/slab.h>
 #include <asm/io.h>
 #include <asm/irq.h>
 #include <asm/uaccess.h>
@@ -100,7 +102,7 @@
 #include "3w-9xxx.h"
 
 /* Globals */
-#define TW_DRIVER_VERSION "2.26.02.012"
+#define TW_DRIVER_VERSION "2.26.02.013"
 static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
 static unsigned int twa_device_extension_count;
 static int twa_major = -1;
@@ -186,8 +188,12 @@ static ssize_t twa_show_stats(struct device *dev,
 } /* End twa_show_stats() */
 
 /* This function will set a devices queue depth */
-static int twa_change_queue_depth(struct scsi_device *sdev, int queue_depth)
+static int twa_change_queue_depth(struct scsi_device *sdev, int queue_depth,
+				  int reason)
 {
+	if (reason != SCSI_QDEPTH_DEFAULT)
+		return -EOPNOTSUPP;
+
 	if (queue_depth > TW_Q_LENGTH-2)
 		queue_depth = TW_Q_LENGTH-2;
 	scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
@@ -732,7 +738,7 @@ static int twa_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int
 		break;
 	case TW_IOCTL_GET_COMPATIBILITY_INFO:
 		tw_ioctl->driver_command.status = 0;
-		/* Copy compatiblity struct into ioctl data buffer */
+		/* Copy compatibility struct into ioctl data buffer */
 		tw_compat_info = (TW_Compatibility_Info *)tw_ioctl->data_buffer;
 		memcpy(tw_compat_info, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info));
 		break;
@@ -1378,10 +1384,12 @@ static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_comm
 		newcommand = &full_command_packet->command.newcommand;
 		newcommand->request_id__lunl =
 			cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id));
-		newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
-		newcommand->sg_list[0].length = cpu_to_le32(length);
+		if (length) {
+			newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
+			newcommand->sg_list[0].length = cpu_to_le32(length);
+		}
 		newcommand->sgl_entries__lunh =
-			cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), 1));
+			cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0));
 	} else {
 		oldcommand = &full_command_packet->command.oldcommand;
 		oldcommand->request_id = request_id;
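
The two functional hunks above track a midlayer API change and close a corner case: ->change_queue_depth() grew a reason argument that handlers must check, and twa_load_sgl() must report zero SG entries when an ioctl carries no data. A minimal sketch of the resulting handler shape, using the same symbols the patch itself relies on (SCSI_QDEPTH_DEFAULT, TW_Q_LENGTH, scsi_adjust_queue_depth from that era's SCSI midlayer); the function name is illustrative:

	static int example_change_queue_depth(struct scsi_device *sdev,
					      int queue_depth, int reason)
	{
		/* Only plain resize requests are supported; other reason
		   codes added by the midlayer are declined. */
		if (reason != SCSI_QDEPTH_DEFAULT)
			return -EOPNOTSUPP;

		/* Clamp to the controller queue, as this driver does */
		if (queue_depth > TW_Q_LENGTH - 2)
			queue_depth = TW_Q_LENGTH - 2;
		scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
		return queue_depth;
	}
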
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
new file mode 100644
index 000000000000..54c5ffb1eaa1
--- /dev/null
+++ b/drivers/scsi/3w-sas.c
@@ -0,0 +1,1925 @@
+/*
+   3w-sas.c -- LSI 3ware SAS/SATA-RAID Controller device driver for Linux.
+
+   Written By: Adam Radford <linuxraid@lsi.com>
+
+   Copyright (C) 2009 LSI Corporation.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+   GNU General Public License for more details.
+
+   NO WARRANTY
+   THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+   CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+   LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+   solely responsible for determining the appropriateness of using and
+   distributing the Program and assumes all risks associated with its
+   exercise of rights under this Agreement, including but not limited to
+   the risks and costs of program errors, damage to or loss of data,
+   programs or equipment, and unavailability or interruption of operations.
+
+   DISCLAIMER OF LIABILITY
+   NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+   DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+   DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+   ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+   TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+   USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+   HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+   Controllers supported by this driver:
+
+   LSI 3ware 9750 6Gb/s SAS/SATA-RAID
+
+   Bugs/Comments/Suggestions should be mailed to:
+   linuxraid@lsi.com
+
+   For more information, goto:
+   http://www.lsi.com
+
+   History
+   -------
+   3.26.02.000 - Initial driver release.
+*/
+
+#include <linux/module.h>
+#include <linux/reboot.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/time.h>
+#include <linux/mutex.h>
+#include <linux/smp_lock.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_cmnd.h>
+#include "3w-sas.h"
+
+/* Globals */
+#define TW_DRIVER_VERSION "3.26.02.000"
+static TW_Device_Extension *twl_device_extension_list[TW_MAX_SLOT];
+static unsigned int twl_device_extension_count;
+static int twl_major = -1;
+extern struct timezone sys_tz;
+
+/* Module parameters */
+MODULE_AUTHOR ("LSI");
+MODULE_DESCRIPTION ("LSI 3ware SAS/SATA-RAID Linux Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(TW_DRIVER_VERSION);
+
+static int use_msi;
+module_param(use_msi, int, S_IRUGO);
+MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts. Default: 0");
+
+/* Function prototypes */
+static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_reset);
+
+/* Functions */
+
+/* This function returns AENs through sysfs */
+static ssize_t twl_sysfs_aen_read(struct kobject *kobj,
+				  struct bin_attribute *bin_attr,
+				  char *outbuf, loff_t offset, size_t count)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct Scsi_Host *shost = class_to_shost(dev);
+	TW_Device_Extension *tw_dev = (TW_Device_Extension *)shost->hostdata;
+	unsigned long flags = 0;
+	ssize_t ret;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	spin_lock_irqsave(tw_dev->host->host_lock, flags);
+	ret = memory_read_from_buffer(outbuf, count, &offset, tw_dev->event_queue[0], sizeof(TW_Event) * TW_Q_LENGTH);
+	spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
+
+	return ret;
+} /* End twl_sysfs_aen_read() */
+
+/* aen_read sysfs attribute initializer */
+static struct bin_attribute twl_sysfs_aen_read_attr = {
+	.attr = {
+		.name = "3ware_aen_read",
+		.mode = S_IRUSR,
+	},
+	.size = 0,
+	.read = twl_sysfs_aen_read
+};
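
twl_sysfs_aen_read() above exposes the raw TW_Event ring to privileged userspace as a binary sysfs attribute. A hypothetical userspace sketch of consuming it; the path is an assumption (hostN depends on probe order, and the attribute location depends on where the driver registers it, which is outside this excerpt):

	#include <stdio.h>

	int main(void)
	{
		unsigned char buf[4096];
		size_t n;
		/* assumed path; host0 is illustrative only */
		FILE *f = fopen("/sys/class/scsi_host/host0/3ware_aen_read", "rb");

		if (!f)
			return 1;
		n = fread(buf, 1, sizeof(buf), f);	/* raw TW_Event records */
		printf("read %zu bytes of AEN data\n", n);
		fclose(f);
		return 0;
	}
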
+
+/* This function returns driver compatibility info through sysfs */
+static ssize_t twl_sysfs_compat_info(struct kobject *kobj,
+				     struct bin_attribute *bin_attr,
+				     char *outbuf, loff_t offset, size_t count)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct Scsi_Host *shost = class_to_shost(dev);
+	TW_Device_Extension *tw_dev = (TW_Device_Extension *)shost->hostdata;
+	unsigned long flags = 0;
+	ssize_t ret;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	spin_lock_irqsave(tw_dev->host->host_lock, flags);
+	ret = memory_read_from_buffer(outbuf, count, &offset, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info));
+	spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
+
+	return ret;
+} /* End twl_sysfs_compat_info() */
+
+/* compat_info sysfs attribute initializer */
+static struct bin_attribute twl_sysfs_compat_info_attr = {
+	.attr = {
+		.name = "3ware_compat_info",
+		.mode = S_IRUSR,
+	},
+	.size = 0,
+	.read = twl_sysfs_compat_info
+};
+
+/* Show some statistics about the card */
+static ssize_t twl_show_stats(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *host = class_to_shost(dev);
+	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
+	unsigned long flags = 0;
+	ssize_t len;
+
+	spin_lock_irqsave(tw_dev->host->host_lock, flags);
+	len = snprintf(buf, PAGE_SIZE, "3w-sas Driver version: %s\n"
+		       "Current commands posted: %4d\n"
+		       "Max commands posted: %4d\n"
+		       "Last sgl length: %4d\n"
+		       "Max sgl length: %4d\n"
+		       "Last sector count: %4d\n"
+		       "Max sector count: %4d\n"
+		       "SCSI Host Resets: %4d\n"
+		       "AEN's: %4d\n",
+		       TW_DRIVER_VERSION,
+		       tw_dev->posted_request_count,
+		       tw_dev->max_posted_request_count,
+		       tw_dev->sgl_entries,
+		       tw_dev->max_sgl_entries,
+		       tw_dev->sector_count,
+		       tw_dev->max_sector_count,
+		       tw_dev->num_resets,
+		       tw_dev->aen_count);
+	spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
+	return len;
+} /* End twl_show_stats() */
+
+/* This function will set a devices queue depth */
+static int twl_change_queue_depth(struct scsi_device *sdev, int queue_depth,
+				  int reason)
+{
+	if (reason != SCSI_QDEPTH_DEFAULT)
+		return -EOPNOTSUPP;
+
+	if (queue_depth > TW_Q_LENGTH-2)
+		queue_depth = TW_Q_LENGTH-2;
+	scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
+	return queue_depth;
+} /* End twl_change_queue_depth() */
+
+/* stats sysfs attribute initializer */
+static struct device_attribute twl_host_stats_attr = {
+	.attr = {
+		.name = "3ware_stats",
+		.mode = S_IRUGO,
+	},
+	.show = twl_show_stats
+};
+
+/* Host attributes initializer */
+static struct device_attribute *twl_host_attrs[] = {
+	&twl_host_stats_attr,
+	NULL,
+};
+
+/* This function will look up an AEN severity string */
+static char *twl_aen_severity_lookup(unsigned char severity_code)
+{
+	char *retval = NULL;
+
+	if ((severity_code < (unsigned char) TW_AEN_SEVERITY_ERROR) ||
+	    (severity_code > (unsigned char) TW_AEN_SEVERITY_DEBUG))
+		goto out;
+
+	retval = twl_aen_severity_table[severity_code];
+out:
+	return retval;
+} /* End twl_aen_severity_lookup() */
+
+/* This function will queue an event */
+static void twl_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
+{
+	u32 local_time;
+	struct timeval time;
+	TW_Event *event;
+	unsigned short aen;
+	char host[16];
+	char *error_str;
+
+	tw_dev->aen_count++;
+
+	/* Fill out event info */
+	event = tw_dev->event_queue[tw_dev->error_index];
+
+	host[0] = '\0';
+	if (tw_dev->host)
+		sprintf(host, " scsi%d:", tw_dev->host->host_no);
+
+	aen = le16_to_cpu(header->status_block.error);
+	memset(event, 0, sizeof(TW_Event));
+
+	event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
+	do_gettimeofday(&time);
+	local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60));
+	event->time_stamp_sec = local_time;
+	event->aen_code = aen;
+	event->retrieved = TW_AEN_NOT_RETRIEVED;
+	event->sequence_id = tw_dev->error_sequence_id;
+	tw_dev->error_sequence_id++;
+
+	/* Check for embedded error string */
+	error_str = &(header->err_specific_desc[strlen(header->err_specific_desc)+1]);
+
+	header->err_specific_desc[sizeof(header->err_specific_desc) - 1] = '\0';
+	event->parameter_len = strlen(header->err_specific_desc);
+	memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len + 1 + strlen(error_str));
+	if (event->severity != TW_AEN_SEVERITY_DEBUG)
+		printk(KERN_WARNING "3w-sas:%s AEN: %s (0x%02X:0x%04X): %s:%s.\n",
+		       host,
+		       twl_aen_severity_lookup(TW_SEV_OUT(header->status_block.severity__reserved)),
+		       TW_MESSAGE_SOURCE_CONTROLLER_EVENT, aen, error_str,
+		       header->err_specific_desc);
+	else
+		tw_dev->aen_count--;
+
+	tw_dev->error_index = (tw_dev->error_index + 1 ) % TW_Q_LENGTH;
+} /* End twl_aen_queue_event() */
+
+/* This function will attempt to post a command packet to the board */
+static int twl_post_command_packet(TW_Device_Extension *tw_dev, int request_id)
+{
+	dma_addr_t command_que_value;
+
+	command_que_value = tw_dev->command_packet_phys[request_id];
+	command_que_value += TW_COMMAND_OFFSET;
+
+	/* First write upper 4 bytes */
+	writel((u32)((u64)command_que_value >> 32), TWL_HIBQPH_REG_ADDR(tw_dev));
+	/* Then the lower 4 bytes */
+	writel((u32)(command_que_value | TWL_PULL_MODE), TWL_HIBQPL_REG_ADDR(tw_dev));
+
+	tw_dev->state[request_id] = TW_S_POSTED;
+	tw_dev->posted_request_count++;
+	if (tw_dev->posted_request_count > tw_dev->max_posted_request_count)
+		tw_dev->max_posted_request_count = tw_dev->posted_request_count;
+
+	return 0;
+} /* End twl_post_command_packet() */
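
twl_post_command_packet() above pushes a 64-bit bus address through a pair of 32-bit registers, writing the high word first and the low word (with TWL_PULL_MODE OR'd in) last. The same pattern in isolation, a sketch only: REG_HI/REG_LO are hypothetical offsets, not this controller's real layout, and the assumption that the low-word write is what the hardware latches is mine:

	#define REG_HI	0x04	/* hypothetical high-word register */
	#define REG_LO	0x00	/* hypothetical low-word register */

	static void post_addr64(void __iomem *base, dma_addr_t addr, u32 mode)
	{
		/* Stage the upper half first... */
		writel(upper_32_bits(addr), base + REG_HI);
		/* ...then the lower half plus mode bits last */
		writel(lower_32_bits(addr) | mode, base + REG_LO);
	}
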
+
+/* This function will perform a pci-dma mapping for a scatter gather list */
+static int twl_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id)
+{
+	int use_sg;
+	struct scsi_cmnd *cmd = tw_dev->srb[request_id];
+
+	use_sg = scsi_dma_map(cmd);
+	if (!use_sg)
+		return 0;
+	else if (use_sg < 0) {
+		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Failed to map scatter gather list");
+		return 0;
+	}
+
+	cmd->SCp.phase = TW_PHASE_SGLIST;
+	cmd->SCp.have_data_in = use_sg;
+
+	return use_sg;
+} /* End twl_map_scsi_sg_data() */
+
+/* This function hands scsi cdb's to the firmware */
+static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry_ISO *sglistarg)
+{
+	TW_Command_Full *full_command_packet;
+	TW_Command_Apache *command_packet;
+	int i, sg_count;
+	struct scsi_cmnd *srb = NULL;
+	struct scatterlist *sglist = NULL, *sg;
+	int retval = 1;
+
+	if (tw_dev->srb[request_id]) {
+		srb = tw_dev->srb[request_id];
+		if (scsi_sglist(srb))
+			sglist = scsi_sglist(srb);
+	}
+
+	/* Initialize command packet */
+	full_command_packet = tw_dev->command_packet_virt[request_id];
+	full_command_packet->header.header_desc.size_header = 128;
+	full_command_packet->header.status_block.error = 0;
+	full_command_packet->header.status_block.severity__reserved = 0;
+
+	command_packet = &full_command_packet->command.newcommand;
+	command_packet->status = 0;
+	command_packet->opcode__reserved = TW_OPRES_IN(0, TW_OP_EXECUTE_SCSI);
+
+	/* We forced 16 byte cdb use earlier */
+	if (!cdb)
+		memcpy(command_packet->cdb, srb->cmnd, TW_MAX_CDB_LEN);
+	else
+		memcpy(command_packet->cdb, cdb, TW_MAX_CDB_LEN);
+
+	if (srb) {
+		command_packet->unit = srb->device->id;
+		command_packet->request_id__lunl =
+			cpu_to_le16(TW_REQ_LUN_IN(srb->device->lun, request_id));
+	} else {
+		command_packet->request_id__lunl =
+			cpu_to_le16(TW_REQ_LUN_IN(0, request_id));
+		command_packet->unit = 0;
+	}
+
+	command_packet->sgl_offset = 16;
+
+	if (!sglistarg) {
+		/* Map sglist from scsi layer to cmd packet */
+		if (scsi_sg_count(srb)) {
+			sg_count = twl_map_scsi_sg_data(tw_dev, request_id);
+			if (sg_count == 0)
+				goto out;
+
+			scsi_for_each_sg(srb, sg, sg_count, i) {
+				command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg));
+				command_packet->sg_list[i].length = TW_CPU_TO_SGL(sg_dma_len(sg));
+			}
+			command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id])));
+		}
+	} else {
+		/* Internal cdb post */
+		for (i = 0; i < use_sg; i++) {
+			command_packet->sg_list[i].address = TW_CPU_TO_SGL(sglistarg[i].address);
+			command_packet->sg_list[i].length = TW_CPU_TO_SGL(sglistarg[i].length);
+		}
+		command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN(0, use_sg));
+	}
+
+	/* Update some stats */
+	if (srb) {
+		tw_dev->sector_count = scsi_bufflen(srb) / 512;
+		if (tw_dev->sector_count > tw_dev->max_sector_count)
+			tw_dev->max_sector_count = tw_dev->sector_count;
+		tw_dev->sgl_entries = scsi_sg_count(srb);
+		if (tw_dev->sgl_entries > tw_dev->max_sgl_entries)
+			tw_dev->max_sgl_entries = tw_dev->sgl_entries;
+	}
+
+	/* Now post the command to the board */
+	retval = twl_post_command_packet(tw_dev, request_id);
+
+out:
+	return retval;
+} /* End twl_scsiop_execute_scsi() */
+
+/* This function will read the aen queue from the isr */
+static int twl_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
+{
+	char cdb[TW_MAX_CDB_LEN];
+	TW_SG_Entry_ISO sglist[1];
+	TW_Command_Full *full_command_packet;
+	int retval = 1;
+
+	full_command_packet = tw_dev->command_packet_virt[request_id];
+	memset(full_command_packet, 0, sizeof(TW_Command_Full));
+
+	/* Initialize cdb */
+	memset(&cdb, 0, TW_MAX_CDB_LEN);
+	cdb[0] = REQUEST_SENSE; /* opcode */
+	cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
+
+	/* Initialize sglist */
+	memset(&sglist, 0, sizeof(TW_SG_Entry_ISO));
+	sglist[0].length = TW_SECTOR_SIZE;
+	sglist[0].address = tw_dev->generic_buffer_phys[request_id];
+
+	/* Mark internal command */
+	tw_dev->srb[request_id] = NULL;
+
+	/* Now post the command packet */
+	if (twl_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
+		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2, "Post failed while reading AEN queue");
+		goto out;
+	}
+	retval = 0;
+out:
+	return retval;
+} /* End twl_aen_read_queue() */
+
+/* This function will sync firmware time with the host time */
+static void twl_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
+{
+	u32 schedulertime;
+	struct timeval utc;
+	TW_Command_Full *full_command_packet;
+	TW_Command *command_packet;
+	TW_Param_Apache *param;
+	u32 local_time;
+
+	/* Fill out the command packet */
+	full_command_packet = tw_dev->command_packet_virt[request_id];
+	memset(full_command_packet, 0, sizeof(TW_Command_Full));
+	command_packet = &full_command_packet->command.oldcommand;
+	command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM);
+	command_packet->request_id = request_id;
+	command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
+	command_packet->byte8_offset.param.sgl[0].length = TW_CPU_TO_SGL(TW_SECTOR_SIZE);
+	command_packet->size = TW_COMMAND_SIZE;
+	command_packet->byte6_offset.parameter_count = cpu_to_le16(1);
+
+	/* Setup the param */
+	param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
+	memset(param, 0, TW_SECTOR_SIZE);
+	param->table_id = cpu_to_le16(TW_TIMEKEEP_TABLE | 0x8000); /* Controller time keep table */
+	param->parameter_id = cpu_to_le16(0x3); /* SchedulerTime */
+	param->parameter_size_bytes = cpu_to_le16(4);
+
+	/* Convert system time in UTC to local time seconds since last
+	   Sunday 12:00AM */
+	do_gettimeofday(&utc);
+	local_time = (u32)(utc.tv_sec - (sys_tz.tz_minuteswest * 60));
+	schedulertime = local_time - (3 * 86400);
+	schedulertime = cpu_to_le32(schedulertime % 604800);
+
+	memcpy(param->data, &schedulertime, sizeof(u32));
+
+	/* Mark internal command */
+	tw_dev->srb[request_id] = NULL;
+
+	/* Now post the command */
+	twl_post_command_packet(tw_dev, request_id);
+} /* End twl_aen_sync_time() */
+
+/* This function will assign an available request id */
+static void twl_get_request_id(TW_Device_Extension *tw_dev, int *request_id)
+{
+	*request_id = tw_dev->free_queue[tw_dev->free_head];
+	tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH;
+	tw_dev->state[*request_id] = TW_S_STARTED;
+} /* End twl_get_request_id() */
+
+/* This function will free a request id */
+static void twl_free_request_id(TW_Device_Extension *tw_dev, int request_id)
+{
+	tw_dev->free_queue[tw_dev->free_tail] = request_id;
+	tw_dev->state[request_id] = TW_S_FINISHED;
+	tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH;
+} /* End twl_free_request_id() */
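
twl_get_request_id()/twl_free_request_id() above implement a fixed-size circular free list: ids circulate between free_head and free_tail, and because only TW_Q_LENGTH ids exist in total, the head can never lap the tail. The same structure in isolation, a hypothetical stand-alone sketch (names and the size N are illustrative, not driver code):

	#define N 256				/* pool size, illustrative */

	static int free_queue[N];
	static int free_head, free_tail;

	static void id_pool_init(void)
	{
		int i;

		for (i = 0; i < N; i++)		/* every id starts free */
			free_queue[i] = i;
		free_head = free_tail = 0;
	}

	static int id_get(void)			/* O(1) allocate */
	{
		int id = free_queue[free_head];

		free_head = (free_head + 1) % N;
		return id;
	}

	static void id_put(int id)		/* O(1) release */
	{
		free_queue[free_tail] = id;
		free_tail = (free_tail + 1) % N;
	}
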
+
+/* This function will complete an aen request from the isr */
+static int twl_aen_complete(TW_Device_Extension *tw_dev, int request_id)
+{
+	TW_Command_Full *full_command_packet;
+	TW_Command *command_packet;
+	TW_Command_Apache_Header *header;
+	unsigned short aen;
+	int retval = 1;
+
+	header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
+	tw_dev->posted_request_count--;
+	aen = le16_to_cpu(header->status_block.error);
+	full_command_packet = tw_dev->command_packet_virt[request_id];
+	command_packet = &full_command_packet->command.oldcommand;
+
+	/* First check for internal completion of set param for time sync */
+	if (TW_OP_OUT(command_packet->opcode__sgloffset) == TW_OP_SET_PARAM) {
+		/* Keep reading the queue in case there are more aen's */
+		if (twl_aen_read_queue(tw_dev, request_id))
+			goto out2;
+		else {
+			retval = 0;
+			goto out;
+		}
+	}
+
+	switch (aen) {
+	case TW_AEN_QUEUE_EMPTY:
+		/* Quit reading the queue if this is the last one */
+		break;
+	case TW_AEN_SYNC_TIME_WITH_HOST:
+		twl_aen_sync_time(tw_dev, request_id);
+		retval = 0;
+		goto out;
+	default:
+		twl_aen_queue_event(tw_dev, header);
+
+		/* If there are more aen's, keep reading the queue */
+		if (twl_aen_read_queue(tw_dev, request_id))
+			goto out2;
+		else {
+			retval = 0;
+			goto out;
+		}
+	}
+	retval = 0;
+out2:
+	tw_dev->state[request_id] = TW_S_COMPLETED;
+	twl_free_request_id(tw_dev, request_id);
+	clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
+out:
+	return retval;
+} /* End twl_aen_complete() */
+
+/* This function will poll for a response */
+static int twl_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
+{
+	unsigned long before;
+	dma_addr_t mfa;
+	u32 regh, regl;
+	u32 response;
+	int retval = 1;
+	int found = 0;
+
+	before = jiffies;
+
+	while (!found) {
+		if (sizeof(dma_addr_t) > 4) {
+			regh = readl(TWL_HOBQPH_REG_ADDR(tw_dev));
+			regl = readl(TWL_HOBQPL_REG_ADDR(tw_dev));
+			mfa = ((u64)regh << 32) | regl;
+		} else
+			mfa = readl(TWL_HOBQPL_REG_ADDR(tw_dev));
+
+		response = (u32)mfa;
+
+		if (TW_RESID_OUT(response) == request_id)
+			found = 1;
+
+		if (time_after(jiffies, before + HZ * seconds))
+			goto out;
+
+		msleep(50);
+	}
+	retval = 0;
+out:
+	return retval;
+} /* End twl_poll_response() */
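
twl_poll_response() above shows the standard jiffies polling idiom: time_after() keeps the deadline comparison correct across counter wraparound, and msleep(50) keeps the register polling cheap. A stand-alone sketch of the same idiom; controller_done() is a hypothetical stand-in for the register read:

	static bool controller_done(void);	/* hypothetical status check */

	static int poll_with_timeout(int seconds)
	{
		unsigned long deadline = jiffies + (unsigned long)seconds * HZ;

		while (!controller_done()) {
			if (time_after(jiffies, deadline))
				return -ETIMEDOUT;	/* wrap-safe comparison */
			msleep(50);			/* cheap, sleeps the task */
		}
		return 0;
	}
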
+
+/* This function will drain the aen queue */
+static int twl_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
+{
+	int request_id = 0;
+	char cdb[TW_MAX_CDB_LEN];
+	TW_SG_Entry_ISO sglist[1];
+	int finished = 0, count = 0;
+	TW_Command_Full *full_command_packet;
+	TW_Command_Apache_Header *header;
+	unsigned short aen;
+	int first_reset = 0, queue = 0, retval = 1;
+
+	if (no_check_reset)
+		first_reset = 0;
+	else
+		first_reset = 1;
+
+	full_command_packet = tw_dev->command_packet_virt[request_id];
+	memset(full_command_packet, 0, sizeof(TW_Command_Full));
+
+	/* Initialize cdb */
+	memset(&cdb, 0, TW_MAX_CDB_LEN);
+	cdb[0] = REQUEST_SENSE; /* opcode */
+	cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
+
+	/* Initialize sglist */
+	memset(&sglist, 0, sizeof(TW_SG_Entry_ISO));
+	sglist[0].length = TW_SECTOR_SIZE;
+	sglist[0].address = tw_dev->generic_buffer_phys[request_id];
+
+	/* Mark internal command */
+	tw_dev->srb[request_id] = NULL;
+
+	do {
+		/* Send command to the board */
+		if (twl_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
+			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x3, "Error posting request sense");
+			goto out;
+		}
+
+		/* Now poll for completion */
+		if (twl_poll_response(tw_dev, request_id, 30)) {
+			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x4, "No valid response while draining AEN queue");
+			tw_dev->posted_request_count--;
+			goto out;
+		}
+
+		tw_dev->posted_request_count--;
+		header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
+		aen = le16_to_cpu(header->status_block.error);
+		queue = 0;
+		count++;
+
+		switch (aen) {
+		case TW_AEN_QUEUE_EMPTY:
+			if (first_reset != 1)
+				goto out;
+			else
+				finished = 1;
+			break;
+		case TW_AEN_SOFT_RESET:
+			if (first_reset == 0)
+				first_reset = 1;
+			else
+				queue = 1;
+			break;
+		case TW_AEN_SYNC_TIME_WITH_HOST:
+			break;
+		default:
+			queue = 1;
+		}
+
+		/* Now queue an event info */
+		if (queue)
+			twl_aen_queue_event(tw_dev, header);
+	} while ((finished == 0) && (count < TW_MAX_AEN_DRAIN));
+
+	if (count == TW_MAX_AEN_DRAIN)
+		goto out;
+
+	retval = 0;
+out:
+	tw_dev->state[request_id] = TW_S_INITIAL;
+	return retval;
+} /* End twl_aen_drain_queue() */
+
+/* This function will allocate memory and check if it is correctly aligned */
+static int twl_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
+{
+	int i;
+	dma_addr_t dma_handle;
+	unsigned long *cpu_addr;
+	int retval = 1;
+
+	cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
+	if (!cpu_addr) {
+		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
+		goto out;
+	}
+
+	memset(cpu_addr, 0, size*TW_Q_LENGTH);
+
+	for (i = 0; i < TW_Q_LENGTH; i++) {
+		switch(which) {
+		case 0:
+			tw_dev->command_packet_phys[i] = dma_handle+(i*size);
+			tw_dev->command_packet_virt[i] = (TW_Command_Full *)((unsigned char *)cpu_addr + (i*size));
+			break;
+		case 1:
+			tw_dev->generic_buffer_phys[i] = dma_handle+(i*size);
+			tw_dev->generic_buffer_virt[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size));
+			break;
+		case 2:
+			tw_dev->sense_buffer_phys[i] = dma_handle+(i*size);
+			tw_dev->sense_buffer_virt[i] = (TW_Command_Apache_Header *)((unsigned char *)cpu_addr + (i*size));
+			break;
+		}
+	}
+	retval = 0;
+out:
+	return retval;
+} /* End twl_allocate_memory() */
+
+/* This function will load the request id and various sgls for ioctls */
+static void twl_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length)
+{
+	TW_Command *oldcommand;
+	TW_Command_Apache *newcommand;
+	TW_SG_Entry_ISO *sgl;
+	unsigned int pae = 0;
+
+	if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4))
+		pae = 1;
+
+	if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
+		newcommand = &full_command_packet->command.newcommand;
+		newcommand->request_id__lunl =
+			cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id));
+		if (length) {
+			newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
+			newcommand->sg_list[0].length = TW_CPU_TO_SGL(length);
+		}
+		newcommand->sgl_entries__lunh =
+			cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0));
+	} else {
+		oldcommand = &full_command_packet->command.oldcommand;
+		oldcommand->request_id = request_id;
+
+		if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) {
+			/* Load the sg list */
+			sgl = (TW_SG_Entry_ISO *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry_ISO)/4) + pae + (sizeof(dma_addr_t) > 4 ? 1 : 0));
+			sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
+			sgl->length = TW_CPU_TO_SGL(length);
+			oldcommand->size += pae;
+			oldcommand->size += sizeof(dma_addr_t) > 4 ? 1 : 0;
+		}
+	}
+} /* End twl_load_sgl() */
+
+/* This function handles ioctl for the character device
+   This interface is used by smartmontools open source software */
+static int twl_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
+{
+	long timeout;
+	unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0;
+	dma_addr_t dma_handle;
+	int request_id = 0;
+	TW_Ioctl_Driver_Command driver_command;
+	TW_Ioctl_Buf_Apache *tw_ioctl;
+	TW_Command_Full *full_command_packet;
+	TW_Device_Extension *tw_dev = twl_device_extension_list[iminor(inode)];
+	int retval = -EFAULT;
+	void __user *argp = (void __user *)arg;
+
+	/* Only let one of these through at a time */
+	if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) {
+		retval = -EINTR;
+		goto out;
+	}
+
+	/* First copy down the driver command */
+	if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command)))
+		goto out2;
+
+	/* Check data buffer size */
+	if (driver_command.buffer_length > TW_MAX_SECTORS * 2048) {
+		retval = -EINVAL;
+		goto out2;
+	}
+
+	/* Hardware can only do multiple of 512 byte transfers */
+	data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511;
+
+	/* Now allocate ioctl buf memory */
+	cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, &dma_handle, GFP_KERNEL);
+	if (!cpu_addr) {
+		retval = -ENOMEM;
+		goto out2;
+	}
+
+	tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;
+
+	/* Now copy down the entire ioctl */
+	if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1))
+		goto out3;
+
+	/* See which ioctl we are doing */
+	switch (cmd) {
+	case TW_IOCTL_FIRMWARE_PASS_THROUGH:
+		spin_lock_irqsave(tw_dev->host->host_lock, flags);
+		twl_get_request_id(tw_dev, &request_id);
+
+		/* Flag internal command */
+		tw_dev->srb[request_id] = NULL;
+
+		/* Flag chrdev ioctl */
+		tw_dev->chrdev_request_id = request_id;
+
+		full_command_packet = (TW_Command_Full *)&tw_ioctl->firmware_command;
+
+		/* Load request id and sglist for both command types */
+		twl_load_sgl(tw_dev, full_command_packet, request_id, dma_handle, data_buffer_length_adjusted);
+
+		memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full));
+
+		/* Now post the command packet to the controller */
+		twl_post_command_packet(tw_dev, request_id);
+		spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
+
+		timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ;
+
+		/* Now wait for command to complete */
+		timeout = wait_event_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout);
+
+		/* We timed out, and didn't get an interrupt */
+		if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) {
+			/* Now we need to reset the board */
+			printk(KERN_WARNING "3w-sas: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n",
+			       tw_dev->host->host_no, TW_DRIVER, 0x6,
+			       cmd);
+			retval = -EIO;
+			twl_reset_device_extension(tw_dev, 1);
+			goto out3;
+		}
+
+		/* Now copy in the command packet response */
+		memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full));
+
+		/* Now complete the io */
+		spin_lock_irqsave(tw_dev->host->host_lock, flags);
+		tw_dev->posted_request_count--;
+		tw_dev->state[request_id] = TW_S_COMPLETED;
+		twl_free_request_id(tw_dev, request_id);
+		spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
+		break;
+	default:
+		retval = -ENOTTY;
+		goto out3;
+	}
+
+	/* Now copy the entire response to userspace */
+	if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0)
+		retval = 0;
+out3:
+	/* Now free ioctl buf memory */
+	dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, cpu_addr, dma_handle);
+out2:
+	mutex_unlock(&tw_dev->ioctl_lock);
+out:
+	return retval;
+} /* End twl_chrdev_ioctl() */
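
The ioctl path above rounds the user buffer up to the 512-byte granularity the hardware requires with (length + 511) & ~511. The same idiom in isolation, which works for any power-of-two alignment:

	static inline unsigned long round_up_512(unsigned long len)
	{
		/* adding 511 then clearing the low 9 bits rounds up */
		return (len + 511) & ~511UL;
	}
	/* e.g. round_up_512(1)    == 512
	 *      round_up_512(512)  == 512
	 *      round_up_512(1000) == 1024 */
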
863
864/* This function handles open for the character device */
865static int twl_chrdev_open(struct inode *inode, struct file *file)
866{
867 unsigned int minor_number;
868 int retval = -ENODEV;
869
870 if (!capable(CAP_SYS_ADMIN)) {
871 retval = -EACCES;
872 goto out;
873 }
874
875 cycle_kernel_lock();
876 minor_number = iminor(inode);
877 if (minor_number >= twl_device_extension_count)
878 goto out;
879 retval = 0;
880out:
881 return retval;
882} /* End twl_chrdev_open() */
883
884/* File operations struct for character device */
885static const struct file_operations twl_fops = {
886 .owner = THIS_MODULE,
887 .ioctl = twl_chrdev_ioctl,
888 .open = twl_chrdev_open,
889 .release = NULL
890};
891
892/* This function passes sense data from firmware to scsi layer */
893static int twl_fill_sense(TW_Device_Extension *tw_dev, int i, int request_id, int copy_sense, int print_host)
894{
895 TW_Command_Apache_Header *header;
896 TW_Command_Full *full_command_packet;
897 unsigned short error;
898 char *error_str;
899 int retval = 1;
900
901 header = tw_dev->sense_buffer_virt[i];
902 full_command_packet = tw_dev->command_packet_virt[request_id];
903
904 /* Get embedded firmware error string */
905 error_str = &(header->err_specific_desc[strlen(header->err_specific_desc) + 1]);
906
907 /* Don't print error for Logical unit not supported during rollcall */
908 error = le16_to_cpu(header->status_block.error);
909 if ((error != TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) && (error != TW_ERROR_UNIT_OFFLINE) && (error != TW_ERROR_INVALID_FIELD_IN_CDB)) {
910 if (print_host)
911 printk(KERN_WARNING "3w-sas: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n",
912 tw_dev->host->host_no,
913 TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
914 header->status_block.error,
915 error_str,
916 header->err_specific_desc);
917 else
918 printk(KERN_WARNING "3w-sas: ERROR: (0x%02X:0x%04X): %s:%s.\n",
919 TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
920 header->status_block.error,
921 error_str,
922 header->err_specific_desc);
923 }
924
925 if (copy_sense) {
926 memcpy(tw_dev->srb[request_id]->sense_buffer, header->sense_data, TW_SENSE_DATA_LENGTH);
927 tw_dev->srb[request_id]->result = (full_command_packet->command.newcommand.status << 1);
928 goto out;
929 }
930out:
931 return retval;
932} /* End twl_fill_sense() */
933
934/* This function will free up device extension resources */
935static void twl_free_device_extension(TW_Device_Extension *tw_dev)
936{
937 if (tw_dev->command_packet_virt[0])
938 pci_free_consistent(tw_dev->tw_pci_dev,
939 sizeof(TW_Command_Full)*TW_Q_LENGTH,
940 tw_dev->command_packet_virt[0],
941 tw_dev->command_packet_phys[0]);
942
943 if (tw_dev->generic_buffer_virt[0])
944 pci_free_consistent(tw_dev->tw_pci_dev,
945 TW_SECTOR_SIZE*TW_Q_LENGTH,
946 tw_dev->generic_buffer_virt[0],
947 tw_dev->generic_buffer_phys[0]);
948
949 if (tw_dev->sense_buffer_virt[0])
950 pci_free_consistent(tw_dev->tw_pci_dev,
951 sizeof(TW_Command_Apache_Header)*
952 TW_Q_LENGTH,
953 tw_dev->sense_buffer_virt[0],
954 tw_dev->sense_buffer_phys[0]);
955
956 kfree(tw_dev->event_queue[0]);
957} /* End twl_free_device_extension() */
958
959/* This function will get parameter table entries from the firmware */
960static void *twl_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes)
961{
962 TW_Command_Full *full_command_packet;
963 TW_Command *command_packet;
964 TW_Param_Apache *param;
965 void *retval = NULL;
966
967 /* Setup the command packet */
968 full_command_packet = tw_dev->command_packet_virt[request_id];
969 memset(full_command_packet, 0, sizeof(TW_Command_Full));
970 command_packet = &full_command_packet->command.oldcommand;
971
972 command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
973 command_packet->size = TW_COMMAND_SIZE;
974 command_packet->request_id = request_id;
975 command_packet->byte6_offset.block_count = cpu_to_le16(1);
976
977 /* Now setup the param */
978 param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
979 memset(param, 0, TW_SECTOR_SIZE);
980 param->table_id = cpu_to_le16(table_id | 0x8000);
981 param->parameter_id = cpu_to_le16(parameter_id);
982 param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes);
983
984 command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
985 command_packet->byte8_offset.param.sgl[0].length = TW_CPU_TO_SGL(TW_SECTOR_SIZE);
986
987 /* Post the command packet to the board */
988 twl_post_command_packet(tw_dev, request_id);
989
990 /* Poll for completion */
991 if (twl_poll_response(tw_dev, request_id, 30))
992 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x7, "No valid response during get param")
993 else
994 retval = (void *)&(param->data[0]);
995
996 tw_dev->posted_request_count--;
997 tw_dev->state[request_id] = TW_S_INITIAL;
998
999 return retval;
1000} /* End twl_get_param() */
1001
1002/* This function will send an initconnection command to controller */
1003static int twl_initconnection(TW_Device_Extension *tw_dev, int message_credits,
1004 u32 set_features, unsigned short current_fw_srl,
1005 unsigned short current_fw_arch_id,
1006 unsigned short current_fw_branch,
1007 unsigned short current_fw_build,
1008 unsigned short *fw_on_ctlr_srl,
1009 unsigned short *fw_on_ctlr_arch_id,
1010 unsigned short *fw_on_ctlr_branch,
1011 unsigned short *fw_on_ctlr_build,
1012 u32 *init_connect_result)
1013{
1014 TW_Command_Full *full_command_packet;
1015 TW_Initconnect *tw_initconnect;
1016 int request_id = 0, retval = 1;
1017
1018 /* Initialize InitConnection command packet */
1019 full_command_packet = tw_dev->command_packet_virt[request_id];
1020 memset(full_command_packet, 0, sizeof(TW_Command_Full));
1021 full_command_packet->header.header_desc.size_header = 128;
1022
1023 tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand;
1024 tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION);
1025 tw_initconnect->request_id = request_id;
1026 tw_initconnect->message_credits = cpu_to_le16(message_credits);
1027 tw_initconnect->features = set_features;
1028
1029 /* Turn on 64-bit sgl support if we need to */
1030 tw_initconnect->features |= sizeof(dma_addr_t) > 4 ? 1 : 0;
1031
1032 tw_initconnect->features = cpu_to_le32(tw_initconnect->features);
1033
1034 if (set_features & TW_EXTENDED_INIT_CONNECT) {
1035 tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED;
1036 tw_initconnect->fw_srl = cpu_to_le16(current_fw_srl);
1037 tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id);
1038 tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch);
1039 tw_initconnect->fw_build = cpu_to_le16(current_fw_build);
1040 } else
1041 tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE;
1042
1043 /* Send command packet to the board */
1044 twl_post_command_packet(tw_dev, request_id);
1045
1046 /* Poll for completion */
1047 if (twl_poll_response(tw_dev, request_id, 30)) {
1048 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x8, "No valid response during init connection");
1049 } else {
1050 if (set_features & TW_EXTENDED_INIT_CONNECT) {
1051 *fw_on_ctlr_srl = le16_to_cpu(tw_initconnect->fw_srl);
1052 *fw_on_ctlr_arch_id = le16_to_cpu(tw_initconnect->fw_arch_id);
1053 *fw_on_ctlr_branch = le16_to_cpu(tw_initconnect->fw_branch);
1054 *fw_on_ctlr_build = le16_to_cpu(tw_initconnect->fw_build);
1055 *init_connect_result = le32_to_cpu(tw_initconnect->result);
1056 }
1057 retval = 0;
1058 }
1059
1060 tw_dev->posted_request_count--;
1061 tw_dev->state[request_id] = TW_S_INITIAL;
1062
1063 return retval;
1064} /* End twl_initconnection() */
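/* twl_initconnection() is used in two modes: twl_reset_sequence() below
 * issues the extended form (TW_EXTENDED_INIT_CONNECT plus the driver's
 * SRL/branch/build, with the firmware's values returned through the out
 * parameters), while the shutdown and suspend paths call a minimal form
 * with message_credits = 1, no feature flags, and NULL out parameters. */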
1065
1066/* This function will initialize the fields of a device extension */
1067static int twl_initialize_device_extension(TW_Device_Extension *tw_dev)
1068{
1069 int i, retval = 1;
1070
1071 /* Initialize command packet buffers */
1072 if (twl_allocate_memory(tw_dev, sizeof(TW_Command_Full), 0)) {
1073 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x9, "Command packet memory allocation failed");
1074 goto out;
1075 }
1076
1077 /* Initialize generic buffer */
1078 if (twl_allocate_memory(tw_dev, TW_SECTOR_SIZE, 1)) {
1079 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xa, "Generic memory allocation failed");
1080 goto out;
1081 }
1082
1083 /* Allocate sense buffers */
1084 if (twl_allocate_memory(tw_dev, sizeof(TW_Command_Apache_Header), 2)) {
1085 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xb, "Sense buffer allocation failed");
1086 goto out;
1087 }
1088
1089 /* Allocate event info space */
1090 tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL);
1091 if (!tw_dev->event_queue[0]) {
1092 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xc, "Event info memory allocation failed");
1093 goto out;
1094 }
1095
1096 for (i = 0; i < TW_Q_LENGTH; i++) {
1097 tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event)));
1098 tw_dev->free_queue[i] = i;
1099 tw_dev->state[i] = TW_S_INITIAL;
1100 }
1101
1102 tw_dev->free_head = TW_Q_START;
1103 tw_dev->free_tail = TW_Q_START;
1104 tw_dev->error_sequence_id = 1;
1105 tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1106
1107 mutex_init(&tw_dev->ioctl_lock);
1108 init_waitqueue_head(&tw_dev->ioctl_wqueue);
1109
1110 retval = 0;
1111out:
1112 return retval;
1113} /* End twl_initialize_device_extension() */
1114
1115/* This function will perform a pci-dma unmap */
1116static void twl_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
1117{
1118 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1119
1120 if (cmd->SCp.phase == TW_PHASE_SGLIST)
1121 scsi_dma_unmap(cmd);
1122} /* End twl_unmap_scsi_data() */
1123
1124/* This function will handle attention interrupts */
1125static int twl_handle_attention_interrupt(TW_Device_Extension *tw_dev)
1126{
1127 int retval = 1;
1128 u32 request_id, doorbell;
1129
1130 /* Read doorbell status */
1131 doorbell = readl(TWL_HOBDB_REG_ADDR(tw_dev));
1132
1133 /* Check for controller errors */
1134 if (doorbell & TWL_DOORBELL_CONTROLLER_ERROR) {
1135 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xd, "Microcontroller Error: clearing");
1136 goto out;
1137 }
1138
1139 /* Check if we need to perform an AEN drain */
1140 if (doorbell & TWL_DOORBELL_ATTENTION_INTERRUPT) {
1141 if (!(test_and_set_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags))) {
1142 twl_get_request_id(tw_dev, &request_id);
1143 if (twl_aen_read_queue(tw_dev, request_id)) {
1144 tw_dev->state[request_id] = TW_S_COMPLETED;
1145 twl_free_request_id(tw_dev, request_id);
1146 clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
1147 }
1148 }
1149 }
1150
1151 retval = 0;
1152out:
1153 /* Clear doorbell interrupt */
1154 TWL_CLEAR_DB_INTERRUPT(tw_dev);
1155
1156 /* Make sure the clear was flushed by reading it back */
1157 readl(TWL_HOBDBC_REG_ADDR(tw_dev));
1158
1159 return retval;
1160} /* End twl_handle_attention_interrupt() */
1161
1162/* Interrupt service routine */
1163static irqreturn_t twl_interrupt(int irq, void *dev_instance)
1164{
1165 TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance;
1166 int i, handled = 0, error = 0;
1167 dma_addr_t mfa = 0;
1168 u32 reg, regl, regh, response, request_id = 0;
1169 struct scsi_cmnd *cmd;
1170 TW_Command_Full *full_command_packet;
1171
1172 spin_lock(tw_dev->host->host_lock);
1173
1174 /* Read host interrupt status */
1175 reg = readl(TWL_HISTAT_REG_ADDR(tw_dev));
1176
1177 /* Check if this is our interrupt, otherwise bail */
1178 if (!(reg & TWL_HISTATUS_VALID_INTERRUPT))
1179 goto twl_interrupt_bail;
1180
1181 handled = 1;
1182
1183 /* If we are resetting, bail */
1184 if (test_bit(TW_IN_RESET, &tw_dev->flags))
1185 goto twl_interrupt_bail;
1186
1187 /* Attention interrupt */
1188 if (reg & TWL_HISTATUS_ATTENTION_INTERRUPT) {
1189 if (twl_handle_attention_interrupt(tw_dev)) {
1190 TWL_MASK_INTERRUPTS(tw_dev);
1191 goto twl_interrupt_bail;
1192 }
1193 }
1194
1195 /* Response interrupt */
1196 while (reg & TWL_HISTATUS_RESPONSE_INTERRUPT) {
1197 if (sizeof(dma_addr_t) > 4) {
1198 regh = readl(TWL_HOBQPH_REG_ADDR(tw_dev));
1199 regl = readl(TWL_HOBQPL_REG_ADDR(tw_dev));
1200 mfa = ((u64)regh << 32) | regl;
1201 } else
1202 mfa = readl(TWL_HOBQPL_REG_ADDR(tw_dev));
1203
1204 error = 0;
1205 response = (u32)mfa;
1206
1207 /* Check for command packet error */
1208 if (!TW_NOTMFA_OUT(response)) {
1209 for (i = 0; i < TW_Q_LENGTH; i++) {
1210 if (tw_dev->sense_buffer_phys[i] == mfa) {
1211 request_id = le16_to_cpu(tw_dev->sense_buffer_virt[i]->header_desc.request_id);
1212 if (tw_dev->srb[request_id] != NULL)
1213 error = twl_fill_sense(tw_dev, i, request_id, 1, 1);
1214 else {
1215 /* Skip ioctl error prints */
1216 if (request_id != tw_dev->chrdev_request_id)
1217 error = twl_fill_sense(tw_dev, i, request_id, 0, 1);
1218 else
1219 memcpy(tw_dev->command_packet_virt[request_id], tw_dev->sense_buffer_virt[i], sizeof(TW_Command_Apache_Header));
1220 }
1221
1222 /* Now re-post the sense buffer */
1223 writel((u32)((u64)tw_dev->sense_buffer_phys[i] >> 32), TWL_HOBQPH_REG_ADDR(tw_dev));
1224 writel((u32)tw_dev->sense_buffer_phys[i], TWL_HOBQPL_REG_ADDR(tw_dev));
1225 break;
1226 }
1227 }
1228 } else
1229 request_id = TW_RESID_OUT(response);
1230
1231 full_command_packet = tw_dev->command_packet_virt[request_id];
1232
1233 /* Check for correct state */
1234 if (tw_dev->state[request_id] != TW_S_POSTED) {
1235 if (tw_dev->srb[request_id] != NULL) {
1236 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Received a request id that wasn't posted");
1237 TWL_MASK_INTERRUPTS(tw_dev);
1238 goto twl_interrupt_bail;
1239 }
1240 }
1241
1242 /* Check for internal command completion */
1243 if (tw_dev->srb[request_id] == NULL) {
1244 if (request_id != tw_dev->chrdev_request_id) {
1245 if (twl_aen_complete(tw_dev, request_id))
1246 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xf, "Error completing AEN during attention interrupt");
1247 } else {
1248 tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1249 wake_up(&tw_dev->ioctl_wqueue);
1250 }
1251 } else {
1252 cmd = tw_dev->srb[request_id];
1253
1254 if (!error)
1255 cmd->result = (DID_OK << 16);
1256
1257 /* Report residual bytes for single sgl */
1258 if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
1259 if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id]))
1260 scsi_set_resid(cmd, scsi_bufflen(cmd) - full_command_packet->command.newcommand.sg_list[0].length);
1261 }
1262
1263 /* Now complete the io */
1264 tw_dev->state[request_id] = TW_S_COMPLETED;
1265 twl_free_request_id(tw_dev, request_id);
1266 tw_dev->posted_request_count--;
1267 tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
1268 twl_unmap_scsi_data(tw_dev, request_id);
1269 }
1270
1271 /* Check for another response interrupt */
1272 reg = readl(TWL_HISTAT_REG_ADDR(tw_dev));
1273 }
1274
1275twl_interrupt_bail:
1276 spin_unlock(tw_dev->host->host_lock);
1277 return IRQ_RETVAL(handled);
1278} /* End twl_interrupt() */
1279
1280/* This function will poll for a register change */
1281static int twl_poll_register(TW_Device_Extension *tw_dev, void *reg, u32 value, u32 result, int seconds)
1282{
1283 unsigned long before;
1284 int retval = 1;
1285 u32 reg_value;
1286
1287 reg_value = readl(reg);
1288 before = jiffies;
1289
1290 while ((reg_value & value) != result) {
1291 reg_value = readl(reg);
1292 if (time_after(jiffies, before + HZ * seconds))
1293 goto out;
1294 msleep(50);
1295 }
1296 retval = 0;
1297out:
1298 return retval;
1299} /* End twl_poll_register() */
1300
1301/* This function will reset a controller */
1302static int twl_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset)
1303{
1304 int retval = 1;
1305 int i = 0;
1306 u32 status = 0;
1307 unsigned short fw_on_ctlr_srl = 0, fw_on_ctlr_arch_id = 0;
1308 unsigned short fw_on_ctlr_branch = 0, fw_on_ctlr_build = 0;
1309 u32 init_connect_result = 0;
1310 int tries = 0;
1311 int do_soft_reset = soft_reset;
1312
1313 while (tries < TW_MAX_RESET_TRIES) {
1314 /* Do a soft reset if one is needed */
1315 if (do_soft_reset) {
1316 TWL_SOFT_RESET(tw_dev);
1317
1318 /* Make sure controller is in a good state */
1319 if (twl_poll_register(tw_dev, TWL_SCRPD3_REG_ADDR(tw_dev), TWL_CONTROLLER_READY, 0x0, 30)) {
1320 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Controller never went non-ready during reset sequence");
1321 tries++;
1322 continue;
1323 }
1324 if (twl_poll_register(tw_dev, TWL_SCRPD3_REG_ADDR(tw_dev), TWL_CONTROLLER_READY, TWL_CONTROLLER_READY, 60)) {
1325 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x11, "Controller not ready during reset sequence");
1326 tries++;
1327 continue;
1328 }
1329 }
1330
1331 /* Initconnect */
1332 if (twl_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
1333 TW_EXTENDED_INIT_CONNECT, TW_CURRENT_DRIVER_SRL,
1334 TW_9750_ARCH_ID, TW_CURRENT_DRIVER_BRANCH,
1335 TW_CURRENT_DRIVER_BUILD, &fw_on_ctlr_srl,
1336 &fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
1337 &fw_on_ctlr_build, &init_connect_result)) {
1338 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x12, "Initconnection failed while checking SRL");
1339 do_soft_reset = 1;
1340 tries++;
1341 continue;
1342 }
1343
1344 /* Load sense buffers */
1345 while (i < TW_Q_LENGTH) {
1346 writel((u32)((u64)tw_dev->sense_buffer_phys[i] >> 32), TWL_HOBQPH_REG_ADDR(tw_dev));
1347 writel((u32)tw_dev->sense_buffer_phys[i], TWL_HOBQPL_REG_ADDR(tw_dev));
1348
1349 /* Check status for over-run after each write */
1350 status = readl(TWL_STATUS_REG_ADDR(tw_dev));
1351 if (!(status & TWL_STATUS_OVERRUN_SUBMIT))
1352 i++;
1353 }
1354
1355 /* Now check status */
1356 status = readl(TWL_STATUS_REG_ADDR(tw_dev));
1357 if (status) {
1358 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x13, "Bad controller status after loading sense buffers");
1359 do_soft_reset = 1;
1360 tries++;
1361 continue;
1362 }
1363
1364 /* Drain the AEN queue */
1365 if (twl_aen_drain_queue(tw_dev, soft_reset)) {
1366 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x14, "AEN drain failed during reset sequence");
1367 do_soft_reset = 1;
1368 tries++;
1369 continue;
1370 }
1371
1372 /* Load rest of compatibility struct */
1373 strncpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION, strlen(TW_DRIVER_VERSION));
1374 tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL;
1375 tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH;
1376 tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD;
1377 tw_dev->tw_compat_info.driver_srl_low = TW_BASE_FW_SRL;
1378 tw_dev->tw_compat_info.driver_branch_low = TW_BASE_FW_BRANCH;
1379 tw_dev->tw_compat_info.driver_build_low = TW_BASE_FW_BUILD;
1380 tw_dev->tw_compat_info.fw_on_ctlr_srl = fw_on_ctlr_srl;
1381 tw_dev->tw_compat_info.fw_on_ctlr_branch = fw_on_ctlr_branch;
1382 tw_dev->tw_compat_info.fw_on_ctlr_build = fw_on_ctlr_build;
1383
1384 /* If we got here, controller is in a good state */
1385 retval = 0;
1386 goto out;
1387 }
1388out:
1389 return retval;
1390} /* End twl_reset_sequence() */
1391
1392/* This function will reset a device extension */
1393static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_reset)
1394{
1395 int i = 0, retval = 1;
1396 unsigned long flags = 0;
1397
1398 /* Block SCSI requests while we are resetting */
1399 if (ioctl_reset)
1400 scsi_block_requests(tw_dev->host);
1401
1402 set_bit(TW_IN_RESET, &tw_dev->flags);
1403 TWL_MASK_INTERRUPTS(tw_dev);
1404 TWL_CLEAR_DB_INTERRUPT(tw_dev);
1405
1406 spin_lock_irqsave(tw_dev->host->host_lock, flags);
1407
1408 /* Abort all requests that are in progress */
1409 for (i = 0; i < TW_Q_LENGTH; i++) {
1410 if ((tw_dev->state[i] != TW_S_FINISHED) &&
1411 (tw_dev->state[i] != TW_S_INITIAL) &&
1412 (tw_dev->state[i] != TW_S_COMPLETED)) {
1413 if (tw_dev->srb[i]) {
1414 tw_dev->srb[i]->result = (DID_RESET << 16);
1415 tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
1416 twl_unmap_scsi_data(tw_dev, i);
1417 }
1418 }
1419 }
1420
1421 /* Reset queues and counts */
1422 for (i = 0; i < TW_Q_LENGTH; i++) {
1423 tw_dev->free_queue[i] = i;
1424 tw_dev->state[i] = TW_S_INITIAL;
1425 }
1426 tw_dev->free_head = TW_Q_START;
1427 tw_dev->free_tail = TW_Q_START;
1428 tw_dev->posted_request_count = 0;
1429
1430 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
1431
1432 if (twl_reset_sequence(tw_dev, 1))
1433 goto out;
1434
1435 TWL_UNMASK_INTERRUPTS(tw_dev);
1436
1437 clear_bit(TW_IN_RESET, &tw_dev->flags);
1438 tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1439
1440 retval = 0;
1441out:
1442 if (ioctl_reset)
1443 scsi_unblock_requests(tw_dev->host);
1444 return retval;
1445} /* End twl_reset_device_extension() */
1446
1447/* This function returns unit geometry in cylinders/heads/sectors */
1448static int twl_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
1449{
1450 int heads, sectors;
1451 TW_Device_Extension *tw_dev;
1452
1453 tw_dev = (TW_Device_Extension *)sdev->host->hostdata;
1454
1455 if (capacity >= 0x200000) {
1456 heads = 255;
1457 sectors = 63;
1458 } else {
1459 heads = 64;
1460 sectors = 32;
1461 }
1462
1463 geom[0] = heads;
1464 geom[1] = sectors;
1465 sector_div(capacity, heads * sectors); /* leaves the quotient in capacity, returns the remainder */
 geom[2] = capacity; /* cylinders */
1466
1467 return 0;
1468} /* End twl_scsi_biosparam() */
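/* Worked example: a 1 GiB unit (capacity = 0x200000 = 2,097,152 sectors)
 * takes the first branch, so the reported geometry is 255 heads, 63
 * sectors per track, and 2097152 / (255 * 63) = 130 cylinders. */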
1469
1470/* This is the new scsi eh reset function */
1471static int twl_scsi_eh_reset(struct scsi_cmnd *SCpnt)
1472{
1473 TW_Device_Extension *tw_dev = NULL;
1474 int retval = FAILED;
1475
1476 tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
1477
1478 tw_dev->num_resets++;
1479
1480 sdev_printk(KERN_WARNING, SCpnt->device,
1481 "WARNING: (0x%02X:0x%04X): Command (0x%x) timed out, resetting card.\n",
1482 TW_DRIVER, 0x2c, SCpnt->cmnd[0]);
1483
1484 /* Make sure we are not issuing an ioctl or resetting from ioctl */
1485 mutex_lock(&tw_dev->ioctl_lock);
1486
1487 /* Now reset the card and some of the device extension data */
1488 if (twl_reset_device_extension(tw_dev, 0)) {
1489 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x15, "Controller reset failed during scsi host reset");
1490 goto out;
1491 }
1492
1493 retval = SUCCESS;
1494out:
1495 mutex_unlock(&tw_dev->ioctl_lock);
1496 return retval;
1497} /* End twl_scsi_eh_reset() */
1498
1499/* This is the main scsi queue function to handle scsi opcodes */
1500static int twl_scsi_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1501{
1502 int request_id, retval;
1503 TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
1504
1505 /* If we are resetting due to timed out ioctl, report as busy */
1506 if (test_bit(TW_IN_RESET, &tw_dev->flags)) {
1507 retval = SCSI_MLQUEUE_HOST_BUSY;
1508 goto out;
1509 }
1510
1511 /* Save done function into scsi_cmnd struct */
1512 SCpnt->scsi_done = done;
1513
1514 /* Get a free request id */
1515 twl_get_request_id(tw_dev, &request_id);
1516
1517 /* Save the scsi command for use by the ISR */
1518 tw_dev->srb[request_id] = SCpnt;
1519
1520 /* Initialize phase to zero */
1521 SCpnt->SCp.phase = TW_PHASE_INITIAL;
1522
1523 retval = twl_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
1524 if (retval) {
1525 tw_dev->state[request_id] = TW_S_COMPLETED;
1526 twl_free_request_id(tw_dev, request_id);
1527 SCpnt->result = (DID_ERROR << 16);
1528 done(SCpnt);
1529 retval = 0;
1530 }
1531out:
1532 return retval;
1533} /* End twl_scsi_queue() */
1534
1535/* This function tells the controller to shut down */
1536static void __twl_shutdown(TW_Device_Extension *tw_dev)
1537{
1538 /* Disable interrupts */
1539 TWL_MASK_INTERRUPTS(tw_dev);
1540
1541 /* Free up the IRQ */
1542 free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
1543
1544 printk(KERN_WARNING "3w-sas: Shutting down host %d.\n", tw_dev->host->host_no);
1545
1546 /* Tell the card we are shutting down */
1547 if (twl_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
1548 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x16, "Connection shutdown failed");
1549 } else {
1550 printk(KERN_WARNING "3w-sas: Shutdown complete.\n");
1551 }
1552
1553 /* Clear doorbell interrupt just before exit */
1554 TWL_CLEAR_DB_INTERRUPT(tw_dev);
1555} /* End __twl_shutdown() */
1556
1557/* Wrapper for __twl_shutdown */
1558static void twl_shutdown(struct pci_dev *pdev)
1559{
1560 struct Scsi_Host *host = pci_get_drvdata(pdev);
1561 TW_Device_Extension *tw_dev;
1562
1563 if (!host)
1564 return;
1565
1566 tw_dev = (TW_Device_Extension *)host->hostdata;
1567
1568 if (tw_dev->online)
1569 __twl_shutdown(tw_dev);
1570} /* End twl_shutdown() */
1571
1572/* This function configures unit settings when a unit is coming on-line */
1573static int twl_slave_configure(struct scsi_device *sdev)
1574{
1575 /* Force 60 second timeout */
1576 blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
1577
1578 return 0;
1579} /* End twl_slave_configure() */
1580
1581/* scsi_host_template initializer */
1582static struct scsi_host_template driver_template = {
1583 .module = THIS_MODULE,
1584 .name = "3w-sas",
1585 .queuecommand = twl_scsi_queue,
1586 .eh_host_reset_handler = twl_scsi_eh_reset,
1587 .bios_param = twl_scsi_biosparam,
1588 .change_queue_depth = twl_change_queue_depth,
1589 .can_queue = TW_Q_LENGTH-2,
1590 .slave_configure = twl_slave_configure,
1591 .this_id = -1,
1592 .sg_tablesize = TW_LIBERATOR_MAX_SGL_LENGTH,
1593 .max_sectors = TW_MAX_SECTORS,
1594 .cmd_per_lun = TW_MAX_CMDS_PER_LUN,
1595 .use_clustering = ENABLE_CLUSTERING,
1596 .shost_attrs = twl_host_attrs,
1597 .emulated = 1
1598};
1599
1600/* This function will probe and initialize a card */
1601static int __devinit twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
1602{
1603 struct Scsi_Host *host = NULL;
1604 TW_Device_Extension *tw_dev;
1605 int retval = -ENODEV;
1606 int *ptr_phycount, phycount=0;
1607
1608 retval = pci_enable_device(pdev);
1609 if (retval) {
1610 TW_PRINTK(host, TW_DRIVER, 0x17, "Failed to enable pci device");
1611 goto out_disable_device;
1612 }
1613
1614 pci_set_master(pdev);
1615 pci_try_set_mwi(pdev);
1616
1617 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
1618 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
1619 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
1620 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
1621 TW_PRINTK(host, TW_DRIVER, 0x18, "Failed to set dma mask");
1622 retval = -ENODEV;
1623 goto out_disable_device;
1624 }
1625
1626 host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
1627 if (!host) {
1628 TW_PRINTK(host, TW_DRIVER, 0x19, "Failed to allocate memory for device extension");
1629 retval = -ENOMEM;
1630 goto out_disable_device;
1631 }
1632 tw_dev = shost_priv(host);
1633
1634 /* Save values to device extension */
1635 tw_dev->host = host;
1636 tw_dev->tw_pci_dev = pdev;
1637
1638 if (twl_initialize_device_extension(tw_dev)) {
1639 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Failed to initialize device extension");
1640 goto out_free_device_extension;
1641 }
1642
1643 /* Request IO regions */
1644 retval = pci_request_regions(pdev, "3w-sas");
1645 if (retval) {
1646 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Failed to get mem region");
1647 goto out_free_device_extension;
1648 }
1649
1650 /* Save base address, use region 1 */
1651 tw_dev->base_addr = pci_iomap(pdev, 1, 0);
1652 if (!tw_dev->base_addr) {
1653 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to ioremap");
1654 goto out_release_mem_region;
1655 }
1656
1657 /* Disable interrupts on the card */
1658 TWL_MASK_INTERRUPTS(tw_dev);
1659
1660 /* Initialize the card */
1661 if (twl_reset_sequence(tw_dev, 0)) {
1662 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1d, "Controller reset failed during probe");
1663 goto out_iounmap;
1664 }
1665
1666 /* Set host specific parameters */
1667 host->max_id = TW_MAX_UNITS;
1668 host->max_cmd_len = TW_MAX_CDB_LEN;
1669 host->max_lun = TW_MAX_LUNS;
1670 host->max_channel = 0;
1671
1672 /* Register the card with the kernel SCSI layer */
1673 retval = scsi_add_host(host, &pdev->dev);
1674 if (retval) {
1675 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1e, "scsi add host failed");
1676 goto out_iounmap;
1677 }
1678
1679 pci_set_drvdata(pdev, host);
1680
1681 printk(KERN_WARNING "3w-sas: scsi%d: Found an LSI 3ware %s Controller at 0x%llx, IRQ: %d.\n",
1682 host->host_no,
1683 (char *)twl_get_param(tw_dev, 1, TW_VERSION_TABLE,
1684 TW_PARAM_MODEL, TW_PARAM_MODEL_LENGTH),
1685 (u64)pci_resource_start(pdev, 1), pdev->irq);
1686
1687 ptr_phycount = twl_get_param(tw_dev, 2, TW_PARAM_PHY_SUMMARY_TABLE,
1688 TW_PARAM_PHYCOUNT, TW_PARAM_PHYCOUNT_LENGTH);
1689 if (ptr_phycount)
1690 phycount = le32_to_cpu(*(int *)ptr_phycount);
1691
1692 printk(KERN_WARNING "3w-sas: scsi%d: Firmware %s, BIOS %s, Phys: %d.\n",
1693 host->host_no,
1694 (char *)twl_get_param(tw_dev, 1, TW_VERSION_TABLE,
1695 TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH),
1696 (char *)twl_get_param(tw_dev, 2, TW_VERSION_TABLE,
1697 TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH),
1698 phycount);
1699
1700 /* Try to enable MSI */
1701 if (use_msi && !pci_enable_msi(pdev))
1702 set_bit(TW_USING_MSI, &tw_dev->flags);
1703
1704 /* Now setup the interrupt handler */
1705 retval = request_irq(pdev->irq, twl_interrupt, IRQF_SHARED, "3w-sas", tw_dev);
1706 if (retval) {
1707 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1f, "Error requesting IRQ");
1708 goto out_remove_host;
1709 }
1710
1711 twl_device_extension_list[twl_device_extension_count] = tw_dev;
1712 twl_device_extension_count++;
1713
1714 /* Re-enable interrupts on the card */
1715 TWL_UNMASK_INTERRUPTS(tw_dev);
1716
1717 /* Finally, scan the host */
1718 scsi_scan_host(host);
1719
1720 /* Add sysfs binary files */
1721 if (sysfs_create_bin_file(&host->shost_dev.kobj, &twl_sysfs_aen_read_attr))
1722 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x20, "Failed to create sysfs binary file: 3ware_aen_read");
1723 if (sysfs_create_bin_file(&host->shost_dev.kobj, &twl_sysfs_compat_info_attr))
1724 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x21, "Failed to create sysfs binary file: 3ware_compat_info");
1725
1726 if (twl_major == -1) {
1727 if ((twl_major = register_chrdev (0, "twl", &twl_fops)) < 0)
1728 TW_PRINTK(host, TW_DRIVER, 0x22, "Failed to register character device");
1729 }
1730 tw_dev->online = 1;
1731 return 0;
1732
1733out_remove_host:
1734 if (test_bit(TW_USING_MSI, &tw_dev->flags))
1735 pci_disable_msi(pdev);
1736 scsi_remove_host(host);
1737out_iounmap:
1738 iounmap(tw_dev->base_addr);
1739out_release_mem_region:
1740 pci_release_regions(pdev);
1741out_free_device_extension:
1742 twl_free_device_extension(tw_dev);
1743 scsi_host_put(host);
1744out_disable_device:
1745 pci_disable_device(pdev);
1746
1747 return retval;
1748} /* End twl_probe() */
1749
1750/* This function is called to remove a device */
1751static void twl_remove(struct pci_dev *pdev)
1752{
1753 struct Scsi_Host *host = pci_get_drvdata(pdev);
1754 TW_Device_Extension *tw_dev;
1755
1756 if (!host)
1757 return;
1758
1759 tw_dev = (TW_Device_Extension *)host->hostdata;
1760
1761 if (!tw_dev->online)
1762 return;
1763
1764 /* Remove sysfs binary files */
1765 sysfs_remove_bin_file(&host->shost_dev.kobj, &twl_sysfs_aen_read_attr);
1766 sysfs_remove_bin_file(&host->shost_dev.kobj, &twl_sysfs_compat_info_attr);
1767
1768 scsi_remove_host(tw_dev->host);
1769
1770 /* Unregister character device */
1771 if (twl_major >= 0) {
1772 unregister_chrdev(twl_major, "twl");
1773 twl_major = -1;
1774 }
1775
1776 /* Shutdown the card */
1777 __twl_shutdown(tw_dev);
1778
1779 /* Disable MSI if enabled */
1780 if (test_bit(TW_USING_MSI, &tw_dev->flags))
1781 pci_disable_msi(pdev);
1782
1783 /* Free IO remapping */
1784 iounmap(tw_dev->base_addr);
1785
1786 /* Free up the mem region */
1787 pci_release_regions(pdev);
1788
1789 /* Free up device extension resources */
1790 twl_free_device_extension(tw_dev);
1791
1792 scsi_host_put(tw_dev->host);
1793 pci_disable_device(pdev);
1794 twl_device_extension_count--;
1795} /* End twl_remove() */
1796
1797#ifdef CONFIG_PM
1798/* This function is called on PCI suspend */
1799static int twl_suspend(struct pci_dev *pdev, pm_message_t state)
1800{
1801 struct Scsi_Host *host = pci_get_drvdata(pdev);
1802 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
1803
1804 printk(KERN_WARNING "3w-sas: Suspending host %d.\n", tw_dev->host->host_no);
1805 /* Disable interrupts */
1806 TWL_MASK_INTERRUPTS(tw_dev);
1807
1808 free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
1809
1810 /* Tell the card we are shutting down */
1811 if (twl_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
1812 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x23, "Connection shutdown failed during suspend");
1813 } else {
1814 printk(KERN_WARNING "3w-sas: Suspend complete.\n");
1815 }
1816
1817 /* Clear doorbell interrupt */
1818 TWL_CLEAR_DB_INTERRUPT(tw_dev);
1819
1820 pci_save_state(pdev);
1821 pci_disable_device(pdev);
1822 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1823
1824 return 0;
1825} /* End twl_suspend() */
1826
1827/* This function is called on PCI resume */
1828static int twl_resume(struct pci_dev *pdev)
1829{
1830 int retval = 0;
1831 struct Scsi_Host *host = pci_get_drvdata(pdev);
1832 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
1833
1834 printk(KERN_WARNING "3w-sas: Resuming host %d.\n", tw_dev->host->host_no);
1835 pci_set_power_state(pdev, PCI_D0);
1836 pci_enable_wake(pdev, PCI_D0, 0);
1837 pci_restore_state(pdev);
1838
1839 retval = pci_enable_device(pdev);
1840 if (retval) {
1841 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x24, "Enable device failed during resume");
1842 return retval;
1843 }
1844
1845 pci_set_master(pdev);
1846 pci_try_set_mwi(pdev);
1847
1848 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
1849 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
1850 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
1851 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
1852 TW_PRINTK(host, TW_DRIVER, 0x25, "Failed to set dma mask during resume");
1853 retval = -ENODEV;
1854 goto out_disable_device;
1855 }
1856
1857 /* Initialize the card */
1858 if (twl_reset_sequence(tw_dev, 0)) {
1859 retval = -ENODEV;
1860 goto out_disable_device;
1861 }
1862
1863 /* Now setup the interrupt handler */
1864 retval = request_irq(pdev->irq, twl_interrupt, IRQF_SHARED, "3w-sas", tw_dev);
1865 if (retval) {
1866 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x26, "Error requesting IRQ during resume");
1867 retval = -ENODEV;
1868 goto out_disable_device;
1869 }
1870
1871 /* Re-enable MSI if it was enabled before suspend */
1872 if (test_bit(TW_USING_MSI, &tw_dev->flags))
1873 pci_enable_msi(pdev);
1874
1875 /* Re-enable interrupts on the card */
1876 TWL_UNMASK_INTERRUPTS(tw_dev);
1877
1878 printk(KERN_WARNING "3w-sas: Resume complete.\n");
1879 return 0;
1880
1881out_disable_device:
1882 scsi_remove_host(host);
1883 pci_disable_device(pdev);
1884
1885 return retval;
1886} /* End twl_resume() */
1887#endif
1888
1889/* PCI Devices supported by this driver */
1890static struct pci_device_id twl_pci_tbl[] __devinitdata = {
1891 { PCI_VDEVICE(3WARE, PCI_DEVICE_ID_3WARE_9750) },
1892 { }
1893};
1894MODULE_DEVICE_TABLE(pci, twl_pci_tbl);
1895
1896/* pci_driver initializer */
1897static struct pci_driver twl_driver = {
1898 .name = "3w-sas",
1899 .id_table = twl_pci_tbl,
1900 .probe = twl_probe,
1901 .remove = twl_remove,
1902#ifdef CONFIG_PM
1903 .suspend = twl_suspend,
1904 .resume = twl_resume,
1905#endif
1906 .shutdown = twl_shutdown
1907};
1908
1909/* This function is called on driver initialization */
1910static int __init twl_init(void)
1911{
1912 printk(KERN_INFO "LSI 3ware SAS/SATA-RAID Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION);
1913
1914 return pci_register_driver(&twl_driver);
1915} /* End twl_init() */
1916
1917/* This function is called on driver exit */
1918static void __exit twl_exit(void)
1919{
1920 pci_unregister_driver(&twl_driver);
1921} /* End twl_exit() */
1922
1923module_init(twl_init);
1924module_exit(twl_exit);
1925
diff --git a/drivers/scsi/3w-sas.h b/drivers/scsi/3w-sas.h
new file mode 100644
index 000000000000..d474892701d4
--- /dev/null
+++ b/drivers/scsi/3w-sas.h
@@ -0,0 +1,396 @@
1/*
2 3w-sas.h -- LSI 3ware SAS/SATA-RAID Controller device driver for Linux.
3
4 Written By: Adam Radford <linuxraid@lsi.com>
5
6 Copyright (C) 2009 LSI Corporation.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; version 2 of the License.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 NO WARRANTY
18 THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
19 CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
20 LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
21 MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
22 solely responsible for determining the appropriateness of using and
23 distributing the Program and assumes all risks associated with its
24 exercise of rights under this Agreement, including but not limited to
25 the risks and costs of program errors, damage to or loss of data,
26 programs or equipment, and unavailability or interruption of operations.
27
28 DISCLAIMER OF LIABILITY
29 NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
30 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
32 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
33 TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
34 USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
35 HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
36
37 You should have received a copy of the GNU General Public License
38 along with this program; if not, write to the Free Software
39 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
40
41 Bugs/Comments/Suggestions should be mailed to:
42 linuxraid@lsi.com
43
44 For more information, go to:
45 http://www.lsi.com
46*/
47
48#ifndef _3W_SAS_H
49#define _3W_SAS_H
50
51/* AEN severity table */
52static char *twl_aen_severity_table[] =
53{
54 "None", "ERROR", "WARNING", "INFO", "DEBUG", NULL
55};
56
57/* Liberator register offsets */
58#define TWL_STATUS 0x0 /* Status */
59#define TWL_HIBDB 0x20 /* Inbound doorbell */
60#define TWL_HISTAT 0x30 /* Host interrupt status */
61#define TWL_HIMASK 0x34 /* Host interrupt mask */
62#define TWL_HOBDB 0x9C /* Outbound doorbell */
63#define TWL_HOBDBC 0xA0 /* Outbound doorbell clear */
64#define TWL_SCRPD3 0xBC /* Scratchpad */
65#define TWL_HIBQPL 0xC0 /* Host inbound Q low */
66#define TWL_HIBQPH 0xC4 /* Host inbound Q high */
67#define TWL_HOBQPL 0xC8 /* Host outbound Q low */
68#define TWL_HOBQPH 0xCC /* Host outbound Q high */
69#define TWL_HISTATUS_VALID_INTERRUPT 0xC
70#define TWL_HISTATUS_ATTENTION_INTERRUPT 0x4
71#define TWL_HISTATUS_RESPONSE_INTERRUPT 0x8
72#define TWL_STATUS_OVERRUN_SUBMIT 0x2000
73#define TWL_ISSUE_SOFT_RESET 0x100
74#define TWL_CONTROLLER_READY 0x2000
75#define TWL_DOORBELL_CONTROLLER_ERROR 0x200000
76#define TWL_DOORBELL_ATTENTION_INTERRUPT 0x40000
77#define TWL_PULL_MODE 0x1
78
79/* Command packet opcodes used by the driver */
80#define TW_OP_INIT_CONNECTION 0x1
81#define TW_OP_GET_PARAM 0x12
82#define TW_OP_SET_PARAM 0x13
83#define TW_OP_EXECUTE_SCSI 0x10
84
85/* Asynchronous Event Notification (AEN) codes used by the driver */
86#define TW_AEN_QUEUE_EMPTY 0x0000
87#define TW_AEN_SOFT_RESET 0x0001
88#define TW_AEN_SYNC_TIME_WITH_HOST 0x031
89#define TW_AEN_SEVERITY_ERROR 0x1
90#define TW_AEN_SEVERITY_DEBUG 0x4
91#define TW_AEN_NOT_RETRIEVED 0x1
92
93/* Command state defines */
94#define TW_S_INITIAL 0x1 /* Initial state */
95#define TW_S_STARTED 0x2 /* Id in use */
96#define TW_S_POSTED 0x4 /* Posted to the controller */
97#define TW_S_COMPLETED 0x8 /* Completed by isr */
98#define TW_S_FINISHED 0x10 /* I/O completely done */
99
100/* Compatibility defines */
101#define TW_9750_ARCH_ID 10
102#define TW_CURRENT_DRIVER_SRL 40
103#define TW_CURRENT_DRIVER_BUILD 0
104#define TW_CURRENT_DRIVER_BRANCH 0
105
106/* Phase defines */
107#define TW_PHASE_INITIAL 0
108#define TW_PHASE_SGLIST 2
109
110/* Misc defines */
111#define TW_SECTOR_SIZE 512
112#define TW_MAX_UNITS 32
113#define TW_INIT_MESSAGE_CREDITS 0x100
114#define TW_INIT_COMMAND_PACKET_SIZE 0x3
115#define TW_INIT_COMMAND_PACKET_SIZE_EXTENDED 0x6
116#define TW_EXTENDED_INIT_CONNECT 0x2
117#define TW_BASE_FW_SRL 24
118#define TW_BASE_FW_BRANCH 0
119#define TW_BASE_FW_BUILD 1
120#define TW_Q_LENGTH 256
121#define TW_Q_START 0
122#define TW_MAX_SLOT 32
123#define TW_MAX_RESET_TRIES 2
124#define TW_MAX_CMDS_PER_LUN 254
125#define TW_MAX_AEN_DRAIN 255
126#define TW_IN_RESET 2
127#define TW_USING_MSI 3
128#define TW_IN_ATTENTION_LOOP 4
129#define TW_MAX_SECTORS 256
130#define TW_MAX_CDB_LEN 16
131#define TW_IOCTL_CHRDEV_TIMEOUT 60 /* 60 seconds */
132#define TW_IOCTL_CHRDEV_FREE -1
133#define TW_COMMAND_OFFSET 128 /* 128 bytes */
134#define TW_VERSION_TABLE 0x0402
135#define TW_TIMEKEEP_TABLE 0x040A
136#define TW_INFORMATION_TABLE 0x0403
137#define TW_PARAM_FWVER 3
138#define TW_PARAM_FWVER_LENGTH 16
139#define TW_PARAM_BIOSVER 4
140#define TW_PARAM_BIOSVER_LENGTH 16
141#define TW_PARAM_MODEL 8
142#define TW_PARAM_MODEL_LENGTH 16
143#define TW_PARAM_PHY_SUMMARY_TABLE 1
144#define TW_PARAM_PHYCOUNT 2
145#define TW_PARAM_PHYCOUNT_LENGTH 1
146#define TW_IOCTL_FIRMWARE_PASS_THROUGH 0x108 /* Used by smartmontools */
147#define TW_ALLOCATION_LENGTH 128
148#define TW_SENSE_DATA_LENGTH 18
149#define TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED 0x10a
150#define TW_ERROR_INVALID_FIELD_IN_CDB 0x10d
151#define TW_ERROR_UNIT_OFFLINE 0x128
152#define TW_MESSAGE_SOURCE_CONTROLLER_ERROR 3
153#define TW_MESSAGE_SOURCE_CONTROLLER_EVENT 4
154#define TW_DRIVER 6
155#ifndef PCI_DEVICE_ID_3WARE_9750
156#define PCI_DEVICE_ID_3WARE_9750 0x1010
157#endif
158
159/* Bitmask macros to eliminate bitfields */
160
161/* opcode: 5, reserved: 3 */
162#define TW_OPRES_IN(x,y) ((x << 5) | (y & 0x1f))
163#define TW_OP_OUT(x) (x & 0x1f)
164
165/* opcode: 5, sgloffset: 3 */
166#define TW_OPSGL_IN(x,y) ((x << 5) | (y & 0x1f))
167#define TW_SGL_OUT(x) ((x >> 5) & 0x7)
168
169/* severity: 3, reserved: 5 */
170#define TW_SEV_OUT(x) (x & 0x7)
171
172/* not_mfa: 1, reserved: 7, status: 8, request_id: 16 */
173#define TW_RESID_OUT(x) ((x >> 16) & 0xffff)
174#define TW_NOTMFA_OUT(x) (x & 0x1)
175
176/* request_id: 12, lun: 4 */
177#define TW_REQ_LUN_IN(lun, request_id) (((lun << 12) & 0xf000) | (request_id & 0xfff))
178#define TW_LUN_OUT(lun) ((lun >> 12) & 0xf)
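/* Worked example (illustrative only), matching the
 * TW_OPSGL_IN(2, TW_OP_GET_PARAM) call in twl_get_param():
 *
 *	pack:   TW_OPSGL_IN(2, 0x12) = (2 << 5) | (0x12 & 0x1f)
 *	                             = 0x40 | 0x12 = 0x52
 *	unpack: TW_SGL_OUT(0x52)     = (0x52 >> 5) & 0x7 = 2    (sgl offset)
 *	        TW_OP_OUT(0x52)      = 0x52 & 0x1f       = 0x12 (opcode)
 */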
179
180/* Register access macros */
181#define TWL_STATUS_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_STATUS)
182#define TWL_HOBQPL_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBQPL)
183#define TWL_HOBQPH_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBQPH)
184#define TWL_HOBDB_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBDB)
185#define TWL_HOBDBC_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBDBC)
186#define TWL_HIMASK_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIMASK)
187#define TWL_HISTAT_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HISTAT)
188#define TWL_HIBQPH_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIBQPH)
189#define TWL_HIBQPL_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIBQPL)
190#define TWL_HIBDB_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIBDB)
191#define TWL_SCRPD3_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_SCRPD3)
192#define TWL_MASK_INTERRUPTS(x) (writel(~0, TWL_HIMASK_REG_ADDR(x)))
193#define TWL_UNMASK_INTERRUPTS(x) (writel(~TWL_HISTATUS_VALID_INTERRUPT, TWL_HIMASK_REG_ADDR(x)))
194#define TWL_CLEAR_DB_INTERRUPT(x) (writel(~0, TWL_HOBDBC_REG_ADDR(x)))
195#define TWL_SOFT_RESET(x) (writel(TWL_ISSUE_SOFT_RESET, TWL_HIBDB_REG_ADDR(x)))
196
197/* Macros */
198#define TW_PRINTK(h,a,b,c) { \
199if (h) \
200printk(KERN_WARNING "3w-sas: scsi%d: ERROR: (0x%02X:0x%04X): %s.\n",h->host_no,a,b,c); \
201else \
202printk(KERN_WARNING "3w-sas: ERROR: (0x%02X:0x%04X): %s.\n",a,b,c); \
203}
204#define TW_MAX_LUNS 16
205#define TW_COMMAND_SIZE (sizeof(dma_addr_t) > 4 ? 6 : 4)
206#define TW_LIBERATOR_MAX_SGL_LENGTH (sizeof(dma_addr_t) > 4 ? 46 : 92)
207#define TW_LIBERATOR_MAX_SGL_LENGTH_OLD (sizeof(dma_addr_t) > 4 ? 47 : 94)
208#define TW_PADDING_LENGTH_LIBERATOR 136
209#define TW_PADDING_LENGTH_LIBERATOR_OLD 132
210#define TW_CPU_TO_SGL(x) (sizeof(dma_addr_t) > 4 ? cpu_to_le64(x) : cpu_to_le32(x))
211
212#pragma pack(1)
213
214/* SGL entry */
215typedef struct TAG_TW_SG_Entry_ISO {
216 dma_addr_t address;
217 dma_addr_t length;
218} TW_SG_Entry_ISO;
219
220/* Old Command Packet with ISO SGL */
221typedef struct TW_Command {
222 unsigned char opcode__sgloffset;
223 unsigned char size;
224 unsigned char request_id;
225 unsigned char unit__hostid;
226 /* Second DWORD */
227 unsigned char status;
228 unsigned char flags;
229 union {
230 unsigned short block_count;
231 unsigned short parameter_count;
232 } byte6_offset;
233 union {
234 struct {
235 u32 lba;
236 TW_SG_Entry_ISO sgl[TW_LIBERATOR_MAX_SGL_LENGTH_OLD];
237 unsigned char padding[TW_PADDING_LENGTH_LIBERATOR_OLD];
238 } io;
239 struct {
240 TW_SG_Entry_ISO sgl[TW_LIBERATOR_MAX_SGL_LENGTH_OLD];
241 u32 padding;
242 unsigned char padding2[TW_PADDING_LENGTH_LIBERATOR_OLD];
243 } param;
244 } byte8_offset;
245} TW_Command;
246
247/* New Command Packet with ISO SGL */
248typedef struct TAG_TW_Command_Apache {
249 unsigned char opcode__reserved;
250 unsigned char unit;
251 unsigned short request_id__lunl;
252 unsigned char status;
253 unsigned char sgl_offset;
254 unsigned short sgl_entries__lunh;
255 unsigned char cdb[16];
256 TW_SG_Entry_ISO sg_list[TW_LIBERATOR_MAX_SGL_LENGTH];
257 unsigned char padding[TW_PADDING_LENGTH_LIBERATOR];
258} TW_Command_Apache;
259
260/* New command packet header */
261typedef struct TAG_TW_Command_Apache_Header {
262 unsigned char sense_data[TW_SENSE_DATA_LENGTH];
263 struct {
264 char reserved[4];
265 unsigned short error;
266 unsigned char padding;
267 unsigned char severity__reserved;
268 } status_block;
269 unsigned char err_specific_desc[98];
270 struct {
271 unsigned char size_header;
272 unsigned short request_id;
273 unsigned char size_sense;
274 } header_desc;
275} TW_Command_Apache_Header;
276
277/* This struct is a union of the 2 command packets */
278typedef struct TAG_TW_Command_Full {
279 TW_Command_Apache_Header header;
280 union {
281 TW_Command oldcommand;
282 TW_Command_Apache newcommand;
283 } command;
284} TW_Command_Full;
285
286/* Initconnection structure */
287typedef struct TAG_TW_Initconnect {
288 unsigned char opcode__reserved;
289 unsigned char size;
290 unsigned char request_id;
291 unsigned char res2;
292 unsigned char status;
293 unsigned char flags;
294 unsigned short message_credits;
295 u32 features;
296 unsigned short fw_srl;
297 unsigned short fw_arch_id;
298 unsigned short fw_branch;
299 unsigned short fw_build;
300 u32 result;
301} TW_Initconnect;
302
303/* Event info structure */
304typedef struct TAG_TW_Event
305{
306 unsigned int sequence_id;
307 unsigned int time_stamp_sec;
308 unsigned short aen_code;
309 unsigned char severity;
310 unsigned char retrieved;
311 unsigned char repeat_count;
312 unsigned char parameter_len;
313 unsigned char parameter_data[98];
314} TW_Event;
315
316typedef struct TAG_TW_Ioctl_Driver_Command {
317 unsigned int control_code;
318 unsigned int status;
319 unsigned int unique_id;
320 unsigned int sequence_id;
321 unsigned int os_specific;
322 unsigned int buffer_length;
323} TW_Ioctl_Driver_Command;
324
325typedef struct TAG_TW_Ioctl_Apache {
326 TW_Ioctl_Driver_Command driver_command;
327 char padding[488];
328 TW_Command_Full firmware_command;
329 char data_buffer[1];
330} TW_Ioctl_Buf_Apache;
331
332/* GetParam descriptor */
333typedef struct {
334 unsigned short table_id;
335 unsigned short parameter_id;
336 unsigned short parameter_size_bytes;
337 unsigned short actual_parameter_size_bytes;
338 unsigned char data[1];
339} TW_Param_Apache;
340
341/* Compatibility information structure */
342typedef struct TAG_TW_Compatibility_Info
343{
344 char driver_version[32];
345 unsigned short working_srl;
346 unsigned short working_branch;
347 unsigned short working_build;
348 unsigned short driver_srl_high;
349 unsigned short driver_branch_high;
350 unsigned short driver_build_high;
351 unsigned short driver_srl_low;
352 unsigned short driver_branch_low;
353 unsigned short driver_build_low;
354 unsigned short fw_on_ctlr_srl;
355 unsigned short fw_on_ctlr_branch;
356 unsigned short fw_on_ctlr_build;
357} TW_Compatibility_Info;
358
359#pragma pack()
360
361typedef struct TAG_TW_Device_Extension {
362 void __iomem *base_addr;
363 unsigned long *generic_buffer_virt[TW_Q_LENGTH];
364 dma_addr_t generic_buffer_phys[TW_Q_LENGTH];
365 TW_Command_Full *command_packet_virt[TW_Q_LENGTH];
366 dma_addr_t command_packet_phys[TW_Q_LENGTH];
367 TW_Command_Apache_Header *sense_buffer_virt[TW_Q_LENGTH];
368 dma_addr_t sense_buffer_phys[TW_Q_LENGTH];
369 struct pci_dev *tw_pci_dev;
370 struct scsi_cmnd *srb[TW_Q_LENGTH];
371 unsigned char free_queue[TW_Q_LENGTH];
372 unsigned char free_head;
373 unsigned char free_tail;
374 int state[TW_Q_LENGTH];
375 unsigned int posted_request_count;
376 unsigned int max_posted_request_count;
377 unsigned int max_sgl_entries;
378 unsigned int sgl_entries;
379 unsigned int num_resets;
380 unsigned int sector_count;
381 unsigned int max_sector_count;
382 unsigned int aen_count;
383 struct Scsi_Host *host;
384 long flags;
385 TW_Event *event_queue[TW_Q_LENGTH];
386 unsigned char error_index;
387 unsigned int error_sequence_id;
388 int chrdev_request_id;
389 wait_queue_head_t ioctl_wqueue;
390 struct mutex ioctl_lock;
391 TW_Compatibility_Info tw_compat_info;
392 char online;
393} TW_Device_Extension;
394
395#endif /* _3W_SAS_H */
396
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index faa0fcfed71e..5faf903ca8c8 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -8,7 +8,7 @@
 
    Copyright (C) 1999-2009 3ware Inc.
 
-   Kernel compatiblity By: Andre Hedrick <andre@suse.com>
+   Kernel compatibility By: Andre Hedrick <andre@suse.com>
    Non-Copyright (C) 2000 Andre Hedrick <andre@suse.com>
 
    Further tiny build fixes and trivial hoovering Alan Cox
@@ -205,6 +205,7 @@
 #include <linux/errno.h>
 #include <linux/types.h>
 #include <linux/delay.h>
+#include <linux/gfp.h>
 #include <linux/pci.h>
 #include <linux/time.h>
 #include <linux/mutex.h>
@@ -521,8 +522,12 @@ static ssize_t tw_show_stats(struct device *dev, struct device_attribute *attr,
 } /* End tw_show_stats() */
 
 /* This function will set a devices queue depth */
-static int tw_change_queue_depth(struct scsi_device *sdev, int queue_depth)
+static int tw_change_queue_depth(struct scsi_device *sdev, int queue_depth,
+				 int reason)
 {
+	if (reason != SCSI_QDEPTH_DEFAULT)
+		return -EOPNOTSUPP;
+
 	if (queue_depth > TW_Q_LENGTH-2)
 		queue_depth = TW_Q_LENGTH-2;
 	scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index f5a9addb7050..80dc3ac12cde 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -117,6 +117,7 @@
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/string.h>
+#include <linux/slab.h>
 #include <linux/ioport.h>
 #include <linux/delay.h>
 #include <linux/spinlock.h>
@@ -175,7 +176,7 @@ STATIC void NCR_700_chip_reset(struct Scsi_Host *host);
 STATIC int NCR_700_slave_alloc(struct scsi_device *SDpnt);
 STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt);
 STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt);
-static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth);
+static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth, int reason);
 static int NCR_700_change_queue_type(struct scsi_device *SDpnt, int depth);
 
 STATIC struct device_attribute *NCR_700_dev_attrs[];
@@ -1491,7 +1492,7 @@ NCR_700_intr(int irq, void *dev_id)
 	unsigned long flags;
 	int handled = 0;
 
-	/* Use the host lock to serialise acess to the 53c700
+	/* Use the host lock to serialise access to the 53c700
 	 * hardware. Note: In future, we may need to take the queue
 	 * lock to enter the done routines. When that happens, we
 	 * need to ensure that for this driver, the host lock and the
@@ -2082,8 +2083,11 @@ NCR_700_slave_destroy(struct scsi_device *SDp)
 }
 
 static int
-NCR_700_change_queue_depth(struct scsi_device *SDp, int depth)
+NCR_700_change_queue_depth(struct scsi_device *SDp, int depth, int reason)
 {
+	if (reason != SCSI_QDEPTH_DEFAULT)
+		return -EOPNOTSUPP;
+
 	if (depth > NCR_700_MAX_TAGS)
 		depth = NCR_700_MAX_TAGS;
 
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index 1ddcf4031d4c..fc0b4b81d552 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -42,6 +42,7 @@
 #include <linux/spinlock.h>
 #include <linux/jiffies.h>
 #include <linux/dma-mapping.h>
+#include <linux/slab.h>
 #include <scsi/scsicam.h>
 
 #include <asm/dma.h>
diff --git a/drivers/scsi/FlashPoint.c b/drivers/scsi/FlashPoint.c
index b898d382b7b0..e40cdfb7541f 100644
--- a/drivers/scsi/FlashPoint.c
+++ b/drivers/scsi/FlashPoint.c
@@ -3924,7 +3924,7 @@ static void FPT_sinits(struct sccb *p_sccb, unsigned char p_card)
 {
 	struct sccb_mgr_tar_info *currTar_Info;
 
-	if ((p_sccb->TargID > MAX_SCSI_TAR) || (p_sccb->Lun > MAX_LUN)) {
+	if ((p_sccb->TargID >= MAX_SCSI_TAR) || (p_sccb->Lun >= MAX_LUN)) {
 		return;
 	}
 	currTar_Info = &FPT_sccbMgrTbl[p_card][p_sccb->TargID];
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index e11cca4c784c..75f2336807cb 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1,9 +1,15 @@
 menu "SCSI device support"
 
+config SCSI_MOD
+	tristate
+	default y if SCSI=n || SCSI=y
+	default m if SCSI=m
+
 config RAID_ATTRS
 	tristate "RAID Transport Class"
 	default n
 	depends on BLOCK
+	depends on SCSI_MOD
 	---help---
 	  Provides RAID
 
@@ -388,6 +394,16 @@ config BLK_DEV_3W_XXXX_RAID
 	  Please read the comments at the top of
 	  <file:drivers/scsi/3w-xxxx.c>.
 
+config SCSI_HPSA
+	tristate "HP Smart Array SCSI driver"
+	depends on PCI && SCSI
+	help
+	  This driver supports HP Smart Array Controllers (circa 2009).
+	  It is a SCSI alternative to the cciss driver, which is a block
+	  driver.  Anyone wishing to use HP Smart Array controllers who
+	  would prefer the devices be presented to linux as SCSI devices,
+	  rather than as generic block devices should say Y here.
+
 config SCSI_3W_9XXX
 	tristate "3ware 9xxx SATA-RAID support"
 	depends on PCI && SCSI
@@ -399,6 +415,17 @@ config SCSI_3W_9XXX
 	  Please read the comments at the top of
 	  <file:drivers/scsi/3w-9xxx.c>.
 
+config SCSI_3W_SAS
+	tristate "3ware 97xx SAS/SATA-RAID support"
+	depends on PCI && SCSI
+	help
+	  This driver supports the LSI 3ware 9750 6Gb/s SAS/SATA-RAID cards.
+
+	  <http://www.lsi.com>
+
+	  Please read the comments at the top of
+	  <file:drivers/scsi/3w-sas.c>.
+
 config SCSI_7000FASST
 	tristate "7000FASST SCSI support"
 	depends on ISA && SCSI && ISA_DMA_API
@@ -621,6 +648,14 @@ config SCSI_FLASHPOINT
 	  substantial, so users of MultiMaster Host Adapters may not
 	  wish to include it.
 
+config VMWARE_PVSCSI
+	tristate "VMware PVSCSI driver support"
+	depends on PCI && SCSI && X86
+	help
+	  This driver supports VMware's para virtualized SCSI HBA.
+	  To compile this driver as a module, choose M here: the
+	  module will be called vmw_pvscsi.
+
 config LIBFC
 	tristate "LibFC module"
 	select SCSI_FC_ATTRS
@@ -644,7 +679,7 @@ config FCOE
 config FCOE_FNIC
 	tristate "Cisco FNIC Driver"
 	depends on PCI && X86
-	select LIBFC
+	select LIBFCOE
 	help
 	  This is support for the Cisco PCI-Express FCoE HBA.
 
@@ -1818,6 +1853,14 @@ config SCSI_PMCRAID
 	---help---
 	  This driver supports the PMC SIERRA MaxRAID adapters.
 
+config SCSI_PM8001
+	tristate "PMC-Sierra SPC 8001 SAS/SATA Based Host Adapter driver"
+	depends on PCI && SCSI
+	select SCSI_SAS_LIBSAS
+	help
+	  This driver supports PMC-Sierra PCIE SAS/SATA 8x6G SPC 8001 chip
+	  based host adapters.
+
 config SCSI_SRP
 	tristate "SCSI RDMA Protocol helper library"
 	depends on SCSI && PCI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 3ad61db5e3fa..92a8c500b23d 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -70,6 +70,7 @@ obj-$(CONFIG_SCSI_AIC79XX) += aic7xxx/
 obj-$(CONFIG_SCSI_AACRAID)	+= aacraid/
 obj-$(CONFIG_SCSI_AIC7XXX_OLD)	+= aic7xxx_old.o
 obj-$(CONFIG_SCSI_AIC94XX)	+= aic94xx/
+obj-$(CONFIG_SCSI_PM8001)	+= pm8001/
 obj-$(CONFIG_SCSI_IPS)		+= ips.o
 obj-$(CONFIG_SCSI_FD_MCS)	+= fd_mcs.o
 obj-$(CONFIG_SCSI_FUTURE_DOMAIN)+= fdomain.o
@@ -90,6 +91,7 @@ obj-$(CONFIG_SCSI_BFA_FC) += bfa/
 obj-$(CONFIG_SCSI_PAS16)	+= pas16.o
 obj-$(CONFIG_SCSI_T128)		+= t128.o
 obj-$(CONFIG_SCSI_DMX3191D)	+= dmx3191d.o
+obj-$(CONFIG_SCSI_HPSA)		+= hpsa.o
 obj-$(CONFIG_SCSI_DTC3280)	+= dtc.o
 obj-$(CONFIG_SCSI_SYM53C8XX_2)	+= sym53c8xx_2/
 obj-$(CONFIG_SCSI_ZALON)	+= zalon7xx.o
@@ -113,6 +115,7 @@ obj-$(CONFIG_SCSI_MESH) += mesh.o
 obj-$(CONFIG_SCSI_MAC53C94)	+= mac53c94.o
 obj-$(CONFIG_BLK_DEV_3W_XXXX_RAID) += 3w-xxxx.o
 obj-$(CONFIG_SCSI_3W_9XXX)	+= 3w-9xxx.o
+obj-$(CONFIG_SCSI_3W_SAS)	+= 3w-sas.o
 obj-$(CONFIG_SCSI_PPA)		+= ppa.o
 obj-$(CONFIG_SCSI_IMM)		+= imm.o
 obj-$(CONFIG_JAZZ_ESP)		+= esp_scsi.o jazz_esp.o
@@ -133,6 +136,7 @@ obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgb3i/
 obj-$(CONFIG_SCSI_BNX2_ISCSI)	+= libiscsi.o bnx2i/
 obj-$(CONFIG_BE2ISCSI)		+= libiscsi.o be2iscsi/
 obj-$(CONFIG_SCSI_PMCRAID)	+= pmcraid.o
+obj-$(CONFIG_VMWARE_PVSCSI)	+= vmw_pvscsi.o
 
 obj-$(CONFIG_ARM)		+= arm/
 
diff --git a/drivers/scsi/NCR_D700.c b/drivers/scsi/NCR_D700.c
index 1cdf09a4779a..8647256ad66d 100644
--- a/drivers/scsi/NCR_D700.c
+++ b/drivers/scsi/NCR_D700.c
@@ -97,6 +97,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/mca.h>
+#include <linux/slab.h>
 #include <asm/io.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_device.h>
diff --git a/drivers/scsi/NCR_Q720.c b/drivers/scsi/NCR_Q720.c
index a8bbdc2273b8..afdbb9addf18 100644
--- a/drivers/scsi/NCR_Q720.c
+++ b/drivers/scsi/NCR_Q720.c
@@ -10,6 +10,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/mca.h>
+#include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/init.h>
 #include <linux/delay.h>
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index 208d6df9ed59..dbbc601948e5 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -69,7 +69,6 @@
 #include <linux/kernel.h>
 #include <linux/string.h>
 #include <linux/ioport.h>
-#include <linux/slab.h>
 #include <linux/dma-mapping.h>
 
 #include <asm/io.h>
@@ -492,7 +491,7 @@ static void init_alloc_map(struct orc_host * host)
  *	init_orchid - initialise the host adapter
  *	@host:host adapter to initialise
  *
- *	Initialise the controller and if neccessary load the firmware.
+ *	Initialise the controller and if necessary load the firmware.
  *
  *	Returns -1 if the initialisation fails.
  */
diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c
index 4b38c4750f77..d8fe5b76fee0 100644
--- a/drivers/scsi/a2091.c
+++ b/drivers/scsi/a2091.c
@@ -1,5 +1,6 @@
 #include <linux/types.h>
 #include <linux/mm.h>
+#include <linux/slab.h>
 #include <linux/blkdev.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c
index 6970ce82c4ac..c35fc55f1c96 100644
--- a/drivers/scsi/a3000.c
+++ b/drivers/scsi/a3000.c
@@ -1,5 +1,6 @@
 #include <linux/types.h>
 #include <linux/mm.h>
+#include <linux/slab.h>
 #include <linux/blkdev.h>
 #include <linux/ioport.h>
 #include <linux/init.h>
diff --git a/drivers/scsi/a4000t.c b/drivers/scsi/a4000t.c
index e3519fa5a3ba..11ae6be8aeaf 100644
--- a/drivers/scsi/a4000t.c
+++ b/drivers/scsi/a4000t.c
@@ -12,6 +12,7 @@
 #include <linux/platform_device.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
+#include <linux/slab.h>
 #include <asm/amigahw.h>
 #include <asm/amigaints.h>
 #include <scsi/scsi_host.h>
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 2a889853a106..7e26ebc26661 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -293,7 +293,10 @@ int aac_get_config_status(struct aac_dev *dev, int commit_flag)
 			status = -EINVAL;
 		}
 	}
-	aac_fib_complete(fibptr);
+	/* Do not set XferState to zero unless receives a response from F/W */
+	if (status >= 0)
+		aac_fib_complete(fibptr);
+
 	/* Send a CT_COMMIT_CONFIG to enable discovery of devices */
 	if (status >= 0) {
 		if ((aac_commit == 1) || commit_flag) {
@@ -310,13 +313,18 @@ int aac_get_config_status(struct aac_dev *dev, int commit_flag)
 					FsaNormal,
 					1, 1,
 					NULL, NULL);
-			aac_fib_complete(fibptr);
+			/* Do not set XferState to zero unless
+			 * receives a response from F/W */
+			if (status >= 0)
+				aac_fib_complete(fibptr);
 		} else if (aac_commit == 0) {
 			printk(KERN_WARNING
 			  "aac_get_config_status: Foreign device configurations are being ignored\n");
 		}
 	}
-	aac_fib_free(fibptr);
+	/* FIB should be freed only after getting the response from the F/W */
+	if (status != -ERESTARTSYS)
+		aac_fib_free(fibptr);
 	return status;
 }
 
@@ -355,7 +363,9 @@ int aac_get_containers(struct aac_dev *dev)
 		maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries);
 		aac_fib_complete(fibptr);
 	}
-	aac_fib_free(fibptr);
+	/* FIB should be freed only after getting the response from the F/W */
+	if (status != -ERESTARTSYS)
+		aac_fib_free(fibptr);
 
 	if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS)
 		maximum_num_containers = MAXIMUM_NUM_CONTAINERS;
@@ -1245,8 +1255,12 @@ int aac_get_adapter_info(struct aac_dev* dev)
 			NULL);
 
 	if (rcode < 0) {
-		aac_fib_complete(fibptr);
-		aac_fib_free(fibptr);
+		/* FIB should be freed only after
+		 * getting the response from the F/W */
+		if (rcode != -ERESTARTSYS) {
+			aac_fib_complete(fibptr);
+			aac_fib_free(fibptr);
+		}
 		return rcode;
 	}
 	memcpy(&dev->adapter_info, info, sizeof(*info));
@@ -1270,6 +1284,12 @@ int aac_get_adapter_info(struct aac_dev* dev)
 
 		if (rcode >= 0)
 			memcpy(&dev->supplement_adapter_info, sinfo, sizeof(*sinfo));
+		if (rcode == -ERESTARTSYS) {
+			fibptr = aac_fib_alloc(dev);
+			if (!fibptr)
+				return -ENOMEM;
+		}
+
 	}
 
 
@@ -1470,9 +1490,11 @@ int aac_get_adapter_info(struct aac_dev* dev)
 			(dev->scsi_host_ptr->sg_tablesize * 8) + 112;
 		}
 	}
-
-	aac_fib_complete(fibptr);
-	aac_fib_free(fibptr);
+	/* FIB should be freed only after getting the response from the F/W */
+	if (rcode != -ERESTARTSYS) {
+		aac_fib_complete(fibptr);
+		aac_fib_free(fibptr);
+	}
 
 	return rcode;
 }
@@ -1633,6 +1655,7 @@ static int aac_read(struct scsi_cmnd * scsicmd)
 	 *	Alocate and initialize a Fib
 	 */
 	if (!(cmd_fibcontext = aac_fib_alloc(dev))) {
+		printk(KERN_WARNING "aac_read: fib allocation failed\n");
 		return -1;
 	}
 
@@ -1712,9 +1735,14 @@ static int aac_write(struct scsi_cmnd * scsicmd)
 	 * Allocate and initialize a Fib then setup a BlockWrite command
 	 */
 	if (!(cmd_fibcontext = aac_fib_alloc(dev))) {
-		scsicmd->result = DID_ERROR << 16;
-		scsicmd->scsi_done(scsicmd);
-		return 0;
+		/* FIB temporarily unavailable,not catastrophic failure */
+
+		/* scsicmd->result = DID_ERROR << 16;
+		 * scsicmd->scsi_done(scsicmd);
+		 * return 0;
+		 */
+		printk(KERN_WARNING "aac_write: fib allocation failed\n");
+		return -1;
 	}
 
 	status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua);
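
Taken together, the aachba.c hunks enforce one lifetime rule: a FIB that the firmware may still own must be neither completed nor freed. aac_fib_send() (reworked in commsup.c below) returns -ERESTARTSYS when the sleeping caller is interrupted before the adapter answers, and the FIB is then left for the interrupt path to reclaim. A minimal sketch of the resulting calling convention, with a placeholder command and size and error handling only:

    /* Sketch of the FIB lifetime rule the hunks above enforce;
     * example_cmd and the size argument are placeholders. */
    static int example_sync_command(struct aac_dev *dev, u16 example_cmd)
    {
    	struct fib *fibptr = aac_fib_alloc(dev);
    	int status;

    	if (!fibptr)
    		return -ENOMEM;
    	aac_fib_init(fibptr);
    	/* ... build the request in the FIB ... */
    	status = aac_fib_send(example_cmd, fibptr, sizeof(struct hw_fib),
    			      FsaNormal, 1, 1, NULL, NULL);
    	if (status >= 0)
    		aac_fib_complete(fibptr);	/* only after a real F/W response */
    	if (status != -ERESTARTSYS)
    		aac_fib_free(fibptr);		/* -ERESTARTSYS: F/W still owns it */
    	return status;
    }
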
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index cdbdec9f4fb2..619c02d9c862 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -12,7 +12,7 @@
  *----------------------------------------------------------------------------*/
 
 #ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 2461
+# define AAC_DRIVER_BUILD 24702
 # define AAC_DRIVER_BRANCH "-ms"
 #endif
 #define MAXIMUM_NUM_CONTAINERS	32
@@ -526,10 +526,10 @@ struct aac_driver_ident
 
 /*
  *	The adapter interface specs all queues to be located in the same
- *	physically contigous block. The host structure that defines the
+ *	physically contiguous block. The host structure that defines the
  *	commuication queues will assume they are each a separate physically
- *	contigous memory region that will support them all being one big
- *	contigous block.
+ *	contiguous memory region that will support them all being one big
+ *	contiguous block.
  *	There is a command and response queue for each level and direction of
  *	commuication. These regions are accessed by both the host and adapter.
  */
@@ -1036,6 +1036,9 @@ struct aac_dev
 	u8			printf_enabled;
 	u8			in_reset;
 	u8			msi;
+	int			management_fib_count;
+	spinlock_t		manage_lock;
+
 };
 
 #define aac_adapter_interrupt(dev) \
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 0391d759dfdb..9c0c91178538 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -153,7 +153,7 @@ cleanup:
 		fibptr->hw_fib_pa = hw_fib_pa;
 		fibptr->hw_fib_va = hw_fib;
 	}
-	if (retval != -EINTR)
+	if (retval != -ERESTARTSYS)
 		aac_fib_free(fibptr);
 	return retval;
 }
@@ -322,7 +322,7 @@ return_fib:
 	}
 	if (f.wait) {
 		if(down_interruptible(&fibctx->wait_sem) < 0) {
-			status = -EINTR;
+			status = -ERESTARTSYS;
 		} else {
 			/* Lock again and retry */
 			spin_lock_irqsave(&dev->fib_lock, flags);
@@ -593,10 +593,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 			u64 addr;
 			void* p;
 			if (upsg->sg[i].count >
-			    (dev->adapter_info.options &
+			    ((dev->adapter_info.options &
 			     AAC_OPT_NEW_COMM) ?
 			      (dev->scsi_host_ptr->max_sectors << 9) :
-			      65536) {
+			      65536)) {
 				rcode = -EINVAL;
 				goto cleanup;
 			}
@@ -645,10 +645,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 			u64 addr;
 			void* p;
 			if (usg->sg[i].count >
-			    (dev->adapter_info.options &
+			    ((dev->adapter_info.options &
 			     AAC_OPT_NEW_COMM) ?
 			      (dev->scsi_host_ptr->max_sectors << 9) :
-			      65536) {
+			      65536)) {
 				rcode = -EINVAL;
 				goto cleanup;
 			}
@@ -695,10 +695,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 			uintptr_t addr;
 			void* p;
 			if (usg->sg[i].count >
-			    (dev->adapter_info.options &
+			    ((dev->adapter_info.options &
 			     AAC_OPT_NEW_COMM) ?
 			      (dev->scsi_host_ptr->max_sectors << 9) :
-			      65536) {
+			      65536)) {
 				rcode = -EINVAL;
 				goto cleanup;
 			}
@@ -734,10 +734,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 			dma_addr_t addr;
 			void* p;
 			if (upsg->sg[i].count >
-			    (dev->adapter_info.options &
+			    ((dev->adapter_info.options &
 			     AAC_OPT_NEW_COMM) ?
 			      (dev->scsi_host_ptr->max_sectors << 9) :
-			      65536) {
+			      65536)) {
 				rcode = -EINVAL;
 				goto cleanup;
 			}
@@ -772,8 +772,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 	psg->count = cpu_to_le32(sg_indx+1);
 	status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL);
 	}
-	if (status == -EINTR) {
-		rcode = -EINTR;
+	if (status == -ERESTARTSYS) {
+		rcode = -ERESTARTSYS;
 		goto cleanup;
 	}
 
@@ -810,7 +810,7 @@ cleanup:
 	for(i=0; i <= sg_indx; i++){
 		kfree(sg_list[i]);
 	}
-	if (rcode != -EINTR) {
+	if (rcode != -ERESTARTSYS) {
 		aac_fib_complete(srbfib);
 		aac_fib_free(srbfib);
 	}
@@ -848,7 +848,7 @@ int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg)
 	 */
 
 	status = aac_dev_ioctl(dev, cmd, arg);
-	if(status != -ENOTTY)
+	if (status != -ENOTTY)
 		return status;
 
 	switch (cmd) {
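
The s/-EINTR/-ERESTARTSYS/ substitutions here are not cosmetic: when an ioctl sleeps in down_interruptible() and a signal arrives, returning -ERESTARTSYS lets the kernel transparently restart the syscall after the signal is handled (surfacing EINTR to userspace only when that is the right answer), instead of unconditionally failing the management command. A minimal sketch of the wait pattern, assuming a semaphore-based completion as in this driver:

    /* Sketch: interruptible wait in an ioctl path.  Returning
     * -ERESTARTSYS (not -EINTR) lets signal delivery restart the
     * syscall instead of failing the command outright. */
    static int example_wait_for_fw(struct semaphore *event)
    {
    	if (down_interruptible(event))
    		return -ERESTARTSYS;	/* signal arrived first */
    	return 0;			/* firmware answered */
    }
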
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index d598eba630d0..a7261486ccd4 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -194,7 +194,9 @@ int aac_send_shutdown(struct aac_dev * dev)
 
 	if (status >= 0)
 		aac_fib_complete(fibctx);
-	aac_fib_free(fibctx);
+	/* FIB should be freed only after getting the response from the F/W */
+	if (status != -ERESTARTSYS)
+		aac_fib_free(fibctx);
 	return status;
 }
 
@@ -226,7 +228,7 @@ static int aac_comm_init(struct aac_dev * dev)
 	spin_lock_init(&dev->fib_lock);
 
 	/*
-	 *	Allocate the physically contigous space for the commuication
+	 *	Allocate the physically contiguous space for the commuication
 	 *	queue headers.
 	 */
 
@@ -304,6 +306,8 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
 	/*
 	 *	Check the preferred comm settings, defaults from template.
 	 */
+	dev->management_fib_count = 0;
+	spin_lock_init(&dev->manage_lock);
 	dev->max_fib_size = sizeof(struct hw_fib);
 	dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size
 		- sizeof(struct aac_fibhdr)
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 956261f25181..94d2954d79ae 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -189,7 +189,14 @@ struct fib *aac_fib_alloc(struct aac_dev *dev)
 
 void aac_fib_free(struct fib *fibptr)
 {
-	unsigned long flags;
+	unsigned long flags, flagsv;
+
+	spin_lock_irqsave(&fibptr->event_lock, flagsv);
+	if (fibptr->done == 2) {
+		spin_unlock_irqrestore(&fibptr->event_lock, flagsv);
+		return;
+	}
+	spin_unlock_irqrestore(&fibptr->event_lock, flagsv);
 
 	spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
 	if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
@@ -390,6 +397,8 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
 	struct hw_fib * hw_fib = fibptr->hw_fib_va;
 	unsigned long flags = 0;
 	unsigned long qflags;
+	unsigned long mflags = 0;
+
 
 	if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
 		return -EBUSY;
@@ -471,9 +480,31 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
 	if (!dev->queues)
 		return -EBUSY;
 
-	if(wait)
+	if (wait) {
+
+		spin_lock_irqsave(&dev->manage_lock, mflags);
+		if (dev->management_fib_count >= AAC_NUM_MGT_FIB) {
+			printk(KERN_INFO "No management Fibs Available:%d\n",
+				dev->management_fib_count);
+			spin_unlock_irqrestore(&dev->manage_lock, mflags);
+			return -EBUSY;
+		}
+		dev->management_fib_count++;
+		spin_unlock_irqrestore(&dev->manage_lock, mflags);
 		spin_lock_irqsave(&fibptr->event_lock, flags);
-	aac_adapter_deliver(fibptr);
+	}
+
+	if (aac_adapter_deliver(fibptr) != 0) {
+		printk(KERN_ERR "aac_fib_send: returned -EBUSY\n");
+		if (wait) {
+			spin_unlock_irqrestore(&fibptr->event_lock, flags);
+			spin_lock_irqsave(&dev->manage_lock, mflags);
+			dev->management_fib_count--;
+			spin_unlock_irqrestore(&dev->manage_lock, mflags);
+		}
+		return -EBUSY;
+	}
+
 
 	/*
 	 * If the caller wanted us to wait for response wait now.
@@ -516,14 +547,15 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
 				udelay(5);
 			}
 		} else if (down_interruptible(&fibptr->event_wait)) {
-			fibptr->done = 2;
-			up(&fibptr->event_wait);
+			/* Do nothing ... satisfy
+			 * down_interruptible must_check */
 		}
+
 		spin_lock_irqsave(&fibptr->event_lock, flags);
-		if ((fibptr->done == 0) || (fibptr->done == 2)) {
+		if (fibptr->done == 0) {
 			fibptr->done = 2; /* Tell interrupt we aborted */
 			spin_unlock_irqrestore(&fibptr->event_lock, flags);
-			return -EINTR;
+			return -ERESTARTSYS;
 		}
 		spin_unlock_irqrestore(&fibptr->event_lock, flags);
 		BUG_ON(fibptr->done == 0);
@@ -689,6 +721,7 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
 
 int aac_fib_complete(struct fib *fibptr)
 {
+	unsigned long flags;
 	struct hw_fib * hw_fib = fibptr->hw_fib_va;
 
 	/*
@@ -709,6 +742,13 @@ int aac_fib_complete(struct fib *fibptr)
 	 * command is complete that we had sent to the adapter and this
 	 * cdb could be reused.
 	 */
+	spin_lock_irqsave(&fibptr->event_lock, flags);
+	if (fibptr->done == 2) {
+		spin_unlock_irqrestore(&fibptr->event_lock, flags);
+		return 0;
+	}
+	spin_unlock_irqrestore(&fibptr->event_lock, flags);
+
 	if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
 	(hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)))
 	{
@@ -1355,7 +1395,10 @@ int aac_reset_adapter(struct aac_dev * aac, int forced)
 
 			if (status >= 0)
 				aac_fib_complete(fibctx);
-			aac_fib_free(fibctx);
+			/* FIB should be freed only after getting
+			 * the response from the F/W */
+			if (status != -ERESTARTSYS)
+				aac_fib_free(fibctx);
 		}
 	}
 
@@ -1759,6 +1802,7 @@ int aac_command_thread(void *data)
 		struct fib *fibptr;
 
 		if ((fibptr = aac_fib_alloc(dev))) {
+			int status;
 			__le32 *info;
 
 			aac_fib_init(fibptr);
@@ -1769,15 +1813,21 @@ int aac_command_thread(void *data)
 
 			*info = cpu_to_le32(now.tv_sec);
 
-			(void)aac_fib_send(SendHostTime,
+			status = aac_fib_send(SendHostTime,
 				fibptr,
 				sizeof(*info),
 				FsaNormal,
 				1, 1,
 				NULL,
 				NULL);
-			aac_fib_complete(fibptr);
-			aac_fib_free(fibptr);
+			/* Do not set XferState to zero unless
+			 * receives a response from F/W */
+			if (status >= 0)
+				aac_fib_complete(fibptr);
+			/* FIB should be freed only after
+			 * getting the response from the F/W */
+			if (status != -ERESTARTSYS)
+				aac_fib_free(fibptr);
 		}
 		difference = (long)(unsigned)update_interval*HZ;
 	} else {
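
aac_fib_send() now rations synchronous (waited-on) FIBs: a counter guarded by the new manage_lock is bumped before delivery, rolled back if the adapter refuses the request, and decremented by the response handlers in dpcsup.c. A sketch of that take/undo discipline, with AAC_NUM_MGT_FIB standing in for the quota:

    /* Sketch: bounded quota for in-flight management commands.
     * Take before delivering, undo on delivery failure; the ISR
     * releases a slot on completion (see dpcsup.c below). */
    static int example_take_mgt_slot(struct aac_dev *dev)
    {
    	unsigned long mflags;

    	spin_lock_irqsave(&dev->manage_lock, mflags);
    	if (dev->management_fib_count >= AAC_NUM_MGT_FIB) {
    		spin_unlock_irqrestore(&dev->manage_lock, mflags);
    		return -EBUSY;		/* quota exhausted */
    	}
    	dev->management_fib_count++;
    	spin_unlock_irqrestore(&dev->manage_lock, mflags);
    	return 0;
    }
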
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
index abc9ef5d1b10..9c7408fe8c7d 100644
--- a/drivers/scsi/aacraid/dpcsup.c
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -57,9 +57,9 @@ unsigned int aac_response_normal(struct aac_queue * q)
 	struct hw_fib * hwfib;
 	struct fib * fib;
 	int consumed = 0;
-	unsigned long flags;
+	unsigned long flags, mflags;
 
 	spin_lock_irqsave(q->lock, flags);
 	/*
 	 * Keep pulling response QEs off the response queue and waking
 	 * up the waiters until there are no more QEs. We then return
@@ -125,12 +125,21 @@ unsigned int aac_response_normal(struct aac_queue * q)
 		} else {
 			unsigned long flagv;
 			spin_lock_irqsave(&fib->event_lock, flagv);
-			if (!fib->done)
+			if (!fib->done) {
 				fib->done = 1;
 				up(&fib->event_wait);
+			}
 			spin_unlock_irqrestore(&fib->event_lock, flagv);
+
+			spin_lock_irqsave(&dev->manage_lock, mflags);
+			dev->management_fib_count--;
+			spin_unlock_irqrestore(&dev->manage_lock, mflags);
+
 			FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
 			if (fib->done == 2) {
+				spin_lock_irqsave(&fib->event_lock, flagv);
+				fib->done = 0;
+				spin_unlock_irqrestore(&fib->event_lock, flagv);
 				aac_fib_complete(fib);
 				aac_fib_free(fib);
 			}
@@ -232,6 +241,7 @@ unsigned int aac_command_normal(struct aac_queue *q)
 
 unsigned int aac_intr_normal(struct aac_dev * dev, u32 index)
 {
+	unsigned long mflags;
 	dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index));
 	if ((index & 0x00000002L)) {
 		struct hw_fib * hw_fib;
@@ -320,11 +330,25 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 index)
 			unsigned long flagv;
 			dprintk((KERN_INFO "event_wait up\n"));
 			spin_lock_irqsave(&fib->event_lock, flagv);
-			if (!fib->done)
+			if (!fib->done) {
 				fib->done = 1;
 				up(&fib->event_wait);
+			}
 			spin_unlock_irqrestore(&fib->event_lock, flagv);
+
+			spin_lock_irqsave(&dev->manage_lock, mflags);
+			dev->management_fib_count--;
+			spin_unlock_irqrestore(&dev->manage_lock, mflags);
+
 			FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
+			if (fib->done == 2) {
+				spin_lock_irqsave(&fib->event_lock, flagv);
+				fib->done = 0;
+				spin_unlock_irqrestore(&fib->event_lock, flagv);
+				aac_fib_complete(fib);
+				aac_fib_free(fib);
+			}
+
 		}
 		return 0;
 	}
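
These response handlers are the other half of two protocols introduced above: they release a management-FIB slot for every waited-on completion, and they adopt orphaned FIBs. fibptr->done == 2 marks a FIB whose waiter gave up (signal) while the firmware still owned it; when the response finally arrives, the interrupt path, not the long-gone caller, completes and frees it. A condensed sketch of that hand-off:

    /* Sketch: completion side of the interrupted-waiter hand-off.
     * done == 0: waiter present, wake it.  done == 2: waiter aborted,
     * so the ISR owns cleanup of the now-orphaned FIB. */
    static void example_complete(struct aac_dev *dev, struct fib *fib)
    {
    	unsigned long flagv;

    	spin_lock_irqsave(&fib->event_lock, flagv);
    	if (!fib->done) {
    		fib->done = 1;
    		up(&fib->event_wait);		/* wake the waiter */
    	}
    	spin_unlock_irqrestore(&fib->event_lock, flagv);

    	if (fib->done == 2) {			/* waiter bailed out */
    		spin_lock_irqsave(&fib->event_lock, flagv);
    		fib->done = 0;
    		spin_unlock_irqrestore(&fib->event_lock, flagv);
    		aac_fib_complete(fib);
    		aac_fib_free(fib);
    	}
    }
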
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 9b97c3e016fe..e9373a2d14fa 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -472,8 +472,12 @@ static int aac_slave_configure(struct scsi_device *sdev)
  * total capacity and the queue depth supported by the target device.
  */
 
-static int aac_change_queue_depth(struct scsi_device *sdev, int depth)
+static int aac_change_queue_depth(struct scsi_device *sdev, int depth,
+				  int reason)
 {
+	if (reason != SCSI_QDEPTH_DEFAULT)
+		return -EOPNOTSUPP;
+
 	if (sdev->tagged_supported && (sdev->type == TYPE_DISK) &&
 	    (sdev_channel(sdev) == CONTAINER_CHANNEL)) {
 		struct scsi_device * dev;
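
The extra int reason argument is the updated scsi_host_template ->change_queue_depth() signature from this kernel series: the midlayer can now request depth changes for different reasons (plain resize, QUEUE_FULL tracking, ramp-up), and a driver that only implements the plain case must reject the rest with -EOPNOTSUPP, as both aacraid here and arcmsr further down do. A conforming sketch, with a hypothetical depth cap:

    /* Sketch of the three-argument hook; EXAMPLE_MAX_DEPTH is made up. */
    static int example_change_queue_depth(struct scsi_device *sdev,
    				      int depth, int reason)
    {
    	if (reason != SCSI_QDEPTH_DEFAULT)
    		return -EOPNOTSUPP;	/* no ramp-up/QUEUE_FULL handling */

    	if (depth > EXAMPLE_MAX_DEPTH)
    		depth = EXAMPLE_MAX_DEPTH;
    	scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth);
    	return sdev->queue_depth;
    }
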
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index f70d9f8e79e5..04057ab72a8b 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -33,7 +33,6 @@
 #include <linux/types.h>
 #include <linux/pci.h>
 #include <linux/spinlock.h>
-#include <linux/slab.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/completion.h>
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index b6a3c5c187b6..622c21c68e65 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -33,7 +33,6 @@
 #include <linux/types.h>
 #include <linux/pci.h>
 #include <linux/spinlock.h>
-#include <linux/slab.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/completion.h>
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index b756041f0b26..7f87979da22d 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -4724,6 +4724,10 @@ static ushort AscInitMicroCodeVar(ASC_DVC_VAR *asc_dvc)
 	BUG_ON((unsigned long)asc_dvc->overrun_buf & 7);
 	asc_dvc->overrun_dma = dma_map_single(board->dev, asc_dvc->overrun_buf,
 					ASC_OVERRUN_BSIZE, DMA_FROM_DEVICE);
+	if (dma_mapping_error(board->dev, asc_dvc->overrun_dma)) {
+		warn_code = -ENOMEM;
+		goto err_dma_map;
+	}
 	phy_addr = cpu_to_le32(asc_dvc->overrun_dma);
 	AscMemDWordCopyPtrToLram(iop_base, ASCV_OVERRUN_PADDR_D,
 				 (uchar *)&phy_addr, 1);
@@ -4739,14 +4743,23 @@ static ushort AscInitMicroCodeVar(ASC_DVC_VAR *asc_dvc)
 	AscSetPCAddr(iop_base, ASC_MCODE_START_ADDR);
 	if (AscGetPCAddr(iop_base) != ASC_MCODE_START_ADDR) {
 		asc_dvc->err_code |= ASC_IERR_SET_PC_ADDR;
-		return warn_code;
+		warn_code = UW_ERR;
+		goto err_mcode_start;
 	}
 	if (AscStartChip(iop_base) != 1) {
 		asc_dvc->err_code |= ASC_IERR_START_STOP_CHIP;
-		return warn_code;
+		warn_code = UW_ERR;
+		goto err_mcode_start;
 	}
 
 	return warn_code;
+
+err_mcode_start:
+	dma_unmap_single(board->dev, asc_dvc->overrun_dma,
+			 ASC_OVERRUN_BSIZE, DMA_FROM_DEVICE);
+err_dma_map:
+	asc_dvc->overrun_dma = 0;
+	return warn_code;
 }
 
 static ushort AscInitAsc1000Driver(ASC_DVC_VAR *asc_dvc)
@@ -4781,12 +4794,14 @@ static ushort AscInitAsc1000Driver(ASC_DVC_VAR *asc_dvc)
 	if (err) {
 		printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
 		       fwname, err);
+		asc_dvc->err_code |= ASC_IERR_MCODE_CHKSUM;
 		return err;
 	}
 	if (fw->size < 4) {
 		printk(KERN_ERR "Bogus length %zu in image \"%s\"\n",
 		       fw->size, fwname);
 		release_firmware(fw);
+		asc_dvc->err_code |= ASC_IERR_MCODE_CHKSUM;
 		return -EINVAL;
 	}
 	chksum = (fw->data[3] << 24) | (fw->data[2] << 16) |
@@ -4800,6 +4815,8 @@ static ushort AscInitAsc1000Driver(ASC_DVC_VAR *asc_dvc)
 	}
 	release_firmware(fw);
 	warn_code |= AscInitMicroCodeVar(asc_dvc);
+	if (!asc_dvc->overrun_dma)
+		return warn_code;
 	asc_dvc->init_state |= ASC_INIT_STATE_END_LOAD_MC;
 	AscEnableInterrupt(iop_base);
 	return warn_code;
@@ -5110,12 +5127,14 @@ static int AdvInitAsc3550Driver(ADV_DVC_VAR *asc_dvc)
 	if (err) {
 		printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
 		       fwname, err);
+		asc_dvc->err_code = ASC_IERR_MCODE_CHKSUM;
 		return err;
 	}
 	if (fw->size < 4) {
 		printk(KERN_ERR "Bogus length %zu in image \"%s\"\n",
 		       fw->size, fwname);
 		release_firmware(fw);
+		asc_dvc->err_code = ASC_IERR_MCODE_CHKSUM;
 		return -EINVAL;
 	}
 	chksum = (fw->data[3] << 24) | (fw->data[2] << 16) |
@@ -5624,12 +5643,14 @@ static int AdvInitAsc38C0800Driver(ADV_DVC_VAR *asc_dvc)
 	if (err) {
 		printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
 		       fwname, err);
+		asc_dvc->err_code = ASC_IERR_MCODE_CHKSUM;
 		return err;
 	}
 	if (fw->size < 4) {
 		printk(KERN_ERR "Bogus length %zu in image \"%s\"\n",
 		       fw->size, fwname);
 		release_firmware(fw);
+		asc_dvc->err_code = ASC_IERR_MCODE_CHKSUM;
 		return -EINVAL;
 	}
 	chksum = (fw->data[3] << 24) | (fw->data[2] << 16) |
@@ -6124,12 +6145,14 @@ static int AdvInitAsc38C1600Driver(ADV_DVC_VAR *asc_dvc)
 	if (err) {
 		printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
 		       fwname, err);
+		asc_dvc->err_code = ASC_IERR_MCODE_CHKSUM;
 		return err;
 	}
 	if (fw->size < 4) {
 		printk(KERN_ERR "Bogus length %zu in image \"%s\"\n",
 		       fw->size, fwname);
 		release_firmware(fw);
+		asc_dvc->err_code = ASC_IERR_MCODE_CHKSUM;
 		return -EINVAL;
 	}
 	chksum = (fw->data[3] << 24) | (fw->data[2] << 16) |
@@ -7969,10 +7992,11 @@ static int advansys_reset(struct scsi_cmnd *scp)
 		ASC_DBG(1, "before AscInitAsc1000Driver()\n");
 		status = AscInitAsc1000Driver(asc_dvc);
 
-		/* Refer to ASC_IERR_* defintions for meaning of 'err_code'. */
-		if (asc_dvc->err_code) {
+		/* Refer to ASC_IERR_* definitions for meaning of 'err_code'. */
+		if (asc_dvc->err_code || !asc_dvc->overrun_dma) {
 			scmd_printk(KERN_INFO, scp, "SCSI bus reset error: "
-				    "0x%x\n", asc_dvc->err_code);
+				    "0x%x, status: 0x%x\n", asc_dvc->err_code,
+				    status);
 			ret = FAILED;
 		} else if (status) {
 			scmd_printk(KERN_INFO, scp, "SCSI bus reset warning: "
@@ -12303,7 +12327,7 @@ static int __devinit advansys_board_found(struct Scsi_Host *shost,
 		asc_dvc_varp->overrun_buf = kzalloc(ASC_OVERRUN_BSIZE, GFP_KERNEL);
 		if (!asc_dvc_varp->overrun_buf) {
 			ret = -ENOMEM;
-			goto err_free_wide_mem;
+			goto err_free_irq;
 		}
 		warn_code = AscInitAsc1000Driver(asc_dvc_varp);
 
@@ -12312,30 +12336,36 @@ static int __devinit advansys_board_found(struct Scsi_Host *shost,
 				   "warn 0x%x, error 0x%x\n",
 				   asc_dvc_varp->init_state, warn_code,
 				   asc_dvc_varp->err_code);
-			if (asc_dvc_varp->err_code) {
+			if (!asc_dvc_varp->overrun_dma) {
 				ret = -ENODEV;
-				kfree(asc_dvc_varp->overrun_buf);
+				goto err_free_mem;
 			}
 		}
 	} else {
-		if (advansys_wide_init_chip(shost))
+		if (advansys_wide_init_chip(shost)) {
 			ret = -ENODEV;
+			goto err_free_mem;
+		}
 	}
 
-	if (ret)
-		goto err_free_wide_mem;
-
 	ASC_DBG_PRT_SCSI_HOST(2, shost);
 
 	ret = scsi_add_host(shost, boardp->dev);
 	if (ret)
-		goto err_free_wide_mem;
+		goto err_free_mem;
 
 	scsi_scan_host(shost);
 	return 0;
 
- err_free_wide_mem:
-	advansys_wide_free_mem(boardp);
+ err_free_mem:
+	if (ASC_NARROW_BOARD(boardp)) {
+		if (asc_dvc_varp->overrun_dma)
+			dma_unmap_single(boardp->dev, asc_dvc_varp->overrun_dma,
+					 ASC_OVERRUN_BSIZE, DMA_FROM_DEVICE);
+		kfree(asc_dvc_varp->overrun_buf);
+	} else
+		advansys_wide_free_mem(boardp);
+ err_free_irq:
 	free_irq(boardp->irq, shost);
  err_free_dma:
 #ifdef CONFIG_ISA
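
The advansys hunks close two holes at once: the overrun buffer's dma_map_single() result was never validated, and several init/probe error paths leaked the buffer or its mapping. The rewritten paths route every failure through ordered labels (err_mcode_start/err_dma_map, err_free_mem/err_free_irq) that undo exactly what was established. The core idiom, sketched with placeholder names:

    #include <linux/dma-mapping.h>

    #define EXAMPLE_BUF_SIZE 64	/* stands in for ASC_OVERRUN_BSIZE */

    /* Sketch: check the mapping before use; unwind in reverse order. */
    static int example_map(struct device *dev, void *buf, dma_addr_t *out)
    {
    	dma_addr_t handle = dma_map_single(dev, buf, EXAMPLE_BUF_SIZE,
    					   DMA_FROM_DEVICE);

    	if (dma_mapping_error(dev, handle))
    		return -ENOMEM;		/* handle must never be used */
    	*out = handle;
    	return 0;
    }

    static void example_unmap(struct device *dev, dma_addr_t handle)
    {
    	dma_unmap_single(dev, handle, EXAMPLE_BUF_SIZE, DMA_FROM_DEVICE);
    }
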
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index 1e5478abd90e..8eab8587ff21 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -254,6 +254,7 @@
 #include <linux/spinlock.h>
 #include <linux/workqueue.h>
 #include <linux/list.h>
+#include <linux/slab.h>
 #include <scsi/scsicam.h>
 
 #include "scsi.h"
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index 80594947c6f6..2a8cf137f609 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -39,6 +39,7 @@
 #include <linux/blkdev.h>
 #include <linux/mca.h>
 #include <linux/mca-legacy.h>
+#include <linux/slab.h>
 
 #include <asm/dma.h>
 #include <asm/system.h>
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index 538135783aab..0107a4cc3331 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -50,6 +50,7 @@
 #include <linux/device.h>
 #include <linux/eisa.h>
 #include <linux/dma-mapping.h>
+#include <linux/gfp.h>
 
 #include <asm/dma.h>
 #include <asm/system.h>
diff --git a/drivers/scsi/aic7xxx/aic79xx.seq b/drivers/scsi/aic7xxx/aic79xx.seq
index 58bc17591b54..2fb78e35a9e5 100644
--- a/drivers/scsi/aic7xxx/aic79xx.seq
+++ b/drivers/scsi/aic7xxx/aic79xx.seq
@@ -217,7 +217,7 @@ BEGIN_CRITICAL;
 scbdma_tohost_done:
 	test	CCSCBCTL, CCARREN jz fill_qoutfifo_dmadone;
 	/*
-	 * An SCB has been succesfully uploaded to the host.
+	 * An SCB has been successfully uploaded to the host.
 	 * If the SCB was uploaded for some reason other than
 	 * bad SCSI status (currently only for underruns), we
 	 * queue the SCB for normal completion. Otherwise, we
@@ -1281,7 +1281,7 @@ END_CRITICAL;
 * Is it a disconnect message? Set a flag in the SCB to remind us
 * and await the bus going free. If this is an untagged transaction
 * store the SCB id for it in our untagged target table for lookup on
-* a reselction.
+* a reselection.
 */
 mesgin_disconnect:
 	/*
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index 63b521d615f2..78971db5b60e 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -2487,7 +2487,7 @@ ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat)
 		/*
 		 * Although the driver does not care about the
 		 * 'Selection in Progress' status bit, the busy
-		 * LED does. SELINGO is only cleared by a sucessfull
+		 * LED does. SELINGO is only cleared by a successfull
 		 * selection, so we must manually clear it to insure
 		 * the LED turns off just incase no future successful
 		 * selections occur (e.g. no devices on the bus).
@@ -3171,13 +3171,16 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
 			tinfo->curr.transport_version = 2;
 			tinfo->goal.transport_version = 2;
 			tinfo->goal.ppr_options = 0;
-			/*
-			 * Remove any SCBs in the waiting for selection
-			 * queue that may also be for this target so
-			 * that command ordering is preserved.
-			 */
-			ahd_freeze_devq(ahd, scb);
-			ahd_qinfifo_requeue_tail(ahd, scb);
+			if (scb != NULL) {
+				/*
+				 * Remove any SCBs in the waiting
+				 * for selection queue that may
+				 * also be for this target so that
+				 * command ordering is preserved.
+				 */
+				ahd_freeze_devq(ahd, scb);
+				ahd_qinfifo_requeue_tail(ahd, scb);
+			}
 			printerror = 0;
 		}
 	} else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, FALSE)
@@ -3194,13 +3197,16 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
 				      MSG_EXT_WDTR_BUS_8_BIT,
 				      AHD_TRANS_CUR|AHD_TRANS_GOAL,
 				      /*paused*/TRUE);
-			/*
-			 * Remove any SCBs in the waiting for selection
-			 * queue that may also be for this target so that
-			 * command ordering is preserved.
-			 */
-			ahd_freeze_devq(ahd, scb);
-			ahd_qinfifo_requeue_tail(ahd, scb);
+			if (scb != NULL) {
+				/*
+				 * Remove any SCBs in the waiting for
+				 * selection queue that may also be for
+				 * this target so that command ordering
+				 * is preserved.
+				 */
+				ahd_freeze_devq(ahd, scb);
+				ahd_qinfifo_requeue_tail(ahd, scb);
+			}
 			printerror = 0;
 		} else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, FALSE)
 			&& ppr_busfree == 0) {
@@ -3217,13 +3223,16 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
 				    /*ppr_options*/0,
 				    AHD_TRANS_CUR|AHD_TRANS_GOAL,
 				    /*paused*/TRUE);
-			/*
-			 * Remove any SCBs in the waiting for selection
-			 * queue that may also be for this target so that
-			 * command ordering is preserved.
-			 */
-			ahd_freeze_devq(ahd, scb);
-			ahd_qinfifo_requeue_tail(ahd, scb);
+			if (scb != NULL) {
+				/*
+				 * Remove any SCBs in the waiting for
+				 * selection queue that may also be for
+				 * this target so that command ordering
+				 * is preserved.
+				 */
+				ahd_freeze_devq(ahd, scb);
+				ahd_qinfifo_requeue_tail(ahd, scb);
+			}
 			printerror = 0;
 		} else if ((ahd->msg_flags & MSG_FLAG_EXPECT_IDE_BUSFREE) != 0
 			&& ahd_sent_msg(ahd, AHDMSG_1B,
@@ -3251,7 +3260,7 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
 	 * the message phases. We check it last in case we
 	 * had to send some other message that caused a busfree.
 	 */
-	if (printerror != 0
+	if (scb != NULL && printerror != 0
 	 && (lastphase == P_MESGIN || lastphase == P_MESGOUT)
 	 && ((ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0)) {
 
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 75b23317bd26..4c41332a354b 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -53,6 +53,7 @@ static struct scsi_transport_template *ahd_linux_transport_template = NULL;
 #include <linux/blkdev.h>		/* For block_size() */
 #include <linux/delay.h>	/* For ssleep/msleep */
 #include <linux/device.h>
+#include <linux/slab.h>
 
 /*
  * Bucket size for counting good commands in between bad ones.
@@ -2335,7 +2336,7 @@ ahd_linux_queue_abort_cmd(struct scsi_cmnd *cmd)
 		/*
 		 * The sequencer will never re-reference the
 		 * in-core SCB. To make sure we are notified
-		 * during reslection, set the MK_MESSAGE flag in
+		 * during reselection, set the MK_MESSAGE flag in
 		 * the card's copy of the SCB.
 		 */
 		ahd_outb(ahd, SCB_CONTROL,
diff --git a/drivers/scsi/aic7xxx/aic7xxx.seq b/drivers/scsi/aic7xxx/aic7xxx.seq
index 15196390e28d..5a4cfc954a9f 100644
--- a/drivers/scsi/aic7xxx/aic7xxx.seq
+++ b/drivers/scsi/aic7xxx/aic7xxx.seq
@@ -1693,7 +1693,7 @@ if ((ahc->flags & AHC_INITIATORROLE) != 0) {
 * Is it a disconnect message? Set a flag in the SCB to remind us
 * and await the bus going free. If this is an untagged transaction
 * store the SCB id for it in our untagged target table for lookup on
-* a reselction.
+* a reselection.
 */
 mesgin_disconnect:
 	/*
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index 8dfb59d58992..45aa728a76b2 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -1733,7 +1733,7 @@ ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
 	/*
 	 * Although the driver does not care about the
 	 * 'Selection in Progress' status bit, the busy
-	 * LED does. SELINGO is only cleared by a sucessfull
+	 * LED does. SELINGO is only cleared by a successfull
 	 * selection, so we must manually clear it to insure
 	 * the LED turns off just incase no future successful
 	 * selections occur (e.g. no devices on the bus).
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index fd2b9785ff4f..5e42dac23505 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -129,6 +129,7 @@ static struct scsi_transport_template *ahc_linux_transport_template = NULL;
 #include <linux/mm.h>		/* For fetching system memory size */
 #include <linux/blkdev.h>		/* For block_size() */
 #include <linux/delay.h>	/* For ssleep/msleep */
+#include <linux/slab.h>
 
 
 /*
@@ -2290,7 +2291,7 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
 			 * In the non-paging case, the sequencer will
 			 * never re-reference the in-core SCB.
 			 * To make sure we are notified during
-			 * reslection, set the MK_MESSAGE flag in
+			 * reselection, set the MK_MESSAGE flag in
 			 * the card's copy of the SCB.
 			 */
 			if ((ahc->flags & AHC_PAGESCBS) == 0) {
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c
index eb9dc3195fdf..81b736c76fff 100644
--- a/drivers/scsi/aic94xx/aic94xx_hwi.c
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.c
@@ -25,6 +25,7 @@
  */
 
 #include <linux/pci.h>
+#include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/firmware.h>
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 996f7224f90e..24ac2315c5c7 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -30,6 +30,7 @@
 #include <linux/pci.h>
 #include <linux/delay.h>
 #include <linux/firmware.h>
+#include <linux/slab.h>
 
 #include <scsi/scsi_host.h>
 
diff --git a/drivers/scsi/aic94xx/aic94xx_reg_def.h b/drivers/scsi/aic94xx/aic94xx_reg_def.h
index a43e8cdf4ee4..28aaf349c111 100644
--- a/drivers/scsi/aic94xx/aic94xx_reg_def.h
+++ b/drivers/scsi/aic94xx/aic94xx_reg_def.h
@@ -1,5 +1,5 @@
 /*
- * Aic94xx SAS/SATA driver hardware registers defintions.
+ * Aic94xx SAS/SATA driver hardware registers definitions.
  *
  * Copyright (C) 2004 Adaptec, Inc. All rights reserved.
  * Copyright (C) 2004 David Chaw <david_chaw@adaptec.com>
diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c
index ca55013b6ae5..c43698b1cb64 100644
--- a/drivers/scsi/aic94xx/aic94xx_scb.c
+++ b/drivers/scsi/aic94xx/aic94xx_scb.c
@@ -24,6 +24,7 @@
  *
  */
 
+#include <linux/gfp.h>
 #include <scsi/scsi_host.h>
 
 #include "aic94xx.h"
diff --git a/drivers/scsi/aic94xx/aic94xx_sds.c b/drivers/scsi/aic94xx/aic94xx_sds.c
index 8630a75b2872..edb43fda9f36 100644
--- a/drivers/scsi/aic94xx/aic94xx_sds.c
+++ b/drivers/scsi/aic94xx/aic94xx_sds.c
@@ -26,6 +26,7 @@
  */
 
 #include <linux/pci.h>
+#include <linux/slab.h>
 #include <linux/delay.h>
 
 #include "aic94xx.h"
diff --git a/drivers/scsi/aic94xx/aic94xx_seq.c b/drivers/scsi/aic94xx/aic94xx_seq.c
index 8f98e33155e9..d01dcc62b39a 100644
--- a/drivers/scsi/aic94xx/aic94xx_seq.c
+++ b/drivers/scsi/aic94xx/aic94xx_seq.c
@@ -27,6 +27,7 @@
  */
 
 #include <linux/delay.h>
+#include <linux/gfp.h>
 #include <linux/pci.h>
 #include <linux/module.h>
 #include <linux/firmware.h>
diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c
index 78eb86fc6276..0add73bdf2a4 100644
--- a/drivers/scsi/aic94xx/aic94xx_tmf.c
+++ b/drivers/scsi/aic94xx/aic94xx_tmf.c
@@ -25,6 +25,7 @@
  */
 
 #include <linux/spinlock.h>
+#include <linux/gfp.h>
 #include "aic94xx.h"
 #include "aic94xx_sas.h"
 #include "aic94xx_hwi.h"
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 80aac01b5a6f..ffbe2192da3c 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -58,6 +58,7 @@
 #include <linux/timer.h>
 #include <linux/pci.h>
 #include <linux/aer.h>
+#include <linux/slab.h>
 #include <asm/dma.h>
 #include <asm/io.h>
 #include <asm/system.h>
@@ -98,8 +99,11 @@ static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb);
 static const char *arcmsr_info(struct Scsi_Host *);
 static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
 static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev,
-					  int queue_depth)
+					  int queue_depth, int reason)
 {
+	if (reason != SCSI_QDEPTH_DEFAULT)
+		return -EOPNOTSUPP;
+
 	if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
 		queue_depth = ARCMSR_MAX_CMD_PERLUN;
 	scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index 477542602284..9e71ac611146 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -2516,7 +2516,7 @@ int fas216_eh_device_reset(struct scsi_cmnd *SCpnt)
 	if (info->scsi.phase == PHASE_IDLE)
 		fas216_kick(info);
 
-	mod_timer(&info->eh_timer, 30 * HZ);
+	mod_timer(&info->eh_timer, jiffies + 30 * HZ);
 	spin_unlock_irqrestore(&info->host_lock, flags);
 
 	/*
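
The fas216 change is a classic mod_timer() pitfall: the function takes an absolute expiry time in jiffies, so passing a bare 30 * HZ asked for "30 seconds after boot", which on a long-running system is already in the past and fires the error-handler timer immediately. Sketch:

    #include <linux/jiffies.h>
    #include <linux/timer.h>

    /* mod_timer() wants an *absolute* jiffies value. */
    static void example_arm_timeout(struct timer_list *t)
    {
    	/* mod_timer(t, 30 * HZ);           WRONG: ~30s after boot */
    	mod_timer(t, jiffies + 30 * HZ);	/* right: 30s from now */
    }
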
diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c
index 4240b05aef6d..158ebc3644d8 100644
--- a/drivers/scsi/atari_NCR5380.c
+++ b/drivers/scsi/atari_NCR5380.c
@@ -651,6 +651,7 @@ static inline void NCR5380_print_phase(struct Scsi_Host *instance)
  * interrupt or bottom half.
  */
 
+#include <linux/gfp.h>
 #include <linux/workqueue.h>
 #include <linux/interrupt.h>
 
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index b137e561f5bc..ab5bdda6903e 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -29,6 +29,7 @@
 #include <linux/pci.h>
 #include <linux/blkdev.h>
 #include <linux/dma-mapping.h>
+#include <linux/slab.h>
 #include <asm/system.h>
 #include <asm/io.h>
 
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index b36020dcf012..136b49cea791 100644
--- a/drivers/scsi/be2iscsi/be.h
+++ b/drivers/scsi/be2iscsi/be.h
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2009 ServerEngines 2 * Copyright (C) 2005 - 2010 ServerEngines
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -20,8 +20,14 @@
20 20
21#include <linux/pci.h> 21#include <linux/pci.h>
22#include <linux/if_vlan.h> 22#include <linux/if_vlan.h>
23 23#include <linux/blk-iopoll.h>
24#define FW_VER_LEN 32 24#define FW_VER_LEN 32
25#define MCC_Q_LEN 128
26#define MCC_CQ_LEN 256
27#define MAX_MCC_CMD 16
28/* BladeEngine Generation numbers */
29#define BE_GEN2 2
30#define BE_GEN3 3
25 31
26struct be_dma_mem { 32struct be_dma_mem {
27 void *va; 33 void *va;
@@ -55,6 +61,11 @@ static inline void *queue_head_node(struct be_queue_info *q)
55 return q->dma_mem.va + q->head * q->entry_size; 61 return q->dma_mem.va + q->head * q->entry_size;
56} 62}
57 63
64static inline void *queue_get_wrb(struct be_queue_info *q, unsigned int wrb_num)
65{
66 return q->dma_mem.va + wrb_num * q->entry_size;
67}
68
58static inline void *queue_tail_node(struct be_queue_info *q) 69static inline void *queue_tail_node(struct be_queue_info *q)
59{ 70{
60 return q->dma_mem.va + q->tail * q->entry_size; 71 return q->dma_mem.va + q->tail * q->entry_size;
@@ -74,18 +85,14 @@ static inline void queue_tail_inc(struct be_queue_info *q)
74 85
75struct be_eq_obj { 86struct be_eq_obj {
76 struct be_queue_info q; 87 struct be_queue_info q;
77 char desc[32]; 88 struct beiscsi_hba *phba;
78 89 struct be_queue_info *cq;
79 /* Adaptive interrupt coalescing (AIC) info */ 90 struct blk_iopoll iopoll;
80 bool enable_aic;
81 u16 min_eqd; /* in usecs */
82 u16 max_eqd; /* in usecs */
83 u16 cur_eqd; /* in usecs */
84}; 91};
85 92
86struct be_mcc_obj { 93struct be_mcc_obj {
87 struct be_queue_info *q; 94 struct be_queue_info q;
88 struct be_queue_info *cq; 95 struct be_queue_info cq;
89}; 96};
90 97
91struct be_ctrl_info { 98struct be_ctrl_info {
@@ -106,15 +113,19 @@ struct be_ctrl_info {
106 spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */ 113 spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
107 spinlock_t mcc_cq_lock; 114 spinlock_t mcc_cq_lock;
108 115
109 /* MCC Async callback */ 116 wait_queue_head_t mcc_wait[MAX_MCC_CMD + 1];
110 void (*async_cb) (void *adapter, bool link_up); 117 unsigned int mcc_tag[MAX_MCC_CMD];
111 void *adapter_ctxt; 118 unsigned int mcc_numtag[MAX_MCC_CMD + 1];
119 unsigned short mcc_alloc_index;
120 unsigned short mcc_free_index;
121 unsigned int mcc_tag_available;
112}; 122};
113 123
114#include "be_cmds.h" 124#include "be_cmds.h"
115 125
116#define PAGE_SHIFT_4K 12 126#define PAGE_SHIFT_4K 12
117#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K) 127#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
128#define mcc_timeout 120000 /* 12s timeout (120000 polls of 100us) */
118 129
119/* Returns number of pages spanned by the data starting at the given addr */ 130/* Returns number of pages spanned by the data starting at the given addr */
120#define PAGES_4K_SPANNED(_address, size) \ 131#define PAGES_4K_SPANNED(_address, size) \
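/*
 * The macro body is cut off by the hunk context above; the usual form
 * of this computation (a sketch, not necessarily the driver's exact
 * text) rounds the byte span [addr, addr + size) up to whole 4K pages:
 */
#define SKETCH_PAGES_4K_SPANNED(_address, size)				\
	((u32)((((unsigned long)(_address) & (PAGE_SIZE_4K - 1)) +	\
		(size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K))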
@@ -176,8 +187,4 @@ static inline void swap_dws(void *wrb, int len)
176 } while (len); 187 } while (len);
177#endif /* __BIG_ENDIAN */ 188#endif /* __BIG_ENDIAN */
178} 189}
179
180extern void beiscsi_cq_notify(struct be_ctrl_info *ctrl, u16 qid, bool arm,
181 u16 num_popped);
182
183#endif /* BEISCSI_H */ 190#endif /* BEISCSI_H */
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index 08007b6e42df..cda6642c7368 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2009 ServerEngines 2 * Copyright (C) 2005 - 2010 ServerEngines
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -19,6 +19,55 @@
19#include "be_mgmt.h" 19#include "be_mgmt.h"
20#include "be_main.h" 20#include "be_main.h"
21 21
22void be_mcc_notify(struct beiscsi_hba *phba)
23{
24 struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
25 u32 val = 0;
26
27 val |= mccq->id & DB_MCCQ_RING_ID_MASK;
28 val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
29 iowrite32(val, phba->db_va + DB_MCCQ_OFFSET);
30}
31
32unsigned int alloc_mcc_tag(struct beiscsi_hba *phba)
33{
34 unsigned int tag = 0;
35
36 if (phba->ctrl.mcc_tag_available) {
37 tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index];
38 phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0;
39 phba->ctrl.mcc_numtag[tag] = 0;
40 }
41 if (tag) {
42 phba->ctrl.mcc_tag_available--;
43 if (phba->ctrl.mcc_alloc_index == (MAX_MCC_CMD - 1))
44 phba->ctrl.mcc_alloc_index = 0;
45 else
46 phba->ctrl.mcc_alloc_index++;
47 }
48 return tag;
49}
50
51void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag)
52{
53 spin_lock(&ctrl->mbox_lock);
54 tag = tag & 0x000000FF;
55 ctrl->mcc_tag[ctrl->mcc_free_index] = tag;
56 if (ctrl->mcc_free_index == (MAX_MCC_CMD - 1))
57 ctrl->mcc_free_index = 0;
58 else
59 ctrl->mcc_free_index++;
60 ctrl->mcc_tag_available++;
61 spin_unlock(&ctrl->mbox_lock);
62}
63
64bool is_link_state_evt(u32 trailer)
65{
66 return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
67 ASYNC_TRAILER_EVENT_CODE_MASK) ==
68 ASYNC_EVENT_CODE_LINK_STATE);
69}
70
22static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl) 71static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
23{ 72{
24 if (compl->flags != 0) { 73 if (compl->flags != 0) {
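/*
 * Condensed model of the tag pool alloc_mcc_tag()/free_mcc_tag()
 * implement (struct and function names are illustrative): MAX_MCC_CMD
 * tags cycle through a fixed array with separate allocate and free
 * cursors, and tag 0 is reserved to mean "no tag available".  The
 * driver takes mbox_lock on the free path; locking is omitted here.
 */
struct mcc_tag_pool {
	unsigned int tag[MAX_MCC_CMD];
	unsigned short alloc_index, free_index;
	unsigned int available;
};

static unsigned int pool_alloc(struct mcc_tag_pool *p)
{
	unsigned int tag = 0;

	if (p->available)
		tag = p->tag[p->alloc_index];
	if (tag) {				/* slot held a live tag */
		p->tag[p->alloc_index] = 0;
		p->available--;
		p->alloc_index = (p->alloc_index + 1) % MAX_MCC_CMD;
	}
	return tag;
}

static void pool_free(struct mcc_tag_pool *p, unsigned int tag)
{
	p->tag[p->free_index] = tag & 0xFF;	/* low byte is the tag */
	p->free_index = (p->free_index + 1) % MAX_MCC_CMD;
	p->available++;
}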
@@ -54,13 +103,74 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
54 return 0; 103 return 0;
55} 104}
56 105
57static inline bool is_link_state_evt(u32 trailer) 106int be_mcc_compl_process_isr(struct be_ctrl_info *ctrl,
107 struct be_mcc_compl *compl)
58{ 108{
59 return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) & 109 u16 compl_status, extd_status;
60 ASYNC_TRAILER_EVENT_CODE_MASK) == ASYNC_EVENT_CODE_LINK_STATE); 110 unsigned short tag;
111
112 be_dws_le_to_cpu(compl, 4);
113
114 compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
115 CQE_STATUS_COMPL_MASK;
116 /* The ctrl.mcc_numtag[tag] is filled with
117 * [31] = valid, [30:24] = Rsvd, [23:16] = wrb, [15:8] = extd_status,
118 * [7:0] = compl_status
119 */
120 tag = (compl->tag0 & 0x000000FF);
121 extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
122 CQE_STATUS_EXTD_MASK;
123
124 ctrl->mcc_numtag[tag] = 0x80000000;
125 ctrl->mcc_numtag[tag] |= (compl->tag0 & 0x00FF0000);
126 ctrl->mcc_numtag[tag] |= (extd_status & 0x000000FF) << 8;
127 ctrl->mcc_numtag[tag] |= (compl_status & 0x000000FF);
128 wake_up_interruptible(&ctrl->mcc_wait[tag]);
129 return 0;
130}
131
132static struct be_mcc_compl *be_mcc_compl_get(struct beiscsi_hba *phba)
133{
134 struct be_queue_info *mcc_cq = &phba->ctrl.mcc_obj.cq;
135 struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
136
137 if (be_mcc_compl_is_new(compl)) {
138 queue_tail_inc(mcc_cq);
139 return compl;
140 }
141 return NULL;
142}
143
144static void be2iscsi_fail_session(struct iscsi_cls_session *cls_session)
145{
146 iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
61} 147}
62 148
63void beiscsi_cq_notify(struct be_ctrl_info *ctrl, u16 qid, bool arm, 149void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
150 struct be_async_event_link_state *evt)
151{
152 switch (evt->port_link_status) {
153 case ASYNC_EVENT_LINK_DOWN:
154 SE_DEBUG(DBG_LVL_1, "Link Down on Physical Port %d \n",
155 evt->physical_port);
156 phba->state |= BE_ADAPTER_LINK_DOWN;
157 iscsi_host_for_each_session(phba->shost,
158 be2iscsi_fail_session);
159 break;
160 case ASYNC_EVENT_LINK_UP:
161 phba->state = BE_ADAPTER_UP;
162 SE_DEBUG(DBG_LVL_1, "Link UP on Physical Port %d \n",
163 evt->physical_port);
164 break;
165 default:
166 SE_DEBUG(DBG_LVL_1, "Unexpected Async Notification %d on"
167 "Physical Port %d \n",
168 evt->port_link_status,
169 evt->physical_port);
170 }
171}
172
173static void beiscsi_cq_notify(struct beiscsi_hba *phba, u16 qid, bool arm,
64 u16 num_popped) 174 u16 num_popped)
65{ 175{
66 u32 val = 0; 176 u32 val = 0;
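/*
 * Sketch of decoding the packed word be_mcc_compl_process_isr() stores
 * in ctrl->mcc_numtag[tag] (helper name is hypothetical):
 * bit 31 = valid, bits 23:16 = wrb, bits 15:8 = extd_status,
 * bits 7:0 = compl_status.
 */
static inline void mcc_numtag_unpack(unsigned int word,
				     unsigned int *wrb_num,
				     unsigned short *extd_status,
				     unsigned short *status)
{
	*wrb_num     = (word & 0x00FF0000) >> 16;
	*extd_status = (word & 0x0000FF00) >> 8;
	*status      =  word & 0x000000FF;
}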
@@ -68,7 +178,69 @@ void beiscsi_cq_notify(struct be_ctrl_info *ctrl, u16 qid, bool arm,
68 if (arm) 178 if (arm)
69 val |= 1 << DB_CQ_REARM_SHIFT; 179 val |= 1 << DB_CQ_REARM_SHIFT;
70 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT; 180 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
71 iowrite32(val, ctrl->db + DB_CQ_OFFSET); 181 iowrite32(val, phba->db_va + DB_CQ_OFFSET);
182}
183
184
185int beiscsi_process_mcc(struct beiscsi_hba *phba)
186{
187 struct be_mcc_compl *compl;
188 int num = 0, status = 0;
189 struct be_ctrl_info *ctrl = &phba->ctrl;
190
191 spin_lock_bh(&phba->ctrl.mcc_cq_lock);
192 while ((compl = be_mcc_compl_get(phba))) {
193 if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
194 /* Interpret flags as an async trailer */
195 if (is_link_state_evt(compl->flags))
196 /* Interpret compl as an async link evt */
197 beiscsi_async_link_state_process(phba,
198 (struct be_async_event_link_state *) compl);
199 else
200 SE_DEBUG(DBG_LVL_1,
201 " Unsupported Async Event, flags"
202 " = 0x%08x \n", compl->flags);
203
204 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
205 status = be_mcc_compl_process(ctrl, compl);
206 atomic_dec(&phba->ctrl.mcc_obj.q.used);
207 }
208 be_mcc_compl_use(compl);
209 num++;
210 }
211
212 if (num)
213 beiscsi_cq_notify(phba, phba->ctrl.mcc_obj.cq.id, true, num);
214
215 spin_unlock_bh(&phba->ctrl.mcc_cq_lock);
216 return status;
217}
218
219/* Wait till no pending mcc requests remain */
220static int be_mcc_wait_compl(struct beiscsi_hba *phba)
221{
222 int i, status;
223 for (i = 0; i < mcc_timeout; i++) {
224 status = beiscsi_process_mcc(phba);
225 if (status)
226 return status;
227
228 if (atomic_read(&phba->ctrl.mcc_obj.q.used) == 0)
229 break;
230 udelay(100);
231 }
232 if (i == mcc_timeout) {
233 dev_err(&phba->pcidev->dev, "mccq poll timed out\n");
234 return -1;
235 }
236 return 0;
237}
238
239/* Notify MCC requests and wait for completion */
240int be_mcc_notify_wait(struct beiscsi_hba *phba)
241{
242 be_mcc_notify(phba);
243 return be_mcc_wait_compl(phba);
72} 244}
73 245
74static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl) 246static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
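/*
 * Generic shape of the bounded busy-wait be_mcc_wait_compl() performs
 * (callback form, names hypothetical): reap completions until nothing
 * is in flight, giving up after max_iters polls of 100us each.
 */
#include <linux/delay.h>
#include <linux/errno.h>

static int poll_until_drained(int (*process)(void *ctx),
			      int (*in_flight)(void *ctx),
			      void *ctx, unsigned int max_iters)
{
	unsigned int i;
	int status;

	for (i = 0; i < max_iters; i++) {
		status = process(ctx);	/* reap posted completions */
		if (status)
			return status;	/* a completion reported an error */
		if (!in_flight(ctx))
			return 0;	/* queue drained */
		udelay(100);
	}
	return -ETIMEDOUT;
}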
@@ -142,6 +314,52 @@ int be_mbox_notify(struct be_ctrl_info *ctrl)
142 return 0; 314 return 0;
143} 315}
144 316
317/*
318 * Insert the mailbox address into the doorbell in two steps,
319 * then poll the mbox doorbell till a command completion (or a timeout) occurs.
320 */
321static int be_mbox_notify_wait(struct beiscsi_hba *phba)
322{
323 int status;
324 u32 val = 0;
325 void __iomem *db = phba->ctrl.db + MPU_MAILBOX_DB_OFFSET;
326 struct be_dma_mem *mbox_mem = &phba->ctrl.mbox_mem;
327 struct be_mcc_mailbox *mbox = mbox_mem->va;
328 struct be_mcc_compl *compl = &mbox->compl;
329 struct be_ctrl_info *ctrl = &phba->ctrl;
330
331 val |= MPU_MAILBOX_DB_HI_MASK;
332 /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
333 val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
334 iowrite32(val, db);
335
336 /* wait for ready to be set */
337 status = be_mbox_db_ready_wait(ctrl);
338 if (status != 0)
339 return status;
340
341 val = 0;
342 /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
343 val |= (u32)(mbox_mem->dma >> 4) << 2;
344 iowrite32(val, db);
345
346 status = be_mbox_db_ready_wait(ctrl);
347 if (status != 0)
348 return status;
349
350 /* A cq entry has been made now */
351 if (be_mcc_compl_is_new(compl)) {
352 status = be_mcc_compl_process(ctrl, &mbox->compl);
353 be_mcc_compl_use(compl);
354 if (status)
355 return status;
356 } else {
357 dev_err(&phba->pcidev->dev, "invalid mailbox completion\n");
358 return -1;
359 }
360 return 0;
361}
362
145void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len, 363void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
146 bool embedded, u8 sge_cnt) 364 bool embedded, u8 sge_cnt)
147{ 365{
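/*
 * Sketch of the address split used above (helper names hypothetical;
 * MPU_MAILBOX_DB_HI_MASK is the driver's "high half" flag bit): the
 * 16-byte-aligned mailbox DMA address goes out in two 32-bit doorbell
 * writes, high bits first.
 */
static inline u32 mbox_db_hi_word(dma_addr_t dma)
{
	/* dword bits 2..31 carry mailbox address bits 34..63 */
	return MPU_MAILBOX_DB_HI_MASK | ((upper_32_bits(dma) >> 2) << 2);
}

static inline u32 mbox_db_lo_word(dma_addr_t dma)
{
	/* dword bits 2..31 carry mailbox address bits 4..33 */
	return (u32)(dma >> 4) << 2;
}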
@@ -203,6 +421,21 @@ struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
203 return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb; 421 return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
204} 422}
205 423
424struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba)
425{
426 struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
427 struct be_mcc_wrb *wrb;
428
429 BUG_ON(atomic_read(&mccq->used) >= mccq->len);
430 wrb = queue_head_node(mccq);
431 memset(wrb, 0, sizeof(*wrb));
432 wrb->tag0 = (mccq->head & 0x000000FF) << 16;
433 queue_head_inc(mccq);
434 atomic_inc(&mccq->used);
435 return wrb;
436}
437
438
206int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl, 439int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
207 struct be_queue_info *eq, int eq_delay) 440 struct be_queue_info *eq, int eq_delay)
208{ 441{
@@ -212,6 +445,7 @@ int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
212 struct be_dma_mem *q_mem = &eq->dma_mem; 445 struct be_dma_mem *q_mem = &eq->dma_mem;
213 int status; 446 int status;
214 447
448 SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_eq_create\n");
215 spin_lock(&ctrl->mbox_lock); 449 spin_lock(&ctrl->mbox_lock);
216 memset(wrb, 0, sizeof(*wrb)); 450 memset(wrb, 0, sizeof(*wrb));
217 451
@@ -249,6 +483,7 @@ int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
249 int status; 483 int status;
250 u8 *endian_check; 484 u8 *endian_check;
251 485
486 SE_DEBUG(DBG_LVL_8, "In be_cmd_fw_initialize\n");
252 spin_lock(&ctrl->mbox_lock); 487 spin_lock(&ctrl->mbox_lock);
253 memset(wrb, 0, sizeof(*wrb)); 488 memset(wrb, 0, sizeof(*wrb));
254 489
@@ -282,6 +517,7 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
282 void *ctxt = &req->context; 517 void *ctxt = &req->context;
283 int status; 518 int status;
284 519
520 SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_cq_create \n");
285 spin_lock(&ctrl->mbox_lock); 521 spin_lock(&ctrl->mbox_lock);
286 memset(wrb, 0, sizeof(*wrb)); 522 memset(wrb, 0, sizeof(*wrb));
287 523
@@ -289,7 +525,6 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
289 525
290 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 526 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
291 OPCODE_COMMON_CQ_CREATE, sizeof(*req)); 527 OPCODE_COMMON_CQ_CREATE, sizeof(*req));
292
293 if (!q_mem->va) 528 if (!q_mem->va)
294 SE_DEBUG(DBG_LVL_1, "uninitialized q_mem->va\n"); 529 SE_DEBUG(DBG_LVL_1, "uninitialized q_mem->va\n");
295 530
@@ -329,6 +564,53 @@ static u32 be_encoded_q_len(int q_len)
329 len_encoded = 0; 564 len_encoded = 0;
330 return len_encoded; 565 return len_encoded;
331} 566}
567
568int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
569 struct be_queue_info *mccq,
570 struct be_queue_info *cq)
571{
572 struct be_mcc_wrb *wrb;
573 struct be_cmd_req_mcc_create *req;
574 struct be_dma_mem *q_mem = &mccq->dma_mem;
575 struct be_ctrl_info *ctrl;
576 void *ctxt;
577 int status;
578
579 spin_lock(&phba->ctrl.mbox_lock);
580 ctrl = &phba->ctrl;
581 wrb = wrb_from_mbox(&ctrl->mbox_mem);
582 req = embedded_payload(wrb);
583 ctxt = &req->context;
584
585 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
586
587 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
588 OPCODE_COMMON_MCC_CREATE, sizeof(*req));
589
590 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
591
592 AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt,
593 PCI_FUNC(phba->pcidev->devfn));
594 AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
595 AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
596 be_encoded_q_len(mccq->len));
597 AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);
598
599 be_dws_cpu_to_le(ctxt, sizeof(req->context));
600
601 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
602
603 status = be_mbox_notify_wait(phba);
604 if (!status) {
605 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
606 mccq->id = le16_to_cpu(resp->id);
607 mccq->created = true;
608 }
609 spin_unlock(&phba->ctrl.mbox_lock);
610
611 return status;
612}
613
332int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q, 614int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
333 int queue_type) 615 int queue_type)
334{ 616{
@@ -337,6 +619,7 @@ int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
337 u8 subsys = 0, opcode = 0; 619 u8 subsys = 0, opcode = 0;
338 int status; 620 int status;
339 621
622 SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_q_destroy \n");
340 spin_lock(&ctrl->mbox_lock); 623 spin_lock(&ctrl->mbox_lock);
341 memset(wrb, 0, sizeof(*wrb)); 624 memset(wrb, 0, sizeof(*wrb));
342 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 625 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -350,6 +633,10 @@ int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
350 subsys = CMD_SUBSYSTEM_COMMON; 633 subsys = CMD_SUBSYSTEM_COMMON;
351 opcode = OPCODE_COMMON_CQ_DESTROY; 634 opcode = OPCODE_COMMON_CQ_DESTROY;
352 break; 635 break;
636 case QTYPE_MCCQ:
637 subsys = CMD_SUBSYSTEM_COMMON;
638 opcode = OPCODE_COMMON_MCC_DESTROY;
639 break;
353 case QTYPE_WRBQ: 640 case QTYPE_WRBQ:
354 subsys = CMD_SUBSYSTEM_ISCSI; 641 subsys = CMD_SUBSYSTEM_ISCSI;
355 opcode = OPCODE_COMMON_ISCSI_WRBQ_DESTROY; 642 opcode = OPCODE_COMMON_ISCSI_WRBQ_DESTROY;
@@ -377,30 +664,6 @@ int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
377 return status; 664 return status;
378} 665}
379 666
380int be_cmd_get_mac_addr(struct be_ctrl_info *ctrl, u8 *mac_addr)
381{
382 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
383 struct be_cmd_req_get_mac_addr *req = embedded_payload(wrb);
384 int status;
385
386 spin_lock(&ctrl->mbox_lock);
387 memset(wrb, 0, sizeof(*wrb));
388 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
389 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
390 OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG,
391 sizeof(*req));
392
393 status = be_mbox_notify(ctrl);
394 if (!status) {
395 struct be_cmd_resp_get_mac_addr *resp = embedded_payload(wrb);
396
397 memcpy(mac_addr, resp->mac_address, ETH_ALEN);
398 }
399
400 spin_unlock(&ctrl->mbox_lock);
401 return status;
402}
403
404int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl, 667int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
405 struct be_queue_info *cq, 668 struct be_queue_info *cq,
406 struct be_queue_info *dq, int length, 669 struct be_queue_info *dq, int length,
@@ -412,6 +675,7 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
412 void *ctxt = &req->context; 675 void *ctxt = &req->context;
413 int status; 676 int status;
414 677
678 SE_DEBUG(DBG_LVL_8, "In be_cmd_create_default_pdu_queue\n");
415 spin_lock(&ctrl->mbox_lock); 679 spin_lock(&ctrl->mbox_lock);
416 memset(wrb, 0, sizeof(*wrb)); 680 memset(wrb, 0, sizeof(*wrb));
417 681
@@ -468,8 +732,10 @@ int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
468 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); 732 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
469 733
470 status = be_mbox_notify(ctrl); 734 status = be_mbox_notify(ctrl);
471 if (!status) 735 if (!status) {
472 wrbq->id = le16_to_cpu(resp->cid); 736 wrbq->id = le16_to_cpu(resp->cid);
737 wrbq->created = true;
738 }
473 spin_unlock(&ctrl->mbox_lock); 739 spin_unlock(&ctrl->mbox_lock);
474 return status; 740 return status;
475} 741}
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index c20d686cbb43..49fcc787ee8b 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2009 ServerEngines 2 * Copyright (C) 2005 - 2010 ServerEngines
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -47,6 +47,8 @@ struct be_mcc_wrb {
47 47
48#define CQE_FLAGS_VALID_MASK (1 << 31) 48#define CQE_FLAGS_VALID_MASK (1 << 31)
49#define CQE_FLAGS_ASYNC_MASK (1 << 30) 49#define CQE_FLAGS_ASYNC_MASK (1 << 30)
50#define CQE_FLAGS_COMPLETED_MASK (1 << 28)
51#define CQE_FLAGS_CONSUMED_MASK (1 << 27)
50 52
51/* Completion Status */ 53/* Completion Status */
52#define MCC_STATUS_SUCCESS 0x0 54#define MCC_STATUS_SUCCESS 0x0
@@ -173,7 +175,7 @@ struct be_cmd_req_hdr {
173 u8 domain; /* dword 0 */ 175 u8 domain; /* dword 0 */
174 u32 timeout; /* dword 1 */ 176 u32 timeout; /* dword 1 */
175 u32 request_length; /* dword 2 */ 177 u32 request_length; /* dword 2 */
176 u32 rsvd; /* dword 3 */ 178 u32 rsvd0; /* dword 3 */
177}; 179};
178 180
179struct be_cmd_resp_hdr { 181struct be_cmd_resp_hdr {
@@ -382,7 +384,6 @@ struct be_cmd_req_modify_eq_delay {
382 384
383#define ETH_ALEN 6 385#define ETH_ALEN 6
384 386
385
386struct be_cmd_req_get_mac_addr { 387struct be_cmd_req_get_mac_addr {
387 struct be_cmd_req_hdr hdr; 388 struct be_cmd_req_hdr hdr;
388 u32 nic_port_count; 389 u32 nic_port_count;
@@ -417,14 +418,27 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
417 418
418int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q, 419int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
419 int type); 420 int type);
420int be_poll_mcc(struct be_ctrl_info *ctrl); 421int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
421unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl); 422 struct be_queue_info *mccq,
422int be_cmd_get_mac_addr(struct be_ctrl_info *ctrl, u8 *mac_addr); 423 struct be_queue_info *cq);
423 424
425int be_poll_mcc(struct be_ctrl_info *ctrl);
426unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
427 struct beiscsi_hba *phba);
428unsigned int be_cmd_get_mac_addr(struct beiscsi_hba *phba);
429void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag);
424/* ISCSI Functions */ 430/* ISCSI Functions */
425int be_cmd_fw_initialize(struct be_ctrl_info *ctrl); 431int be_cmd_fw_initialize(struct be_ctrl_info *ctrl);
426 432
427struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem); 433struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem);
434struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba);
435int be_mcc_notify_wait(struct beiscsi_hba *phba);
436void be_mcc_notify(struct beiscsi_hba *phba);
437unsigned int alloc_mcc_tag(struct beiscsi_hba *phba);
438void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
439 struct be_async_event_link_state *evt);
440int be_mcc_compl_process_isr(struct be_ctrl_info *ctrl,
441 struct be_mcc_compl *compl);
428 442
429int be_mbox_notify(struct be_ctrl_info *ctrl); 443int be_mbox_notify(struct be_ctrl_info *ctrl);
430 444
@@ -440,6 +454,8 @@ int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
440int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem, 454int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
441 struct be_queue_info *wrbq); 455 struct be_queue_info *wrbq);
442 456
457bool is_link_state_evt(u32 trailer);
458
443struct be_default_pdu_context { 459struct be_default_pdu_context {
444 u32 dw[4]; 460 u32 dw[4];
445} __packed; 461} __packed;
@@ -531,6 +547,23 @@ struct amap_sol_cqe {
531 u8 valid; /* dword 3 */ 547 u8 valid; /* dword 3 */
532} __packed; 548} __packed;
533 549
550#define SOL_ICD_INDEX_MASK 0x0003FFC0
551struct amap_sol_cqe_ring {
552 u8 hw_sts[8]; /* dword 0 */
553 u8 i_sts[8]; /* dword 0 */
554 u8 i_resp[8]; /* dword 0 */
555 u8 i_flags[7]; /* dword 0 */
556 u8 s; /* dword 0 */
557 u8 i_exp_cmd_sn[32]; /* dword 1 */
558 u8 code[6]; /* dword 2 */
559 u8 icd_index[12]; /* dword 2 */
560 u8 rsvd[6]; /* dword 2 */
561 u8 i_cmd_wnd[8]; /* dword 2 */
562 u8 i_res_cnt[31]; /* dword 3 */
563 u8 valid; /* dword 3 */
564} __packed;
565
566
534 567
535/** 568/**
536 * Post WRB Queue Doorbell Register used by the host Storage 569 * Post WRB Queue Doorbell Register used by the host Storage
@@ -664,8 +697,8 @@ struct be_fw_cfg {
664#define OPCODE_COMMON_TCP_UPLOAD 56 697#define OPCODE_COMMON_TCP_UPLOAD 56
665#define OPCODE_COMMON_ISCSI_ERROR_RECOVERY_INVALIDATE_COMMANDS 1 698#define OPCODE_COMMON_ISCSI_ERROR_RECOVERY_INVALIDATE_COMMANDS 1
666/* --- CMD_ISCSI_INVALIDATE_CONNECTION_TYPE --- */ 699/* --- CMD_ISCSI_INVALIDATE_CONNECTION_TYPE --- */
667#define CMD_ISCSI_CONNECTION_INVALIDATE 1 700#define CMD_ISCSI_CONNECTION_INVALIDATE 0x8001
668#define CMD_ISCSI_CONNECTION_ISSUE_TCP_RST 2 701#define CMD_ISCSI_CONNECTION_ISSUE_TCP_RST 0x8002
669#define OPCODE_ISCSI_INI_DRIVER_INVALIDATE_CONNECTION 42 702#define OPCODE_ISCSI_INI_DRIVER_INVALIDATE_CONNECTION 42
670 703
671#define INI_WR_CMD 1 /* Initiator write command */ 704#define INI_WR_CMD 1 /* Initiator write command */
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 2fd25442cfaf..c3928cb8b042 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2009 ServerEngines 2 * Copyright (C) 2005 - 2010 ServerEngines
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -67,11 +67,11 @@ struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep,
67 cmds_max = beiscsi_ep->phba->params.wrbs_per_cxn; 67 cmds_max = beiscsi_ep->phba->params.wrbs_per_cxn;
68 } 68 }
69 69
70 cls_session = iscsi_session_setup(&beiscsi_iscsi_transport, 70 cls_session = iscsi_session_setup(&beiscsi_iscsi_transport,
71 shost, cmds_max, 71 shost, cmds_max,
72 sizeof(*beiscsi_sess), 72 sizeof(*beiscsi_sess),
73 sizeof(*io_task), 73 sizeof(*io_task),
74 initial_cmdsn, ISCSI_MAX_TARGET); 74 initial_cmdsn, ISCSI_MAX_TARGET);
75 if (!cls_session) 75 if (!cls_session)
76 return NULL; 76 return NULL;
77 sess = cls_session->dd_data; 77 sess = cls_session->dd_data;
@@ -101,6 +101,7 @@ void beiscsi_session_destroy(struct iscsi_cls_session *cls_session)
101 struct iscsi_session *sess = cls_session->dd_data; 101 struct iscsi_session *sess = cls_session->dd_data;
102 struct beiscsi_session *beiscsi_sess = sess->dd_data; 102 struct beiscsi_session *beiscsi_sess = sess->dd_data;
103 103
104 SE_DEBUG(DBG_LVL_8, "In beiscsi_session_destroy\n");
104 pci_pool_destroy(beiscsi_sess->bhs_pool); 105 pci_pool_destroy(beiscsi_sess->bhs_pool);
105 iscsi_session_teardown(cls_session); 106 iscsi_session_teardown(cls_session);
106} 107}
@@ -224,6 +225,7 @@ int beiscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
224 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 225 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
225 int len = 0; 226 int len = 0;
226 227
228 SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_get_param, param= %d\n", param);
227 beiscsi_ep = beiscsi_conn->ep; 229 beiscsi_ep = beiscsi_conn->ep;
228 if (!beiscsi_ep) { 230 if (!beiscsi_ep) {
229 SE_DEBUG(DBG_LVL_1, 231 SE_DEBUG(DBG_LVL_1,
@@ -254,6 +256,7 @@ int beiscsi_set_param(struct iscsi_cls_conn *cls_conn,
254 struct iscsi_session *session = conn->session; 256 struct iscsi_session *session = conn->session;
255 int ret; 257 int ret;
256 258
259 SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_set_param, param= %d\n", param);
257 ret = iscsi_set_param(cls_conn, param, buf, buflen); 260 ret = iscsi_set_param(cls_conn, param, buf, buflen);
258 if (ret) 261 if (ret)
259 return ret; 262 return ret;
@@ -271,8 +274,8 @@ int beiscsi_set_param(struct iscsi_cls_conn *cls_conn,
271 conn->max_recv_dlength = 65536; 274 conn->max_recv_dlength = 65536;
272 break; 275 break;
273 case ISCSI_PARAM_MAX_BURST: 276 case ISCSI_PARAM_MAX_BURST:
274 if (session->first_burst > 262144) 277 if (session->max_burst > 262144)
275 session->first_burst = 262144; 278 session->max_burst = 262144;
276 break; 279 break;
277 default: 280 default:
278 return 0; 281 return 0;
@@ -293,12 +296,41 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
293 enum iscsi_host_param param, char *buf) 296 enum iscsi_host_param param, char *buf)
294{ 297{
295 struct beiscsi_hba *phba = (struct beiscsi_hba *)iscsi_host_priv(shost); 298 struct beiscsi_hba *phba = (struct beiscsi_hba *)iscsi_host_priv(shost);
299 struct be_cmd_resp_get_mac_addr *resp;
300 struct be_mcc_wrb *wrb;
301 unsigned int tag, wrb_num;
296 int len = 0; 302 int len = 0;
303 unsigned short status, extd_status;
304 struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
297 305
306 SE_DEBUG(DBG_LVL_8, "In beiscsi_get_host_param, param= %d\n", param);
298 switch (param) { 307 switch (param) {
299 case ISCSI_HOST_PARAM_HWADDRESS: 308 case ISCSI_HOST_PARAM_HWADDRESS:
300 be_cmd_get_mac_addr(&phba->ctrl, phba->mac_address); 309 tag = be_cmd_get_mac_addr(phba);
301 len = sysfs_format_mac(buf, phba->mac_address, ETH_ALEN); 310 if (!tag) {
311 SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed \n");
312 return -1;
313 } else
314 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
315 phba->ctrl.mcc_numtag[tag]);
316
317 wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
318 extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
319 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
320 if (status || extd_status) {
321 SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed"
322 " status = %d extd_status = %d \n",
323 status, extd_status);
324 free_mcc_tag(&phba->ctrl, tag);
325 return -1;
326 } else {
327 wrb = queue_get_wrb(mccq, wrb_num);
328 free_mcc_tag(&phba->ctrl, tag);
329 resp = embedded_payload(wrb);
330 memcpy(phba->mac_address, resp->mac_address, ETH_ALEN);
331 len = sysfs_format_mac(buf, phba->mac_address,
332 ETH_ALEN);
333 }
302 break; 334 break;
303 default: 335 default:
304 return iscsi_host_get_param(shost, param, buf); 336 return iscsi_host_get_param(shost, param, buf);
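/*
 * The tag dance above is the driver's new fire-and-wait idiom; a
 * condensed sketch (function name hypothetical, driver context
 * assumed): sleep on the per-tag waitqueue until the MCC ISR packs the
 * result into mcc_numtag[tag] and wakes us, then decode and release.
 */
static int mcc_wait_for_tag(struct beiscsi_hba *phba, unsigned int tag,
			    unsigned int *wrb_num)
{
	unsigned short status, extd_status;

	if (!tag)
		return -EBUSY;		/* submit failed, no tag issued */

	wait_event_interruptible(phba->ctrl.mcc_wait[tag],
				 phba->ctrl.mcc_numtag[tag]);

	*wrb_num    = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
	extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
	status      =  phba->ctrl.mcc_numtag[tag] & 0x000000FF;
	free_mcc_tag(&phba->ctrl, tag);
	return (status || extd_status) ? -EIO : 0;
}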
@@ -377,16 +409,13 @@ int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn)
377 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 409 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
378 struct beiscsi_endpoint *beiscsi_ep; 410 struct beiscsi_endpoint *beiscsi_ep;
379 struct beiscsi_offload_params params; 411 struct beiscsi_offload_params params;
380 struct iscsi_session *session = conn->session;
381 struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
382 struct beiscsi_hba *phba = iscsi_host_priv(shost);
383 412
413 SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_start\n");
384 memset(&params, 0, sizeof(struct beiscsi_offload_params)); 414 memset(&params, 0, sizeof(struct beiscsi_offload_params));
385 beiscsi_ep = beiscsi_conn->ep; 415 beiscsi_ep = beiscsi_conn->ep;
386 if (!beiscsi_ep) 416 if (!beiscsi_ep)
387 SE_DEBUG(DBG_LVL_1, "In beiscsi_conn_start , no beiscsi_ep\n"); 417 SE_DEBUG(DBG_LVL_1, "In beiscsi_conn_start , no beiscsi_ep\n");
388 418
389 free_mgmt_sgl_handle(phba, beiscsi_conn->plogin_sgl_handle);
390 beiscsi_conn->login_in_progress = 0; 419 beiscsi_conn->login_in_progress = 0;
391 beiscsi_set_params_for_offld(beiscsi_conn, &params); 420 beiscsi_set_params_for_offld(beiscsi_conn, &params);
392 beiscsi_offload_connection(beiscsi_conn, &params); 421 beiscsi_offload_connection(beiscsi_conn, &params);
@@ -426,8 +455,14 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
426{ 455{
427 struct beiscsi_endpoint *beiscsi_ep = ep->dd_data; 456 struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
428 struct beiscsi_hba *phba = beiscsi_ep->phba; 457 struct beiscsi_hba *phba = beiscsi_ep->phba;
458 struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
459 struct be_mcc_wrb *wrb;
460 struct tcp_connect_and_offload_out *ptcpcnct_out;
461 unsigned short status, extd_status;
462 unsigned int tag, wrb_num;
429 int ret = -1; 463 int ret = -1;
430 464
465 SE_DEBUG(DBG_LVL_8, "In beiscsi_open_conn\n");
431 beiscsi_ep->ep_cid = beiscsi_get_cid(phba); 466 beiscsi_ep->ep_cid = beiscsi_get_cid(phba);
432 if (beiscsi_ep->ep_cid == 0xFFFF) { 467 if (beiscsi_ep->ep_cid == 0xFFFF) {
433 SE_DEBUG(DBG_LVL_1, "No free cid available\n"); 468 SE_DEBUG(DBG_LVL_1, "No free cid available\n");
@@ -435,15 +470,44 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
435 } 470 }
436 SE_DEBUG(DBG_LVL_8, "In beiscsi_open_conn, ep_cid=%d ", 471 SE_DEBUG(DBG_LVL_8, "In beiscsi_open_conn, ep_cid=%d ",
437 beiscsi_ep->ep_cid); 472 beiscsi_ep->ep_cid);
438 phba->ep_array[beiscsi_ep->ep_cid] = ep; 473 phba->ep_array[beiscsi_ep->ep_cid -
439 if (beiscsi_ep->ep_cid > 474 phba->fw_config.iscsi_cid_start] = ep;
440 (phba->fw_config.iscsi_cid_start + phba->params.cxns_per_ctrl)) { 475 if (beiscsi_ep->ep_cid > (phba->fw_config.iscsi_cid_start +
476 phba->params.cxns_per_ctrl * 2)) {
441 SE_DEBUG(DBG_LVL_1, "Failed in allocate iscsi cid\n"); 477 SE_DEBUG(DBG_LVL_1, "Failed in allocate iscsi cid\n");
442 return ret; 478 return ret;
443 } 479 }
444 480
445 beiscsi_ep->cid_vld = 0; 481 beiscsi_ep->cid_vld = 0;
446 return mgmt_open_connection(phba, dst_addr, beiscsi_ep); 482 tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep);
483 if (!tag) {
484 SE_DEBUG(DBG_LVL_1,
485 "mgmt_open_connection Failed for cid=%d \n",
486 beiscsi_ep->ep_cid);
487 } else {
488 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
489 phba->ctrl.mcc_numtag[tag]);
490 }
491 wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
492 extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
493 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
494 if (status || extd_status) {
495 SE_DEBUG(DBG_LVL_1, "mgmt_open_connection Failed"
496 " status = %d extd_status = %d \n",
497 status, extd_status);
498 free_mcc_tag(&phba->ctrl, tag);
499 return -1;
500 } else {
501 wrb = queue_get_wrb(mccq, wrb_num);
502 free_mcc_tag(&phba->ctrl, tag);
503
504 ptcpcnct_out = embedded_payload(wrb);
505 beiscsi_ep = ep->dd_data;
506 beiscsi_ep->fw_handle = ptcpcnct_out->connection_handle;
507 beiscsi_ep->cid_vld = 1;
508 SE_DEBUG(DBG_LVL_8, "mgmt_open_connection Success\n");
509 }
510 return 0;
447} 511}
448 512
449/** 513/**
@@ -463,14 +527,12 @@ static void beiscsi_put_cid(struct beiscsi_hba *phba, unsigned short cid)
463 * beiscsi_free_ep - free endpoint 527 * beiscsi_free_ep - free endpoint
464 * @ep: pointer to iscsi endpoint structure 528 * @ep: pointer to iscsi endpoint structure
465 */ 529 */
466static void beiscsi_free_ep(struct iscsi_endpoint *ep) 530static void beiscsi_free_ep(struct beiscsi_endpoint *beiscsi_ep)
467{ 531{
468 struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
469 struct beiscsi_hba *phba = beiscsi_ep->phba; 532 struct beiscsi_hba *phba = beiscsi_ep->phba;
470 533
471 beiscsi_put_cid(phba, beiscsi_ep->ep_cid); 534 beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
472 beiscsi_ep->phba = NULL; 535 beiscsi_ep->phba = NULL;
473 iscsi_destroy_endpoint(ep);
474} 536}
475 537
476/** 538/**
@@ -498,6 +560,13 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
498 SE_DEBUG(DBG_LVL_1, "shost is NULL \n"); 560 SE_DEBUG(DBG_LVL_1, "shost is NULL \n");
499 return ERR_PTR(ret); 561 return ERR_PTR(ret);
500 } 562 }
563
564 if (phba->state != BE_ADAPTER_UP) {
565 ret = -EBUSY;
566 SE_DEBUG(DBG_LVL_1, "The Adapter state is Not UP \n");
567 return ERR_PTR(ret);
568 }
569
501 ep = iscsi_create_endpoint(sizeof(struct beiscsi_endpoint)); 570 ep = iscsi_create_endpoint(sizeof(struct beiscsi_endpoint));
502 if (!ep) { 571 if (!ep) {
503 ret = -ENOMEM; 572 ret = -ENOMEM;
@@ -506,9 +575,9 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
506 575
507 beiscsi_ep = ep->dd_data; 576 beiscsi_ep = ep->dd_data;
508 beiscsi_ep->phba = phba; 577 beiscsi_ep->phba = phba;
509 578 beiscsi_ep->openiscsi_ep = ep;
510 if (beiscsi_open_conn(ep, NULL, dst_addr, non_blocking)) { 579 if (beiscsi_open_conn(ep, NULL, dst_addr, non_blocking)) {
511 SE_DEBUG(DBG_LVL_1, "Failed in allocate iscsi cid\n"); 580 SE_DEBUG(DBG_LVL_1, "Failed in beiscsi_open_conn \n");
512 ret = -ENOMEM; 581 ret = -ENOMEM;
513 goto free_ep; 582 goto free_ep;
514 } 583 }
@@ -516,7 +585,7 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
516 return ep; 585 return ep;
517 586
518free_ep: 587free_ep:
519 beiscsi_free_ep(ep); 588 beiscsi_free_ep(beiscsi_ep);
520 return ERR_PTR(ret); 589 return ERR_PTR(ret);
521} 590}
522 591
@@ -543,20 +612,22 @@ int beiscsi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
543 * @ep: The iscsi endpoint 612 * @ep: The iscsi endpoint
544 * @flag: The type of connection closure 613 * @flag: The type of connection closure
545 */ 614 */
546static int beiscsi_close_conn(struct iscsi_endpoint *ep, int flag) 615static int beiscsi_close_conn(struct beiscsi_endpoint *beiscsi_ep, int flag)
547{ 616{
548 int ret = 0; 617 int ret = 0;
549 struct beiscsi_endpoint *beiscsi_ep = ep->dd_data; 618 unsigned int tag;
550 struct beiscsi_hba *phba = beiscsi_ep->phba; 619 struct beiscsi_hba *phba = beiscsi_ep->phba;
551 620
552 if (MGMT_STATUS_SUCCESS != 621 tag = mgmt_upload_connection(phba, beiscsi_ep->ep_cid, flag);
553 mgmt_upload_connection(phba, beiscsi_ep->ep_cid, 622 if (!tag) {
554 CONNECTION_UPLOAD_GRACEFUL)) {
555 SE_DEBUG(DBG_LVL_8, "upload failed for cid 0x%x", 623 SE_DEBUG(DBG_LVL_8, "upload failed for cid 0x%x",
556 beiscsi_ep->ep_cid); 624 beiscsi_ep->ep_cid);
557 ret = -1; 625 ret = -1;
626 } else {
627 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
628 phba->ctrl.mcc_numtag[tag]);
629 free_mcc_tag(&phba->ctrl, tag);
558 } 630 }
559
560 return ret; 631 return ret;
561} 632}
562 633
@@ -571,19 +642,17 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
571 struct beiscsi_conn *beiscsi_conn; 642 struct beiscsi_conn *beiscsi_conn;
572 struct beiscsi_endpoint *beiscsi_ep; 643 struct beiscsi_endpoint *beiscsi_ep;
573 struct beiscsi_hba *phba; 644 struct beiscsi_hba *phba;
574 int flag = 0;
575 645
576 beiscsi_ep = ep->dd_data; 646 beiscsi_ep = ep->dd_data;
577 phba = beiscsi_ep->phba; 647 phba = beiscsi_ep->phba;
578 SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_disconnect\n"); 648 SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_disconnect for ep_cid = %d\n",
649 beiscsi_ep->ep_cid);
579 650
580 if (beiscsi_ep->conn) { 651 if (beiscsi_ep->conn) {
581 beiscsi_conn = beiscsi_ep->conn; 652 beiscsi_conn = beiscsi_ep->conn;
582 iscsi_suspend_queue(beiscsi_conn->conn); 653 iscsi_suspend_queue(beiscsi_conn->conn);
583 beiscsi_close_conn(ep, flag);
584 } 654 }
585 655
586 beiscsi_free_ep(ep);
587} 656}
588 657
589/** 658/**
@@ -616,23 +685,31 @@ void beiscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
616 struct iscsi_session *session = conn->session; 685 struct iscsi_session *session = conn->session;
617 struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session); 686 struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
618 struct beiscsi_hba *phba = iscsi_host_priv(shost); 687 struct beiscsi_hba *phba = iscsi_host_priv(shost);
619 unsigned int status; 688 unsigned int tag;
620 unsigned short savecfg_flag = CMD_ISCSI_SESSION_SAVE_CFG_ON_FLASH; 689 unsigned short savecfg_flag = CMD_ISCSI_SESSION_SAVE_CFG_ON_FLASH;
621 690
622 SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_stop\n");
623 beiscsi_ep = beiscsi_conn->ep; 691 beiscsi_ep = beiscsi_conn->ep;
624 if (!beiscsi_ep) { 692 if (!beiscsi_ep) {
625 SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_stop , no beiscsi_ep\n"); 693 SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_stop , no beiscsi_ep\n");
626 return; 694 return;
627 } 695 }
628 status = mgmt_invalidate_connection(phba, beiscsi_ep, 696 SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_stop ep_cid = %d\n",
697 beiscsi_ep->ep_cid);
698 tag = mgmt_invalidate_connection(phba, beiscsi_ep,
629 beiscsi_ep->ep_cid, 1, 699 beiscsi_ep->ep_cid, 1,
630 savecfg_flag); 700 savecfg_flag);
631 if (status != MGMT_STATUS_SUCCESS) { 701 if (!tag) {
632 SE_DEBUG(DBG_LVL_1, 702 SE_DEBUG(DBG_LVL_1,
633 "mgmt_invalidate_connection Failed for cid=%d \n", 703 "mgmt_invalidate_connection Failed for cid=%d \n",
634 beiscsi_ep->ep_cid); 704 beiscsi_ep->ep_cid);
705 } else {
706 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
707 phba->ctrl.mcc_numtag[tag]);
708 free_mcc_tag(&phba->ctrl, tag);
635 } 709 }
710 beiscsi_close_conn(beiscsi_ep, CONNECTION_UPLOAD_GRACEFUL);
711 beiscsi_free_ep(beiscsi_ep);
712 iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep);
636 beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid); 713 beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid);
637 iscsi_conn_stop(cls_conn, flag); 714 iscsi_conn_stop(cls_conn, flag);
638} 715}
diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h
index f92ffc5349fb..1f512c28cbf9 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.h
+++ b/drivers/scsi/be2iscsi/be_iscsi.h
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2009 ServerEngines 2 * Copyright (C) 2005 - 2010 ServerEngines
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 4f1aca346e38..dd5b105f8f47 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2009 ServerEngines 2 * Copyright (C) 2005 - 2010 ServerEngines
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -19,6 +19,7 @@
19 */ 19 */
20#include <linux/reboot.h> 20#include <linux/reboot.h>
21#include <linux/delay.h> 21#include <linux/delay.h>
22#include <linux/slab.h>
22#include <linux/interrupt.h> 23#include <linux/interrupt.h>
23#include <linux/blkdev.h> 24#include <linux/blkdev.h>
24#include <linux/pci.h> 25#include <linux/pci.h>
@@ -39,7 +40,7 @@
39 40
40static unsigned int be_iopoll_budget = 10; 41static unsigned int be_iopoll_budget = 10;
41static unsigned int be_max_phys_size = 64; 42static unsigned int be_max_phys_size = 64;
42static unsigned int enable_msix; 43static unsigned int enable_msix = 1;
43 44
44MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table); 45MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
45MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR); 46MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
@@ -58,17 +59,145 @@ static int beiscsi_slave_configure(struct scsi_device *sdev)
58 return 0; 59 return 0;
59} 60}
60 61
62static int beiscsi_eh_abort(struct scsi_cmnd *sc)
63{
64 struct iscsi_cls_session *cls_session;
65 struct iscsi_task *aborted_task = (struct iscsi_task *)sc->SCp.ptr;
66 struct beiscsi_io_task *aborted_io_task;
67 struct iscsi_conn *conn;
68 struct beiscsi_conn *beiscsi_conn;
69 struct beiscsi_hba *phba;
70 struct iscsi_session *session;
71 struct invalidate_command_table *inv_tbl;
72 unsigned int cid, tag, num_invalidate;
73
74 cls_session = starget_to_session(scsi_target(sc->device));
75 session = cls_session->dd_data;
76
77 spin_lock_bh(&session->lock);
78 if (!aborted_task || !aborted_task->sc) {
79 /* we raced */
80 spin_unlock_bh(&session->lock);
81 return SUCCESS;
82 }
83
84 aborted_io_task = aborted_task->dd_data;
85 if (!aborted_io_task->scsi_cmnd) {
86 /* raced or invalid command */
87 spin_unlock_bh(&session->lock);
88 return SUCCESS;
89 }
90 spin_unlock_bh(&session->lock);
91 conn = aborted_task->conn;
92 beiscsi_conn = conn->dd_data;
93 phba = beiscsi_conn->phba;
94
95 /* invalidate iocb */
96 cid = beiscsi_conn->beiscsi_conn_cid;
97 inv_tbl = phba->inv_tbl;
98 memset(inv_tbl, 0x0, sizeof(*inv_tbl));
99 inv_tbl->cid = cid;
100 inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index;
101 num_invalidate = 1;
102 tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate, cid);
103 if (!tag) {
104 shost_printk(KERN_WARNING, phba->shost,
105 "mgmt_invalidate_icds could not be"
106 " submitted\n");
107 return FAILED;
108 } else {
109 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
110 phba->ctrl.mcc_numtag[tag]);
111 free_mcc_tag(&phba->ctrl, tag);
112 }
113
114 return iscsi_eh_abort(sc);
115}
116
117static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
118{
119 struct iscsi_task *abrt_task;
120 struct beiscsi_io_task *abrt_io_task;
121 struct iscsi_conn *conn;
122 struct beiscsi_conn *beiscsi_conn;
123 struct beiscsi_hba *phba;
124 struct iscsi_session *session;
125 struct iscsi_cls_session *cls_session;
126 struct invalidate_command_table *inv_tbl;
127 unsigned int cid, tag, i, num_invalidate;
128 int rc = FAILED;
129
130 /* invalidate iocbs */
131 cls_session = starget_to_session(scsi_target(sc->device));
132 session = cls_session->dd_data;
133 spin_lock_bh(&session->lock);
134 if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN)
135 goto unlock;
136
137 conn = session->leadconn;
138 beiscsi_conn = conn->dd_data;
139 phba = beiscsi_conn->phba;
140 cid = beiscsi_conn->beiscsi_conn_cid;
141 inv_tbl = phba->inv_tbl;
142 memset(inv_tbl, 0x0, sizeof(*inv_tbl) * BE2_CMDS_PER_CXN);
143 num_invalidate = 0;
144 for (i = 0; i < conn->session->cmds_max; i++) {
145 abrt_task = conn->session->cmds[i];
146 abrt_io_task = abrt_task->dd_data;
147 if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE)
148 continue;
149
150 if (abrt_task->sc->device->lun != sc->device->lun)
151 continue;
152
153 inv_tbl->cid = cid;
154 inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index;
155 num_invalidate++;
156 inv_tbl++;
157 }
158 spin_unlock_bh(&session->lock);
159 inv_tbl = phba->inv_tbl;
160
161 tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate, cid);
162 if (!tag) {
163 shost_printk(KERN_WARNING, phba->shost,
164 "mgmt_invalidate_icds could not be"
165 " submitted\n");
166 return FAILED;
167 } else {
168 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
169 phba->ctrl.mcc_numtag[tag]);
170 free_mcc_tag(&phba->ctrl, tag);
171 }
172
173 return iscsi_eh_device_reset(sc);
174unlock:
175 spin_unlock_bh(&session->lock);
176 return rc;
177}
178
179/*------------------- PCI Driver operations and data ----------------- */
180static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
181 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
182 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
183 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
184 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
185 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
186 { 0 }
187};
188MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
189
61static struct scsi_host_template beiscsi_sht = { 190static struct scsi_host_template beiscsi_sht = {
62 .module = THIS_MODULE, 191 .module = THIS_MODULE,
63 .name = "ServerEngines 10Gbe open-iscsi Initiator Driver", 192 .name = "ServerEngines 10Gbe open-iscsi Initiator Driver",
64 .proc_name = DRV_NAME, 193 .proc_name = DRV_NAME,
65 .queuecommand = iscsi_queuecommand, 194 .queuecommand = iscsi_queuecommand,
66 .eh_abort_handler = iscsi_eh_abort,
67 .change_queue_depth = iscsi_change_queue_depth, 195 .change_queue_depth = iscsi_change_queue_depth,
68 .slave_configure = beiscsi_slave_configure, 196 .slave_configure = beiscsi_slave_configure,
69 .target_alloc = iscsi_target_alloc, 197 .target_alloc = iscsi_target_alloc,
70 .eh_device_reset_handler = iscsi_eh_device_reset, 198 .eh_abort_handler = beiscsi_eh_abort,
71 .eh_target_reset_handler = iscsi_eh_target_reset, 199 .eh_device_reset_handler = beiscsi_eh_device_reset,
200 .eh_target_reset_handler = iscsi_eh_session_reset,
72 .sg_tablesize = BEISCSI_SGLIST_ELEMENTS, 201 .sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
73 .can_queue = BE2_IO_DEPTH, 202 .can_queue = BE2_IO_DEPTH,
74 .this_id = -1, 203 .this_id = -1,
@@ -76,16 +205,8 @@ static struct scsi_host_template beiscsi_sht = {
76 .cmd_per_lun = BEISCSI_CMD_PER_LUN, 205 .cmd_per_lun = BEISCSI_CMD_PER_LUN,
77 .use_clustering = ENABLE_CLUSTERING, 206 .use_clustering = ENABLE_CLUSTERING,
78}; 207};
79static struct scsi_transport_template *beiscsi_scsi_transport;
80 208
81/*------------------- PCI Driver operations and data ----------------- */ 209static struct scsi_transport_template *beiscsi_scsi_transport;
82static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
83 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
84 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
85 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
86 { 0 }
87};
88MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
89 210
90static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev) 211static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
91{ 212{
@@ -104,11 +225,11 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
104 shost->max_cmd_len = BEISCSI_MAX_CMD_LEN; 225 shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
105 shost->max_lun = BEISCSI_NUM_MAX_LUN; 226 shost->max_lun = BEISCSI_NUM_MAX_LUN;
106 shost->transportt = beiscsi_scsi_transport; 227 shost->transportt = beiscsi_scsi_transport;
107
108 phba = iscsi_host_priv(shost); 228 phba = iscsi_host_priv(shost);
109 memset(phba, 0, sizeof(*phba)); 229 memset(phba, 0, sizeof(*phba));
110 phba->shost = shost; 230 phba->shost = shost;
111 phba->pcidev = pci_dev_get(pcidev); 231 phba->pcidev = pci_dev_get(pcidev);
232 pci_set_drvdata(pcidev, phba);
112 233
113 if (iscsi_host_add(shost, &phba->pcidev->dev)) 234 if (iscsi_host_add(shost, &phba->pcidev->dev))
114 goto free_devices; 235 goto free_devices;
@@ -140,6 +261,7 @@ static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
140 struct pci_dev *pcidev) 261 struct pci_dev *pcidev)
141{ 262{
142 u8 __iomem *addr; 263 u8 __iomem *addr;
264 int pcicfg_reg;
143 265
144 addr = ioremap_nocache(pci_resource_start(pcidev, 2), 266 addr = ioremap_nocache(pci_resource_start(pcidev, 2),
145 pci_resource_len(pcidev, 2)); 267 pci_resource_len(pcidev, 2));
@@ -156,13 +278,19 @@ static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
156 phba->db_va = addr; 278 phba->db_va = addr;
157 phba->db_pa.u.a64.address = pci_resource_start(pcidev, 4); 279 phba->db_pa.u.a64.address = pci_resource_start(pcidev, 4);
158 280
159 addr = ioremap_nocache(pci_resource_start(pcidev, 1), 281 if (phba->generation == BE_GEN2)
160 pci_resource_len(pcidev, 1)); 282 pcicfg_reg = 1;
283 else
284 pcicfg_reg = 0;
285
286 addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg),
287 pci_resource_len(pcidev, pcicfg_reg));
288
161 if (addr == NULL) 289 if (addr == NULL)
162 goto pci_map_err; 290 goto pci_map_err;
163 phba->ctrl.pcicfg = addr; 291 phba->ctrl.pcicfg = addr;
164 phba->pci_va = addr; 292 phba->pci_va = addr;
165 phba->pci_pa.u.a64.address = pci_resource_start(pcidev, 1); 293 phba->pci_pa.u.a64.address = pci_resource_start(pcidev, pcicfg_reg);
166 return 0; 294 return 0;
167 295
168pci_map_err: 296pci_map_err:
@@ -181,6 +309,7 @@ static int beiscsi_enable_pci(struct pci_dev *pcidev)
181 return ret; 309 return ret;
182 } 310 }
183 311
312 pci_set_master(pcidev);
184 if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) { 313 if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
185 ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32)); 314 ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
186 if (ret) { 315 if (ret) {
@@ -203,7 +332,6 @@ static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
203 status = beiscsi_map_pci_bars(phba, pdev); 332 status = beiscsi_map_pci_bars(phba, pdev);
204 if (status) 333 if (status)
205 return status; 334 return status;
206
207 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; 335 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
208 mbox_mem_alloc->va = pci_alloc_consistent(pdev, 336 mbox_mem_alloc->va = pci_alloc_consistent(pdev,
209 mbox_mem_alloc->size, 337 mbox_mem_alloc->size,
@@ -219,34 +347,35 @@ static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
219 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); 347 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
220 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox)); 348 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
221 spin_lock_init(&ctrl->mbox_lock); 349 spin_lock_init(&ctrl->mbox_lock);
350 spin_lock_init(&phba->ctrl.mcc_lock);
351 spin_lock_init(&phba->ctrl.mcc_cq_lock);
352
222 return status; 353 return status;
223} 354}
224 355
225static void beiscsi_get_params(struct beiscsi_hba *phba) 356static void beiscsi_get_params(struct beiscsi_hba *phba)
226{ 357{
227 phba->params.ios_per_ctrl = BE2_IO_DEPTH; 358 phba->params.ios_per_ctrl = (phba->fw_config.iscsi_icd_count
228 phba->params.cxns_per_ctrl = BE2_MAX_SESSIONS; 359 - (phba->fw_config.iscsi_cid_count
229 phba->params.asyncpdus_per_ctrl = BE2_ASYNCPDUS; 360 + BE2_TMFS
230 phba->params.icds_per_ctrl = BE2_MAX_ICDS / 2; 361 + BE2_NOPOUT_REQ));
362 phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
363 phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count * 2;
230 phba->params.icds_per_ctrl = BE2_MAX_ICDS / 2; 364 phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;
231 phba->params.num_sge_per_io = BE2_SGE; 365 phba->params.num_sge_per_io = BE2_SGE;
232 phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ; 366 phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
233 phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ; 367 phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
234 phba->params.eq_timer = 64; 368 phba->params.eq_timer = 64;
235 phba->params.num_eq_entries = 369 phba->params.num_eq_entries =
236 (((BE2_CMDS_PER_CXN * 2 + BE2_LOGOUTS + BE2_TMFS + BE2_ASYNCPDUS) / 370 (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
237 512) + 1) * 512; 371 + BE2_TMFS) / 512) + 1) * 512;
238 phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024) 372 phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
239 ? 1024 : phba->params.num_eq_entries; 373 ? 1024 : phba->params.num_eq_entries;
240 SE_DEBUG(DBG_LVL_8, "phba->params.num_eq_entries=%d \n", 374 SE_DEBUG(DBG_LVL_8, "phba->params.num_eq_entries=%d \n",
241 phba->params.num_eq_entries); 375 phba->params.num_eq_entries);
242 phba->params.num_cq_entries = 376 phba->params.num_cq_entries =
243 (((BE2_CMDS_PER_CXN * 2 + BE2_LOGOUTS + BE2_TMFS + BE2_ASYNCPDUS) / 377 (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
244 512) + 1) * 512; 378 + BE2_TMFS) / 512) + 1) * 512;
245 SE_DEBUG(DBG_LVL_8,
246 "phba->params.num_cq_entries=%d BE2_CMDS_PER_CXN=%d"
247 "BE2_LOGOUTS=%d BE2_TMFS=%d BE2_ASYNCPDUS=%d \n",
248 phba->params.num_cq_entries, BE2_CMDS_PER_CXN,
249 BE2_LOGOUTS, BE2_TMFS, BE2_ASYNCPDUS);
250 phba->params.wrbs_per_cxn = 256; 379 phba->params.wrbs_per_cxn = 256;
251} 380}
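/*
 * Worked example of the sizing now derived from the firmware config
 * (all numbers invented for illustration): with iscsi_icd_count = 2048,
 * iscsi_cid_count = 64, BE2_TMFS = 16 and BE2_NOPOUT_REQ = 16,
 *
 *	ios_per_ctrl       = 2048 - (64 + 16 + 16) = 1952
 *	asyncpdus_per_ctrl = 64 * 2                = 128
 *	icds_per_ctrl      = 2048
 *
 * so command depth scales with what the adapter firmware reports
 * instead of the old compile-time BE2_* constants.
 */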
252 381
@@ -268,6 +397,113 @@ static void hwi_ring_eq_db(struct beiscsi_hba *phba,
268} 397}
269 398
270/** 399/**
400 * be_isr_mcc - The isr routine of the driver.
401 * @irq: Not used
402 * @dev_id: Pointer to host adapter structure
403 */
404static irqreturn_t be_isr_mcc(int irq, void *dev_id)
405{
406 struct beiscsi_hba *phba;
407 struct be_eq_entry *eqe = NULL;
408 struct be_queue_info *eq;
409 struct be_queue_info *mcc;
410 unsigned int num_eq_processed;
411 struct be_eq_obj *pbe_eq;
412 unsigned long flags;
413
414 pbe_eq = dev_id;
415 eq = &pbe_eq->q;
416 phba = pbe_eq->phba;
417 mcc = &phba->ctrl.mcc_obj.cq;
418 eqe = queue_tail_node(eq);
419 if (!eqe)
420 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
421
422 num_eq_processed = 0;
423
424 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
425 & EQE_VALID_MASK) {
426 if (((eqe->dw[offsetof(struct amap_eq_entry,
427 resource_id) / 32] &
428 EQE_RESID_MASK) >> 16) == mcc->id) {
429 spin_lock_irqsave(&phba->isr_lock, flags);
430 phba->todo_mcc_cq = 1;
431 spin_unlock_irqrestore(&phba->isr_lock, flags);
432 }
433 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
434 queue_tail_inc(eq);
435 eqe = queue_tail_node(eq);
436 num_eq_processed++;
437 }
438 if (phba->todo_mcc_cq)
439 queue_work(phba->wq, &phba->work_cqs);
440 if (num_eq_processed)
441 hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
442
443 return IRQ_HANDLED;
444}
445
446/**
447 * be_isr_msix - The isr routine of the driver.
448 * @irq: Not used
449 * @dev_id: Pointer to host adapter structure
450 */
451static irqreturn_t be_isr_msix(int irq, void *dev_id)
452{
453 struct beiscsi_hba *phba;
454 struct be_eq_entry *eqe = NULL;
455 struct be_queue_info *eq;
456 struct be_queue_info *cq;
457 unsigned int num_eq_processed;
458 struct be_eq_obj *pbe_eq;
459 unsigned long flags;
460
461 pbe_eq = dev_id;
462 eq = &pbe_eq->q;
463 cq = pbe_eq->cq;
464 eqe = queue_tail_node(eq);
465 if (!eqe)
466 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
467
468 phba = pbe_eq->phba;
469 num_eq_processed = 0;
470 if (blk_iopoll_enabled) {
471 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
472 & EQE_VALID_MASK) {
473 if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
474 blk_iopoll_sched(&pbe_eq->iopoll);
475
476 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
477 queue_tail_inc(eq);
478 eqe = queue_tail_node(eq);
479 num_eq_processed++;
480 }
481 if (num_eq_processed)
482 hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);
483
484 return IRQ_HANDLED;
485 } else {
486 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
487 & EQE_VALID_MASK) {
488 spin_lock_irqsave(&phba->isr_lock, flags);
489 phba->todo_cq = 1;
490 spin_unlock_irqrestore(&phba->isr_lock, flags);
491 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
492 queue_tail_inc(eq);
493 eqe = queue_tail_node(eq);
494 num_eq_processed++;
495 }
496 if (phba->todo_cq)
497 queue_work(phba->wq, &phba->work_cqs);
498
499 if (num_eq_processed)
500 hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
501
502 return IRQ_HANDLED;
503 }
504}
505
506/**
271 * be_isr - The isr routine of the driver. 507 * be_isr - The isr routine of the driver.
272 * @irq: Not used 508 * @irq: Not used
273 * @dev_id: Pointer to host adapter structure 509 * @dev_id: Pointer to host adapter structure
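/*
 * Condensed model of the event-queue drain loop the ISRs in this file
 * share (not a third handler, just the common shape): consume entries
 * while the valid bit is set, clear the bit, advance the tail, and
 * return the count for the EQ doorbell rearm.
 */
static unsigned int eq_drain(struct be_queue_info *eq)
{
	struct be_eq_entry *eqe = queue_tail_node(eq);
	unsigned int num = 0;

	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
	       & EQE_VALID_MASK) {
		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
		queue_tail_inc(eq);
		eqe = queue_tail_node(eq);
		num++;
	}
	return num;	/* caller passes this to hwi_ring_eq_db() */
}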
@@ -280,48 +516,70 @@ static irqreturn_t be_isr(int irq, void *dev_id)
280 struct be_eq_entry *eqe = NULL; 516 struct be_eq_entry *eqe = NULL;
281 struct be_queue_info *eq; 517 struct be_queue_info *eq;
282 struct be_queue_info *cq; 518 struct be_queue_info *cq;
519 struct be_queue_info *mcc;
283 unsigned long flags, index; 520 unsigned long flags, index;
284 unsigned int num_eq_processed; 521 unsigned int num_mcceq_processed, num_ioeq_processed;
285 struct be_ctrl_info *ctrl; 522 struct be_ctrl_info *ctrl;
523 struct be_eq_obj *pbe_eq;
286 int isr; 524 int isr;
287 525
288 phba = dev_id; 526 phba = dev_id;
289 if (!enable_msix) { 527 ctrl = &phba->ctrl;
290 ctrl = &phba->ctrl; 528 isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
291 isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET + 529 (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
292 (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE)); 530 if (!isr)
293 if (!isr) 531 return IRQ_NONE;
294 return IRQ_NONE;
295 }
296 532
297 phwi_ctrlr = phba->phwi_ctrlr; 533 phwi_ctrlr = phba->phwi_ctrlr;
298 phwi_context = phwi_ctrlr->phwi_ctxt; 534 phwi_context = phwi_ctrlr->phwi_ctxt;
299 eq = &phwi_context->be_eq.q; 535 pbe_eq = &phwi_context->be_eq[0];
300 cq = &phwi_context->be_cq; 536
537 eq = &phwi_context->be_eq[0].q;
538 mcc = &phba->ctrl.mcc_obj.cq;
301 index = 0; 539 index = 0;
302 eqe = queue_tail_node(eq); 540 eqe = queue_tail_node(eq);
303 if (!eqe) 541 if (!eqe)
304 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n"); 542 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
305 543
306 num_eq_processed = 0; 544 num_ioeq_processed = 0;
545 num_mcceq_processed = 0;
307 if (blk_iopoll_enabled) { 546 if (blk_iopoll_enabled) {
308 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] 547 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
309 & EQE_VALID_MASK) { 548 & EQE_VALID_MASK) {
310 if (!blk_iopoll_sched_prep(&phba->iopoll)) 549 if (((eqe->dw[offsetof(struct amap_eq_entry,
311 blk_iopoll_sched(&phba->iopoll); 550 resource_id) / 32] &
312 551 EQE_RESID_MASK) >> 16) == mcc->id) {
552 spin_lock_irqsave(&phba->isr_lock, flags);
553 phba->todo_mcc_cq = 1;
554 spin_unlock_irqrestore(&phba->isr_lock, flags);
555 num_mcceq_processed++;
556 } else {
557 if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
558 blk_iopoll_sched(&pbe_eq->iopoll);
559 num_ioeq_processed++;
560 }
313 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); 561 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
314 queue_tail_inc(eq); 562 queue_tail_inc(eq);
315 eqe = queue_tail_node(eq); 563 eqe = queue_tail_node(eq);
316 num_eq_processed++;
317 SE_DEBUG(DBG_LVL_8, "Valid EQE\n");
318 } 564 }
319 if (num_eq_processed) { 565 if (num_ioeq_processed || num_mcceq_processed) {
320 hwi_ring_eq_db(phba, eq->id, 0, num_eq_processed, 0, 1); 566 if (phba->todo_mcc_cq)
567 queue_work(phba->wq, &phba->work_cqs);
568
569 if ((num_mcceq_processed) && (!num_ioeq_processed))
570 hwi_ring_eq_db(phba, eq->id, 0,
571 (num_ioeq_processed +
572 num_mcceq_processed) , 1, 1);
573 else
574 hwi_ring_eq_db(phba, eq->id, 0,
575 (num_ioeq_processed +
576 num_mcceq_processed), 0, 1);
577
321 return IRQ_HANDLED; 578 return IRQ_HANDLED;
322 } else 579 } else
323 return IRQ_NONE; 580 return IRQ_NONE;
324 } else { 581 } else {
582 cq = &phwi_context->be_cq[0];
325 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] 583 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
326 & EQE_VALID_MASK) { 584 & EQE_VALID_MASK) {
327 585
@@ -339,13 +597,14 @@ static irqreturn_t be_isr(int irq, void *dev_id)
339 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); 597 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
340 queue_tail_inc(eq); 598 queue_tail_inc(eq);
341 eqe = queue_tail_node(eq); 599 eqe = queue_tail_node(eq);
342 num_eq_processed++; 600 num_ioeq_processed++;
343 } 601 }
344 if (phba->todo_cq || phba->todo_mcc_cq) 602 if (phba->todo_cq || phba->todo_mcc_cq)
345 queue_work(phba->wq, &phba->work_cqs); 603 queue_work(phba->wq, &phba->work_cqs);
346 604
347 if (num_eq_processed) { 605 if (num_ioeq_processed) {
348 hwi_ring_eq_db(phba, eq->id, 0, num_eq_processed, 1, 1); 606 hwi_ring_eq_db(phba, eq->id, 0,
607 num_ioeq_processed, 1, 1);
349 return IRQ_HANDLED; 608 return IRQ_HANDLED;
350 } else 609 } else
351 return IRQ_NONE; 610 return IRQ_NONE;
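
The reworked be_isr() now demultiplexes events by the EQE's resource id: entries whose id matches the MCC completion queue are deferred to the work queue, everything else is I/O handed to blk_iopoll. A small compiling model of that test, assuming a 16-bit id field in the upper half of the word (the mask value here is illustrative, not the driver's):

#include <stdio.h>

#define EQE_RESID_MASK 0xffff0000u	/* assumed layout: id in bits 31:16 */

int main(void)
{
	unsigned int mcc_cq_id = 7;		/* illustrative CQ id */
	unsigned int eqe_dw = 7u << 16;		/* fabricated event word */

	if (((eqe_dw & EQE_RESID_MASK) >> 16) == mcc_cq_id)
		printf("MCC event: mark todo_mcc_cq, defer to work_cqs\n");
	else
		printf("I/O event: schedule blk_iopoll\n");
	return 0;
}
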
@@ -355,13 +614,32 @@ static irqreturn_t be_isr(int irq, void *dev_id)
355static int beiscsi_init_irqs(struct beiscsi_hba *phba) 614static int beiscsi_init_irqs(struct beiscsi_hba *phba)
356{ 615{
357 struct pci_dev *pcidev = phba->pcidev; 616 struct pci_dev *pcidev = phba->pcidev;
358 int ret; 617 struct hwi_controller *phwi_ctrlr;
618 struct hwi_context_memory *phwi_context;
619 int ret, msix_vec, i = 0;
620 char desc[32];
359 621
360 ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED, "beiscsi", phba); 622 phwi_ctrlr = phba->phwi_ctrlr;
361 if (ret) { 623 phwi_context = phwi_ctrlr->phwi_ctxt;
362 shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-" 624
363 "Failed to register irq\\n"); 625 if (phba->msix_enabled) {
364 return ret; 626 for (i = 0; i < phba->num_cpus; i++) {
627 sprintf(desc, "beiscsi_msix_%04x", i);
628 msix_vec = phba->msix_entries[i].vector;
629 ret = request_irq(msix_vec, be_isr_msix, 0, desc,
630 &phwi_context->be_eq[i]);
631 }
632 msix_vec = phba->msix_entries[i].vector;
633 ret = request_irq(msix_vec, be_isr_mcc, 0, "beiscsi_msix_mcc",
634 &phwi_context->be_eq[i]);
635 } else {
636 ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
637 "beiscsi", phba);
638 if (ret) {
639 shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-"
640 "Failed to register irq\\n");
641 return ret;
642 }
365 } 643 }
366 return 0; 644 return 0;
367} 645}
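
Note that the MSI-X loop above assigns ret from request_irq() but never checks it, so a mid-loop failure would leave the earlier vectors registered. A userspace model of the register-or-unwind pattern such a loop would normally want (stub functions and return values are fabricated):

#include <stdio.h>

/* Fabricated stubs: vector 3 fails so the unwind path runs. */
static int request_irq_stub(int vec) { return vec == 3 ? -16 : 0; }
static void free_irq_stub(int vec) { printf("free vector %d\n", vec); }

static int init_irqs(int nvec)
{
	int i, ret = 0;

	for (i = 0; i < nvec; i++) {
		ret = request_irq_stub(i);
		if (ret)
			goto unwind;	/* release vectors 0..i-1 */
	}
	return 0;
unwind:
	while (--i >= 0)
		free_irq_stub(i);
	return ret;
}

int main(void)
{
	return init_irqs(5) ? 1 : 0;
}
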
@@ -378,15 +656,6 @@ static void hwi_ring_cq_db(struct beiscsi_hba *phba,
378 iowrite32(val, phba->db_va + DB_CQ_OFFSET); 656 iowrite32(val, phba->db_va + DB_CQ_OFFSET);
379} 657}
380 658
381/*
382 * async pdus include
383 * a. unsolicited NOP-In (target initiated NOP-In)
384 * b. Async Messages
385 * c. Reject PDU
386 * d. Login response
387 * These headers arrive unprocessed by the EP firmware and iSCSI layer
388 * process them
389 */
390static unsigned int 659static unsigned int
391beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn, 660beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
392 struct beiscsi_hba *phba, 661 struct beiscsi_hba *phba,
@@ -397,6 +666,9 @@ beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
397{ 666{
398 struct iscsi_conn *conn = beiscsi_conn->conn; 667 struct iscsi_conn *conn = beiscsi_conn->conn;
399 struct iscsi_session *session = conn->session; 668 struct iscsi_session *session = conn->session;
669 struct iscsi_task *task;
670 struct beiscsi_io_task *io_task;
671 struct iscsi_hdr *login_hdr;
400 672
401 switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] & 673 switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
402 PDUBASE_OPCODE_MASK) { 674 PDUBASE_OPCODE_MASK) {
@@ -412,6 +684,11 @@ beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
412 SE_DEBUG(DBG_LVL_1, "In ISCSI_OP_REJECT\n"); 684 SE_DEBUG(DBG_LVL_1, "In ISCSI_OP_REJECT\n");
413 break; 685 break;
414 case ISCSI_OP_LOGIN_RSP: 686 case ISCSI_OP_LOGIN_RSP:
687 case ISCSI_OP_TEXT_RSP:
688 task = conn->login_task;
689 io_task = task->dd_data;
690 login_hdr = (struct iscsi_hdr *)ppdu;
691 login_hdr->itt = io_task->libiscsi_itt;
415 break; 692 break;
416 default: 693 default:
417 shost_printk(KERN_WARNING, phba->shost, 694 shost_printk(KERN_WARNING, phba->shost,
@@ -440,7 +717,8 @@ static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
440 io_sgl_alloc_index]; 717 io_sgl_alloc_index];
441 phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL; 718 phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
442 phba->io_sgl_hndl_avbl--; 719 phba->io_sgl_hndl_avbl--;
443 if (phba->io_sgl_alloc_index == (phba->params.ios_per_ctrl - 1)) 720 if (phba->io_sgl_alloc_index == (phba->params.
721 ios_per_ctrl - 1))
444 phba->io_sgl_alloc_index = 0; 722 phba->io_sgl_alloc_index = 0;
445 else 723 else
446 phba->io_sgl_alloc_index++; 724 phba->io_sgl_alloc_index++;
@@ -477,22 +755,31 @@ free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
477 * alloc_wrb_handle - To allocate a wrb handle 755 * alloc_wrb_handle - To allocate a wrb handle
478 * @phba: The hba pointer 756 * @phba: The hba pointer
479 * @cid: The cid to use for allocation 757 * @cid: The cid to use for allocation
480 * @index: index allocation and wrb index
481 * 758 *
482 * This happens under session_lock until submission to chip 759 * This happens under session_lock until submission to chip
483 */ 760 */
484struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid, 761struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid)
485 int index)
486{ 762{
487 struct hwi_wrb_context *pwrb_context; 763 struct hwi_wrb_context *pwrb_context;
488 struct hwi_controller *phwi_ctrlr; 764 struct hwi_controller *phwi_ctrlr;
489 struct wrb_handle *pwrb_handle; 765 struct wrb_handle *pwrb_handle, *pwrb_handle_tmp;
490 766
491 phwi_ctrlr = phba->phwi_ctrlr; 767 phwi_ctrlr = phba->phwi_ctrlr;
492 pwrb_context = &phwi_ctrlr->wrb_context[cid]; 768 pwrb_context = &phwi_ctrlr->wrb_context[cid];
493 pwrb_handle = pwrb_context->pwrb_handle_base[index]; 769 if (pwrb_context->wrb_handles_available >= 2) {
494 pwrb_handle->wrb_index = index; 770 pwrb_handle = pwrb_context->pwrb_handle_base[
495 pwrb_handle->nxt_wrb_index = index; 771 pwrb_context->alloc_index];
772 pwrb_context->wrb_handles_available--;
773 if (pwrb_context->alloc_index ==
774 (phba->params.wrbs_per_cxn - 1))
775 pwrb_context->alloc_index = 0;
776 else
777 pwrb_context->alloc_index++;
778 pwrb_handle_tmp = pwrb_context->pwrb_handle_base[
779 pwrb_context->alloc_index];
780 pwrb_handle->nxt_wrb_index = pwrb_handle_tmp->wrb_index;
781 } else
782 pwrb_handle = NULL;
496 return pwrb_handle; 783 return pwrb_handle;
497} 784}
498 785
@@ -508,11 +795,18 @@ static void
508free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context, 795free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
509 struct wrb_handle *pwrb_handle) 796 struct wrb_handle *pwrb_handle)
510{ 797{
798 pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
799 pwrb_context->wrb_handles_available++;
800 if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1))
801 pwrb_context->free_index = 0;
802 else
803 pwrb_context->free_index++;
804
511 SE_DEBUG(DBG_LVL_8, 805 SE_DEBUG(DBG_LVL_8,
512 "FREE WRB: pwrb_handle=%p free_index=%d=0x%x" 806 "FREE WRB: pwrb_handle=%p free_index=0x%x"
513 "wrb_handles_available=%d \n", 807 "wrb_handles_available=%d \n",
514 pwrb_handle, pwrb_context->free_index, 808 pwrb_handle, pwrb_context->free_index,
515 pwrb_context->free_index, pwrb_context->wrb_handles_available); 809 pwrb_context->wrb_handles_available);
516} 810}
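
alloc_wrb_handle() and free_wrb_handle() now manage each connection's WRB handles as a circular free list: alloc_index chases free_index through pwrb_handle_base, and allocation insists on two available handles so the freshly allocated WRB can record its successor's index in nxt_wrb_index. A compiling model of the index arithmetic, with handles reduced to integers:

#include <stdio.h>

#define WRBS_PER_CXN 4

/* Same alloc_index/free_index dance as the two functions above. */
static int handles[WRBS_PER_CXN] = { 0, 1, 2, 3 };
static int alloc_index, free_index, available = WRBS_PER_CXN;

static int alloc_handle(void)
{
	int h;

	if (available < 2)	/* keep a spare for nxt_wrb_index */
		return -1;
	h = handles[alloc_index];
	available--;
	alloc_index = (alloc_index + 1) % WRBS_PER_CXN;
	return h;	/* next slot's index becomes nxt_wrb_index */
}

static void free_handle(int h)
{
	handles[free_index] = h;
	available++;
	free_index = (free_index + 1) % WRBS_PER_CXN;
}

int main(void)
{
	int h = alloc_handle();

	printf("allocated %d, %d still available\n", h, available);
	free_handle(h);
	return 0;
}
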
517 811
518static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba) 812static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
@@ -540,6 +834,8 @@ void
540free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle) 834free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
541{ 835{
542 836
837 SE_DEBUG(DBG_LVL_8, "In free_mgmt_sgl_handle, eh_sgl_free_index=%d \n",
838 phba->eh_sgl_free_index);
543 if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) { 839 if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
544 /* 840 /*
545 * this can happen if clean_task is called on a task that 841 * this can happen if clean_task is called on a task that
@@ -572,10 +868,10 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn,
572 u32 resid = 0, exp_cmdsn, max_cmdsn; 868 u32 resid = 0, exp_cmdsn, max_cmdsn;
573 u8 rsp, status, flags; 869 u8 rsp, status, flags;
574 870
575 exp_cmdsn = be32_to_cpu(psol-> 871 exp_cmdsn = (psol->
576 dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32] 872 dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
577 & SOL_EXP_CMD_SN_MASK); 873 & SOL_EXP_CMD_SN_MASK);
578 max_cmdsn = be32_to_cpu((psol-> 874 max_cmdsn = ((psol->
579 dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32] 875 dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
580 & SOL_EXP_CMD_SN_MASK) + 876 & SOL_EXP_CMD_SN_MASK) +
581 ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd) 877 ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
@@ -610,18 +906,19 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn,
610 } 906 }
611 907
612 if (status == SAM_STAT_CHECK_CONDITION) { 908 if (status == SAM_STAT_CHECK_CONDITION) {
909 unsigned short *slen = (unsigned short *)sts_bhs->sense_info;
613 sense = sts_bhs->sense_info + sizeof(unsigned short); 910 sense = sts_bhs->sense_info + sizeof(unsigned short);
614 sense_len = 911 sense_len = cpu_to_be16(*slen);
615 cpu_to_be16((unsigned short)(sts_bhs->sense_info[0]));
616 memcpy(task->sc->sense_buffer, sense, 912 memcpy(task->sc->sense_buffer, sense,
617 min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE)); 913 min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
618 } 914 }
915
619 if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) { 916 if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) {
620 if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32] 917 if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
621 & SOL_RES_CNT_MASK) 918 & SOL_RES_CNT_MASK)
622 conn->rxdata_octets += (psol-> 919 conn->rxdata_octets += (psol->
623 dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32] 920 dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
624 & SOL_RES_CNT_MASK); 921 & SOL_RES_CNT_MASK);
625 } 922 }
626unmap: 923unmap:
627 scsi_dma_unmap(io_task->scsi_cmnd); 924 scsi_dma_unmap(io_task->scsi_cmnd);
@@ -633,9 +930,11 @@ be_complete_logout(struct beiscsi_conn *beiscsi_conn,
633 struct iscsi_task *task, struct sol_cqe *psol) 930 struct iscsi_task *task, struct sol_cqe *psol)
634{ 931{
635 struct iscsi_logout_rsp *hdr; 932 struct iscsi_logout_rsp *hdr;
933 struct beiscsi_io_task *io_task = task->dd_data;
636 struct iscsi_conn *conn = beiscsi_conn->conn; 934 struct iscsi_conn *conn = beiscsi_conn->conn;
637 935
638 hdr = (struct iscsi_logout_rsp *)task->hdr; 936 hdr = (struct iscsi_logout_rsp *)task->hdr;
937 hdr->opcode = ISCSI_OP_LOGOUT_RSP;
639 hdr->t2wait = 5; 938 hdr->t2wait = 5;
640 hdr->t2retain = 0; 939 hdr->t2retain = 0;
641 hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32] 940 hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
@@ -650,8 +949,11 @@ be_complete_logout(struct beiscsi_conn *beiscsi_conn,
650 & SOL_EXP_CMD_SN_MASK) + 949 & SOL_EXP_CMD_SN_MASK) +
651 ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd) 950 ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
652 / 32] & SOL_CMD_WND_MASK) >> 24) - 1); 951 / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
952 hdr->dlength[0] = 0;
953 hdr->dlength[1] = 0;
954 hdr->dlength[2] = 0;
653 hdr->hlength = 0; 955 hdr->hlength = 0;
654 956 hdr->itt = io_task->libiscsi_itt;
655 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); 957 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
656} 958}
657 959
@@ -661,18 +963,21 @@ be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
661{ 963{
662 struct iscsi_tm_rsp *hdr; 964 struct iscsi_tm_rsp *hdr;
663 struct iscsi_conn *conn = beiscsi_conn->conn; 965 struct iscsi_conn *conn = beiscsi_conn->conn;
966 struct beiscsi_io_task *io_task = task->dd_data;
664 967
665 hdr = (struct iscsi_tm_rsp *)task->hdr; 968 hdr = (struct iscsi_tm_rsp *)task->hdr;
969 hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
666 hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32] 970 hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
667 & SOL_FLAGS_MASK) >> 24) | 0x80; 971 & SOL_FLAGS_MASK) >> 24) | 0x80;
668 hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) / 972 hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
669 32] & SOL_RESP_MASK); 973 32] & SOL_RESP_MASK);
670 hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe, 974 hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
671 i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK); 975 i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
672 hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe, 976 hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
673 i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) + 977 i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
674 ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd) 978 ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
675 / 32] & SOL_CMD_WND_MASK) >> 24) - 1); 979 / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
980 hdr->itt = io_task->libiscsi_itt;
676 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); 981 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
677} 982}
678 983
@@ -681,18 +986,27 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
681 struct beiscsi_hba *phba, struct sol_cqe *psol) 986 struct beiscsi_hba *phba, struct sol_cqe *psol)
682{ 987{
683 struct hwi_wrb_context *pwrb_context; 988 struct hwi_wrb_context *pwrb_context;
684 struct wrb_handle *pwrb_handle; 989 struct wrb_handle *pwrb_handle = NULL;
685 struct hwi_controller *phwi_ctrlr; 990 struct hwi_controller *phwi_ctrlr;
991 struct iscsi_task *task;
992 struct beiscsi_io_task *io_task;
686 struct iscsi_conn *conn = beiscsi_conn->conn; 993 struct iscsi_conn *conn = beiscsi_conn->conn;
687 struct iscsi_session *session = conn->session; 994 struct iscsi_session *session = conn->session;
688 995
689 phwi_ctrlr = phba->phwi_ctrlr; 996 phwi_ctrlr = phba->phwi_ctrlr;
690 pwrb_context = &phwi_ctrlr->wrb_context[((psol-> 997 pwrb_context = &phwi_ctrlr->wrb_context[((psol->
691 dw[offsetof(struct amap_sol_cqe, cid) / 32] & 998 dw[offsetof(struct amap_sol_cqe, cid) / 32] &
692 SOL_CID_MASK) >> 6)]; 999 SOL_CID_MASK) >> 6) -
1000 phba->fw_config.iscsi_cid_start];
693 pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol-> 1001 pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
694 dw[offsetof(struct amap_sol_cqe, wrb_index) / 1002 dw[offsetof(struct amap_sol_cqe, wrb_index) /
695 32] & SOL_WRB_INDEX_MASK) >> 16)]; 1003 32] & SOL_WRB_INDEX_MASK) >> 16)];
1004 task = pwrb_handle->pio_handle;
1005
1006 io_task = task->dd_data;
1007 spin_lock(&phba->mgmt_sgl_lock);
1008 free_mgmt_sgl_handle(phba, io_task->psgl_handle);
1009 spin_unlock(&phba->mgmt_sgl_lock);
696 spin_lock_bh(&session->lock); 1010 spin_lock_bh(&session->lock);
697 free_wrb_handle(phba, pwrb_context, pwrb_handle); 1011 free_wrb_handle(phba, pwrb_context, pwrb_handle);
698 spin_unlock_bh(&session->lock); 1012 spin_unlock_bh(&session->lock);
@@ -704,6 +1018,7 @@ be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
704{ 1018{
705 struct iscsi_nopin *hdr; 1019 struct iscsi_nopin *hdr;
706 struct iscsi_conn *conn = beiscsi_conn->conn; 1020 struct iscsi_conn *conn = beiscsi_conn->conn;
1021 struct beiscsi_io_task *io_task = task->dd_data;
707 1022
708 hdr = (struct iscsi_nopin *)task->hdr; 1023 hdr = (struct iscsi_nopin *)task->hdr;
709 hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32] 1024 hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
@@ -715,6 +1030,7 @@ be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
715 ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd) 1030 ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
716 / 32] & SOL_CMD_WND_MASK) >> 24) - 1); 1031 / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
717 hdr->opcode = ISCSI_OP_NOOP_IN; 1032 hdr->opcode = ISCSI_OP_NOOP_IN;
1033 hdr->itt = io_task->libiscsi_itt;
718 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); 1034 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
719} 1035}
720 1036
@@ -726,36 +1042,40 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
726 struct iscsi_wrb *pwrb = NULL; 1042 struct iscsi_wrb *pwrb = NULL;
727 struct hwi_controller *phwi_ctrlr; 1043 struct hwi_controller *phwi_ctrlr;
728 struct iscsi_task *task; 1044 struct iscsi_task *task;
729 struct beiscsi_io_task *io_task; 1045 unsigned int type;
730 struct iscsi_conn *conn = beiscsi_conn->conn; 1046 struct iscsi_conn *conn = beiscsi_conn->conn;
731 struct iscsi_session *session = conn->session; 1047 struct iscsi_session *session = conn->session;
732 1048
733 phwi_ctrlr = phba->phwi_ctrlr; 1049 phwi_ctrlr = phba->phwi_ctrlr;
734 1050 pwrb_context = &phwi_ctrlr->wrb_context[((psol->dw[offsetof
735 pwrb_context = &phwi_ctrlr-> 1051 (struct amap_sol_cqe, cid) / 32]
736 wrb_context[((psol->dw[offsetof(struct amap_sol_cqe, cid) / 32] 1052 & SOL_CID_MASK) >> 6) -
737 & SOL_CID_MASK) >> 6)]; 1053 phba->fw_config.iscsi_cid_start];
738 pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol-> 1054 pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
739 dw[offsetof(struct amap_sol_cqe, wrb_index) / 1055 dw[offsetof(struct amap_sol_cqe, wrb_index) /
740 32] & SOL_WRB_INDEX_MASK) >> 16)]; 1056 32] & SOL_WRB_INDEX_MASK) >> 16)];
741
742 task = pwrb_handle->pio_handle; 1057 task = pwrb_handle->pio_handle;
743 io_task = task->dd_data;
744 spin_lock_bh(&session->lock);
745 pwrb = pwrb_handle->pwrb; 1058 pwrb = pwrb_handle->pwrb;
746 switch ((pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] & 1059 type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] &
747 WRB_TYPE_MASK) >> 28) { 1060 WRB_TYPE_MASK) >> 28;
1061
1062 spin_lock_bh(&session->lock);
1063 switch (type) {
748 case HWH_TYPE_IO: 1064 case HWH_TYPE_IO:
749 case HWH_TYPE_IO_RD: 1065 case HWH_TYPE_IO_RD:
750 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == 1066 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
751 ISCSI_OP_NOOP_OUT) { 1067 ISCSI_OP_NOOP_OUT)
752 be_complete_nopin_resp(beiscsi_conn, task, psol); 1068 be_complete_nopin_resp(beiscsi_conn, task, psol);
753 } else 1069 else
754 be_complete_io(beiscsi_conn, task, psol); 1070 be_complete_io(beiscsi_conn, task, psol);
755 break; 1071 break;
756 1072
757 case HWH_TYPE_LOGOUT: 1073 case HWH_TYPE_LOGOUT:
758 be_complete_logout(beiscsi_conn, task, psol); 1074 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
1075 be_complete_logout(beiscsi_conn, task, psol);
1076 else
1077 be_complete_tmf(beiscsi_conn, task, psol);
1078
759 break; 1079 break;
760 1080
761 case HWH_TYPE_LOGIN: 1081 case HWH_TYPE_LOGIN:
@@ -764,21 +1084,18 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
764 "- Solicited path \n"); 1084 "- Solicited path \n");
765 break; 1085 break;
766 1086
767 case HWH_TYPE_TMF:
768 be_complete_tmf(beiscsi_conn, task, psol);
769 break;
770
771 case HWH_TYPE_NOP: 1087 case HWH_TYPE_NOP:
772 be_complete_nopin_resp(beiscsi_conn, task, psol); 1088 be_complete_nopin_resp(beiscsi_conn, task, psol);
773 break; 1089 break;
774 1090
775 default: 1091 default:
776 shost_printk(KERN_WARNING, phba->shost, 1092 shost_printk(KERN_WARNING, phba->shost,
777 "wrb_index 0x%x CID 0x%x\n", 1093 "In hwi_complete_cmd, unknown type = %d"
778 ((psol->dw[offsetof(struct amap_iscsi_wrb, type) / 1094 "wrb_index 0x%x CID 0x%x\n", type,
779 32] & SOL_WRB_INDEX_MASK) >> 16), 1095 ((psol->dw[offsetof(struct amap_iscsi_wrb,
780 ((psol->dw[offsetof(struct amap_sol_cqe, cid) / 32] 1096 type) / 32] & SOL_WRB_INDEX_MASK) >> 16),
781 & SOL_CID_MASK) >> 6)); 1097 ((psol->dw[offsetof(struct amap_sol_cqe,
1098 cid) / 32] & SOL_CID_MASK) >> 6));
782 break; 1099 break;
783 } 1100 }
784 1101
@@ -863,7 +1180,8 @@ hwi_get_async_handle(struct beiscsi_hba *phba,
863 1180
864 WARN_ON(!pasync_handle); 1181 WARN_ON(!pasync_handle);
865 1182
866 pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid; 1183 pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid -
1184 phba->fw_config.iscsi_cid_start;
867 pasync_handle->is_header = is_header; 1185 pasync_handle->is_header = is_header;
868 pasync_handle->buffer_len = ((pdpdu_cqe-> 1186 pasync_handle->buffer_len = ((pdpdu_cqe->
869 dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32] 1187 dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
@@ -1113,9 +1431,10 @@ hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
1113 } 1431 }
1114 1432
1115 status = beiscsi_process_async_pdu(beiscsi_conn, phba, 1433 status = beiscsi_process_async_pdu(beiscsi_conn, phba,
1116 beiscsi_conn->beiscsi_conn_cid, 1434 (beiscsi_conn->beiscsi_conn_cid -
1117 phdr, hdr_len, pfirst_buffer, 1435 phba->fw_config.iscsi_cid_start),
1118 buf_len); 1436 phdr, hdr_len, pfirst_buffer,
1437 buf_len);
1119 1438
1120 if (status == 0) 1439 if (status == 0)
1121 hwi_free_async_msg(phba, cri); 1440 hwi_free_async_msg(phba, cri);
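
A pattern worth calling out: throughout this patch the firmware's absolute connection id is translated into a zero-based driver table index by subtracting phba->fw_config.iscsi_cid_start (see hwi_get_async_handle() and the call above). With illustrative numbers only:

#include <stdio.h>

int main(void)
{
	unsigned int iscsi_cid_start = 64;	/* illustrative fw value */
	unsigned int fw_cid = 70;		/* cid as reported in a CQE */

	/* driver-side table index, as computed throughout this patch */
	printf("table index = %u\n", fw_cid - iscsi_cid_start);	/* 6 */
	return 0;
}
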
@@ -1208,40 +1527,79 @@ static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
1208 hwi_post_async_buffers(phba, pasync_handle->is_header); 1527 hwi_post_async_buffers(phba, pasync_handle->is_header);
1209} 1528}
1210 1529
1211static unsigned int beiscsi_process_cq(struct beiscsi_hba *phba) 1530static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
1531{
1532 struct be_queue_info *mcc_cq;
1533 struct be_mcc_compl *mcc_compl;
1534 unsigned int num_processed = 0;
1535
1536 mcc_cq = &phba->ctrl.mcc_obj.cq;
1537 mcc_compl = queue_tail_node(mcc_cq);
1538 mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
1539 while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {
1540
1541 if (num_processed >= 32) {
1542 hwi_ring_cq_db(phba, mcc_cq->id,
1543 num_processed, 0, 0);
1544 num_processed = 0;
1545 }
1546 if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) {
1547 /* Interpret flags as an async trailer */
1548 if (is_link_state_evt(mcc_compl->flags))
1549 /* Interpret compl as a async link evt */
1550 beiscsi_async_link_state_process(phba,
1551 (struct be_async_event_link_state *) mcc_compl);
1552 else
1553 SE_DEBUG(DBG_LVL_1,
1554 " Unsupported Async Event, flags"
1555 " = 0x%08x \n", mcc_compl->flags);
1556 } else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
1557 be_mcc_compl_process_isr(&phba->ctrl, mcc_compl);
1558 atomic_dec(&phba->ctrl.mcc_obj.q.used);
1559 }
1560
1561 mcc_compl->flags = 0;
1562 queue_tail_inc(mcc_cq);
1563 mcc_compl = queue_tail_node(mcc_cq);
1564 mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
1565 num_processed++;
1566 }
1567
1568 if (num_processed > 0)
1569 hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1, 0);
1570
1571}
1572
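
beiscsi_process_mcc_isr() batches its completion-queue doorbell: every 32 consumed entries it credits the hardware without rearming, and only the final write rearms the CQ. A compiling model of that cadence:

#include <stdio.h>

static void ring_cq_db(int credits, int rearm)
{
	printf("hwi_ring_cq_db: %d credits, rearm=%d\n", credits, rearm);
}

static void process_mcc(int pending)
{
	int num_processed = 0;

	while (pending--) {
		if (num_processed >= 32) {	/* credit hw, keep going */
			ring_cq_db(num_processed, 0);
			num_processed = 0;
		}
		/* ... consume one MCC completion here ... */
		num_processed++;
	}
	if (num_processed > 0)			/* final write rearms */
		ring_cq_db(num_processed, 1);
}

int main(void)
{
	process_mcc(70);
	return 0;
}
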
1573static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
1212{ 1574{
1213 struct hwi_controller *phwi_ctrlr;
1214 struct hwi_context_memory *phwi_context;
1215 struct be_queue_info *cq; 1575 struct be_queue_info *cq;
1216 struct sol_cqe *sol; 1576 struct sol_cqe *sol;
1217 struct dmsg_cqe *dmsg; 1577 struct dmsg_cqe *dmsg;
1218 unsigned int num_processed = 0; 1578 unsigned int num_processed = 0;
1219 unsigned int tot_nump = 0; 1579 unsigned int tot_nump = 0;
1220 struct beiscsi_conn *beiscsi_conn; 1580 struct beiscsi_conn *beiscsi_conn;
1581 struct beiscsi_endpoint *beiscsi_ep;
1582 struct iscsi_endpoint *ep;
1583 struct beiscsi_hba *phba;
1221 1584
1222 phwi_ctrlr = phba->phwi_ctrlr; 1585 cq = pbe_eq->cq;
1223 phwi_context = phwi_ctrlr->phwi_ctxt;
1224 cq = &phwi_context->be_cq;
1225 sol = queue_tail_node(cq); 1586 sol = queue_tail_node(cq);
1587 phba = pbe_eq->phba;
1226 1588
1227 while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] & 1589 while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
1228 CQE_VALID_MASK) { 1590 CQE_VALID_MASK) {
1229 be_dws_le_to_cpu(sol, sizeof(struct sol_cqe)); 1591 be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
1230 1592
1231 beiscsi_conn = phba->conn_table[(u32) (sol-> 1593 ep = phba->ep_array[(u32) ((sol->
1232 dw[offsetof(struct amap_sol_cqe, cid) / 32] & 1594 dw[offsetof(struct amap_sol_cqe, cid) / 32] &
1233 SOL_CID_MASK) >> 6]; 1595 SOL_CID_MASK) >> 6) -
1596 phba->fw_config.iscsi_cid_start];
1234 1597
1235 if (!beiscsi_conn || !beiscsi_conn->ep) { 1598 beiscsi_ep = ep->dd_data;
1236 shost_printk(KERN_WARNING, phba->shost, 1599 beiscsi_conn = beiscsi_ep->conn;
1237 "Connection table empty for cid = %d\n",
1238 (u32)(sol->dw[offsetof(struct amap_sol_cqe,
1239 cid) / 32] & SOL_CID_MASK) >> 6);
1240 return 0;
1241 }
1242 1600
1243 if (num_processed >= 32) { 1601 if (num_processed >= 32) {
1244 hwi_ring_cq_db(phba, phwi_context->be_cq.id, 1602 hwi_ring_cq_db(phba, cq->id,
1245 num_processed, 0, 0); 1603 num_processed, 0, 0);
1246 tot_nump += num_processed; 1604 tot_nump += num_processed;
1247 num_processed = 0; 1605 num_processed = 0;
@@ -1258,8 +1616,12 @@ static unsigned int beiscsi_process_cq(struct beiscsi_hba *phba)
1258 hwi_complete_drvr_msgs(beiscsi_conn, phba, sol); 1616 hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
1259 break; 1617 break;
1260 case UNSOL_HDR_NOTIFY: 1618 case UNSOL_HDR_NOTIFY:
1619 SE_DEBUG(DBG_LVL_8, "Received UNSOL_HDR_NOTIFY\n");
1620 hwi_process_default_pdu_ring(beiscsi_conn, phba,
1621 (struct i_t_dpdu_cqe *)sol);
1622 break;
1261 case UNSOL_DATA_NOTIFY: 1623 case UNSOL_DATA_NOTIFY:
1262 SE_DEBUG(DBG_LVL_8, "Received UNSOL_HDR/DATA_NOTIFY\n"); 1624 SE_DEBUG(DBG_LVL_8, "Received UNSOL_DATA_NOTIFY\n");
1263 hwi_process_default_pdu_ring(beiscsi_conn, phba, 1625 hwi_process_default_pdu_ring(beiscsi_conn, phba,
1264 (struct i_t_dpdu_cqe *)sol); 1626 (struct i_t_dpdu_cqe *)sol);
1265 break; 1627 break;
@@ -1306,23 +1668,23 @@ static unsigned int beiscsi_process_cq(struct beiscsi_hba *phba)
1306 case CXN_KILLED_OVER_RUN_RESIDUAL: 1668 case CXN_KILLED_OVER_RUN_RESIDUAL:
1307 case CXN_KILLED_UNDER_RUN_RESIDUAL: 1669 case CXN_KILLED_UNDER_RUN_RESIDUAL:
1308 case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN: 1670 case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
1309 SE_DEBUG(DBG_LVL_1, "CQ Error %d, resetting CID " 1671 SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
1310 "0x%x...\n", 1672 "0x%x...\n",
1311 sol->dw[offsetof(struct amap_sol_cqe, code) / 1673 sol->dw[offsetof(struct amap_sol_cqe, code) /
1312 32] & CQE_CODE_MASK, 1674 32] & CQE_CODE_MASK,
1313 sol->dw[offsetof(struct amap_sol_cqe, cid) / 1675 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1314 32] & CQE_CID_MASK); 1676 32] & CQE_CID_MASK));
1315 iscsi_conn_failure(beiscsi_conn->conn, 1677 iscsi_conn_failure(beiscsi_conn->conn,
1316 ISCSI_ERR_CONN_FAILED); 1678 ISCSI_ERR_CONN_FAILED);
1317 break; 1679 break;
1318 case CXN_KILLED_RST_SENT: 1680 case CXN_KILLED_RST_SENT:
1319 case CXN_KILLED_RST_RCVD: 1681 case CXN_KILLED_RST_RCVD:
1320 SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset received/sent " 1682 SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset "
1321 "on CID 0x%x...\n", 1683 "received/sent on CID 0x%x...\n",
1322 sol->dw[offsetof(struct amap_sol_cqe, code) / 1684 sol->dw[offsetof(struct amap_sol_cqe, code) /
1323 32] & CQE_CODE_MASK, 1685 32] & CQE_CODE_MASK,
1324 sol->dw[offsetof(struct amap_sol_cqe, cid) / 1686 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1325 32] & CQE_CID_MASK); 1687 32] & CQE_CID_MASK));
1326 iscsi_conn_failure(beiscsi_conn->conn, 1688 iscsi_conn_failure(beiscsi_conn->conn,
1327 ISCSI_ERR_CONN_FAILED); 1689 ISCSI_ERR_CONN_FAILED);
1328 break; 1690 break;
@@ -1331,8 +1693,8 @@ static unsigned int beiscsi_process_cq(struct beiscsi_hba *phba)
1331 "received on CID 0x%x...\n", 1693 "received on CID 0x%x...\n",
1332 sol->dw[offsetof(struct amap_sol_cqe, code) / 1694 sol->dw[offsetof(struct amap_sol_cqe, code) /
1333 32] & CQE_CODE_MASK, 1695 32] & CQE_CODE_MASK,
1334 sol->dw[offsetof(struct amap_sol_cqe, cid) / 1696 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1335 32] & CQE_CID_MASK); 1697 32] & CQE_CID_MASK));
1336 break; 1698 break;
1337 } 1699 }
1338 1700
@@ -1344,30 +1706,39 @@ static unsigned int beiscsi_process_cq(struct beiscsi_hba *phba)
1344 1706
1345 if (num_processed > 0) { 1707 if (num_processed > 0) {
1346 tot_nump += num_processed; 1708 tot_nump += num_processed;
1347 hwi_ring_cq_db(phba, phwi_context->be_cq.id, num_processed, 1709 hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0);
1348 1, 0);
1349 } 1710 }
1350 return tot_nump; 1711 return tot_nump;
1351} 1712}
1352 1713
1353static void beiscsi_process_all_cqs(struct work_struct *work) 1714void beiscsi_process_all_cqs(struct work_struct *work)
1354{ 1715{
1355 unsigned long flags; 1716 unsigned long flags;
1717 struct hwi_controller *phwi_ctrlr;
1718 struct hwi_context_memory *phwi_context;
1719 struct be_eq_obj *pbe_eq;
1356 struct beiscsi_hba *phba = 1720 struct beiscsi_hba *phba =
1357 container_of(work, struct beiscsi_hba, work_cqs); 1721 container_of(work, struct beiscsi_hba, work_cqs);
1358 1722
1723 phwi_ctrlr = phba->phwi_ctrlr;
1724 phwi_context = phwi_ctrlr->phwi_ctxt;
1725 if (phba->msix_enabled)
1726 pbe_eq = &phwi_context->be_eq[phba->num_cpus];
1727 else
1728 pbe_eq = &phwi_context->be_eq[0];
1729
1359 if (phba->todo_mcc_cq) { 1730 if (phba->todo_mcc_cq) {
1360 spin_lock_irqsave(&phba->isr_lock, flags); 1731 spin_lock_irqsave(&phba->isr_lock, flags);
1361 phba->todo_mcc_cq = 0; 1732 phba->todo_mcc_cq = 0;
1362 spin_unlock_irqrestore(&phba->isr_lock, flags); 1733 spin_unlock_irqrestore(&phba->isr_lock, flags);
1363 SE_DEBUG(DBG_LVL_1, "MCC Interrupt Not expected \n"); 1734 beiscsi_process_mcc_isr(phba);
1364 } 1735 }
1365 1736
1366 if (phba->todo_cq) { 1737 if (phba->todo_cq) {
1367 spin_lock_irqsave(&phba->isr_lock, flags); 1738 spin_lock_irqsave(&phba->isr_lock, flags);
1368 phba->todo_cq = 0; 1739 phba->todo_cq = 0;
1369 spin_unlock_irqrestore(&phba->isr_lock, flags); 1740 spin_unlock_irqrestore(&phba->isr_lock, flags);
1370 beiscsi_process_cq(phba); 1741 beiscsi_process_cq(pbe_eq);
1371 } 1742 }
1372} 1743}
1373 1744
@@ -1375,19 +1746,15 @@ static int be_iopoll(struct blk_iopoll *iop, int budget)
1375{ 1746{
1376 static unsigned int ret; 1747 static unsigned int ret;
1377 struct beiscsi_hba *phba; 1748 struct beiscsi_hba *phba;
1749 struct be_eq_obj *pbe_eq;
1378 1750
1379 phba = container_of(iop, struct beiscsi_hba, iopoll); 1751 pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
1380 1752 ret = beiscsi_process_cq(pbe_eq);
1381 ret = beiscsi_process_cq(phba);
1382 if (ret < budget) { 1753 if (ret < budget) {
1383 struct hwi_controller *phwi_ctrlr; 1754 phba = pbe_eq->phba;
1384 struct hwi_context_memory *phwi_context;
1385
1386 phwi_ctrlr = phba->phwi_ctrlr;
1387 phwi_context = phwi_ctrlr->phwi_ctxt;
1388 blk_iopoll_complete(iop); 1755 blk_iopoll_complete(iop);
1389 hwi_ring_eq_db(phba, phwi_context->be_eq.q.id, 0, 1756 SE_DEBUG(DBG_LVL_8, "rearm pbe_eq->q.id =%d\n", pbe_eq->q.id);
1390 0, 1, 1); 1757 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
1391 } 1758 }
1392 return ret; 1759 return ret;
1393} 1760}
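
be_iopoll() follows the blk_iopoll contract: report how much work was done, and only complete the poll and rearm the event queue when the work ran out before the budget did. A self-contained model (the pending count is fabricated):

#include <stdio.h>

static int pending = 5;	/* fabricated amount of CQ work */

static int poll_cb(int budget)
{
	int done = 0;

	while (pending && done < budget) {
		pending--;	/* stands in for beiscsi_process_cq() */
		done++;
	}
	if (done < budget)
		printf("blk_iopoll_complete + rearm EQ\n");
	return done;
}

int main(void)
{
	printf("polled %d\n", poll_cb(8));
	return 0;
}
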
@@ -1409,7 +1776,8 @@ hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
1409 io_task->bhs_pa.u.a32.address_hi); 1776 io_task->bhs_pa.u.a32.address_hi);
1410 1777
1411 l_sg = sg; 1778 l_sg = sg;
1412 for (index = 0; (index < num_sg) && (index < 2); index++, sg_next(sg)) { 1779 for (index = 0; (index < num_sg) && (index < 2); index++,
1780 sg = sg_next(sg)) {
1413 if (index == 0) { 1781 if (index == 0) {
1414 sg_len = sg_dma_len(sg); 1782 sg_len = sg_dma_len(sg);
1415 addr = (u64) sg_dma_address(sg); 1783 addr = (u64) sg_dma_address(sg);
@@ -1420,11 +1788,7 @@ hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
1420 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb, 1788 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
1421 sg_len); 1789 sg_len);
1422 sge_len = sg_len; 1790 sge_len = sg_len;
1423 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
1424 1);
1425 } else { 1791 } else {
1426 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
1427 0);
1428 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset, 1792 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
1429 pwrb, sge_len); 1793 pwrb, sge_len);
1430 sg_len = sg_dma_len(sg); 1794 sg_len = sg_dma_len(sg);
@@ -1447,13 +1811,27 @@ hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
1447 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 1811 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1448 io_task->bhs_pa.u.a32.address_lo); 1812 io_task->bhs_pa.u.a32.address_lo);
1449 1813
1450 if (num_sg == 2) 1814 if (num_sg == 1) {
1451 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 1); 1815 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
1816 1);
1817 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
1818 0);
1819 } else if (num_sg == 2) {
1820 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
1821 0);
1822 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
1823 1);
1824 } else {
1825 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
1826 0);
1827 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
1828 0);
1829 }
1452 sg = l_sg; 1830 sg = l_sg;
1453 psgl++; 1831 psgl++;
1454 psgl++; 1832 psgl++;
1455 offset = 0; 1833 offset = 0;
1456 for (index = 0; index < num_sg; index++, sg_next(sg), psgl++) { 1834 for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
1457 sg_len = sg_dma_len(sg); 1835 sg_len = sg_dma_len(sg);
1458 addr = (u64) sg_dma_address(sg); 1836 addr = (u64) sg_dma_address(sg);
1459 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 1837 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
@@ -1537,14 +1915,12 @@ static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
1537 1915
1538static void beiscsi_find_mem_req(struct beiscsi_hba *phba) 1916static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
1539{ 1917{
1540 unsigned int num_cq_pages, num_eq_pages, num_async_pdu_buf_pages; 1918 unsigned int num_cq_pages, num_async_pdu_buf_pages;
1541 unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn; 1919 unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
1542 unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages; 1920 unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
1543 1921
1544 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \ 1922 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
1545 sizeof(struct sol_cqe)); 1923 sizeof(struct sol_cqe));
1546 num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
1547 sizeof(struct be_eq_entry));
1548 num_async_pdu_buf_pages = 1924 num_async_pdu_buf_pages =
1549 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \ 1925 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1550 phba->params.defpdu_hdr_sz); 1926 phba->params.defpdu_hdr_sz);
@@ -1565,8 +1941,6 @@ static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
1565 phba->mem_req[HWI_MEM_ADDN_CONTEXT] = 1941 phba->mem_req[HWI_MEM_ADDN_CONTEXT] =
1566 sizeof(struct hwi_context_memory); 1942 sizeof(struct hwi_context_memory);
1567 1943
1568 phba->mem_req[HWI_MEM_CQ] = num_cq_pages * PAGE_SIZE;
1569 phba->mem_req[HWI_MEM_EQ] = num_eq_pages * PAGE_SIZE;
1570 1944
1571 phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb) 1945 phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb)
1572 * (phba->params.wrbs_per_cxn) 1946 * (phba->params.wrbs_per_cxn)
@@ -1751,8 +2125,6 @@ static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
1751 2125
1752 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) { 2126 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
1753 pwrb_context = &phwi_ctrlr->wrb_context[index]; 2127 pwrb_context = &phwi_ctrlr->wrb_context[index];
1754 SE_DEBUG(DBG_LVL_8, "cid=%d pwrb_context=%p \n", index,
1755 pwrb_context);
1756 pwrb_context->pwrb_handle_base = 2128 pwrb_context->pwrb_handle_base =
1757 kzalloc(sizeof(struct wrb_handle *) * 2129 kzalloc(sizeof(struct wrb_handle *) *
1758 phba->params.wrbs_per_cxn, GFP_KERNEL); 2130 phba->params.wrbs_per_cxn, GFP_KERNEL);
@@ -1767,6 +2139,7 @@ static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
1767 pwrb_context->pwrb_handle_basestd[j] = 2139 pwrb_context->pwrb_handle_basestd[j] =
1768 pwrb_handle; 2140 pwrb_handle;
1769 pwrb_context->wrb_handles_available++; 2141 pwrb_context->wrb_handles_available++;
2142 pwrb_handle->wrb_index = j;
1770 pwrb_handle++; 2143 pwrb_handle++;
1771 } 2144 }
1772 pwrb_context->free_index = 0; 2145 pwrb_context->free_index = 0;
@@ -1785,6 +2158,7 @@ static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
1785 pwrb_context->pwrb_handle_basestd[j] = 2158 pwrb_context->pwrb_handle_basestd[j] =
1786 pwrb_handle; 2159 pwrb_handle;
1787 pwrb_context->wrb_handles_available++; 2160 pwrb_context->wrb_handles_available++;
2161 pwrb_handle->wrb_index = j;
1788 pwrb_handle++; 2162 pwrb_handle++;
1789 } 2163 }
1790 pwrb_context->free_index = 0; 2164 pwrb_context->free_index = 0;
@@ -1793,11 +2167,10 @@ static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
1793 } 2167 }
1794 idx = 0; 2168 idx = 0;
1795 pwrb = mem_descr_wrb->mem_array[idx].virtual_address; 2169 pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
1796 num_cxn_wrb = 2170 num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
1797 ((mem_descr_wrb->mem_array[idx].size) / (sizeof(struct iscsi_wrb)) * 2171 ((sizeof(struct iscsi_wrb) *
1798 phba->params.wrbs_per_cxn); 2172 phba->params.wrbs_per_cxn));
1799 2173 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
1800 for (index = 0; index < phba->params.cxns_per_ctrl; index += 2) {
1801 pwrb_context = &phwi_ctrlr->wrb_context[index]; 2174 pwrb_context = &phwi_ctrlr->wrb_context[index];
1802 if (num_cxn_wrb) { 2175 if (num_cxn_wrb) {
1803 for (j = 0; j < phba->params.wrbs_per_cxn; j++) { 2176 for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
@@ -1809,9 +2182,9 @@ static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
1809 } else { 2182 } else {
1810 idx++; 2183 idx++;
1811 pwrb = mem_descr_wrb->mem_array[idx].virtual_address; 2184 pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
1812 num_cxn_wrb = ((mem_descr_wrb->mem_array[idx].size) / 2185 num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
1813 (sizeof(struct iscsi_wrb)) * 2186 ((sizeof(struct iscsi_wrb) *
1814 phba->params.wrbs_per_cxn); 2187 phba->params.wrbs_per_cxn));
1815 for (j = 0; j < phba->params.wrbs_per_cxn; j++) { 2188 for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
1816 pwrb_handle = pwrb_context->pwrb_handle_base[j]; 2189 pwrb_handle = pwrb_context->pwrb_handle_base[j];
1817 pwrb_handle->pwrb = pwrb; 2190 pwrb_handle->pwrb = pwrb;
@@ -2042,79 +2415,126 @@ static int be_fill_queue(struct be_queue_info *q,
2042 return 0; 2415 return 0;
2043} 2416}
2044 2417
2045static int beiscsi_create_eq(struct beiscsi_hba *phba, 2418static int beiscsi_create_eqs(struct beiscsi_hba *phba,
2046 struct hwi_context_memory *phwi_context) 2419 struct hwi_context_memory *phwi_context)
2047{ 2420{
2048 unsigned int idx; 2421 unsigned int i, num_eq_pages;
2049 int ret; 2422 int ret, eq_for_mcc;
2050 struct be_queue_info *eq; 2423 struct be_queue_info *eq;
2051 struct be_dma_mem *mem; 2424 struct be_dma_mem *mem;
2052 struct be_mem_descriptor *mem_descr;
2053 void *eq_vaddress; 2425 void *eq_vaddress;
2426 dma_addr_t paddr;
2054 2427
2055 idx = 0; 2428 num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
2056 eq = &phwi_context->be_eq.q; 2429 sizeof(struct be_eq_entry));
2057 mem = &eq->dma_mem;
2058 mem_descr = phba->init_mem;
2059 mem_descr += HWI_MEM_EQ;
2060 eq_vaddress = mem_descr->mem_array[idx].virtual_address;
2061
2062 ret = be_fill_queue(eq, phba->params.num_eq_entries,
2063 sizeof(struct be_eq_entry), eq_vaddress);
2064 if (ret) {
2065 shost_printk(KERN_ERR, phba->shost,
2066 "be_fill_queue Failed for EQ \n");
2067 return ret;
2068 }
2069 2430
2070 mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address; 2431 if (phba->msix_enabled)
2432 eq_for_mcc = 1;
2433 else
2434 eq_for_mcc = 0;
2435 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
2436 eq = &phwi_context->be_eq[i].q;
2437 mem = &eq->dma_mem;
2438 phwi_context->be_eq[i].phba = phba;
2439 eq_vaddress = pci_alloc_consistent(phba->pcidev,
2440 num_eq_pages * PAGE_SIZE,
2441 &paddr);
2442 if (!eq_vaddress)
2443 goto create_eq_error;
2444
2445 mem->va = eq_vaddress;
2446 ret = be_fill_queue(eq, phba->params.num_eq_entries,
2447 sizeof(struct be_eq_entry), eq_vaddress);
2448 if (ret) {
2449 shost_printk(KERN_ERR, phba->shost,
2450 "be_fill_queue Failed for EQ \n");
2451 goto create_eq_error;
2452 }
2071 2453
2072 ret = beiscsi_cmd_eq_create(&phba->ctrl, eq, 2454 mem->dma = paddr;
2073 phwi_context->be_eq.cur_eqd); 2455 ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
2074 if (ret) { 2456 phwi_context->cur_eqd);
2075 shost_printk(KERN_ERR, phba->shost, "beiscsi_cmd_eq_create" 2457 if (ret) {
2076 "Failedfor EQ \n"); 2458 shost_printk(KERN_ERR, phba->shost,
2077 return ret; 2459 "beiscsi_cmd_eq_create "
2460 "Failed for EQ \n");
2461 goto create_eq_error;
2462 }
2463 SE_DEBUG(DBG_LVL_8, "eqid = %d\n", phwi_context->be_eq[i].q.id);
2078 } 2464 }
2079 SE_DEBUG(DBG_LVL_8, "eq id is %d\n", phwi_context->be_eq.q.id);
2080 return 0; 2465 return 0;
2466create_eq_error:
2467 for (i = 0; i < (phba->num_cpus + 1); i++) {
2468 eq = &phwi_context->be_eq[i].q;
2469 mem = &eq->dma_mem;
2470 if (mem->va)
2471 pci_free_consistent(phba->pcidev, num_eq_pages
2472 * PAGE_SIZE,
2473 mem->va, mem->dma);
2474 }
2475 return ret;
2081} 2476}
2082 2477
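
beiscsi_create_eqs() now sizes each ring with PAGES_REQUIRED and allocates it per EQ with pci_alloc_consistent(). Assuming the usual round-up-to-whole-pages definition and illustrative entry counts, the arithmetic looks like this:

#include <stdio.h>

#define PAGE_SIZE 4096u
/* assumed definition: round a byte count up to whole pages */
#define PAGES_REQUIRED(bytes) (((bytes) + PAGE_SIZE - 1) / PAGE_SIZE)

int main(void)
{
	unsigned int num_eq_entries = 1024;	/* illustrative */
	unsigned int eqe_size = 4;		/* illustrative entry size */

	printf("%u page(s) per EQ ring\n",
	       PAGES_REQUIRED(num_eq_entries * eqe_size));	/* 1 */
	return 0;
}
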
2083static int beiscsi_create_cq(struct beiscsi_hba *phba, 2478static int beiscsi_create_cqs(struct beiscsi_hba *phba,
2084 struct hwi_context_memory *phwi_context) 2479 struct hwi_context_memory *phwi_context)
2085{ 2480{
2086 unsigned int idx; 2481 unsigned int i, num_cq_pages;
2087 int ret; 2482 int ret;
2088 struct be_queue_info *cq, *eq; 2483 struct be_queue_info *cq, *eq;
2089 struct be_dma_mem *mem; 2484 struct be_dma_mem *mem;
2090 struct be_mem_descriptor *mem_descr; 2485 struct be_eq_obj *pbe_eq;
2091 void *cq_vaddress; 2486 void *cq_vaddress;
2487 dma_addr_t paddr;
2092 2488
2093 idx = 0; 2489 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
2094 cq = &phwi_context->be_cq; 2490 sizeof(struct sol_cqe));
2095 eq = &phwi_context->be_eq.q;
2096 mem = &cq->dma_mem;
2097 mem_descr = phba->init_mem;
2098 mem_descr += HWI_MEM_CQ;
2099 cq_vaddress = mem_descr->mem_array[idx].virtual_address;
2100 ret = be_fill_queue(cq, phba->params.icds_per_ctrl / 2,
2101 sizeof(struct sol_cqe), cq_vaddress);
2102 if (ret) {
2103 shost_printk(KERN_ERR, phba->shost,
2104 "be_fill_queue Failed for ISCSI CQ \n");
2105 return ret;
2106 }
2107 2491
2108 mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address; 2492 for (i = 0; i < phba->num_cpus; i++) {
2109 ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false, false, 0); 2493 cq = &phwi_context->be_cq[i];
2110 if (ret) { 2494 eq = &phwi_context->be_eq[i].q;
2111 shost_printk(KERN_ERR, phba->shost, 2495 pbe_eq = &phwi_context->be_eq[i];
2112 "beiscsi_cmd_eq_create Failed for ISCSI CQ \n"); 2496 pbe_eq->cq = cq;
2113 return ret; 2497 pbe_eq->phba = phba;
2498 mem = &cq->dma_mem;
2499 cq_vaddress = pci_alloc_consistent(phba->pcidev,
2500 num_cq_pages * PAGE_SIZE,
2501 &paddr);
2502 if (!cq_vaddress)
2503 goto create_cq_error;
2504 ret = be_fill_queue(cq, phba->params.num_cq_entries,
2505 sizeof(struct sol_cqe), cq_vaddress);
2506 if (ret) {
2507 shost_printk(KERN_ERR, phba->shost,
2508 "be_fill_queue Failed for ISCSI CQ \n");
2509 goto create_cq_error;
2510 }
2511
2512 mem->dma = paddr;
2513 ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
2514 false, 0);
2515 if (ret) {
2516 shost_printk(KERN_ERR, phba->shost,
2517 "beiscsi_cmd_eq_create"
2518 "Failed for ISCSI CQ \n");
2519 goto create_cq_error;
2520 }
2521 SE_DEBUG(DBG_LVL_8, "iscsi cq_id is %d for eq_id %d\n",
2522 cq->id, eq->id);
2523 SE_DEBUG(DBG_LVL_8, "ISCSI CQ CREATED\n");
2114 } 2524 }
2115 SE_DEBUG(DBG_LVL_8, "iscsi cq id is %d\n", phwi_context->be_cq.id);
2116 SE_DEBUG(DBG_LVL_8, "ISCSI CQ CREATED\n");
2117 return 0; 2525 return 0;
2526
2527create_cq_error:
2528 for (i = 0; i < phba->num_cpus; i++) {
2529 cq = &phwi_context->be_cq[i];
2530 mem = &cq->dma_mem;
2531 if (mem->va)
2532 pci_free_consistent(phba->pcidev, num_cq_pages
2533 * PAGE_SIZE,
2534 mem->va, mem->dma);
2535 }
2536 return ret;
2537
2118} 2538}
2119 2539
2120static int 2540static int
@@ -2132,7 +2552,7 @@ beiscsi_create_def_hdr(struct beiscsi_hba *phba,
2132 2552
2133 idx = 0; 2553 idx = 0;
2134 dq = &phwi_context->be_def_hdrq; 2554 dq = &phwi_context->be_def_hdrq;
2135 cq = &phwi_context->be_cq; 2555 cq = &phwi_context->be_cq[0];
2136 mem = &dq->dma_mem; 2556 mem = &dq->dma_mem;
2137 mem_descr = phba->init_mem; 2557 mem_descr = phba->init_mem;
2138 mem_descr += HWI_MEM_ASYNC_HEADER_RING; 2558 mem_descr += HWI_MEM_ASYNC_HEADER_RING;
@@ -2176,7 +2596,7 @@ beiscsi_create_def_data(struct beiscsi_hba *phba,
2176 2596
2177 idx = 0; 2597 idx = 0;
2178 dataq = &phwi_context->be_def_dataq; 2598 dataq = &phwi_context->be_def_dataq;
2179 cq = &phwi_context->be_cq; 2599 cq = &phwi_context->be_cq[0];
2180 mem = &dataq->dma_mem; 2600 mem = &dataq->dma_mem;
2181 mem_descr = phba->init_mem; 2601 mem_descr = phba->init_mem;
2182 mem_descr += HWI_MEM_ASYNC_DATA_RING; 2602 mem_descr += HWI_MEM_ASYNC_DATA_RING;
@@ -2239,6 +2659,30 @@ beiscsi_post_pages(struct beiscsi_hba *phba)
2239 return 0; 2659 return 0;
2240} 2660}
2241 2661
2662static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
2663{
2664 struct be_dma_mem *mem = &q->dma_mem;
2665 if (mem->va)
2666 pci_free_consistent(phba->pcidev, mem->size,
2667 mem->va, mem->dma);
2668}
2669
2670static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
2671 u16 len, u16 entry_size)
2672{
2673 struct be_dma_mem *mem = &q->dma_mem;
2674
2675 memset(q, 0, sizeof(*q));
2676 q->len = len;
2677 q->entry_size = entry_size;
2678 mem->size = len * entry_size;
2679 mem->va = pci_alloc_consistent(phba->pcidev, mem->size, &mem->dma);
2680 if (!mem->va)
2681 return -1;
2682 memset(mem->va, 0, mem->size);
2683 return 0;
2684}
2685
2242static int 2686static int
2243beiscsi_create_wrb_rings(struct beiscsi_hba *phba, 2687beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
2244 struct hwi_context_memory *phwi_context, 2688 struct hwi_context_memory *phwi_context,
@@ -2308,7 +2752,8 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
2308 "wrbq create failed."); 2752 "wrbq create failed.");
2309 return status; 2753 return status;
2310 } 2754 }
2311 phwi_ctrlr->wrb_context[i].cid = phwi_context->be_wrbq[i].id; 2755 phwi_ctrlr->wrb_context[i * 2].cid = phwi_context->be_wrbq[i].
2756 id;
2312 } 2757 }
2313 kfree(pwrb_arr); 2758 kfree(pwrb_arr);
2314 return 0; 2759 return 0;
@@ -2328,13 +2773,29 @@ static void free_wrb_handles(struct beiscsi_hba *phba)
2328 } 2773 }
2329} 2774}
2330 2775
2776static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
2777{
2778 struct be_queue_info *q;
2779 struct be_ctrl_info *ctrl = &phba->ctrl;
2780
2781 q = &phba->ctrl.mcc_obj.q;
2782 if (q->created)
2783 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
2784 be_queue_free(phba, q);
2785
2786 q = &phba->ctrl.mcc_obj.cq;
2787 if (q->created)
2788 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
2789 be_queue_free(phba, q);
2790}
2791
2331static void hwi_cleanup(struct beiscsi_hba *phba) 2792static void hwi_cleanup(struct beiscsi_hba *phba)
2332{ 2793{
2333 struct be_queue_info *q; 2794 struct be_queue_info *q;
2334 struct be_ctrl_info *ctrl = &phba->ctrl; 2795 struct be_ctrl_info *ctrl = &phba->ctrl;
2335 struct hwi_controller *phwi_ctrlr; 2796 struct hwi_controller *phwi_ctrlr;
2336 struct hwi_context_memory *phwi_context; 2797 struct hwi_context_memory *phwi_context;
2337 int i; 2798 int i, eq_num;
2338 2799
2339 phwi_ctrlr = phba->phwi_ctrlr; 2800 phwi_ctrlr = phba->phwi_ctrlr;
2340 phwi_context = phwi_ctrlr->phwi_ctxt; 2801 phwi_context = phwi_ctrlr->phwi_ctxt;
@@ -2343,7 +2804,6 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
2343 if (q->created) 2804 if (q->created)
2344 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ); 2805 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
2345 } 2806 }
2346
2347 free_wrb_handles(phba); 2807 free_wrb_handles(phba);
2348 2808
2349 q = &phwi_context->be_def_hdrq; 2809 q = &phwi_context->be_def_hdrq;
@@ -2356,13 +2816,76 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
2356 2816
2357 beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL); 2817 beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
2358 2818
2359 q = &phwi_context->be_cq; 2819 for (i = 0; i < (phba->num_cpus); i++) {
2360 if (q->created) 2820 q = &phwi_context->be_cq[i];
2361 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ); 2821 if (q->created)
2822 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
2823 }
2824 if (phba->msix_enabled)
2825 eq_num = 1;
2826 else
2827 eq_num = 0;
2828 for (i = 0; i < (phba->num_cpus + eq_num); i++) {
2829 q = &phwi_context->be_eq[i].q;
2830 if (q->created)
2831 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
2832 }
2833 be_mcc_queues_destroy(phba);
2834}
2362 2835
2363 q = &phwi_context->be_eq.q; 2836static int be_mcc_queues_create(struct beiscsi_hba *phba,
2364 if (q->created) 2837 struct hwi_context_memory *phwi_context)
2365 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ); 2838{
2839 struct be_queue_info *q, *cq;
2840 struct be_ctrl_info *ctrl = &phba->ctrl;
2841
2842 /* Alloc MCC compl queue */
2843 cq = &phba->ctrl.mcc_obj.cq;
2844 if (be_queue_alloc(phba, cq, MCC_CQ_LEN,
2845 sizeof(struct be_mcc_compl)))
2846 goto err;
2847 /* Ask BE to create MCC compl queue; */
2848 if (phba->msix_enabled) {
2849 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq
2850 [phba->num_cpus].q, false, true, 0))
2851 goto mcc_cq_free;
2852 } else {
2853 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
2854 false, true, 0))
2855 goto mcc_cq_free;
2856 }
2857
2858 /* Alloc MCC queue */
2859 q = &phba->ctrl.mcc_obj.q;
2860 if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2861 goto mcc_cq_destroy;
2862
2863 /* Ask BE to create MCC queue */
2864 if (beiscsi_cmd_mccq_create(phba, q, cq))
2865 goto mcc_q_free;
2866
2867 return 0;
2868
2869mcc_q_free:
2870 be_queue_free(phba, q);
2871mcc_cq_destroy:
2872 beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
2873mcc_cq_free:
2874 be_queue_free(phba, cq);
2875err:
2876 return -1;
2877}
2878
2879static int find_num_cpus(void)
2880{
2881 int num_cpus = 0;
2882
2883 num_cpus = num_online_cpus();
2884 if (num_cpus >= MAX_CPUS)
2885 num_cpus = MAX_CPUS - 1;
2886
2887 SE_DEBUG(DBG_LVL_8, "num_cpus = %d \n", num_cpus);
2888 return num_cpus;
2366} 2889}
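
find_num_cpus() caps the I/O event queues one below MAX_CPUS so that, under MSI-X, slot num_cpus in be_eq[] stays free for the MCC event queue (see be_mcc_queues_create() above). A model with an assumed MAX_CPUS value:

#include <stdio.h>

#define MAX_CPUS 31	/* assumed value, for illustration only */

static int find_num_cpus_model(int online)
{
	return online >= MAX_CPUS ? MAX_CPUS - 1 : online;
}

int main(void)
{
	/* on a 64-way box: 30 I/O EQs, be_eq[30] left for the MCC EQ */
	printf("num_cpus = %d\n", find_num_cpus_model(64));
	return 0;
}
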
2367 2890
2368static int hwi_init_port(struct beiscsi_hba *phba) 2891static int hwi_init_port(struct beiscsi_hba *phba)
@@ -2376,34 +2899,30 @@ static int hwi_init_port(struct beiscsi_hba *phba)
2376 def_pdu_ring_sz = 2899 def_pdu_ring_sz =
2377 phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr); 2900 phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr);
2378 phwi_ctrlr = phba->phwi_ctrlr; 2901 phwi_ctrlr = phba->phwi_ctrlr;
2379
2380 phwi_context = phwi_ctrlr->phwi_ctxt; 2902 phwi_context = phwi_ctrlr->phwi_ctxt;
2381 phwi_context->be_eq.max_eqd = 0; 2903 phwi_context->max_eqd = 0;
2382 phwi_context->be_eq.min_eqd = 0; 2904 phwi_context->min_eqd = 0;
2383 phwi_context->be_eq.cur_eqd = 64; 2905 phwi_context->cur_eqd = 64;
2384 phwi_context->be_eq.enable_aic = false;
2385 be_cmd_fw_initialize(&phba->ctrl); 2906 be_cmd_fw_initialize(&phba->ctrl);
2386 status = beiscsi_create_eq(phba, phwi_context); 2907
2908 status = beiscsi_create_eqs(phba, phwi_context);
2387 if (status != 0) { 2909 if (status != 0) {
2388 shost_printk(KERN_ERR, phba->shost, "EQ not created \n"); 2910 shost_printk(KERN_ERR, phba->shost, "EQ not created \n");
2389 goto error; 2911 goto error;
2390 } 2912 }
2391 2913
2392 status = mgmt_check_supported_fw(ctrl); 2914 status = be_mcc_queues_create(phba, phwi_context);
2393 if (status != 0) { 2915 if (status != 0)
2394 shost_printk(KERN_ERR, phba->shost,
2395 "Unsupported fw version \n");
2396 goto error; 2916 goto error;
2397 }
2398 2917
2399 status = mgmt_get_fw_config(ctrl, phba); 2918 status = mgmt_check_supported_fw(ctrl, phba);
2400 if (status != 0) { 2919 if (status != 0) {
2401 shost_printk(KERN_ERR, phba->shost, 2920 shost_printk(KERN_ERR, phba->shost,
2402 "Error getting fw config\n"); 2921 "Unsupported fw version \n");
2403 goto error; 2922 goto error;
2404 } 2923 }
2405 2924
2406 status = beiscsi_create_cq(phba, phwi_context); 2925 status = beiscsi_create_cqs(phba, phwi_context);
2407 if (status != 0) { 2926 if (status != 0) {
2408 shost_printk(KERN_ERR, phba->shost, "CQ not created\n"); 2927 shost_printk(KERN_ERR, phba->shost, "CQ not created\n");
2409 goto error; 2928 goto error;
@@ -2447,7 +2966,6 @@ error:
2447 return -ENOMEM; 2966 return -ENOMEM;
2448} 2967}
2449 2968
2450
2451static int hwi_init_controller(struct beiscsi_hba *phba) 2969static int hwi_init_controller(struct beiscsi_hba *phba)
2452{ 2970{
2453 struct hwi_controller *phwi_ctrlr; 2971 struct hwi_controller *phwi_ctrlr;
@@ -2530,6 +3048,7 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
2530 3048
2531 phba->io_sgl_hndl_avbl = 0; 3049 phba->io_sgl_hndl_avbl = 0;
2532 phba->eh_sgl_hndl_avbl = 0; 3050 phba->eh_sgl_hndl_avbl = 0;
3051
2533 mem_descr_sglh = phba->init_mem; 3052 mem_descr_sglh = phba->init_mem;
2534 mem_descr_sglh += HWI_MEM_SGLH; 3053 mem_descr_sglh += HWI_MEM_SGLH;
2535 if (1 == mem_descr_sglh->num_elements) { 3054 if (1 == mem_descr_sglh->num_elements) {
@@ -2608,7 +3127,7 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
2608 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0); 3127 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
2609 pfrag += phba->params.num_sge_per_io; 3128 pfrag += phba->params.num_sge_per_io;
2610 psgl_handle->sgl_index = 3129 psgl_handle->sgl_index =
2611 phba->fw_config.iscsi_cid_start + arr_index++; 3130 phba->fw_config.iscsi_icd_start + arr_index++;
2612 } 3131 }
2613 idx++; 3132 idx++;
2614 } 3133 }
@@ -2623,7 +3142,7 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
2623{ 3142{
2624 int i, new_cid; 3143 int i, new_cid;
2625 3144
2626 phba->cid_array = kmalloc(sizeof(void *) * phba->params.cxns_per_ctrl, 3145 phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
2627 GFP_KERNEL); 3146 GFP_KERNEL);
2628 if (!phba->cid_array) { 3147 if (!phba->cid_array) {
2629 shost_printk(KERN_ERR, phba->shost, 3148 shost_printk(KERN_ERR, phba->shost,
@@ -2631,7 +3150,7 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
2631 "hba_setup_cid_tbls\n"); 3150 "hba_setup_cid_tbls\n");
2632 return -ENOMEM; 3151 return -ENOMEM;
2633 } 3152 }
2634 phba->ep_array = kmalloc(sizeof(struct iscsi_endpoint *) * 3153 phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
2635 phba->params.cxns_per_ctrl * 2, GFP_KERNEL); 3154 phba->params.cxns_per_ctrl * 2, GFP_KERNEL);
2636 if (!phba->ep_array) { 3155 if (!phba->ep_array) {
2637 shost_printk(KERN_ERR, phba->shost, 3156 shost_printk(KERN_ERR, phba->shost,
@@ -2640,7 +3159,7 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
2640 kfree(phba->cid_array); 3159 kfree(phba->cid_array);
2641 return -ENOMEM; 3160 return -ENOMEM;
2642 } 3161 }
2643 new_cid = phba->fw_config.iscsi_icd_start; 3162 new_cid = phba->fw_config.iscsi_cid_start;
2644 for (i = 0; i < phba->params.cxns_per_ctrl; i++) { 3163 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
2645 phba->cid_array[i] = new_cid; 3164 phba->cid_array[i] = new_cid;
2646 new_cid += 2; 3165 new_cid += 2;
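The kmalloc-to-kzalloc switch in the hunk above matters on teardown: with zeroed tables, cleanup code can treat a NULL ep_array slot as "connection never opened". A minimal user-space sketch of the same idea, using calloc in place of kzalloc and plain ints for CIDs (all names here are illustrative, not the driver's):

    #include <stdlib.h>

    struct endpoint { int cid; };

    int main(void)
    {
        const int cxns = 4;
        /* zeroed allocation: unused slots stay NULL, like kzalloc */
        struct endpoint **ep_array = calloc(cxns * 2, sizeof(*ep_array));
        int *cid_array = calloc(cxns, sizeof(*cid_array));
        if (!ep_array || !cid_array)
            return 1;

        /* CIDs start at a firmware-reported base and step by 2,
         * mirroring the hba_setup_cid_tbls() loop in the hunk above */
        int new_cid = 10;               /* stand-in for iscsi_cid_start */
        for (int i = 0; i < cxns; i++) {
            cid_array[i] = new_cid;
            new_cid += 2;
        }

        /* teardown can safely skip slots that were never populated */
        for (int i = 0; i < cxns * 2; i++)
            if (ep_array[i])
                free(ep_array[i]);
        free(ep_array);
        free(cid_array);
        return 0;
    }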
@@ -2656,13 +3175,12 @@ static unsigned char hwi_enable_intr(struct beiscsi_hba *phba)
2656 struct hwi_context_memory *phwi_context; 3175 struct hwi_context_memory *phwi_context;
2657 struct be_queue_info *eq; 3176 struct be_queue_info *eq;
2658 u8 __iomem *addr; 3177 u8 __iomem *addr;
2659 u32 reg; 3178 u32 reg, i;
2660 u32 enabled; 3179 u32 enabled;
2661 3180
2662 phwi_ctrlr = phba->phwi_ctrlr; 3181 phwi_ctrlr = phba->phwi_ctrlr;
2663 phwi_context = phwi_ctrlr->phwi_ctxt; 3182 phwi_context = phwi_ctrlr->phwi_ctxt;
2664 3183
2665 eq = &phwi_context->be_eq.q;
2666 addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg + 3184 addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
2667 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET); 3185 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
2668 reg = ioread32(addr); 3186 reg = ioread32(addr);
@@ -2673,12 +3191,18 @@ static unsigned char hwi_enable_intr(struct beiscsi_hba *phba)
2673 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 3191 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
2674 SE_DEBUG(DBG_LVL_8, "reg =x%08x addr=%p \n", reg, addr); 3192 SE_DEBUG(DBG_LVL_8, "reg =x%08x addr=%p \n", reg, addr);
2675 iowrite32(reg, addr); 3193 iowrite32(reg, addr);
2676 SE_DEBUG(DBG_LVL_8, "eq->id=%d \n", eq->id); 3194 if (!phba->msix_enabled) {
2677 3195 eq = &phwi_context->be_eq[0].q;
2678 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1); 3196 SE_DEBUG(DBG_LVL_8, "eq->id=%d \n", eq->id);
2679 } else 3197 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
2680 shost_printk(KERN_WARNING, phba->shost, 3198 } else {
2681 "In hwi_enable_intr, Not Enabled \n"); 3199 for (i = 0; i <= phba->num_cpus; i++) {
3200 eq = &phwi_context->be_eq[i].q;
3201 SE_DEBUG(DBG_LVL_8, "eq->id=%d \n", eq->id);
3202 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3203 }
3204 }
3205 }
2682 return true; 3206 return true;
2683} 3207}
2684 3208
@@ -2738,17 +3262,30 @@ static void hwi_purge_eq(struct beiscsi_hba *phba)
2738 struct hwi_context_memory *phwi_context; 3262 struct hwi_context_memory *phwi_context;
2739 struct be_queue_info *eq; 3263 struct be_queue_info *eq;
2740 struct be_eq_entry *eqe = NULL; 3264 struct be_eq_entry *eqe = NULL;
3265 int i, eq_msix;
3266 unsigned int num_processed;
2741 3267
2742 phwi_ctrlr = phba->phwi_ctrlr; 3268 phwi_ctrlr = phba->phwi_ctrlr;
2743 phwi_context = phwi_ctrlr->phwi_ctxt; 3269 phwi_context = phwi_ctrlr->phwi_ctxt;
2744 eq = &phwi_context->be_eq.q; 3270 if (phba->msix_enabled)
2745 eqe = queue_tail_node(eq); 3271 eq_msix = 1;
3272 else
3273 eq_msix = 0;
2746 3274
2747 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] 3275 for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
2748 & EQE_VALID_MASK) { 3276 eq = &phwi_context->be_eq[i].q;
2749 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
2750 queue_tail_inc(eq);
2751 eqe = queue_tail_node(eq); 3277 eqe = queue_tail_node(eq);
3278 num_processed = 0;
3279 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
3280 & EQE_VALID_MASK) {
3281 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
3282 queue_tail_inc(eq);
3283 eqe = queue_tail_node(eq);
3284 num_processed++;
3285 }
3286
3287 if (num_processed)
3288 hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1);
2752 } 3289 }
2753} 3290}
2754 3291
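The reworked hwi_purge_eq() above now walks every event queue (one per CPU, plus one for MCC when MSI-X is enabled), clears each valid entry, and only then rings the doorbell once with the number of entries consumed. A small user-space model of that drain-then-ack pattern; the valid bit, ring layout, and doorbell are simplified stand-ins:

    #include <stdbool.h>
    #include <stdio.h>

    #define QLEN 8

    struct eq_entry { bool valid; };
    struct eq { struct eq_entry e[QLEN]; unsigned tail; };

    /* stand-in for hwi_ring_eq_db(): acknowledge n entries in one write */
    static void ring_eq_db(int eq_id, unsigned num_processed)
    {
        printf("eq %d: ack %u entries\n", eq_id, num_processed);
    }

    static void purge_eq(struct eq *eq, int eq_id)
    {
        unsigned num_processed = 0;

        while (eq->e[eq->tail].valid) {
            eq->e[eq->tail].valid = false;      /* consume the entry */
            eq->tail = (eq->tail + 1) % QLEN;   /* advance the ring */
            num_processed++;
        }
        if (num_processed)                      /* batch the doorbell */
            ring_eq_db(eq_id, num_processed);
    }

    int main(void)
    {
        struct eq eq = { .e = { {true}, {true}, {true} } };
        purge_eq(&eq, 0);
        return 0;
    }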
@@ -2760,8 +3297,9 @@ static void beiscsi_clean_port(struct beiscsi_hba *phba)
2760 if (mgmt_status) 3297 if (mgmt_status)
2761 shost_printk(KERN_WARNING, phba->shost, 3298 shost_printk(KERN_WARNING, phba->shost,
2762 "mgmt_epfw_cleanup FAILED \n"); 3299 "mgmt_epfw_cleanup FAILED \n");
2763 hwi_cleanup(phba); 3300
2764 hwi_purge_eq(phba); 3301 hwi_purge_eq(phba);
3302 hwi_cleanup(phba);
2765 kfree(phba->io_sgl_hndl_base); 3303 kfree(phba->io_sgl_hndl_base);
2766 kfree(phba->eh_sgl_hndl_base); 3304 kfree(phba->eh_sgl_hndl_base);
2767 kfree(phba->cid_array); 3305 kfree(phba->cid_array);
@@ -2782,7 +3320,8 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
2782 * We can always use 0 here because it is reserved by libiscsi for 3320 * We can always use 0 here because it is reserved by libiscsi for
2783 * login/startup related tasks. 3321 * login/startup related tasks.
2784 */ 3322 */
2785 pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid, 0); 3323 pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid -
3324 phba->fw_config.iscsi_cid_start));
2786 pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb; 3325 pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb;
2787 memset(pwrb, 0, sizeof(*pwrb)); 3326 memset(pwrb, 0, sizeof(*pwrb));
2788 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, 3327 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
@@ -2846,8 +3385,8 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
2846 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb)); 3385 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb));
2847 3386
2848 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; 3387 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
2849 doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK) << 3388 doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
2850 DB_DEF_PDU_WRB_INDEX_SHIFT; 3389 << DB_DEF_PDU_WRB_INDEX_SHIFT;
2851 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 3390 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
2852 3391
2853 iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET); 3392 iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
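beiscsi_offload_connection() above, and beiscsi_mtask() later in this diff, build the TXULP doorbell the same way: the connection ID in the low bits, the WRB index in a masked field above it, and a "number posted" count on top. A sketch of that packing with made-up shift and mask values; the real DB_* constants live in the driver headers and are not reproduced here:

    #include <stdint.h>
    #include <stdio.h>

    /* illustrative values only -- the driver's DB_* constants differ */
    #define CID_MASK         0x03FF
    #define WRB_INDEX_MASK   0x00FF
    #define WRB_INDEX_SHIFT  16
    #define NUM_POSTED_SHIFT 24

    static uint32_t make_doorbell(uint16_t cid, uint8_t wrb_index,
                                  uint8_t num_posted)
    {
        uint32_t db = 0;
        db |= cid & CID_MASK;
        db |= (uint32_t)(wrb_index & WRB_INDEX_MASK) << WRB_INDEX_SHIFT;
        db |= (uint32_t)num_posted << NUM_POSTED_SHIFT;
        return db;  /* the driver then iowrite32()s this to DB_TXULP0 */
    }

    int main(void)
    {
        printf("doorbell = 0x%08x\n", make_doorbell(5, 3, 1));
        return 0;
    }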
@@ -2856,7 +3395,7 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
2856static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt, 3395static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
2857 int *index, int *age) 3396 int *index, int *age)
2858{ 3397{
2859 *index = be32_to_cpu(itt) >> 16; 3398 *index = (int)itt;
2860 if (age) 3399 if (age)
2861 *age = conn->session->age; 3400 *age = conn->session->age;
2862} 3401}
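The beiscsi_parse_pdu() change above pairs with the ITT rework in beiscsi_alloc_pdu() a few hunks below: the on-wire ITT is now derived from driver state (WRB index in the top 16 bits, SGL index in the bottom 16), while libiscsi's own tag is stashed in io_task->libiscsi_itt, so the parse path can hand the stored tag back directly. A user-space sketch of just the pack/unpack arithmetic; byte-order handling is omitted, the driver wraps this in cpu_to_be32/be32_to_cpu:

    #include <stdint.h>
    #include <assert.h>

    static uint32_t pack_itt(uint16_t wrb_index, uint16_t sgl_index)
    {
        return ((uint32_t)wrb_index << 16) | sgl_index;
    }

    static void unpack_itt(uint32_t itt, uint16_t *wrb_index,
                           uint16_t *sgl_index)
    {
        *wrb_index = itt >> 16;
        *sgl_index = itt & 0xFFFF;
    }

    int main(void)
    {
        uint16_t w, s;
        unpack_itt(pack_itt(7, 42), &w, &s);
        assert(w == 7 && s == 42);
        return 0;
    }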
@@ -2885,15 +3424,14 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
2885 3424
2886 io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool, 3425 io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
2887 GFP_KERNEL, &paddr); 3426 GFP_KERNEL, &paddr);
2888
2889 if (!io_task->cmd_bhs) 3427 if (!io_task->cmd_bhs)
2890 return -ENOMEM; 3428 return -ENOMEM;
2891
2892 io_task->bhs_pa.u.a64.address = paddr; 3429 io_task->bhs_pa.u.a64.address = paddr;
3430 io_task->libiscsi_itt = (itt_t)task->itt;
2893 io_task->pwrb_handle = alloc_wrb_handle(phba, 3431 io_task->pwrb_handle = alloc_wrb_handle(phba,
2894 beiscsi_conn->beiscsi_conn_cid, 3432 beiscsi_conn->beiscsi_conn_cid -
2895 task->itt); 3433 phba->fw_config.iscsi_cid_start
2896 io_task->pwrb_handle->pio_handle = task; 3434 );
2897 io_task->conn = beiscsi_conn; 3435 io_task->conn = beiscsi_conn;
2898 3436
2899 task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr; 3437 task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
@@ -2905,10 +3443,9 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
2905 spin_unlock(&phba->io_sgl_lock); 3443 spin_unlock(&phba->io_sgl_lock);
2906 if (!io_task->psgl_handle) 3444 if (!io_task->psgl_handle)
2907 goto free_hndls; 3445 goto free_hndls;
2908
2909 } else { 3446 } else {
2910 io_task->scsi_cmnd = NULL; 3447 io_task->scsi_cmnd = NULL;
2911 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) { 3448 if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
2912 if (!beiscsi_conn->login_in_progress) { 3449 if (!beiscsi_conn->login_in_progress) {
2913 spin_lock(&phba->mgmt_sgl_lock); 3450 spin_lock(&phba->mgmt_sgl_lock);
2914 io_task->psgl_handle = (struct sgl_handle *) 3451 io_task->psgl_handle = (struct sgl_handle *)
@@ -2932,14 +3469,19 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
2932 goto free_hndls; 3469 goto free_hndls;
2933 } 3470 }
2934 } 3471 }
2935 itt = (itt_t) cpu_to_be32(((unsigned int)task->itt << 16) | 3472 itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
2936 (unsigned int)(io_task->psgl_handle->sgl_index)); 3473 wrb_index << 16) | (unsigned int)
3474 (io_task->psgl_handle->sgl_index));
3475 io_task->pwrb_handle->pio_handle = task;
3476
2937 io_task->cmd_bhs->iscsi_hdr.itt = itt; 3477 io_task->cmd_bhs->iscsi_hdr.itt = itt;
2938 return 0; 3478 return 0;
2939 3479
2940free_hndls: 3480free_hndls:
2941 phwi_ctrlr = phba->phwi_ctrlr; 3481 phwi_ctrlr = phba->phwi_ctrlr;
2942 pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid]; 3482 pwrb_context = &phwi_ctrlr->wrb_context[
3483 beiscsi_conn->beiscsi_conn_cid -
3484 phba->fw_config.iscsi_cid_start];
2943 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle); 3485 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
2944 io_task->pwrb_handle = NULL; 3486 io_task->pwrb_handle = NULL;
2945 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs, 3487 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
@@ -2959,7 +3501,8 @@ static void beiscsi_cleanup_task(struct iscsi_task *task)
2959 struct hwi_controller *phwi_ctrlr; 3501 struct hwi_controller *phwi_ctrlr;
2960 3502
2961 phwi_ctrlr = phba->phwi_ctrlr; 3503 phwi_ctrlr = phba->phwi_ctrlr;
2962 pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid]; 3504 pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid
3505 - phba->fw_config.iscsi_cid_start];
2963 if (io_task->pwrb_handle) { 3506 if (io_task->pwrb_handle) {
2964 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle); 3507 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
2965 io_task->pwrb_handle = NULL; 3508 io_task->pwrb_handle = NULL;
@@ -3006,7 +3549,6 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
3006 io_task->bhs_len = sizeof(struct be_cmd_bhs); 3549 io_task->bhs_len = sizeof(struct be_cmd_bhs);
3007 3550
3008 if (writedir) { 3551 if (writedir) {
3009 SE_DEBUG(DBG_LVL_4, " WRITE Command \t");
3010 memset(&io_task->cmd_bhs->iscsi_data_pdu, 0, 48); 3552 memset(&io_task->cmd_bhs->iscsi_data_pdu, 0, 48);
3011 AMAP_SET_BITS(struct amap_pdu_data_out, itt, 3553 AMAP_SET_BITS(struct amap_pdu_data_out, itt,
3012 &io_task->cmd_bhs->iscsi_data_pdu, 3554 &io_task->cmd_bhs->iscsi_data_pdu,
@@ -3016,11 +3558,12 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
3016 ISCSI_OPCODE_SCSI_DATA_OUT); 3558 ISCSI_OPCODE_SCSI_DATA_OUT);
3017 AMAP_SET_BITS(struct amap_pdu_data_out, final_bit, 3559 AMAP_SET_BITS(struct amap_pdu_data_out, final_bit,
3018 &io_task->cmd_bhs->iscsi_data_pdu, 1); 3560 &io_task->cmd_bhs->iscsi_data_pdu, 1);
3019 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_WR_CMD); 3561 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3562 INI_WR_CMD);
3020 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1); 3563 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
3021 } else { 3564 } else {
3022 SE_DEBUG(DBG_LVL_4, "READ Command \t"); 3565 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3023 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_RD_CMD); 3566 INI_RD_CMD);
3024 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0); 3567 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
3025 } 3568 }
3026 memcpy(&io_task->cmd_bhs->iscsi_data_pdu. 3569 memcpy(&io_task->cmd_bhs->iscsi_data_pdu.
@@ -3055,15 +3598,17 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
3055 3598
3056static int beiscsi_mtask(struct iscsi_task *task) 3599static int beiscsi_mtask(struct iscsi_task *task)
3057{ 3600{
3058 struct beiscsi_io_task *aborted_io_task, *io_task = task->dd_data; 3601 struct beiscsi_io_task *io_task = task->dd_data;
3059 struct iscsi_conn *conn = task->conn; 3602 struct iscsi_conn *conn = task->conn;
3060 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 3603 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3061 struct beiscsi_hba *phba = beiscsi_conn->phba; 3604 struct beiscsi_hba *phba = beiscsi_conn->phba;
3062 struct iscsi_wrb *pwrb = NULL; 3605 struct iscsi_wrb *pwrb = NULL;
3063 unsigned int doorbell = 0; 3606 unsigned int doorbell = 0;
3064 struct iscsi_task *aborted_task; 3607 unsigned int cid;
3065 3608
3609 cid = beiscsi_conn->beiscsi_conn_cid;
3066 pwrb = io_task->pwrb_handle->pwrb; 3610 pwrb = io_task->pwrb_handle->pwrb;
3611 memset(pwrb, 0, sizeof(*pwrb));
3067 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 3612 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
3068 be32_to_cpu(task->cmdsn)); 3613 be32_to_cpu(task->cmdsn));
3069 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb, 3614 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
@@ -3073,40 +3618,37 @@ static int beiscsi_mtask(struct iscsi_task *task)
3073 3618
3074 switch (task->hdr->opcode & ISCSI_OPCODE_MASK) { 3619 switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
3075 case ISCSI_OP_LOGIN: 3620 case ISCSI_OP_LOGIN:
3076 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, TGT_DM_CMD); 3621 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3622 TGT_DM_CMD);
3077 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); 3623 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3078 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1); 3624 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
3079 hwi_write_buffer(pwrb, task); 3625 hwi_write_buffer(pwrb, task);
3080 break; 3626 break;
3081 case ISCSI_OP_NOOP_OUT: 3627 case ISCSI_OP_NOOP_OUT:
3082 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_RD_CMD); 3628 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3629 INI_RD_CMD);
3630 if (task->hdr->ttt == ISCSI_RESERVED_TAG)
3631 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3632 else
3633 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1);
3083 hwi_write_buffer(pwrb, task); 3634 hwi_write_buffer(pwrb, task);
3084 break; 3635 break;
3085 case ISCSI_OP_TEXT: 3636 case ISCSI_OP_TEXT:
3086 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_WR_CMD); 3637 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3087 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1); 3638 TGT_DM_CMD);
3639 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3088 hwi_write_buffer(pwrb, task); 3640 hwi_write_buffer(pwrb, task);
3089 break; 3641 break;
3090 case ISCSI_OP_SCSI_TMFUNC: 3642 case ISCSI_OP_SCSI_TMFUNC:
3091 aborted_task = iscsi_itt_to_task(conn, 3643 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3092 ((struct iscsi_tm *)task->hdr)->rtt); 3644 INI_TMF_CMD);
3093 if (!aborted_task)
3094 return 0;
3095 aborted_io_task = aborted_task->dd_data;
3096 if (!aborted_io_task->scsi_cmnd)
3097 return 0;
3098
3099 mgmt_invalidate_icds(phba,
3100 aborted_io_task->psgl_handle->sgl_index,
3101 beiscsi_conn->beiscsi_conn_cid);
3102 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_TMF_CMD);
3103 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); 3645 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3104 hwi_write_buffer(pwrb, task); 3646 hwi_write_buffer(pwrb, task);
3105 break; 3647 break;
3106 case ISCSI_OP_LOGOUT: 3648 case ISCSI_OP_LOGOUT:
3107 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); 3649 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3108 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, 3650 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3109 HWH_TYPE_LOGOUT); 3651 HWH_TYPE_LOGOUT);
3110 hwi_write_buffer(pwrb, task); 3652 hwi_write_buffer(pwrb, task);
3111 break; 3653 break;
3112 3654
@@ -3117,12 +3659,12 @@ static int beiscsi_mtask(struct iscsi_task *task)
3117 } 3659 }
3118 3660
3119 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, 3661 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
3120 be32_to_cpu(task->data_count)); 3662 task->data_count);
3121 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb, 3663 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
3122 io_task->pwrb_handle->nxt_wrb_index); 3664 io_task->pwrb_handle->nxt_wrb_index);
3123 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb)); 3665 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
3124 3666
3125 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; 3667 doorbell |= cid & DB_WRB_POST_CID_MASK;
3126 doorbell |= (io_task->pwrb_handle->wrb_index & 3668 doorbell |= (io_task->pwrb_handle->wrb_index &
3127 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; 3669 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
3128 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 3670 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
@@ -3132,17 +3674,12 @@ static int beiscsi_mtask(struct iscsi_task *task)
3132 3674
3133static int beiscsi_task_xmit(struct iscsi_task *task) 3675static int beiscsi_task_xmit(struct iscsi_task *task)
3134{ 3676{
3135 struct iscsi_conn *conn = task->conn;
3136 struct beiscsi_io_task *io_task = task->dd_data; 3677 struct beiscsi_io_task *io_task = task->dd_data;
3137 struct scsi_cmnd *sc = task->sc; 3678 struct scsi_cmnd *sc = task->sc;
3138 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3139 struct scatterlist *sg; 3679 struct scatterlist *sg;
3140 int num_sg; 3680 int num_sg;
3141 unsigned int writedir = 0, xferlen = 0; 3681 unsigned int writedir = 0, xferlen = 0;
3142 3682
3143 SE_DEBUG(DBG_LVL_4, "\n cid=%d In beiscsi_task_xmit task=%p conn=%p \t"
3144 "beiscsi_conn=%p \n", beiscsi_conn->beiscsi_conn_cid,
3145 task, conn, beiscsi_conn);
3146 if (!sc) 3683 if (!sc)
3147 return beiscsi_mtask(task); 3684 return beiscsi_mtask(task);
3148 3685
@@ -3168,6 +3705,10 @@ static int beiscsi_task_xmit(struct iscsi_task *task)
3168static void beiscsi_remove(struct pci_dev *pcidev) 3705static void beiscsi_remove(struct pci_dev *pcidev)
3169{ 3706{
3170 struct beiscsi_hba *phba = NULL; 3707 struct beiscsi_hba *phba = NULL;
3708 struct hwi_controller *phwi_ctrlr;
3709 struct hwi_context_memory *phwi_context;
3710 struct be_eq_obj *pbe_eq;
3711 unsigned int i, msix_vec;
3171 3712
3172 phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev); 3713 phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
3173 if (!phba) { 3714 if (!phba) {
@@ -3175,12 +3716,24 @@ static void beiscsi_remove(struct pci_dev *pcidev)
3175 return; 3716 return;
3176 } 3717 }
3177 3718
3719 phwi_ctrlr = phba->phwi_ctrlr;
3720 phwi_context = phwi_ctrlr->phwi_ctxt;
3178 hwi_disable_intr(phba); 3721 hwi_disable_intr(phba);
3179 if (phba->pcidev->irq) 3722 if (phba->msix_enabled) {
3180 free_irq(phba->pcidev->irq, phba); 3723 for (i = 0; i <= phba->num_cpus; i++) {
3724 msix_vec = phba->msix_entries[i].vector;
3725 free_irq(msix_vec, &phwi_context->be_eq[i]);
3726 }
3727 } else
3728 if (phba->pcidev->irq)
3729 free_irq(phba->pcidev->irq, phba);
3730 pci_disable_msix(phba->pcidev);
3181 destroy_workqueue(phba->wq); 3731 destroy_workqueue(phba->wq);
3182 if (blk_iopoll_enabled) 3732 if (blk_iopoll_enabled)
3183 blk_iopoll_disable(&phba->iopoll); 3733 for (i = 0; i < phba->num_cpus; i++) {
3734 pbe_eq = &phwi_context->be_eq[i];
3735 blk_iopoll_disable(&pbe_eq->iopoll);
3736 }
3184 3737
3185 beiscsi_clean_port(phba); 3738 beiscsi_clean_port(phba);
3186 beiscsi_free_mem(phba); 3739 beiscsi_free_mem(phba);
@@ -3194,11 +3747,29 @@ static void beiscsi_remove(struct pci_dev *pcidev)
3194 iscsi_host_free(phba->shost); 3747 iscsi_host_free(phba->shost);
3195} 3748}
3196 3749
3750static void beiscsi_msix_enable(struct beiscsi_hba *phba)
3751{
3752 int i, status;
3753
3754 for (i = 0; i <= phba->num_cpus; i++)
3755 phba->msix_entries[i].entry = i;
3756
3757 status = pci_enable_msix(phba->pcidev, phba->msix_entries,
3758 (phba->num_cpus + 1));
3759 if (!status)
3760 phba->msix_enabled = true;
3761
3762 return;
3763}
3764
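A detail worth calling out in beiscsi_msix_enable() and its callers: the driver sets up num_cpus I/O event queues plus one extra EQ for MCC, which is why MSI-X loops in probe and remove run i <= num_cpus while the blk_iopoll loops run i < num_cpus. A tiny sketch of that indexing convention, pure illustration rather than driver code:

    #include <stdio.h>

    int main(void)
    {
        const int num_cpus = 4;             /* I/O EQs, one per CPU */
        const int total_eqs = num_cpus + 1; /* +1 EQ reserved for MCC */

        for (int i = 0; i < total_eqs; i++) /* i <= num_cpus in the driver */
            printf("request IRQ for eq %d (%s)\n", i,
                   i < num_cpus ? "I/O" : "MCC");

        for (int i = 0; i < num_cpus; i++)  /* iopoll only on I/O EQs */
            printf("enable iopoll for eq %d\n", i);
        return 0;
    }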
3197static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev, 3765static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
3198 const struct pci_device_id *id) 3766 const struct pci_device_id *id)
3199{ 3767{
3200 struct beiscsi_hba *phba = NULL; 3768 struct beiscsi_hba *phba = NULL;
3201 int ret; 3769 struct hwi_controller *phwi_ctrlr;
3770 struct hwi_context_memory *phwi_context;
3771 struct be_eq_obj *pbe_eq;
3772 int ret, msix_vec, num_cpus, i;
3202 3773
3203 ret = beiscsi_enable_pci(pcidev); 3774 ret = beiscsi_enable_pci(pcidev);
3204 if (ret < 0) { 3775 if (ret < 0) {
@@ -3214,7 +3785,29 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
3214 goto disable_pci; 3785 goto disable_pci;
3215 } 3786 }
3216 3787
3217 pci_set_drvdata(pcidev, phba); 3788 switch (pcidev->device) {
3789 case BE_DEVICE_ID1:
3790 case OC_DEVICE_ID1:
3791 case OC_DEVICE_ID2:
3792 phba->generation = BE_GEN2;
3793 break;
3794 case BE_DEVICE_ID2:
3795 case OC_DEVICE_ID3:
3796 phba->generation = BE_GEN3;
3797 break;
3798 default:
3799 phba->generation = 0;
3800 }
3801
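The device-ID switch above is an ID-to-generation mapping; the same thing written as a lookup table is easier to extend when new ASICs appear. A hedged alternative sketch, with the IDs copied from the be_main.h hunk later in this diff and the enum values chosen arbitrarily for illustration:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    enum { BE_GEN_UNKNOWN = 0, BE_GEN2 = 2, BE_GEN3 = 3 };

    static const struct { uint16_t device; int generation; } gen_tbl[] = {
        { 0x212, BE_GEN2 },  /* BE_DEVICE_ID1 */
        { 0x702, BE_GEN2 },  /* OC_DEVICE_ID1 */
        { 0x703, BE_GEN2 },  /* OC_DEVICE_ID2 */
        { 0x222, BE_GEN3 },  /* BE_DEVICE_ID2 */
        { 0x712, BE_GEN3 },  /* OC_DEVICE_ID3 */
    };

    static int lookup_generation(uint16_t device)
    {
        for (size_t i = 0; i < sizeof(gen_tbl) / sizeof(gen_tbl[0]); i++)
            if (gen_tbl[i].device == device)
                return gen_tbl[i].generation;
        return BE_GEN_UNKNOWN;
    }

    int main(void)
    {
        printf("0x222 -> gen %d\n", lookup_generation(0x222));
        return 0;
    }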
3802 if (enable_msix)
3803 num_cpus = find_num_cpus();
3804 else
3805 num_cpus = 1;
3806 phba->num_cpus = num_cpus;
3807 SE_DEBUG(DBG_LVL_8, "num_cpus = %d \n", phba->num_cpus);
3808
3809 if (enable_msix)
3810 beiscsi_msix_enable(phba);
3218 ret = be_ctrl_init(phba, pcidev); 3811 ret = be_ctrl_init(phba, pcidev);
3219 if (ret) { 3812 if (ret) {
3220 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-" 3813 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
@@ -3225,7 +3818,15 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
3225 spin_lock_init(&phba->io_sgl_lock); 3818 spin_lock_init(&phba->io_sgl_lock);
3226 spin_lock_init(&phba->mgmt_sgl_lock); 3819 spin_lock_init(&phba->mgmt_sgl_lock);
3227 spin_lock_init(&phba->isr_lock); 3820 spin_lock_init(&phba->isr_lock);
3821 ret = mgmt_get_fw_config(&phba->ctrl, phba);
3822 if (ret != 0) {
3823 shost_printk(KERN_ERR, phba->shost,
3824 "Error getting fw config\n");
3825 goto free_port;
3826 }
3827 phba->shost->max_id = phba->fw_config.iscsi_cid_count;
3228 beiscsi_get_params(phba); 3828 beiscsi_get_params(phba);
3829 phba->shost->can_queue = phba->params.ios_per_ctrl;
3229 ret = beiscsi_init_port(phba); 3830 ret = beiscsi_init_port(phba);
3230 if (ret < 0) { 3831 if (ret < 0) {
3231 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-" 3832 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
@@ -3233,9 +3834,18 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
3233 goto free_port; 3834 goto free_port;
3234 } 3835 }
3235 3836
3837 for (i = 0; i < MAX_MCC_CMD ; i++) {
3838 init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
3839 phba->ctrl.mcc_tag[i] = i + 1;
3840 phba->ctrl.mcc_numtag[i + 1] = 0;
3841 phba->ctrl.mcc_tag_available++;
3842 }
3843
3844 phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
3845
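The loop above seeds a fixed pool of MCC tags (1..MAX_MCC_CMD) plus alloc/free cursors; alloc_mcc_tag() hands them out and 0 doubles as "pool empty", which is why callers in be_mgmt.c bail out when the returned tag is zero. A user-space model of such a circular tag pool; the sizes and internal layout here are illustrative, not the driver's exact bookkeeping:

    #include <stdio.h>

    #define MAX_MCC_CMD 4

    struct tag_pool {
        unsigned short tags[MAX_MCC_CMD];
        unsigned alloc_index, free_index, available;
    };

    static void pool_init(struct tag_pool *p)
    {
        for (int i = 0; i < MAX_MCC_CMD; i++) {
            p->tags[i] = i + 1;     /* valid tags are 1..MAX_MCC_CMD */
            p->available++;
        }
        p->alloc_index = p->free_index = 0;
    }

    static unsigned short tag_alloc(struct tag_pool *p)
    {
        if (!p->available)
            return 0;               /* 0 means "no tag", as in the driver */
        unsigned short tag = p->tags[p->alloc_index];
        p->alloc_index = (p->alloc_index + 1) % MAX_MCC_CMD;
        p->available--;
        return tag;
    }

    static void tag_free(struct tag_pool *p, unsigned short tag)
    {
        p->tags[p->free_index] = tag;
        p->free_index = (p->free_index + 1) % MAX_MCC_CMD;
        p->available++;
    }

    int main(void)
    {
        struct tag_pool p = { 0 };
        pool_init(&p);
        unsigned short t = tag_alloc(&p);
        printf("got tag %u\n", t);
        tag_free(&p, t);
        return 0;
    }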
3236 snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u", 3846 snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u",
3237 phba->shost->host_no); 3847 phba->shost->host_no);
3238 phba->wq = create_singlethread_workqueue(phba->wq_name); 3848 phba->wq = create_workqueue(phba->wq_name);
3239 if (!phba->wq) { 3849 if (!phba->wq) {
3240 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-" 3850 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3241 "Failed to allocate work queue\n"); 3851 "Failed to allocate work queue\n");
@@ -3244,11 +3854,16 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
3244 3854
3245 INIT_WORK(&phba->work_cqs, beiscsi_process_all_cqs); 3855 INIT_WORK(&phba->work_cqs, beiscsi_process_all_cqs);
3246 3856
3857 phwi_ctrlr = phba->phwi_ctrlr;
3858 phwi_context = phwi_ctrlr->phwi_ctxt;
3247 if (blk_iopoll_enabled) { 3859 if (blk_iopoll_enabled) {
3248 blk_iopoll_init(&phba->iopoll, be_iopoll_budget, be_iopoll); 3860 for (i = 0; i < phba->num_cpus; i++) {
3249 blk_iopoll_enable(&phba->iopoll); 3861 pbe_eq = &phwi_context->be_eq[i];
3862 blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
3863 be_iopoll);
3864 blk_iopoll_enable(&pbe_eq->iopoll);
3865 }
3250 } 3866 }
3251
3252 ret = beiscsi_init_irqs(phba); 3867 ret = beiscsi_init_irqs(phba);
3253 if (ret < 0) { 3868 if (ret < 0) {
3254 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-" 3869 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
@@ -3261,17 +3876,26 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
3261 "Failed to hwi_enable_intr\n"); 3876 "Failed to hwi_enable_intr\n");
3262 goto free_ctrlr; 3877 goto free_ctrlr;
3263 } 3878 }
3264
3265 SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED \n\n\n"); 3879 SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED \n\n\n");
3266 return 0; 3880 return 0;
3267 3881
3268free_ctrlr: 3882free_ctrlr:
3269 if (phba->pcidev->irq) 3883 if (phba->msix_enabled) {
3270 free_irq(phba->pcidev->irq, phba); 3884 for (i = 0; i <= phba->num_cpus; i++) {
3885 msix_vec = phba->msix_entries[i].vector;
3886 free_irq(msix_vec, &phwi_context->be_eq[i]);
3887 }
3888 } else
3889 if (phba->pcidev->irq)
3890 free_irq(phba->pcidev->irq, phba);
3891 pci_disable_msix(phba->pcidev);
3271free_blkenbld: 3892free_blkenbld:
3272 destroy_workqueue(phba->wq); 3893 destroy_workqueue(phba->wq);
3273 if (blk_iopoll_enabled) 3894 if (blk_iopoll_enabled)
3274 blk_iopoll_disable(&phba->iopoll); 3895 for (i = 0; i < phba->num_cpus; i++) {
3896 pbe_eq = &phwi_context->be_eq[i];
3897 blk_iopoll_disable(&pbe_eq->iopoll);
3898 }
3275free_twq: 3899free_twq:
3276 beiscsi_clean_port(phba); 3900 beiscsi_clean_port(phba);
3277 beiscsi_free_mem(phba); 3901 beiscsi_free_mem(phba);
@@ -3293,7 +3917,7 @@ disable_pci:
3293struct iscsi_transport beiscsi_iscsi_transport = { 3917struct iscsi_transport beiscsi_iscsi_transport = {
3294 .owner = THIS_MODULE, 3918 .owner = THIS_MODULE,
3295 .name = DRV_NAME, 3919 .name = DRV_NAME,
3296 .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | 3920 .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO |
3297 CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD, 3921 CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
3298 .param_mask = ISCSI_MAX_RECV_DLENGTH | 3922 .param_mask = ISCSI_MAX_RECV_DLENGTH |
3299 ISCSI_MAX_XMIT_DLENGTH | 3923 ISCSI_MAX_XMIT_DLENGTH |
@@ -3351,6 +3975,7 @@ static struct pci_driver beiscsi_pci_driver = {
3351 .id_table = beiscsi_pci_id_table 3975 .id_table = beiscsi_pci_id_table
3352}; 3976};
3353 3977
3978
3354static int __init beiscsi_module_init(void) 3979static int __init beiscsi_module_init(void)
3355{ 3980{
3356 int ret; 3981 int ret;
@@ -3361,7 +3986,7 @@ static int __init beiscsi_module_init(void)
3361 SE_DEBUG(DBG_LVL_1, 3986 SE_DEBUG(DBG_LVL_1,
3362 "beiscsi_module_init - Unable to register beiscsi" 3987 "beiscsi_module_init - Unable to register beiscsi"
3363 "transport.\n"); 3988 "transport.\n");
3364 ret = -ENOMEM; 3989 return -ENOMEM;
3365 } 3990 }
3366 SE_DEBUG(DBG_LVL_8, "In beiscsi_module_init, tt=%p \n", 3991 SE_DEBUG(DBG_LVL_8, "In beiscsi_module_init, tt=%p \n",
3367 &beiscsi_iscsi_transport); 3992 &beiscsi_iscsi_transport);
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 53c9b70ac7ac..87ec21280a37 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2009 ServerEngines 2 * Copyright (C) 2005 - 2010 ServerEngines
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -21,11 +21,9 @@
21#ifndef _BEISCSI_MAIN_ 21#ifndef _BEISCSI_MAIN_
22#define _BEISCSI_MAIN_ 22#define _BEISCSI_MAIN_
23 23
24
25#include <linux/kernel.h> 24#include <linux/kernel.h>
26#include <linux/pci.h> 25#include <linux/pci.h>
27#include <linux/in.h> 26#include <linux/in.h>
28#include <linux/blk-iopoll.h>
29#include <scsi/scsi.h> 27#include <scsi/scsi.h>
30#include <scsi/scsi_cmnd.h> 28#include <scsi/scsi_cmnd.h>
31#include <scsi/scsi_device.h> 29#include <scsi/scsi_device.h>
@@ -35,39 +33,36 @@
35#include <scsi/scsi_transport_iscsi.h> 33#include <scsi/scsi_transport_iscsi.h>
36 34
37#include "be.h" 35#include "be.h"
38
39
40
41#define DRV_NAME "be2iscsi" 36#define DRV_NAME "be2iscsi"
42#define BUILD_STR "2.0.527.0" 37#define BUILD_STR "2.0.527.0"
43
44#define BE_NAME "ServerEngines BladeEngine2" \ 38#define BE_NAME "ServerEngines BladeEngine2" \
45 "Linux iSCSI Driver version" BUILD_STR 39 "Linux iSCSI Driver version" BUILD_STR
46#define DRV_DESC BE_NAME " " "Driver" 40#define DRV_DESC BE_NAME " " "Driver"
47 41
48#define BE_VENDOR_ID 0x19A2 42#define BE_VENDOR_ID 0x19A2
43/* DEVICE ID's for BE2 */
49#define BE_DEVICE_ID1 0x212 44#define BE_DEVICE_ID1 0x212
50#define OC_DEVICE_ID1 0x702 45#define OC_DEVICE_ID1 0x702
51#define OC_DEVICE_ID2 0x703 46#define OC_DEVICE_ID2 0x703
52 47
53#define BE2_MAX_SESSIONS 64 48/* DEVICE ID's for BE3 */
49#define BE_DEVICE_ID2 0x222
50#define OC_DEVICE_ID3 0x712
51
52#define BE2_IO_DEPTH 1024
53#define BE2_MAX_SESSIONS 256
54#define BE2_CMDS_PER_CXN 128 54#define BE2_CMDS_PER_CXN 128
55#define BE2_LOGOUTS BE2_MAX_SESSIONS
56#define BE2_TMFS 16 55#define BE2_TMFS 16
57#define BE2_NOPOUT_REQ 16 56#define BE2_NOPOUT_REQ 16
58#define BE2_ASYNCPDUS BE2_MAX_SESSIONS
59#define BE2_MAX_ICDS 2048
60#define BE2_SGE 32 57#define BE2_SGE 32
61#define BE2_DEFPDU_HDR_SZ 64 58#define BE2_DEFPDU_HDR_SZ 64
62#define BE2_DEFPDU_DATA_SZ 8192 59#define BE2_DEFPDU_DATA_SZ 8192
63#define BE2_IO_DEPTH \
64 (BE2_MAX_ICDS / 2 - (BE2_LOGOUTS + BE2_TMFS + BE2_NOPOUT_REQ))
65 60
66#define BEISCSI_SGLIST_ELEMENTS BE2_SGE 61#define MAX_CPUS 31
62#define BEISCSI_SGLIST_ELEMENTS 30
67 63
68#define BEISCSI_MAX_CMNDS 1024 /* Max IO's per Ctrlr sht->can_queue */
69#define BEISCSI_CMD_PER_LUN 128 /* scsi_host->cmd_per_lun */ 64#define BEISCSI_CMD_PER_LUN 128 /* scsi_host->cmd_per_lun */
70#define BEISCSI_MAX_SECTORS 2048 /* scsi_host->max_sectors */ 65#define BEISCSI_MAX_SECTORS 256 /* scsi_host->max_sectors */
71 66
72#define BEISCSI_MAX_CMD_LEN 16 /* scsi_host->max_cmd_len */ 67#define BEISCSI_MAX_CMD_LEN 16 /* scsi_host->max_cmd_len */
73#define BEISCSI_NUM_MAX_LUN 256 /* scsi_host->max_lun */ 68#define BEISCSI_NUM_MAX_LUN 256 /* scsi_host->max_lun */
@@ -79,7 +74,7 @@
79#define BE_SENSE_INFO_SIZE 258 74#define BE_SENSE_INFO_SIZE 258
80#define BE_ISCSI_PDU_HEADER_SIZE 64 75#define BE_ISCSI_PDU_HEADER_SIZE 64
81#define BE_MIN_MEM_SIZE 16384 76#define BE_MIN_MEM_SIZE 16384
82 77#define MAX_CMD_SZ 65536
83#define IIOC_SCSI_DATA 0x05 /* Write Operation */ 78#define IIOC_SCSI_DATA 0x05 /* Write Operation */
84 79
85#define DBG_LVL 0x00000001 80#define DBG_LVL 0x00000001
@@ -100,6 +95,8 @@ do { \
100 } \ 95 } \
101} while (0); 96} while (0);
102 97
98#define BE_ADAPTER_UP 0x00000000
99#define BE_ADAPTER_LINK_DOWN 0x00000001
103/** 100/**
104 * hardware needs the async PDU buffers to be posted in multiples of 8 101 * hardware needs the async PDU buffers to be posted in multiples of 8
105 * So have at least 8 of them by default 102 * So have at least 8 of them by default
@@ -160,21 +157,19 @@ do { \
160 157
161enum be_mem_enum { 158enum be_mem_enum {
162 HWI_MEM_ADDN_CONTEXT, 159 HWI_MEM_ADDN_CONTEXT,
163 HWI_MEM_CQ,
164 HWI_MEM_EQ,
165 HWI_MEM_WRB, 160 HWI_MEM_WRB,
166 HWI_MEM_WRBH, 161 HWI_MEM_WRBH,
167 HWI_MEM_SGLH, /* 5 */ 162 HWI_MEM_SGLH,
168 HWI_MEM_SGE, 163 HWI_MEM_SGE,
169 HWI_MEM_ASYNC_HEADER_BUF, 164 HWI_MEM_ASYNC_HEADER_BUF, /* 5 */
170 HWI_MEM_ASYNC_DATA_BUF, 165 HWI_MEM_ASYNC_DATA_BUF,
171 HWI_MEM_ASYNC_HEADER_RING, 166 HWI_MEM_ASYNC_HEADER_RING,
172 HWI_MEM_ASYNC_DATA_RING, /* 10 */ 167 HWI_MEM_ASYNC_DATA_RING,
173 HWI_MEM_ASYNC_HEADER_HANDLE, 168 HWI_MEM_ASYNC_HEADER_HANDLE,
174 HWI_MEM_ASYNC_DATA_HANDLE, 169 HWI_MEM_ASYNC_DATA_HANDLE, /* 10 */
175 HWI_MEM_ASYNC_PDU_CONTEXT, 170 HWI_MEM_ASYNC_PDU_CONTEXT,
176 ISCSI_MEM_GLOBAL_HEADER, 171 ISCSI_MEM_GLOBAL_HEADER,
177 SE_MEM_MAX /* 15 */ 172 SE_MEM_MAX
178}; 173};
179 174
180struct be_bus_address32 { 175struct be_bus_address32 {
@@ -212,6 +207,9 @@ struct be_mem_descriptor {
212 207
213struct sgl_handle { 208struct sgl_handle {
214 unsigned int sgl_index; 209 unsigned int sgl_index;
210 unsigned int type;
211 unsigned int cid;
212 struct iscsi_task *task;
215 struct iscsi_sge *pfrag; 213 struct iscsi_sge *pfrag;
216}; 214};
217 215
@@ -259,6 +257,11 @@ struct hba_parameters {
259 unsigned int num_sge; 257 unsigned int num_sge;
260}; 258};
261 259
260struct invalidate_command_table {
261 unsigned short icd;
262 unsigned short cid;
263} __packed;
264
262struct beiscsi_hba { 265struct beiscsi_hba {
263 struct hba_parameters params; 266 struct hba_parameters params;
264 struct hwi_controller *phwi_ctrlr; 267 struct hwi_controller *phwi_ctrlr;
@@ -274,13 +277,17 @@ struct beiscsi_hba {
274 struct pci_dev *pcidev; 277 struct pci_dev *pcidev;
275 unsigned int state; 278 unsigned int state;
276 unsigned short asic_revision; 279 unsigned short asic_revision;
277 struct blk_iopoll iopoll; 280 unsigned int num_cpus;
281 unsigned int nxt_cqid;
282 struct msix_entry msix_entries[MAX_CPUS + 1];
283 bool msix_enabled;
278 struct be_mem_descriptor *init_mem; 284 struct be_mem_descriptor *init_mem;
279 285
280 unsigned short io_sgl_alloc_index; 286 unsigned short io_sgl_alloc_index;
281 unsigned short io_sgl_free_index; 287 unsigned short io_sgl_free_index;
282 unsigned short io_sgl_hndl_avbl; 288 unsigned short io_sgl_hndl_avbl;
283 struct sgl_handle **io_sgl_hndl_base; 289 struct sgl_handle **io_sgl_hndl_base;
290 struct sgl_handle **sgl_hndl_array;
284 291
285 unsigned short eh_sgl_alloc_index; 292 unsigned short eh_sgl_alloc_index;
286 unsigned short eh_sgl_free_index; 293 unsigned short eh_sgl_free_index;
@@ -315,6 +322,7 @@ struct beiscsi_hba {
315 unsigned short cid_alloc; 322 unsigned short cid_alloc;
316 unsigned short cid_free; 323 unsigned short cid_free;
317 unsigned short avlbl_cids; 324 unsigned short avlbl_cids;
325 unsigned short iscsi_features;
318 spinlock_t cid_lock; 326 spinlock_t cid_lock;
319 } fw_config; 327 } fw_config;
320 328
@@ -325,6 +333,9 @@ struct beiscsi_hba {
325 struct workqueue_struct *wq; /* The actual work queue */ 333 struct workqueue_struct *wq; /* The actual work queue */
326 struct work_struct work_cqs; /* The work being queued */ 334 struct work_struct work_cqs; /* The work being queued */
327 struct be_ctrl_info ctrl; 335 struct be_ctrl_info ctrl;
336 unsigned int generation;
337 struct invalidate_command_table inv_tbl[128];
338
328}; 339};
329 340
330struct beiscsi_session { 341struct beiscsi_session {
@@ -343,6 +354,7 @@ struct beiscsi_conn {
343 unsigned short login_in_progress; 354 unsigned short login_in_progress;
344 struct sgl_handle *plogin_sgl_handle; 355 struct sgl_handle *plogin_sgl_handle;
345 struct beiscsi_session *beiscsi_sess; 356 struct beiscsi_session *beiscsi_sess;
357 struct iscsi_task *task;
346}; 358};
347 359
348/* This structure is used by the chip */ 360/* This structure is used by the chip */
@@ -390,7 +402,7 @@ struct beiscsi_io_task {
390 unsigned int flags; 402 unsigned int flags;
391 unsigned short cid; 403 unsigned short cid;
392 unsigned short header_len; 404 unsigned short header_len;
393 405 itt_t libiscsi_itt;
394 struct be_cmd_bhs *cmd_bhs; 406 struct be_cmd_bhs *cmd_bhs;
395 struct be_bus_address bhs_pa; 407 struct be_bus_address bhs_pa;
396 unsigned short bhs_len; 408 unsigned short bhs_len;
@@ -486,8 +498,6 @@ struct hwi_async_entry {
486 struct list_head data_busy_list; 498 struct list_head data_busy_list;
487}; 499};
488 500
489#define BE_MIN_ASYNC_ENTRIES 128
490
491struct hwi_async_pdu_context { 501struct hwi_async_pdu_context {
492 struct { 502 struct {
493 struct be_bus_address pa_base; 503 struct be_bus_address pa_base;
@@ -528,7 +538,7 @@ struct hwi_async_pdu_context {
528 * This is a varying size list! Do not add anything 538 * This is a varying size list! Do not add anything
529 * after this entry!! 539 * after this entry!!
530 */ 540 */
531 struct hwi_async_entry async_entry[BE_MIN_ASYNC_ENTRIES]; 541 struct hwi_async_entry async_entry[BE2_MAX_SESSIONS * 2];
532}; 542};
533 543
534#define PDUCQE_CODE_MASK 0x0000003F 544#define PDUCQE_CODE_MASK 0x0000003F
@@ -599,7 +609,6 @@ struct amap_cq_db {
599 609
600void beiscsi_process_eq(struct beiscsi_hba *phba); 610void beiscsi_process_eq(struct beiscsi_hba *phba);
601 611
602
603struct iscsi_wrb { 612struct iscsi_wrb {
604 u32 dw[16]; 613 u32 dw[16];
605} __packed; 614} __packed;
@@ -651,11 +660,12 @@ struct amap_iscsi_wrb {
651 660
652} __packed; 661} __packed;
653 662
654struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid, 663struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid);
655 int index);
656void 664void
657free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle); 665free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle);
658 666
667void beiscsi_process_all_cqs(struct work_struct *work);
668
659struct pdu_nop_out { 669struct pdu_nop_out {
660 u32 dw[12]; 670 u32 dw[12];
661}; 671};
@@ -797,7 +807,6 @@ struct hwi_controller {
797 struct be_ring default_pdu_hdr; 807 struct be_ring default_pdu_hdr;
798 struct be_ring default_pdu_data; 808 struct be_ring default_pdu_data;
799 struct hwi_context_memory *phwi_ctxt; 809 struct hwi_context_memory *phwi_ctxt;
800 unsigned short cq_errors[CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN];
801}; 810};
802 811
803enum hwh_type_enum { 812enum hwh_type_enum {
@@ -820,10 +829,12 @@ struct wrb_handle {
820}; 829};
821 830
822struct hwi_context_memory { 831struct hwi_context_memory {
823 struct be_eq_obj be_eq; 832 /* Adaptive interrupt coalescing (AIC) info */
824 struct be_queue_info be_cq; 833 u16 min_eqd; /* in usecs */
825 struct be_queue_info be_mcc_cq; 834 u16 max_eqd; /* in usecs */
826 struct be_queue_info be_mcc; 835 u16 cur_eqd; /* in usecs */
836 struct be_eq_obj be_eq[MAX_CPUS];
837 struct be_queue_info be_cq[MAX_CPUS];
827 838
828 struct be_queue_info be_def_hdrq; 839 struct be_queue_info be_def_hdrq;
829 struct be_queue_info be_def_dataq; 840 struct be_queue_info be_def_dataq;
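The hwi_context_memory change above is the structural core of this patch: the single be_eq/be_cq pair becomes MAX_CPUS-sized arrays, with the interrupt-coalescing bounds hoisted out of the removed per-EQ object into the shared context. A condensed before/after view of the shape; the member types are abbreviated stand-ins and only the fields visible in the hunk are shown:

    #include <stdint.h>

    #define MAX_CPUS 31

    struct be_eq_obj { int q; };        /* stand-in for the real object */
    struct be_queue_info { int id; };   /* stand-in */

    /* before: one EQ and one CQ per adapter */
    struct hwi_context_memory_old {
        struct be_eq_obj be_eq;
        struct be_queue_info be_cq;
    };

    /* after: one EQ/CQ per CPU, plus shared coalescing bounds in usecs */
    struct hwi_context_memory_new {
        uint16_t min_eqd, max_eqd, cur_eqd;
        struct be_eq_obj be_eq[MAX_CPUS];
        struct be_queue_info be_cq[MAX_CPUS];
    };

    int main(void) { return 0; }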
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 12e644fc746e..e641922f20bc 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2009 ServerEngines 2 * Copyright (C) 2005 - 2010 ServerEngines
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -35,7 +35,6 @@ unsigned char mgmt_get_fw_config(struct be_ctrl_info *ctrl,
35 35
36 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 36 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
37 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req)); 37 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));
38
39 status = be_mbox_notify(ctrl); 38 status = be_mbox_notify(ctrl);
40 if (!status) { 39 if (!status) {
41 struct be_fw_cfg *pfw_cfg; 40 struct be_fw_cfg *pfw_cfg;
@@ -49,6 +48,14 @@ unsigned char mgmt_get_fw_config(struct be_ctrl_info *ctrl,
49 pfw_cfg->ulp[0].sq_base; 48 pfw_cfg->ulp[0].sq_base;
50 phba->fw_config.iscsi_cid_count = 49 phba->fw_config.iscsi_cid_count =
51 pfw_cfg->ulp[0].sq_count; 50 pfw_cfg->ulp[0].sq_count;
51 if (phba->fw_config.iscsi_cid_count > (BE2_MAX_SESSIONS / 2)) {
52 SE_DEBUG(DBG_LVL_8,
53 "FW reported MAX CXNS as %d \t"
54 "Max Supported = %d.\n",
55 phba->fw_config.iscsi_cid_count,
56 BE2_MAX_SESSIONS);
57 phba->fw_config.iscsi_cid_count = BE2_MAX_SESSIONS / 2;
58 }
52 } else { 59 } else {
53 shost_printk(KERN_WARNING, phba->shost, 60 shost_printk(KERN_WARNING, phba->shost,
54 "Failed in mgmt_get_fw_config \n"); 61 "Failed in mgmt_get_fw_config \n");
@@ -58,7 +65,8 @@ unsigned char mgmt_get_fw_config(struct be_ctrl_info *ctrl,
58 return status; 65 return status;
59} 66}
60 67
61unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl) 68unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
69 struct beiscsi_hba *phba)
62{ 70{
63 struct be_dma_mem nonemb_cmd; 71 struct be_dma_mem nonemb_cmd;
64 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); 72 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
@@ -77,6 +85,7 @@ unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl)
77 } 85 }
78 nonemb_cmd.size = sizeof(struct be_mgmt_controller_attributes); 86 nonemb_cmd.size = sizeof(struct be_mgmt_controller_attributes);
79 req = nonemb_cmd.va; 87 req = nonemb_cmd.va;
88 memset(req, 0, sizeof(*req));
80 spin_lock(&ctrl->mbox_lock); 89 spin_lock(&ctrl->mbox_lock);
81 memset(wrb, 0, sizeof(*wrb)); 90 memset(wrb, 0, sizeof(*wrb));
82 be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1); 91 be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
@@ -85,7 +94,6 @@ unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl)
85 sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma)); 94 sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma));
86 sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF); 95 sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF);
87 sge->len = cpu_to_le32(nonemb_cmd.size); 96 sge->len = cpu_to_le32(nonemb_cmd.size);
88
89 status = be_mbox_notify(ctrl); 97 status = be_mbox_notify(ctrl);
90 if (!status) { 98 if (!status) {
91 struct be_mgmt_controller_attributes_resp *resp = nonemb_cmd.va; 99 struct be_mgmt_controller_attributes_resp *resp = nonemb_cmd.va;
@@ -95,21 +103,25 @@ unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl)
95 resp->params.hba_attribs.firmware_version_string); 103 resp->params.hba_attribs.firmware_version_string);
96 SE_DEBUG(DBG_LVL_8, 104 SE_DEBUG(DBG_LVL_8,
97 "Developer Build, not performing version check...\n"); 105 "Developer Build, not performing version check...\n");
98 106 phba->fw_config.iscsi_features =
107 resp->params.hba_attribs.iscsi_features;
108 SE_DEBUG(DBG_LVL_8, " phba->fw_config.iscsi_features = %d\n",
109 phba->fw_config.iscsi_features);
99 } else 110 } else
100 SE_DEBUG(DBG_LVL_1, " Failed in mgmt_check_supported_fw\n"); 111 SE_DEBUG(DBG_LVL_1, " Failed in mgmt_check_supported_fw\n");
112 spin_unlock(&ctrl->mbox_lock);
101 if (nonemb_cmd.va) 113 if (nonemb_cmd.va)
102 pci_free_consistent(ctrl->pdev, nonemb_cmd.size, 114 pci_free_consistent(ctrl->pdev, nonemb_cmd.size,
103 nonemb_cmd.va, nonemb_cmd.dma); 115 nonemb_cmd.va, nonemb_cmd.dma);
104 116
105 spin_unlock(&ctrl->mbox_lock);
106 return status; 117 return status;
107} 118}
108 119
120
109unsigned char mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute) 121unsigned char mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute)
110{ 122{
111 struct be_ctrl_info *ctrl = &phba->ctrl; 123 struct be_ctrl_info *ctrl = &phba->ctrl;
112 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); 124 struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
113 struct iscsi_cleanup_req *req = embedded_payload(wrb); 125 struct iscsi_cleanup_req *req = embedded_payload(wrb);
114 int status = 0; 126 int status = 0;
115 127
@@ -124,7 +136,7 @@ unsigned char mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute)
124 req->hdr_ring_id = 0; 136 req->hdr_ring_id = 0;
125 req->data_ring_id = 0; 137 req->data_ring_id = 0;
126 138
127 status = be_mbox_notify(ctrl); 139 status = be_mcc_notify_wait(phba);
128 if (status) 140 if (status)
129 shost_printk(KERN_WARNING, phba->shost, 141 shost_printk(KERN_WARNING, phba->shost,
130 " mgmt_epfw_cleanup , FAILED\n"); 142 " mgmt_epfw_cleanup , FAILED\n");
@@ -133,14 +145,22 @@ unsigned char mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute)
133} 145}
134 146
135unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba, 147unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba,
136 unsigned int icd, unsigned int cid) 148 struct invalidate_command_table *inv_tbl,
149 unsigned int num_invalidate, unsigned int cid)
137{ 150{
138 struct be_dma_mem nonemb_cmd; 151 struct be_dma_mem nonemb_cmd;
139 struct be_ctrl_info *ctrl = &phba->ctrl; 152 struct be_ctrl_info *ctrl = &phba->ctrl;
140 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); 153 struct be_mcc_wrb *wrb;
141 struct be_sge *sge = nonembedded_sgl(wrb); 154 struct be_sge *sge;
142 struct invalidate_commands_params_in *req; 155 struct invalidate_commands_params_in *req;
143 int status = 0; 156 unsigned int i, tag = 0;
157
158 spin_lock(&ctrl->mbox_lock);
159 tag = alloc_mcc_tag(phba);
160 if (!tag) {
161 spin_unlock(&ctrl->mbox_lock);
162 return tag;
163 }
144 164
145 nonemb_cmd.va = pci_alloc_consistent(ctrl->pdev, 165 nonemb_cmd.va = pci_alloc_consistent(ctrl->pdev,
146 sizeof(struct invalidate_commands_params_in), 166 sizeof(struct invalidate_commands_params_in),
@@ -149,12 +169,15 @@ unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba,
149 SE_DEBUG(DBG_LVL_1, 169 SE_DEBUG(DBG_LVL_1,
150 "Failed to allocate memory for" 170 "Failed to allocate memory for"
151 "mgmt_invalidate_icds \n"); 171 "mgmt_invalidate_icds \n");
172 spin_unlock(&ctrl->mbox_lock);
152 return -1; 173 return -1;
153 } 174 }
154 nonemb_cmd.size = sizeof(struct invalidate_commands_params_in); 175 nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
155 req = nonemb_cmd.va; 176 req = nonemb_cmd.va;
156 spin_lock(&ctrl->mbox_lock); 177 memset(req, 0, sizeof(*req));
157 memset(wrb, 0, sizeof(*wrb)); 178 wrb = wrb_from_mccq(phba);
179 sge = nonembedded_sgl(wrb);
180 wrb->tag0 |= tag;
158 181
159 be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1); 182 be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
160 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, 183 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
@@ -162,21 +185,22 @@ unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba,
162 sizeof(*req)); 185 sizeof(*req));
163 req->ref_handle = 0; 186 req->ref_handle = 0;
164 req->cleanup_type = CMD_ISCSI_COMMAND_INVALIDATE; 187 req->cleanup_type = CMD_ISCSI_COMMAND_INVALIDATE;
165 req->icd_count = 0; 188 for (i = 0; i < num_invalidate; i++) {
166 req->table[req->icd_count].icd = icd; 189 req->table[i].icd = inv_tbl->icd;
167 req->table[req->icd_count].cid = cid; 190 req->table[i].cid = inv_tbl->cid;
191 req->icd_count++;
192 inv_tbl++;
193 }
168 sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma)); 194 sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma));
169 sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF); 195 sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF);
170 sge->len = cpu_to_le32(nonemb_cmd.size); 196 sge->len = cpu_to_le32(nonemb_cmd.size);
171 197
172 status = be_mbox_notify(ctrl); 198 be_mcc_notify(phba);
173 if (status)
174 SE_DEBUG(DBG_LVL_1, "ICDS Invalidation Failed\n");
175 spin_unlock(&ctrl->mbox_lock); 199 spin_unlock(&ctrl->mbox_lock);
176 if (nonemb_cmd.va) 200 if (nonemb_cmd.va)
177 pci_free_consistent(ctrl->pdev, nonemb_cmd.size, 201 pci_free_consistent(ctrl->pdev, nonemb_cmd.size,
178 nonemb_cmd.va, nonemb_cmd.dma); 202 nonemb_cmd.va, nonemb_cmd.dma);
179 return status; 203 return tag;
180} 204}
181 205
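mgmt_invalidate_icds() now takes a whole invalidate_command_table array instead of a single icd/cid pair, so one MCC round trip can invalidate many outstanding commands, and it returns the MCC tag rather than blocking on the mailbox. A sketch of how a caller might batch the table up; the struct is copied from the be_main.h hunk in this diff, while the fill logic and values are hypothetical:

    #include <stdio.h>

    struct invalidate_command_table {
        unsigned short icd;
        unsigned short cid;
    };

    int main(void)
    {
        struct invalidate_command_table inv_tbl[128];
        unsigned int num_invalidate = 0;
        unsigned short cid = 3;

        /* pretend these ICDs belong to outstanding commands on this cid */
        for (unsigned short icd = 100; icd < 103; icd++) {
            inv_tbl[num_invalidate].icd = icd;
            inv_tbl[num_invalidate].cid = cid;
            num_invalidate++;
        }

        /* the driver would now call:
         *   tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate, cid);
         * and wait on the returned MCC tag for completion */
        printf("batched %u invalidations for cid %u\n", num_invalidate, cid);
        return 0;
    }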
182unsigned char mgmt_invalidate_connection(struct beiscsi_hba *phba, 206unsigned char mgmt_invalidate_connection(struct beiscsi_hba *phba,
@@ -186,13 +210,19 @@ unsigned char mgmt_invalidate_connection(struct beiscsi_hba *phba,
186 unsigned short savecfg_flag) 210 unsigned short savecfg_flag)
187{ 211{
188 struct be_ctrl_info *ctrl = &phba->ctrl; 212 struct be_ctrl_info *ctrl = &phba->ctrl;
189 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); 213 struct be_mcc_wrb *wrb;
190 struct iscsi_invalidate_connection_params_in *req = 214 struct iscsi_invalidate_connection_params_in *req;
191 embedded_payload(wrb); 215 unsigned int tag = 0;
192 int status = 0;
193 216
194 spin_lock(&ctrl->mbox_lock); 217 spin_lock(&ctrl->mbox_lock);
195 memset(wrb, 0, sizeof(*wrb)); 218 tag = alloc_mcc_tag(phba);
219 if (!tag) {
220 spin_unlock(&ctrl->mbox_lock);
221 return tag;
222 }
223 wrb = wrb_from_mccq(phba);
224 wrb->tag0 |= tag;
225 req = embedded_payload(wrb);
196 226
197 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 227 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
198 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI, 228 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
@@ -205,35 +235,37 @@ unsigned char mgmt_invalidate_connection(struct beiscsi_hba *phba,
205 else 235 else
206 req->cleanup_type = CMD_ISCSI_CONNECTION_INVALIDATE; 236 req->cleanup_type = CMD_ISCSI_CONNECTION_INVALIDATE;
207 req->save_cfg = savecfg_flag; 237 req->save_cfg = savecfg_flag;
208 status = be_mbox_notify(ctrl); 238 be_mcc_notify(phba);
209 if (status)
210 SE_DEBUG(DBG_LVL_1, "Invalidation Failed\n");
211
212 spin_unlock(&ctrl->mbox_lock); 239 spin_unlock(&ctrl->mbox_lock);
213 return status; 240 return tag;
214} 241}
215 242
216unsigned char mgmt_upload_connection(struct beiscsi_hba *phba, 243unsigned char mgmt_upload_connection(struct beiscsi_hba *phba,
217 unsigned short cid, unsigned int upload_flag) 244 unsigned short cid, unsigned int upload_flag)
218{ 245{
219 struct be_ctrl_info *ctrl = &phba->ctrl; 246 struct be_ctrl_info *ctrl = &phba->ctrl;
220 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); 247 struct be_mcc_wrb *wrb;
221 struct tcp_upload_params_in *req = embedded_payload(wrb); 248 struct tcp_upload_params_in *req;
222 int status = 0; 249 unsigned int tag = 0;
223 250
224 spin_lock(&ctrl->mbox_lock); 251 spin_lock(&ctrl->mbox_lock);
225 memset(wrb, 0, sizeof(*wrb)); 252 tag = alloc_mcc_tag(phba);
253 if (!tag) {
254 spin_unlock(&ctrl->mbox_lock);
255 return tag;
256 }
257 wrb = wrb_from_mccq(phba);
258 req = embedded_payload(wrb);
259 wrb->tag0 |= tag;
226 260
227 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 261 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
228 be_cmd_hdr_prepare(&req->hdr, CMD_COMMON_TCP_UPLOAD, 262 be_cmd_hdr_prepare(&req->hdr, CMD_COMMON_TCP_UPLOAD,
229 OPCODE_COMMON_TCP_UPLOAD, sizeof(*req)); 263 OPCODE_COMMON_TCP_UPLOAD, sizeof(*req));
230 req->id = (unsigned short)cid; 264 req->id = (unsigned short)cid;
231 req->upload_type = (unsigned char)upload_flag; 265 req->upload_type = (unsigned char)upload_flag;
232 status = be_mbox_notify(ctrl); 266 be_mcc_notify(phba);
233 if (status)
234 SE_DEBUG(DBG_LVL_1, "mgmt_upload_connection Failed\n");
235 spin_unlock(&ctrl->mbox_lock); 267 spin_unlock(&ctrl->mbox_lock);
236 return status; 268 return tag;
237} 269}
238 270
239int mgmt_open_connection(struct beiscsi_hba *phba, 271int mgmt_open_connection(struct beiscsi_hba *phba,
@@ -245,13 +277,14 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
245 struct sockaddr_in *daddr_in = (struct sockaddr_in *)dst_addr; 277 struct sockaddr_in *daddr_in = (struct sockaddr_in *)dst_addr;
246 struct sockaddr_in6 *daddr_in6 = (struct sockaddr_in6 *)dst_addr; 278 struct sockaddr_in6 *daddr_in6 = (struct sockaddr_in6 *)dst_addr;
247 struct be_ctrl_info *ctrl = &phba->ctrl; 279 struct be_ctrl_info *ctrl = &phba->ctrl;
248 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); 280 struct be_mcc_wrb *wrb;
249 struct tcp_connect_and_offload_in *req = embedded_payload(wrb); 281 struct tcp_connect_and_offload_in *req;
250 unsigned short def_hdr_id; 282 unsigned short def_hdr_id;
251 unsigned short def_data_id; 283 unsigned short def_data_id;
252 struct phys_addr template_address = { 0, 0 }; 284 struct phys_addr template_address = { 0, 0 };
253 struct phys_addr *ptemplate_address; 285 struct phys_addr *ptemplate_address;
254 int status = 0; 286 unsigned int tag = 0;
287 unsigned int i;
255 unsigned short cid = beiscsi_ep->ep_cid; 288 unsigned short cid = beiscsi_ep->ep_cid;
256 289
257 phwi_ctrlr = phba->phwi_ctrlr; 290 phwi_ctrlr = phba->phwi_ctrlr;
@@ -262,7 +295,14 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
262 ptemplate_address = &template_address; 295 ptemplate_address = &template_address;
263 ISCSI_GET_PDU_TEMPLATE_ADDRESS(phba, ptemplate_address); 296 ISCSI_GET_PDU_TEMPLATE_ADDRESS(phba, ptemplate_address);
264 spin_lock(&ctrl->mbox_lock); 297 spin_lock(&ctrl->mbox_lock);
265 memset(wrb, 0, sizeof(*wrb)); 298 tag = alloc_mcc_tag(phba);
299 if (!tag) {
300 spin_unlock(&ctrl->mbox_lock);
301 return tag;
302 }
303 wrb = wrb_from_mccq(phba);
304 req = embedded_payload(wrb);
305 wrb->tag0 |= tag;
266 306
267 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 307 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
268 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, 308 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
@@ -296,26 +336,47 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
296 336
297 } 337 }
298 req->cid = cid; 338 req->cid = cid;
299 req->cq_id = phwi_context->be_cq.id; 339 i = phba->nxt_cqid++;
340 if (phba->nxt_cqid == phba->num_cpus)
341 phba->nxt_cqid = 0;
342 req->cq_id = phwi_context->be_cq[i].id;
343 SE_DEBUG(DBG_LVL_8, "i=%d cq_id=%d \n", i, req->cq_id);
300 req->defq_id = def_hdr_id; 344 req->defq_id = def_hdr_id;
301 req->hdr_ring_id = def_hdr_id; 345 req->hdr_ring_id = def_hdr_id;
302 req->data_ring_id = def_data_id; 346 req->data_ring_id = def_data_id;
303 req->do_offload = 1; 347 req->do_offload = 1;
304 req->dataout_template_pa.lo = ptemplate_address->lo; 348 req->dataout_template_pa.lo = ptemplate_address->lo;
305 req->dataout_template_pa.hi = ptemplate_address->hi; 349 req->dataout_template_pa.hi = ptemplate_address->hi;
306 status = be_mbox_notify(ctrl); 350 be_mcc_notify(phba);
307 if (!status) {
308 struct iscsi_endpoint *ep;
309 struct tcp_connect_and_offload_out *ptcpcnct_out =
310 embedded_payload(wrb);
311
312 ep = phba->ep_array[ptcpcnct_out->cid];
313 beiscsi_ep = ep->dd_data;
314 beiscsi_ep->fw_handle = 0;
315 beiscsi_ep->cid_vld = 1;
316 SE_DEBUG(DBG_LVL_8, "mgmt_open_connection Success\n");
317 } else
318 SE_DEBUG(DBG_LVL_1, "mgmt_open_connection Failed\n");
319 spin_unlock(&ctrl->mbox_lock); 351 spin_unlock(&ctrl->mbox_lock);
320 return status; 352 return tag;
321} 353}
354
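Inside mgmt_open_connection() above, nxt_cqid round-robins each new connection across the per-CPU completion queues so that completion traffic, and hence interrupt load, spreads over the event queues. The same rotation in isolation:

    #include <stdio.h>

    int main(void)
    {
        unsigned int num_cpus = 4, nxt_cqid = 0;

        for (int conn = 0; conn < 6; conn++) {
            unsigned int i = nxt_cqid++;        /* pick, then advance */
            if (nxt_cqid == num_cpus)           /* wrap, as in the driver */
                nxt_cqid = 0;
            printf("connection %d -> cq[%u]\n", conn, i);
        }
        return 0;
    }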
355unsigned int be_cmd_get_mac_addr(struct beiscsi_hba *phba)
356{
357 struct be_ctrl_info *ctrl = &phba->ctrl;
358 struct be_mcc_wrb *wrb;
359 struct be_cmd_req_get_mac_addr *req;
360 unsigned int tag = 0;
361
362 SE_DEBUG(DBG_LVL_8, "In be_cmd_get_mac_addr\n");
363 spin_lock(&ctrl->mbox_lock);
364 tag = alloc_mcc_tag(phba);
365 if (!tag) {
366 spin_unlock(&ctrl->mbox_lock);
367 return tag;
368 }
369
370 wrb = wrb_from_mccq(phba);
371 req = embedded_payload(wrb);
372 wrb->tag0 |= tag;
373 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
374 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
375 OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG,
376 sizeof(*req));
377
378 be_mcc_notify(phba);
379 spin_unlock(&ctrl->mbox_lock);
380 return tag;
381}
382
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
index 00e816ee8070..3d316b82feb1 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.h
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2009 ServerEngines 2 * Copyright (C) 2005 - 2010 ServerEngines
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -94,7 +94,8 @@ unsigned char mgmt_upload_connection(struct beiscsi_hba *phba,
94 unsigned short cid, 94 unsigned short cid,
95 unsigned int upload_flag); 95 unsigned int upload_flag);
96unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba, 96unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba,
97 unsigned int icd, unsigned int cid); 97 struct invalidate_command_table *inv_tbl,
98 unsigned int num_invalidate, unsigned int cid);
98 99
99struct iscsi_invalidate_connection_params_in { 100struct iscsi_invalidate_connection_params_in {
100 struct be_cmd_req_hdr hdr; 101 struct be_cmd_req_hdr hdr;
@@ -116,11 +117,6 @@ union iscsi_invalidate_connection_params {
116 struct iscsi_invalidate_connection_params_out response; 117 struct iscsi_invalidate_connection_params_out response;
117} __packed; 118} __packed;
118 119
119struct invalidate_command_table {
120 unsigned short icd;
121 unsigned short cid;
122} __packed;
123
124struct invalidate_commands_params_in { 120struct invalidate_commands_params_in {
125 struct be_cmd_req_hdr hdr; 121 struct be_cmd_req_hdr hdr;
126 unsigned int ref_handle; 122 unsigned int ref_handle;
@@ -175,7 +171,9 @@ struct mgmt_hba_attributes {
175 u8 phy_port; 171 u8 phy_port;
176 u32 firmware_post_status; 172 u32 firmware_post_status;
177 u32 hba_mtu[8]; 173 u32 hba_mtu[8];
178 u32 future_u32[4]; 174 u8 iscsi_features;
175 u8 future_u8[3];
176 u32 future_u32[3];
179} __packed; 177} __packed;
180 178
181struct mgmt_controller_attributes { 179struct mgmt_controller_attributes {
@@ -229,6 +227,7 @@ struct beiscsi_endpoint {
229 struct beiscsi_hba *phba; 227 struct beiscsi_hba *phba;
230 struct beiscsi_sess *sess; 228 struct beiscsi_sess *sess;
231 struct beiscsi_conn *conn; 229 struct beiscsi_conn *conn;
230 struct iscsi_endpoint *openiscsi_ep;
232 unsigned short ip_type; 231 unsigned short ip_type;
233 char dst6_addr[ISCSI_ADDRESS_BUF_LEN]; 232 char dst6_addr[ISCSI_ADDRESS_BUF_LEN];
234 unsigned long dst_addr; 233 unsigned long dst_addr;
@@ -246,4 +245,5 @@ unsigned char mgmt_invalidate_connection(struct beiscsi_hba *phba,
246 unsigned short cid, 245 unsigned short cid,
247 unsigned short issue_reset, 246 unsigned short issue_reset,
248 unsigned short savecfg_flag); 247 unsigned short savecfg_flag);
248
249#endif 249#endif
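Two points in the be_mgmt.h hunks above: mgmt_invalidate_icds() now takes an invalidate_command_table array plus a count, so several ICD/CID pairs can be invalidated in one firmware command instead of one call per ICD (the table definition moves out of this header so callers elsewhere can build it); and replacing u32 future_u32[4] with an iscsi_features byte, three reserved bytes, and u32 future_u32[3] keeps struct mgmt_hba_attributes the same size, preserving the firmware layout. A hedged caller sketch for the batched interface, with an assumed array bound:

    /* Hypothetical batch-invalidate caller; the bound of 32 is an
     * assumption for illustration, not a driver constant. */
    struct invalidate_command_table inv_tbl[32];
    unsigned int num_invalidate = 0;

    inv_tbl[num_invalidate].cid = cid;
    inv_tbl[num_invalidate].icd = icd;
    num_invalidate++;
    /* ... collect further icd/cid pairs, then one firmware round trip: */
    mgmt_invalidate_icds(phba, inv_tbl, num_invalidate, cid);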
diff --git a/drivers/scsi/bfa/Makefile b/drivers/scsi/bfa/Makefile
index 1d6009490d1c..17e06cae71b2 100644
--- a/drivers/scsi/bfa/Makefile
+++ b/drivers/scsi/bfa/Makefile
@@ -2,14 +2,14 @@ obj-$(CONFIG_SCSI_BFA_FC) := bfa.o
2 2
3bfa-y := bfad.o bfad_intr.o bfad_os.o bfad_im.o bfad_attr.o bfad_fwimg.o 3bfa-y := bfad.o bfad_intr.o bfad_os.o bfad_im.o bfad_attr.o bfad_fwimg.o
4 4
5bfa-y += bfa_core.o bfa_ioc.o bfa_iocfc.o bfa_fcxp.o bfa_lps.o 5bfa-y += bfa_core.o bfa_ioc.o bfa_ioc_ct.o bfa_ioc_cb.o bfa_iocfc.o bfa_fcxp.o
6bfa-y += bfa_hw_cb.o bfa_hw_ct.o bfa_intr.o bfa_timer.o bfa_rport.o 6bfa-y += bfa_lps.o bfa_hw_cb.o bfa_hw_ct.o bfa_intr.o bfa_timer.o bfa_rport.o
7bfa-y += bfa_fcport.o bfa_port.o bfa_uf.o bfa_sgpg.o bfa_module.o bfa_ioim.o 7bfa-y += bfa_fcport.o bfa_port.o bfa_uf.o bfa_sgpg.o bfa_module.o bfa_ioim.o
8bfa-y += bfa_itnim.o bfa_fcpim.o bfa_tskim.o bfa_log.o bfa_log_module.o 8bfa-y += bfa_itnim.o bfa_fcpim.o bfa_tskim.o bfa_log.o bfa_log_module.o
9bfa-y += bfa_csdebug.o bfa_sm.o plog.o 9bfa-y += bfa_csdebug.o bfa_sm.o plog.o
10 10
11bfa-y += fcbuild.o fabric.o fcpim.o vfapi.o fcptm.o bfa_fcs.o bfa_fcs_port.o 11bfa-y += fcbuild.o fabric.o fcpim.o vfapi.o fcptm.o bfa_fcs.o bfa_fcs_port.o
12bfa-y += bfa_fcs_uf.o bfa_fcs_lport.o fab.o fdmi.o ms.o ns.o scn.o loop.o 12bfa-y += bfa_fcs_uf.o bfa_fcs_lport.o fab.o fdmi.o ms.o ns.o scn.o loop.o
13bfa-y += lport_api.o n2n.o rport.o rport_api.o rport_ftrs.o vport.o 13bfa-y += lport_api.o n2n.o rport.o rport_api.o rport_ftrs.o vport.o
14 14
15ccflags-y := -I$(obj) -I$(obj)/include -I$(obj)/include/cna 15ccflags-y := -I$(obj) -I$(obj)/include -I$(obj)/include/cna -DBFA_PERF_BUILD
diff --git a/drivers/scsi/bfa/bfa_cb_ioim_macros.h b/drivers/scsi/bfa/bfa_cb_ioim_macros.h
index 0050c838c358..961fe439daad 100644
--- a/drivers/scsi/bfa/bfa_cb_ioim_macros.h
+++ b/drivers/scsi/bfa/bfa_cb_ioim_macros.h
@@ -51,7 +51,7 @@ bfad_int_to_lun(u32 luno)
51 lun.bfa_lun = 0; 51 lun.bfa_lun = 0;
52 lun.scsi_lun[0] = bfa_os_htons(luno); 52 lun.scsi_lun[0] = bfa_os_htons(luno);
53 53
54 return (lun.bfa_lun); 54 return lun.bfa_lun;
55} 55}
56 56
57/** 57/**
@@ -68,7 +68,7 @@ bfa_cb_ioim_get_cdb(struct bfad_ioim_s *dio)
68{ 68{
69 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio; 69 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
70 70
71 return ((u8 *) cmnd->cmnd); 71 return (u8 *) cmnd->cmnd;
72} 72}
73 73
74/** 74/**
@@ -97,7 +97,7 @@ bfa_cb_ioim_get_size(struct bfad_ioim_s *dio)
97{ 97{
98 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio; 98 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
99 99
100 return (scsi_bufflen(cmnd)); 100 return scsi_bufflen(cmnd);
101} 101}
102 102
103/** 103/**
@@ -129,7 +129,7 @@ bfa_cb_ioim_get_sgaddr(struct bfad_ioim_s *dio, int sgeid)
129 sge = (struct scatterlist *)scsi_sglist(cmnd) + sgeid; 129 sge = (struct scatterlist *)scsi_sglist(cmnd) + sgeid;
130 addr = (u64) sg_dma_address(sge); 130 addr = (u64) sg_dma_address(sge);
131 131
132 return (*(union bfi_addr_u *) &addr); 132 return *((union bfi_addr_u *) &addr);
133} 133}
134 134
135static inline u32 135static inline u32
@@ -197,7 +197,7 @@ bfa_cb_ioim_get_cdblen(struct bfad_ioim_s *dio)
197{ 197{
198 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio; 198 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
199 199
200 return (cmnd->cmd_len); 200 return cmnd->cmd_len;
201} 201}
202 202
203 203
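This file (and bfa_cee.c, bfa_csdebug.c, and bfa_fcpim.c below) gets the same mechanical cleanup: the parentheses around return values are dropped. return is a statement, not a function, and checkpatch.pl flags the parenthesized form:

    return (cmnd->cmd_len);   /* checkpatch: "return is not a function" */
    return cmnd->cmd_len;     /* preferred kernel style */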
diff --git a/drivers/scsi/bfa/bfa_cee.c b/drivers/scsi/bfa/bfa_cee.c
index 7a959c34e789..2b917792c6bc 100644
--- a/drivers/scsi/bfa/bfa_cee.c
+++ b/drivers/scsi/bfa/bfa_cee.c
@@ -228,7 +228,7 @@ bfa_cee_reset_stats_isr(struct bfa_cee_s *cee, bfa_status_t status)
228u32 228u32
229bfa_cee_meminfo(void) 229bfa_cee_meminfo(void)
230{ 230{
231 return (bfa_cee_attr_meminfo() + bfa_cee_stats_meminfo()); 231 return bfa_cee_attr_meminfo() + bfa_cee_stats_meminfo();
232} 232}
233 233
234/** 234/**
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index 44e2d1155c51..0c08e185a766 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -385,6 +385,15 @@ bfa_debug_fwsave(struct bfa_s *bfa, void *trcdata, int *trclen)
385} 385}
386 386
387/** 387/**
388 * Clear the saved firmware trace information of an IOC.
389 */
390void
391bfa_debug_fwsave_clear(struct bfa_s *bfa)
392{
393 bfa_ioc_debug_fwsave_clear(&bfa->ioc);
394}
395
396/**
388 * Fetch firmware trace data. 397 * Fetch firmware trace data.
389 * 398 *
390 * @param[in] bfa BFA instance 399 * @param[in] bfa BFA instance
@@ -399,4 +408,14 @@ bfa_debug_fwtrc(struct bfa_s *bfa, void *trcdata, int *trclen)
399{ 408{
400 return bfa_ioc_debug_fwtrc(&bfa->ioc, trcdata, trclen); 409 return bfa_ioc_debug_fwtrc(&bfa->ioc, trcdata, trclen);
401} 410}
411
412/**
413 * Reset hw semaphore & usage cnt regs and initialize.
414 */
415void
416bfa_chip_reset(struct bfa_s *bfa)
417{
418 bfa_ioc_ownership_reset(&bfa->ioc);
419 bfa_ioc_pll_init(&bfa->ioc);
420}
402#endif 421#endif
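bfa_debug_fwsave_clear() and bfa_chip_reset() are thin wrappers that export IOC-layer operations to the rest of the driver; resetting the hardware semaphore and usage-count registers and re-running PLL init is the kind of step a host-side reset path would take before re-enabling the IOC. A speculative sketch of such a caller (the function name is made up; this diff only adds the wrappers):

    /* Illustrative recovery path only; bfad_reset_adapter() is not a
     * function in this driver. */
    static void bfad_reset_adapter(struct bfa_s *bfa)
    {
            bfa_debug_fwsave_clear(bfa);  /* drop stale saved fw trace */
            bfa_chip_reset(bfa);          /* reset sem/usage regs, PLL init */
    }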
diff --git a/drivers/scsi/bfa/bfa_csdebug.c b/drivers/scsi/bfa/bfa_csdebug.c
index 1b71d349451a..caeb1143a4e6 100644
--- a/drivers/scsi/bfa/bfa_csdebug.c
+++ b/drivers/scsi/bfa/bfa_csdebug.c
@@ -47,12 +47,12 @@ bfa_q_is_on_q_func(struct list_head *q, struct list_head *qe)
47 tqe = bfa_q_next(q); 47 tqe = bfa_q_next(q);
48 while (tqe != q) { 48 while (tqe != q) {
49 if (tqe == qe) 49 if (tqe == qe)
50 return (1); 50 return 1;
51 tqe = bfa_q_next(tqe); 51 tqe = bfa_q_next(tqe);
52 if (tqe == NULL) 52 if (tqe == NULL)
53 break; 53 break;
54 } 54 }
55 return (0); 55 return 0;
56} 56}
57 57
58 58
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index 401babe3494e..790c945aeae6 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -131,7 +131,7 @@ bfa_fcpim_path_tov_get(struct bfa_s *bfa)
131{ 131{
132 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); 132 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
133 133
134 return (fcpim->path_tov / 1000); 134 return fcpim->path_tov / 1000;
135} 135}
136 136
137bfa_status_t 137bfa_status_t
@@ -169,7 +169,7 @@ bfa_fcpim_qdepth_get(struct bfa_s *bfa)
169{ 169{
170 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); 170 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
171 171
172 return (fcpim->q_depth); 172 return fcpim->q_depth;
173} 173}
174 174
175 175
diff --git a/drivers/scsi/bfa/bfa_fcpim_priv.h b/drivers/scsi/bfa/bfa_fcpim_priv.h
index 153206cfb37a..5cf418460f75 100644
--- a/drivers/scsi/bfa/bfa_fcpim_priv.h
+++ b/drivers/scsi/bfa/bfa_fcpim_priv.h
@@ -35,7 +35,7 @@
35#define BFA_FCPIM_PATHTOV_MAX (90 * 1000) /* in millisecs */ 35#define BFA_FCPIM_PATHTOV_MAX (90 * 1000) /* in millisecs */
36 36
37#define bfa_fcpim_stats(__fcpim, __stats) \ 37#define bfa_fcpim_stats(__fcpim, __stats) \
38 (__fcpim)->stats.__stats ++ 38 ((__fcpim)->stats.__stats++)
39 39
40struct bfa_fcpim_mod_s { 40struct bfa_fcpim_mod_s {
41 struct bfa_s *bfa; 41 struct bfa_s *bfa;
@@ -143,7 +143,7 @@ struct bfa_itnim_s {
143 struct bfa_itnim_hal_stats_s stats; 143 struct bfa_itnim_hal_stats_s stats;
144}; 144};
145 145
146#define bfa_itnim_is_online(_itnim) (_itnim)->is_online 146#define bfa_itnim_is_online(_itnim) ((_itnim)->is_online)
147#define BFA_FCPIM_MOD(_hal) (&(_hal)->modules.fcpim_mod) 147#define BFA_FCPIM_MOD(_hal) (&(_hal)->modules.fcpim_mod)
148#define BFA_IOIM_FROM_TAG(_fcpim, _iotag) \ 148#define BFA_IOIM_FROM_TAG(_fcpim, _iotag) \
149 (&fcpim->ioim_arr[_iotag]) 149 (&fcpim->ioim_arr[_iotag])
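The two macro fixes above are standard hygiene: a macro whose body is an expression should be fully parenthesized, so operator precedence at the expansion site cannot split it. A classic illustration (SPAN is made up):

    #define SPAN(a, b)   (b) - (a)       /* unparenthesized body */
    len = SPAN(lo, hi) / 2;              /* expands to (hi) - (lo) / 2 */

    #define SPAN(a, b)   ((b) - (a))     /* safe: ((hi) - (lo)) / 2 */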
diff --git a/drivers/scsi/bfa/bfa_fcport.c b/drivers/scsi/bfa/bfa_fcport.c
index 992435987deb..c589488db0c1 100644
--- a/drivers/scsi/bfa/bfa_fcport.c
+++ b/drivers/scsi/bfa/bfa_fcport.c
@@ -23,40 +23,33 @@
23#include <cs/bfa_plog.h> 23#include <cs/bfa_plog.h>
24#include <aen/bfa_aen_port.h> 24#include <aen/bfa_aen_port.h>
25 25
26BFA_TRC_FILE(HAL, PPORT); 26BFA_TRC_FILE(HAL, FCPORT);
27BFA_MODULE(pport); 27BFA_MODULE(fcport);
28
29#define bfa_pport_callback(__pport, __event) do { \
30 if ((__pport)->bfa->fcs) { \
31 (__pport)->event_cbfn((__pport)->event_cbarg, (__event)); \
32 } else { \
33 (__pport)->hcb_event = (__event); \
34 bfa_cb_queue((__pport)->bfa, &(__pport)->hcb_qe, \
35 __bfa_cb_port_event, (__pport)); \
36 } \
37} while (0)
38 28
39/* 29/*
40 * The port is considered disabled if corresponding physical port or IOC are 30 * The port is considered disabled if corresponding physical port or IOC are
41 * disabled explicitly 31 * disabled explicitly
42 */ 32 */
43#define BFA_PORT_IS_DISABLED(bfa) \ 33#define BFA_PORT_IS_DISABLED(bfa) \
44 ((bfa_pport_is_disabled(bfa) == BFA_TRUE) || \ 34 ((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
45 (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE)) 35 (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
46 36
47/* 37/*
48 * forward declarations 38 * forward declarations
49 */ 39 */
50static bfa_boolean_t bfa_pport_send_enable(struct bfa_pport_s *port); 40static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
51static bfa_boolean_t bfa_pport_send_disable(struct bfa_pport_s *port); 41static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport);
52static void bfa_pport_update_linkinfo(struct bfa_pport_s *pport); 42static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport);
53static void bfa_pport_reset_linkinfo(struct bfa_pport_s *pport); 43static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport);
54static void bfa_pport_set_wwns(struct bfa_pport_s *port); 44static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport);
55static void __bfa_cb_port_event(void *cbarg, bfa_boolean_t complete); 45static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete);
56static void __bfa_cb_port_stats(void *cbarg, bfa_boolean_t complete); 46static void bfa_fcport_callback(struct bfa_fcport_s *fcport,
57static void __bfa_cb_port_stats_clr(void *cbarg, bfa_boolean_t complete); 47 enum bfa_pport_linkstate event);
58static void bfa_port_stats_timeout(void *cbarg); 48static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln,
59static void bfa_port_stats_clr_timeout(void *cbarg); 49 enum bfa_pport_linkstate event);
50static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete);
51static void bfa_fcport_stats_get_timeout(void *cbarg);
52static void bfa_fcport_stats_clr_timeout(void *cbarg);
60 53
61/** 54/**
62 * bfa_pport_private 55 * bfa_pport_private
@@ -65,111 +58,114 @@ static void bfa_port_stats_clr_timeout(void *cbarg);
65/** 58/**
66 * BFA port state machine events 59 * BFA port state machine events
67 */ 60 */
68enum bfa_pport_sm_event { 61enum bfa_fcport_sm_event {
69 BFA_PPORT_SM_START = 1, /* start port state machine */ 62 BFA_FCPORT_SM_START = 1, /* start port state machine */
70 BFA_PPORT_SM_STOP = 2, /* stop port state machine */ 63 BFA_FCPORT_SM_STOP = 2, /* stop port state machine */
71 BFA_PPORT_SM_ENABLE = 3, /* enable port */ 64 BFA_FCPORT_SM_ENABLE = 3, /* enable port */
72 BFA_PPORT_SM_DISABLE = 4, /* disable port state machine */ 65 BFA_FCPORT_SM_DISABLE = 4, /* disable port state machine */
73 BFA_PPORT_SM_FWRSP = 5, /* firmware enable/disable rsp */ 66 BFA_FCPORT_SM_FWRSP = 5, /* firmware enable/disable rsp */
74 BFA_PPORT_SM_LINKUP = 6, /* firmware linkup event */ 67 BFA_FCPORT_SM_LINKUP = 6, /* firmware linkup event */
 75 	BFA_PPORT_SM_LINKDOWN = 7, /* firmware linkdown event */ 68 	BFA_FCPORT_SM_LINKDOWN = 7, /* firmware linkdown event */
76 BFA_PPORT_SM_QRESUME = 8, /* CQ space available */ 69 BFA_FCPORT_SM_QRESUME = 8, /* CQ space available */
77 BFA_PPORT_SM_HWFAIL = 9, /* IOC h/w failure */ 70 BFA_FCPORT_SM_HWFAIL = 9, /* IOC h/w failure */
78}; 71};
79 72
80static void bfa_pport_sm_uninit(struct bfa_pport_s *pport, 73/**
81 enum bfa_pport_sm_event event); 74 * BFA port link notification state machine events
82static void bfa_pport_sm_enabling_qwait(struct bfa_pport_s *pport, 75 */
83 enum bfa_pport_sm_event event); 76
84static void bfa_pport_sm_enabling(struct bfa_pport_s *pport, 77enum bfa_fcport_ln_sm_event {
85 enum bfa_pport_sm_event event); 78 BFA_FCPORT_LN_SM_LINKUP = 1, /* linkup event */
86static void bfa_pport_sm_linkdown(struct bfa_pport_s *pport, 79 BFA_FCPORT_LN_SM_LINKDOWN = 2, /* linkdown event */
87 enum bfa_pport_sm_event event); 80 BFA_FCPORT_LN_SM_NOTIFICATION = 3 /* done notification */
88static void bfa_pport_sm_linkup(struct bfa_pport_s *pport, 81};
89 enum bfa_pport_sm_event event); 82
90static void bfa_pport_sm_disabling(struct bfa_pport_s *pport, 83static void bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
91 enum bfa_pport_sm_event event); 84 enum bfa_fcport_sm_event event);
92static void bfa_pport_sm_disabling_qwait(struct bfa_pport_s *pport, 85static void bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
93 enum bfa_pport_sm_event event); 86 enum bfa_fcport_sm_event event);
94static void bfa_pport_sm_disabled(struct bfa_pport_s *pport, 87static void bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
95 enum bfa_pport_sm_event event); 88 enum bfa_fcport_sm_event event);
96static void bfa_pport_sm_stopped(struct bfa_pport_s *pport, 89static void bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
97 enum bfa_pport_sm_event event); 90 enum bfa_fcport_sm_event event);
98static void bfa_pport_sm_iocdown(struct bfa_pport_s *pport, 91static void bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
99 enum bfa_pport_sm_event event); 92 enum bfa_fcport_sm_event event);
100static void bfa_pport_sm_iocfail(struct bfa_pport_s *pport, 93static void bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
101 enum bfa_pport_sm_event event); 94 enum bfa_fcport_sm_event event);
95static void bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
96 enum bfa_fcport_sm_event event);
97static void bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
98 enum bfa_fcport_sm_event event);
99static void bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
100 enum bfa_fcport_sm_event event);
101static void bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
102 enum bfa_fcport_sm_event event);
103static void bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
104 enum bfa_fcport_sm_event event);
105
106static void bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
107 enum bfa_fcport_ln_sm_event event);
108static void bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
109 enum bfa_fcport_ln_sm_event event);
110static void bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
111 enum bfa_fcport_ln_sm_event event);
112static void bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
113 enum bfa_fcport_ln_sm_event event);
114static void bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
115 enum bfa_fcport_ln_sm_event event);
116static void bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
117 enum bfa_fcport_ln_sm_event event);
118static void bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
119 enum bfa_fcport_ln_sm_event event);
102 120
103static struct bfa_sm_table_s hal_pport_sm_table[] = { 121static struct bfa_sm_table_s hal_pport_sm_table[] = {
104 {BFA_SM(bfa_pport_sm_uninit), BFA_PPORT_ST_UNINIT}, 122 {BFA_SM(bfa_fcport_sm_uninit), BFA_PPORT_ST_UNINIT},
105 {BFA_SM(bfa_pport_sm_enabling_qwait), BFA_PPORT_ST_ENABLING_QWAIT}, 123 {BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PPORT_ST_ENABLING_QWAIT},
106 {BFA_SM(bfa_pport_sm_enabling), BFA_PPORT_ST_ENABLING}, 124 {BFA_SM(bfa_fcport_sm_enabling), BFA_PPORT_ST_ENABLING},
107 {BFA_SM(bfa_pport_sm_linkdown), BFA_PPORT_ST_LINKDOWN}, 125 {BFA_SM(bfa_fcport_sm_linkdown), BFA_PPORT_ST_LINKDOWN},
108 {BFA_SM(bfa_pport_sm_linkup), BFA_PPORT_ST_LINKUP}, 126 {BFA_SM(bfa_fcport_sm_linkup), BFA_PPORT_ST_LINKUP},
109 {BFA_SM(bfa_pport_sm_disabling_qwait), 127 {BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PPORT_ST_DISABLING_QWAIT},
110 BFA_PPORT_ST_DISABLING_QWAIT}, 128 {BFA_SM(bfa_fcport_sm_disabling), BFA_PPORT_ST_DISABLING},
111 {BFA_SM(bfa_pport_sm_disabling), BFA_PPORT_ST_DISABLING}, 129 {BFA_SM(bfa_fcport_sm_disabled), BFA_PPORT_ST_DISABLED},
112 {BFA_SM(bfa_pport_sm_disabled), BFA_PPORT_ST_DISABLED}, 130 {BFA_SM(bfa_fcport_sm_stopped), BFA_PPORT_ST_STOPPED},
113 {BFA_SM(bfa_pport_sm_stopped), BFA_PPORT_ST_STOPPED}, 131 {BFA_SM(bfa_fcport_sm_iocdown), BFA_PPORT_ST_IOCDOWN},
114 {BFA_SM(bfa_pport_sm_iocdown), BFA_PPORT_ST_IOCDOWN}, 132 {BFA_SM(bfa_fcport_sm_iocfail), BFA_PPORT_ST_IOCDOWN},
115 {BFA_SM(bfa_pport_sm_iocfail), BFA_PPORT_ST_IOCDOWN},
116}; 133};
117 134
118static void 135static void
119bfa_pport_aen_post(struct bfa_pport_s *pport, enum bfa_port_aen_event event) 136bfa_fcport_aen_post(struct bfa_fcport_s *fcport, enum bfa_port_aen_event event)
120{ 137{
121 union bfa_aen_data_u aen_data; 138 union bfa_aen_data_u aen_data;
122 struct bfa_log_mod_s *logmod = pport->bfa->logm; 139 struct bfa_log_mod_s *logmod = fcport->bfa->logm;
123 wwn_t pwwn = pport->pwwn; 140 wwn_t pwwn = fcport->pwwn;
124 char pwwn_ptr[BFA_STRING_32]; 141 char pwwn_ptr[BFA_STRING_32];
125 struct bfa_ioc_attr_s ioc_attr;
126 142
143 memset(&aen_data, 0, sizeof(aen_data));
127 wwn2str(pwwn_ptr, pwwn); 144 wwn2str(pwwn_ptr, pwwn);
128 switch (event) { 145 bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, event), pwwn_ptr);
129 case BFA_PORT_AEN_ONLINE:
130 bfa_log(logmod, BFA_AEN_PORT_ONLINE, pwwn_ptr);
131 break;
132 case BFA_PORT_AEN_OFFLINE:
133 bfa_log(logmod, BFA_AEN_PORT_OFFLINE, pwwn_ptr);
134 break;
135 case BFA_PORT_AEN_ENABLE:
136 bfa_log(logmod, BFA_AEN_PORT_ENABLE, pwwn_ptr);
137 break;
138 case BFA_PORT_AEN_DISABLE:
139 bfa_log(logmod, BFA_AEN_PORT_DISABLE, pwwn_ptr);
140 break;
141 case BFA_PORT_AEN_DISCONNECT:
142 bfa_log(logmod, BFA_AEN_PORT_DISCONNECT, pwwn_ptr);
143 break;
144 case BFA_PORT_AEN_QOS_NEG:
145 bfa_log(logmod, BFA_AEN_PORT_QOS_NEG, pwwn_ptr);
146 break;
147 default:
148 break;
149 }
150 146
151 bfa_ioc_get_attr(&pport->bfa->ioc, &ioc_attr); 147 aen_data.port.ioc_type = bfa_get_type(fcport->bfa);
152 aen_data.port.ioc_type = ioc_attr.ioc_type;
153 aen_data.port.pwwn = pwwn; 148 aen_data.port.pwwn = pwwn;
154} 149}
155 150
156static void 151static void
157bfa_pport_sm_uninit(struct bfa_pport_s *pport, enum bfa_pport_sm_event event) 152bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
153 enum bfa_fcport_sm_event event)
158{ 154{
159 bfa_trc(pport->bfa, event); 155 bfa_trc(fcport->bfa, event);
160 156
161 switch (event) { 157 switch (event) {
162 case BFA_PPORT_SM_START: 158 case BFA_FCPORT_SM_START:
163 /** 159 /**
164 * Start event after IOC is configured and BFA is started. 160 * Start event after IOC is configured and BFA is started.
165 */ 161 */
166 if (bfa_pport_send_enable(pport)) 162 if (bfa_fcport_send_enable(fcport))
167 bfa_sm_set_state(pport, bfa_pport_sm_enabling); 163 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
168 else 164 else
169 bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait); 165 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait);
170 break; 166 break;
171 167
172 case BFA_PPORT_SM_ENABLE: 168 case BFA_FCPORT_SM_ENABLE:
173 /** 169 /**
174 * Port is persistently configured to be in enabled state. Do 170 * Port is persistently configured to be in enabled state. Do
175 * not change state. Port enabling is done when START event is 171 * not change state. Port enabling is done when START event is
@@ -177,392 +173,412 @@ bfa_pport_sm_uninit(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
177 */ 173 */
178 break; 174 break;
179 175
180 case BFA_PPORT_SM_DISABLE: 176 case BFA_FCPORT_SM_DISABLE:
181 /** 177 /**
182 * If a port is persistently configured to be disabled, the 178 * If a port is persistently configured to be disabled, the
 183 	 * first event will be a port disable request. 179 	 * first event will be a port disable request.
184 */ 180 */
185 bfa_sm_set_state(pport, bfa_pport_sm_disabled); 181 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
186 break; 182 break;
187 183
188 case BFA_PPORT_SM_HWFAIL: 184 case BFA_FCPORT_SM_HWFAIL:
189 bfa_sm_set_state(pport, bfa_pport_sm_iocdown); 185 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
190 break; 186 break;
191 187
192 default: 188 default:
193 bfa_sm_fault(pport->bfa, event); 189 bfa_sm_fault(fcport->bfa, event);
194 } 190 }
195} 191}
196 192
197static void 193static void
198bfa_pport_sm_enabling_qwait(struct bfa_pport_s *pport, 194bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
199 enum bfa_pport_sm_event event) 195 enum bfa_fcport_sm_event event)
200{ 196{
201 bfa_trc(pport->bfa, event); 197 bfa_trc(fcport->bfa, event);
202 198
203 switch (event) { 199 switch (event) {
204 case BFA_PPORT_SM_QRESUME: 200 case BFA_FCPORT_SM_QRESUME:
205 bfa_sm_set_state(pport, bfa_pport_sm_enabling); 201 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
206 bfa_pport_send_enable(pport); 202 bfa_fcport_send_enable(fcport);
207 break; 203 break;
208 204
209 case BFA_PPORT_SM_STOP: 205 case BFA_FCPORT_SM_STOP:
210 bfa_reqq_wcancel(&pport->reqq_wait); 206 bfa_reqq_wcancel(&fcport->reqq_wait);
211 bfa_sm_set_state(pport, bfa_pport_sm_stopped); 207 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
212 break; 208 break;
213 209
214 case BFA_PPORT_SM_ENABLE: 210 case BFA_FCPORT_SM_ENABLE:
215 /** 211 /**
216 * Already enable is in progress. 212 * Already enable is in progress.
217 */ 213 */
218 break; 214 break;
219 215
220 case BFA_PPORT_SM_DISABLE: 216 case BFA_FCPORT_SM_DISABLE:
221 /** 217 /**
222 * Just send disable request to firmware when room becomes 218 * Just send disable request to firmware when room becomes
223 * available in request queue. 219 * available in request queue.
224 */ 220 */
225 bfa_sm_set_state(pport, bfa_pport_sm_disabled); 221 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
226 bfa_reqq_wcancel(&pport->reqq_wait); 222 bfa_reqq_wcancel(&fcport->reqq_wait);
227 bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL, 223 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
228 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); 224 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
229 bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE); 225 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
230 break; 226 break;
231 227
232 case BFA_PPORT_SM_LINKUP: 228 case BFA_FCPORT_SM_LINKUP:
233 case BFA_PPORT_SM_LINKDOWN: 229 case BFA_FCPORT_SM_LINKDOWN:
234 /** 230 /**
235 * Possible to get link events when doing back-to-back 231 * Possible to get link events when doing back-to-back
236 * enable/disables. 232 * enable/disables.
237 */ 233 */
238 break; 234 break;
239 235
240 case BFA_PPORT_SM_HWFAIL: 236 case BFA_FCPORT_SM_HWFAIL:
241 bfa_reqq_wcancel(&pport->reqq_wait); 237 bfa_reqq_wcancel(&fcport->reqq_wait);
242 bfa_sm_set_state(pport, bfa_pport_sm_iocdown); 238 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
243 break; 239 break;
244 240
245 default: 241 default:
246 bfa_sm_fault(pport->bfa, event); 242 bfa_sm_fault(fcport->bfa, event);
247 } 243 }
248} 244}
249 245
250static void 246static void
251bfa_pport_sm_enabling(struct bfa_pport_s *pport, enum bfa_pport_sm_event event) 247bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
248 enum bfa_fcport_sm_event event)
252{ 249{
253 bfa_trc(pport->bfa, event); 250 bfa_trc(fcport->bfa, event);
254 251
255 switch (event) { 252 switch (event) {
256 case BFA_PPORT_SM_FWRSP: 253 case BFA_FCPORT_SM_FWRSP:
257 case BFA_PPORT_SM_LINKDOWN: 254 case BFA_FCPORT_SM_LINKDOWN:
258 bfa_sm_set_state(pport, bfa_pport_sm_linkdown); 255 bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
259 break; 256 break;
260 257
261 case BFA_PPORT_SM_LINKUP: 258 case BFA_FCPORT_SM_LINKUP:
262 bfa_pport_update_linkinfo(pport); 259 bfa_fcport_update_linkinfo(fcport);
263 bfa_sm_set_state(pport, bfa_pport_sm_linkup); 260 bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
264 261
265 bfa_assert(pport->event_cbfn); 262 bfa_assert(fcport->event_cbfn);
266 bfa_pport_callback(pport, BFA_PPORT_LINKUP); 263 bfa_fcport_callback(fcport, BFA_PPORT_LINKUP);
267 break; 264 break;
268 265
269 case BFA_PPORT_SM_ENABLE: 266 case BFA_FCPORT_SM_ENABLE:
270 /** 267 /**
271 * Already being enabled. 268 * Already being enabled.
272 */ 269 */
273 break; 270 break;
274 271
275 case BFA_PPORT_SM_DISABLE: 272 case BFA_FCPORT_SM_DISABLE:
276 if (bfa_pport_send_disable(pport)) 273 if (bfa_fcport_send_disable(fcport))
277 bfa_sm_set_state(pport, bfa_pport_sm_disabling); 274 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
278 else 275 else
279 bfa_sm_set_state(pport, bfa_pport_sm_disabling_qwait); 276 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
280 277
281 bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL, 278 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
282 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); 279 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
283 bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE); 280 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
284 break; 281 break;
285 282
286 case BFA_PPORT_SM_STOP: 283 case BFA_FCPORT_SM_STOP:
287 bfa_sm_set_state(pport, bfa_pport_sm_stopped); 284 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
288 break; 285 break;
289 286
290 case BFA_PPORT_SM_HWFAIL: 287 case BFA_FCPORT_SM_HWFAIL:
291 bfa_sm_set_state(pport, bfa_pport_sm_iocdown); 288 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
292 break; 289 break;
293 290
294 default: 291 default:
295 bfa_sm_fault(pport->bfa, event); 292 bfa_sm_fault(fcport->bfa, event);
296 } 293 }
297} 294}
298 295
299static void 296static void
300bfa_pport_sm_linkdown(struct bfa_pport_s *pport, enum bfa_pport_sm_event event) 297bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
298 enum bfa_fcport_sm_event event)
301{ 299{
302 bfa_trc(pport->bfa, event); 300 struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
301 bfa_trc(fcport->bfa, event);
303 302
304 switch (event) { 303 switch (event) {
305 case BFA_PPORT_SM_LINKUP: 304 case BFA_FCPORT_SM_LINKUP:
306 bfa_pport_update_linkinfo(pport); 305 bfa_fcport_update_linkinfo(fcport);
307 bfa_sm_set_state(pport, bfa_pport_sm_linkup); 306 bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
308 bfa_assert(pport->event_cbfn); 307 bfa_assert(fcport->event_cbfn);
309 bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL, 308 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
310 BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup"); 309 BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
311 bfa_pport_callback(pport, BFA_PPORT_LINKUP); 310
312 bfa_pport_aen_post(pport, BFA_PORT_AEN_ONLINE); 311 if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
312
313 bfa_trc(fcport->bfa, pevent->link_state.fcf.fipenabled);
314 bfa_trc(fcport->bfa, pevent->link_state.fcf.fipfailed);
315
316 if (pevent->link_state.fcf.fipfailed)
317 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
318 BFA_PL_EID_FIP_FCF_DISC, 0,
319 "FIP FCF Discovery Failed");
320 else
321 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
322 BFA_PL_EID_FIP_FCF_DISC, 0,
323 "FIP FCF Discovered");
324 }
325
326 bfa_fcport_callback(fcport, BFA_PPORT_LINKUP);
327 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ONLINE);
313 /** 328 /**
314 * If QoS is enabled and it is not online, 329 * If QoS is enabled and it is not online,
315 * Send a separate event. 330 * Send a separate event.
316 */ 331 */
317 if ((pport->cfg.qos_enabled) 332 if ((fcport->cfg.qos_enabled)
318 && (bfa_os_ntohl(pport->qos_attr.state) != BFA_QOS_ONLINE)) 333 && (bfa_os_ntohl(fcport->qos_attr.state) != BFA_QOS_ONLINE))
319 bfa_pport_aen_post(pport, BFA_PORT_AEN_QOS_NEG); 334 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_QOS_NEG);
320 335
321 break; 336 break;
322 337
323 case BFA_PPORT_SM_LINKDOWN: 338 case BFA_FCPORT_SM_LINKDOWN:
324 /** 339 /**
325 * Possible to get link down event. 340 * Possible to get link down event.
326 */ 341 */
327 break; 342 break;
328 343
329 case BFA_PPORT_SM_ENABLE: 344 case BFA_FCPORT_SM_ENABLE:
330 /** 345 /**
331 * Already enabled. 346 * Already enabled.
332 */ 347 */
333 break; 348 break;
334 349
335 case BFA_PPORT_SM_DISABLE: 350 case BFA_FCPORT_SM_DISABLE:
336 if (bfa_pport_send_disable(pport)) 351 if (bfa_fcport_send_disable(fcport))
337 bfa_sm_set_state(pport, bfa_pport_sm_disabling); 352 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
338 else 353 else
339 bfa_sm_set_state(pport, bfa_pport_sm_disabling_qwait); 354 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
340 355
341 bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL, 356 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
342 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); 357 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
343 bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE); 358 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
344 break; 359 break;
345 360
346 case BFA_PPORT_SM_STOP: 361 case BFA_FCPORT_SM_STOP:
347 bfa_sm_set_state(pport, bfa_pport_sm_stopped); 362 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
348 break; 363 break;
349 364
350 case BFA_PPORT_SM_HWFAIL: 365 case BFA_FCPORT_SM_HWFAIL:
351 bfa_sm_set_state(pport, bfa_pport_sm_iocdown); 366 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
352 break; 367 break;
353 368
354 default: 369 default:
355 bfa_sm_fault(pport->bfa, event); 370 bfa_sm_fault(fcport->bfa, event);
356 } 371 }
357} 372}
358 373
359static void 374static void
360bfa_pport_sm_linkup(struct bfa_pport_s *pport, enum bfa_pport_sm_event event) 375bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
376 enum bfa_fcport_sm_event event)
361{ 377{
362 bfa_trc(pport->bfa, event); 378 bfa_trc(fcport->bfa, event);
363 379
364 switch (event) { 380 switch (event) {
365 case BFA_PPORT_SM_ENABLE: 381 case BFA_FCPORT_SM_ENABLE:
366 /** 382 /**
367 * Already enabled. 383 * Already enabled.
368 */ 384 */
369 break; 385 break;
370 386
371 case BFA_PPORT_SM_DISABLE: 387 case BFA_FCPORT_SM_DISABLE:
372 if (bfa_pport_send_disable(pport)) 388 if (bfa_fcport_send_disable(fcport))
373 bfa_sm_set_state(pport, bfa_pport_sm_disabling); 389 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
374 else 390 else
375 bfa_sm_set_state(pport, bfa_pport_sm_disabling_qwait); 391 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
376 392
377 bfa_pport_reset_linkinfo(pport); 393 bfa_fcport_reset_linkinfo(fcport);
378 bfa_pport_callback(pport, BFA_PPORT_LINKDOWN); 394 bfa_fcport_callback(fcport, BFA_PPORT_LINKDOWN);
379 bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL, 395 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
380 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); 396 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
381 bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE); 397 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
382 bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE); 398 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
383 break; 399 break;
384 400
385 case BFA_PPORT_SM_LINKDOWN: 401 case BFA_FCPORT_SM_LINKDOWN:
386 bfa_sm_set_state(pport, bfa_pport_sm_linkdown); 402 bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
387 bfa_pport_reset_linkinfo(pport); 403 bfa_fcport_reset_linkinfo(fcport);
388 bfa_pport_callback(pport, BFA_PPORT_LINKDOWN); 404 bfa_fcport_callback(fcport, BFA_PPORT_LINKDOWN);
389 bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL, 405 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
390 BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown"); 406 BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
391 if (BFA_PORT_IS_DISABLED(pport->bfa)) { 407 if (BFA_PORT_IS_DISABLED(fcport->bfa))
392 bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE); 408 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
393 } else { 409 else
394 bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT); 410 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
395 }
396 break; 411 break;
397 412
398 case BFA_PPORT_SM_STOP: 413 case BFA_FCPORT_SM_STOP:
399 bfa_sm_set_state(pport, bfa_pport_sm_stopped); 414 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
400 bfa_pport_reset_linkinfo(pport); 415 bfa_fcport_reset_linkinfo(fcport);
401 if (BFA_PORT_IS_DISABLED(pport->bfa)) { 416 if (BFA_PORT_IS_DISABLED(fcport->bfa))
402 bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE); 417 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
403 } else { 418 else
404 bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT); 419 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
405 }
406 break; 420 break;
407 421
408 case BFA_PPORT_SM_HWFAIL: 422 case BFA_FCPORT_SM_HWFAIL:
409 bfa_sm_set_state(pport, bfa_pport_sm_iocdown); 423 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
410 bfa_pport_reset_linkinfo(pport); 424 bfa_fcport_reset_linkinfo(fcport);
411 bfa_pport_callback(pport, BFA_PPORT_LINKDOWN); 425 bfa_fcport_callback(fcport, BFA_PPORT_LINKDOWN);
412 if (BFA_PORT_IS_DISABLED(pport->bfa)) { 426 if (BFA_PORT_IS_DISABLED(fcport->bfa))
413 bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE); 427 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
414 } else { 428 else
415 bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT); 429 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
416 }
417 break; 430 break;
418 431
419 default: 432 default:
420 bfa_sm_fault(pport->bfa, event); 433 bfa_sm_fault(fcport->bfa, event);
421 } 434 }
422} 435}
423 436
424static void 437static void
425bfa_pport_sm_disabling_qwait(struct bfa_pport_s *pport, 438bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
426 enum bfa_pport_sm_event event) 439 enum bfa_fcport_sm_event event)
427{ 440{
428 bfa_trc(pport->bfa, event); 441 bfa_trc(fcport->bfa, event);
429 442
430 switch (event) { 443 switch (event) {
431 case BFA_PPORT_SM_QRESUME: 444 case BFA_FCPORT_SM_QRESUME:
432 bfa_sm_set_state(pport, bfa_pport_sm_disabling); 445 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
433 bfa_pport_send_disable(pport); 446 bfa_fcport_send_disable(fcport);
434 break; 447 break;
435 448
436 case BFA_PPORT_SM_STOP: 449 case BFA_FCPORT_SM_STOP:
437 bfa_sm_set_state(pport, bfa_pport_sm_stopped); 450 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
438 bfa_reqq_wcancel(&pport->reqq_wait); 451 bfa_reqq_wcancel(&fcport->reqq_wait);
439 break; 452 break;
440 453
441 case BFA_PPORT_SM_DISABLE: 454 case BFA_FCPORT_SM_DISABLE:
442 /** 455 /**
443 * Already being disabled. 456 * Already being disabled.
444 */ 457 */
445 break; 458 break;
446 459
447 case BFA_PPORT_SM_LINKUP: 460 case BFA_FCPORT_SM_LINKUP:
448 case BFA_PPORT_SM_LINKDOWN: 461 case BFA_FCPORT_SM_LINKDOWN:
449 /** 462 /**
450 * Possible to get link events when doing back-to-back 463 * Possible to get link events when doing back-to-back
451 * enable/disables. 464 * enable/disables.
452 */ 465 */
453 break; 466 break;
454 467
455 case BFA_PPORT_SM_HWFAIL: 468 case BFA_FCPORT_SM_HWFAIL:
456 bfa_sm_set_state(pport, bfa_pport_sm_iocfail); 469 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
457 bfa_reqq_wcancel(&pport->reqq_wait); 470 bfa_reqq_wcancel(&fcport->reqq_wait);
458 break; 471 break;
459 472
460 default: 473 default:
461 bfa_sm_fault(pport->bfa, event); 474 bfa_sm_fault(fcport->bfa, event);
462 } 475 }
463} 476}
464 477
465static void 478static void
466bfa_pport_sm_disabling(struct bfa_pport_s *pport, enum bfa_pport_sm_event event) 479bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
480 enum bfa_fcport_sm_event event)
467{ 481{
468 bfa_trc(pport->bfa, event); 482 bfa_trc(fcport->bfa, event);
469 483
470 switch (event) { 484 switch (event) {
471 case BFA_PPORT_SM_FWRSP: 485 case BFA_FCPORT_SM_FWRSP:
472 bfa_sm_set_state(pport, bfa_pport_sm_disabled); 486 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
473 break; 487 break;
474 488
475 case BFA_PPORT_SM_DISABLE: 489 case BFA_FCPORT_SM_DISABLE:
476 /** 490 /**
477 * Already being disabled. 491 * Already being disabled.
478 */ 492 */
479 break; 493 break;
480 494
481 case BFA_PPORT_SM_ENABLE: 495 case BFA_FCPORT_SM_ENABLE:
482 if (bfa_pport_send_enable(pport)) 496 if (bfa_fcport_send_enable(fcport))
483 bfa_sm_set_state(pport, bfa_pport_sm_enabling); 497 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
484 else 498 else
485 bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait); 499 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait);
486 500
487 bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL, 501 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
488 BFA_PL_EID_PORT_ENABLE, 0, "Port Enable"); 502 BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
489 bfa_pport_aen_post(pport, BFA_PORT_AEN_ENABLE); 503 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
490 break; 504 break;
491 505
492 case BFA_PPORT_SM_STOP: 506 case BFA_FCPORT_SM_STOP:
493 bfa_sm_set_state(pport, bfa_pport_sm_stopped); 507 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
494 break; 508 break;
495 509
496 case BFA_PPORT_SM_LINKUP: 510 case BFA_FCPORT_SM_LINKUP:
497 case BFA_PPORT_SM_LINKDOWN: 511 case BFA_FCPORT_SM_LINKDOWN:
498 /** 512 /**
499 * Possible to get link events when doing back-to-back 513 * Possible to get link events when doing back-to-back
500 * enable/disables. 514 * enable/disables.
501 */ 515 */
502 break; 516 break;
503 517
504 case BFA_PPORT_SM_HWFAIL: 518 case BFA_FCPORT_SM_HWFAIL:
505 bfa_sm_set_state(pport, bfa_pport_sm_iocfail); 519 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
506 break; 520 break;
507 521
508 default: 522 default:
509 bfa_sm_fault(pport->bfa, event); 523 bfa_sm_fault(fcport->bfa, event);
510 } 524 }
511} 525}
512 526
513static void 527static void
514bfa_pport_sm_disabled(struct bfa_pport_s *pport, enum bfa_pport_sm_event event) 528bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
529 enum bfa_fcport_sm_event event)
515{ 530{
516 bfa_trc(pport->bfa, event); 531 bfa_trc(fcport->bfa, event);
517 532
518 switch (event) { 533 switch (event) {
519 case BFA_PPORT_SM_START: 534 case BFA_FCPORT_SM_START:
520 /** 535 /**
521 * Ignore start event for a port that is disabled. 536 * Ignore start event for a port that is disabled.
522 */ 537 */
523 break; 538 break;
524 539
525 case BFA_PPORT_SM_STOP: 540 case BFA_FCPORT_SM_STOP:
526 bfa_sm_set_state(pport, bfa_pport_sm_stopped); 541 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
527 break; 542 break;
528 543
529 case BFA_PPORT_SM_ENABLE: 544 case BFA_FCPORT_SM_ENABLE:
530 if (bfa_pport_send_enable(pport)) 545 if (bfa_fcport_send_enable(fcport))
531 bfa_sm_set_state(pport, bfa_pport_sm_enabling); 546 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
532 else 547 else
533 bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait); 548 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait);
534 549
535 bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL, 550 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
536 BFA_PL_EID_PORT_ENABLE, 0, "Port Enable"); 551 BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
537 bfa_pport_aen_post(pport, BFA_PORT_AEN_ENABLE); 552 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
538 break; 553 break;
539 554
540 case BFA_PPORT_SM_DISABLE: 555 case BFA_FCPORT_SM_DISABLE:
541 /** 556 /**
542 * Already disabled. 557 * Already disabled.
543 */ 558 */
544 break; 559 break;
545 560
546 case BFA_PPORT_SM_HWFAIL: 561 case BFA_FCPORT_SM_HWFAIL:
547 bfa_sm_set_state(pport, bfa_pport_sm_iocfail); 562 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
548 break; 563 break;
549 564
550 default: 565 default:
551 bfa_sm_fault(pport->bfa, event); 566 bfa_sm_fault(fcport->bfa, event);
552 } 567 }
553} 568}
554 569
555static void 570static void
556bfa_pport_sm_stopped(struct bfa_pport_s *pport, enum bfa_pport_sm_event event) 571bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
572 enum bfa_fcport_sm_event event)
557{ 573{
558 bfa_trc(pport->bfa, event); 574 bfa_trc(fcport->bfa, event);
559 575
560 switch (event) { 576 switch (event) {
561 case BFA_PPORT_SM_START: 577 case BFA_FCPORT_SM_START:
562 if (bfa_pport_send_enable(pport)) 578 if (bfa_fcport_send_enable(fcport))
563 bfa_sm_set_state(pport, bfa_pport_sm_enabling); 579 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
564 else 580 else
565 bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait); 581 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait);
566 break; 582 break;
567 583
568 default: 584 default:
@@ -577,16 +593,17 @@ bfa_pport_sm_stopped(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
577 * Port is enabled. IOC is down/failed. 593 * Port is enabled. IOC is down/failed.
578 */ 594 */
579static void 595static void
580bfa_pport_sm_iocdown(struct bfa_pport_s *pport, enum bfa_pport_sm_event event) 596bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
597 enum bfa_fcport_sm_event event)
581{ 598{
582 bfa_trc(pport->bfa, event); 599 bfa_trc(fcport->bfa, event);
583 600
584 switch (event) { 601 switch (event) {
585 case BFA_PPORT_SM_START: 602 case BFA_FCPORT_SM_START:
586 if (bfa_pport_send_enable(pport)) 603 if (bfa_fcport_send_enable(fcport))
587 bfa_sm_set_state(pport, bfa_pport_sm_enabling); 604 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
588 else 605 else
589 bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait); 606 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait);
590 break; 607 break;
591 608
592 default: 609 default:
@@ -601,17 +618,18 @@ bfa_pport_sm_iocdown(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
601 * Port is disabled. IOC is down/failed. 618 * Port is disabled. IOC is down/failed.
602 */ 619 */
603static void 620static void
604bfa_pport_sm_iocfail(struct bfa_pport_s *pport, enum bfa_pport_sm_event event) 621bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
622 enum bfa_fcport_sm_event event)
605{ 623{
606 bfa_trc(pport->bfa, event); 624 bfa_trc(fcport->bfa, event);
607 625
608 switch (event) { 626 switch (event) {
609 case BFA_PPORT_SM_START: 627 case BFA_FCPORT_SM_START:
610 bfa_sm_set_state(pport, bfa_pport_sm_disabled); 628 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
611 break; 629 break;
612 630
613 case BFA_PPORT_SM_ENABLE: 631 case BFA_FCPORT_SM_ENABLE:
614 bfa_sm_set_state(pport, bfa_pport_sm_iocdown); 632 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
615 break; 633 break;
616 634
617 default: 635 default:
@@ -622,41 +640,226 @@ bfa_pport_sm_iocfail(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
622 } 640 }
623} 641}
624 642
643/**
644 * Link state is down
645 */
646static void
647bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
648 enum bfa_fcport_ln_sm_event event)
649{
650 bfa_trc(ln->fcport->bfa, event);
651
652 switch (event) {
653 case BFA_FCPORT_LN_SM_LINKUP:
654 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
655 bfa_fcport_queue_cb(ln, BFA_PPORT_LINKUP);
656 break;
625 657
658 default:
659 bfa_sm_fault(ln->fcport->bfa, event);
660 }
661}
662
663/**
664 * Link state is waiting for down notification
665 */
666static void
667bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
668 enum bfa_fcport_ln_sm_event event)
669{
670 bfa_trc(ln->fcport->bfa, event);
671
672 switch (event) {
673 case BFA_FCPORT_LN_SM_LINKUP:
674 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
675 break;
676
677 case BFA_FCPORT_LN_SM_NOTIFICATION:
678 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
679 break;
680
681 default:
682 bfa_sm_fault(ln->fcport->bfa, event);
683 }
684}
685
686/**
687 * Link state is waiting for down notification and there is a pending up
688 */
689static void
690bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
691 enum bfa_fcport_ln_sm_event event)
692{
693 bfa_trc(ln->fcport->bfa, event);
694
695 switch (event) {
696 case BFA_FCPORT_LN_SM_LINKDOWN:
697 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
698 break;
699
700 case BFA_FCPORT_LN_SM_NOTIFICATION:
701 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
702 bfa_fcport_queue_cb(ln, BFA_PPORT_LINKUP);
703 break;
704
705 default:
706 bfa_sm_fault(ln->fcport->bfa, event);
707 }
708}
709
710/**
711 * Link state is up
712 */
713static void
714bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
715 enum bfa_fcport_ln_sm_event event)
716{
717 bfa_trc(ln->fcport->bfa, event);
718
719 switch (event) {
720 case BFA_FCPORT_LN_SM_LINKDOWN:
721 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
722 bfa_fcport_queue_cb(ln, BFA_PPORT_LINKDOWN);
723 break;
724
725 default:
726 bfa_sm_fault(ln->fcport->bfa, event);
727 }
728}
729
730/**
731 * Link state is waiting for up notification
732 */
733static void
734bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
735 enum bfa_fcport_ln_sm_event event)
736{
737 bfa_trc(ln->fcport->bfa, event);
738
739 switch (event) {
740 case BFA_FCPORT_LN_SM_LINKDOWN:
741 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
742 break;
743
744 case BFA_FCPORT_LN_SM_NOTIFICATION:
745 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up);
746 break;
747
748 default:
749 bfa_sm_fault(ln->fcport->bfa, event);
750 }
751}
752
753/**
754 * Link state is waiting for up notification and there is a pending down
755 */
756static void
757bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
758 enum bfa_fcport_ln_sm_event event)
759{
760 bfa_trc(ln->fcport->bfa, event);
761
762 switch (event) {
763 case BFA_FCPORT_LN_SM_LINKUP:
764 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf);
765 break;
766
767 case BFA_FCPORT_LN_SM_NOTIFICATION:
768 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
769 bfa_fcport_queue_cb(ln, BFA_PPORT_LINKDOWN);
770 break;
771
772 default:
773 bfa_sm_fault(ln->fcport->bfa, event);
774 }
775}
776
777/**
778 * Link state is waiting for up notification and there are pending down and up
779 */
780static void
781bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
782 enum bfa_fcport_ln_sm_event event)
783{
784 bfa_trc(ln->fcport->bfa, event);
785
786 switch (event) {
787 case BFA_FCPORT_LN_SM_LINKDOWN:
788 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
789 break;
790
791 case BFA_FCPORT_LN_SM_NOTIFICATION:
792 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
793 bfa_fcport_queue_cb(ln, BFA_PPORT_LINKDOWN);
794 break;
795
796 default:
797 bfa_sm_fault(ln->fcport->bfa, event);
798 }
799}
626 800
627/** 801/**
628 * bfa_pport_private 802 * bfa_pport_private
629 */ 803 */
630 804
631static void 805static void
632__bfa_cb_port_event(void *cbarg, bfa_boolean_t complete) 806__bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
633{ 807{
634 struct bfa_pport_s *pport = cbarg; 808 struct bfa_fcport_ln_s *ln = cbarg;
635 809
636 if (complete) 810 if (complete)
637 pport->event_cbfn(pport->event_cbarg, pport->hcb_event); 811 ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event);
812 else
813 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
814}
815
816static void
817bfa_fcport_callback(struct bfa_fcport_s *fcport, enum bfa_pport_linkstate event)
818{
819 if (fcport->bfa->fcs) {
820 fcport->event_cbfn(fcport->event_cbarg, event);
821 return;
822 }
823
824 switch (event) {
825 case BFA_PPORT_LINKUP:
826 bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP);
827 break;
828 case BFA_PPORT_LINKDOWN:
829 bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN);
830 break;
831 default:
832 bfa_assert(0);
833 }
834}
835
836static void
837bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_pport_linkstate event)
838{
839 ln->ln_event = event;
840 bfa_cb_queue(ln->fcport->bfa, &ln->ln_qe, __bfa_cb_fcport_event, ln);
638} 841}
639 842
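The new bfa_fcport_ln_* machine above decouples link events from host notifications: when no FCS is attached, bfa_fcport_callback() feeds linkup/linkdown into the machine, bfa_fcport_queue_cb() defers the actual callback through bfa_cb_queue(), and __bfa_cb_fcport_event() reports BFA_FCPORT_LN_SM_NOTIFICATION once the queued callback has been delivered (or canceled). While a notification is outstanding (the *_nf states) further events only move the state, so an up/down/up burst collapses into the minimal callback sequence. In BFA's convention a state is simply a handler function pointer; a condensed model with generic names, not the driver's real bfa_sm_* macros:

    /* Condensed model of the BFA state-machine idiom (generic names). */
    enum ln_event { LN_LINKUP = 1, LN_LINKDOWN, LN_NOTIFICATION };

    struct ln;
    typedef void (*ln_state_t)(struct ln *ln, enum ln_event ev);
    struct ln { ln_state_t state; };

    #define sm_set_state(_ln, _f)   ((_ln)->state = (_f))
    #define sm_send_event(_ln, _e)  ((_ln)->state((_ln), (_e)))

    static void ln_sm_up(struct ln *ln, enum ln_event ev);
    static void ln_sm_up_nf(struct ln *ln, enum ln_event ev);

    /* Down, idle: a linkup queues one callback and waits for its
     * delivery notification before acting on anything else. */
    static void ln_sm_dn(struct ln *ln, enum ln_event ev)
    {
            if (ev == LN_LINKUP)
                    sm_set_state(ln, ln_sm_up_nf);  /* + queue linkup cb */
    }

    /* Up-notification pending: delivery completes the transition. */
    static void ln_sm_up_nf(struct ln *ln, enum ln_event ev)
    {
            if (ev == LN_NOTIFICATION)
                    sm_set_state(ln, ln_sm_up);
    }

    static void ln_sm_up(struct ln *ln, enum ln_event ev)
    {
            if (ev == LN_LINKDOWN)
                    sm_set_state(ln, ln_sm_dn);     /* real code: dn_nf */
    }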
640#define PPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_pport_stats_u), \ 843#define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
641 BFA_CACHELINE_SZ)) 844 BFA_CACHELINE_SZ))
642 845
643static void 846static void
644bfa_pport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, 847bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
645 u32 *dm_len) 848 u32 *dm_len)
646{ 849{
647 *dm_len += PPORT_STATS_DMA_SZ; 850 *dm_len += FCPORT_STATS_DMA_SZ;
648} 851}
649 852
650static void 853static void
651bfa_pport_qresume(void *cbarg) 854bfa_fcport_qresume(void *cbarg)
652{ 855{
653 struct bfa_pport_s *port = cbarg; 856 struct bfa_fcport_s *fcport = cbarg;
654 857
655 bfa_sm_send_event(port, BFA_PPORT_SM_QRESUME); 858 bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME);
656} 859}
657 860
658static void 861static void
659bfa_pport_mem_claim(struct bfa_pport_s *pport, struct bfa_meminfo_s *meminfo) 862bfa_fcport_mem_claim(struct bfa_fcport_s *fcport, struct bfa_meminfo_s *meminfo)
660{ 863{
661 u8 *dm_kva; 864 u8 *dm_kva;
662 u64 dm_pa; 865 u64 dm_pa;
@@ -664,12 +867,12 @@ bfa_pport_mem_claim(struct bfa_pport_s *pport, struct bfa_meminfo_s *meminfo)
664 dm_kva = bfa_meminfo_dma_virt(meminfo); 867 dm_kva = bfa_meminfo_dma_virt(meminfo);
665 dm_pa = bfa_meminfo_dma_phys(meminfo); 868 dm_pa = bfa_meminfo_dma_phys(meminfo);
666 869
667 pport->stats_kva = dm_kva; 870 fcport->stats_kva = dm_kva;
668 pport->stats_pa = dm_pa; 871 fcport->stats_pa = dm_pa;
669 pport->stats = (union bfa_pport_stats_u *)dm_kva; 872 fcport->stats = (union bfa_fcport_stats_u *)dm_kva;
670 873
671 dm_kva += PPORT_STATS_DMA_SZ; 874 dm_kva += FCPORT_STATS_DMA_SZ;
672 dm_pa += PPORT_STATS_DMA_SZ; 875 dm_pa += FCPORT_STATS_DMA_SZ;
673 876
674 bfa_meminfo_dma_virt(meminfo) = dm_kva; 877 bfa_meminfo_dma_virt(meminfo) = dm_kva;
675 bfa_meminfo_dma_phys(meminfo) = dm_pa; 878 bfa_meminfo_dma_phys(meminfo) = dm_pa;
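bfa_fcport_mem_claim() follows the BFA meminfo convention: bfa_fcport_meminfo() earlier reserved FCPORT_STATS_DMA_SZ in the shared DMA segment, and at attach time the module claims its slice by reading the kva/pa cursors and advancing both by the same cacheline-rounded size, keeping virtual and physical offsets in lockstep. The carve-out shape in isolation (generic names; kernel integer types assumed):

    /* Generic DMA carve-out sketch; not the driver's helpers. */
    struct meminfo {
            u8  *kva;       /* next free kernel virtual address */
            u64  pa;        /* matching physical/DMA address */
    };

    static void *claim(struct meminfo *mi, u64 *pa_out, u32 len)
    {
            void *kva = mi->kva;

            *pa_out  = mi->pa;
            mi->kva += len;         /* advance both cursors together */
            mi->pa  += len;
            return kva;
    }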
@@ -679,18 +882,21 @@ bfa_pport_mem_claim(struct bfa_pport_s *pport, struct bfa_meminfo_s *meminfo)
679 * Memory initialization. 882 * Memory initialization.
680 */ 883 */
681static void 884static void
682bfa_pport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, 885bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
683 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) 886 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
684{ 887{
685 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 888 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
686 struct bfa_pport_cfg_s *port_cfg = &pport->cfg; 889 struct bfa_pport_cfg_s *port_cfg = &fcport->cfg;
890 struct bfa_fcport_ln_s *ln = &fcport->ln;
687 891
688 bfa_os_memset(pport, 0, sizeof(struct bfa_pport_s)); 892 bfa_os_memset(fcport, 0, sizeof(struct bfa_fcport_s));
689 pport->bfa = bfa; 893 fcport->bfa = bfa;
894 ln->fcport = fcport;
690 895
691 bfa_pport_mem_claim(pport, meminfo); 896 bfa_fcport_mem_claim(fcport, meminfo);
692 897
693 bfa_sm_set_state(pport, bfa_pport_sm_uninit); 898 bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
899 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
694 900
695 /** 901 /**
696 * initialize and set default configuration 902 * initialize and set default configuration
@@ -702,30 +908,30 @@ bfa_pport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
702 908
703 port_cfg->trl_def_speed = BFA_PPORT_SPEED_1GBPS; 909 port_cfg->trl_def_speed = BFA_PPORT_SPEED_1GBPS;
704 910
705 bfa_reqq_winit(&pport->reqq_wait, bfa_pport_qresume, pport); 911 bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
706} 912}
707 913
708static void 914static void
709bfa_pport_initdone(struct bfa_s *bfa) 915bfa_fcport_initdone(struct bfa_s *bfa)
710{ 916{
711 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 917 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
712 918
713 /** 919 /**
714 * Initialize port attributes from IOC hardware data. 920 * Initialize port attributes from IOC hardware data.
715 */ 921 */
716 bfa_pport_set_wwns(pport); 922 bfa_fcport_set_wwns(fcport);
717 if (pport->cfg.maxfrsize == 0) 923 if (fcport->cfg.maxfrsize == 0)
718 pport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc); 924 fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
719 pport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc); 925 fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
720 pport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc); 926 fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);
721 927
722 bfa_assert(pport->cfg.maxfrsize); 928 bfa_assert(fcport->cfg.maxfrsize);
723 bfa_assert(pport->cfg.rx_bbcredit); 929 bfa_assert(fcport->cfg.rx_bbcredit);
724 bfa_assert(pport->speed_sup); 930 bfa_assert(fcport->speed_sup);
725} 931}
726 932
727static void 933static void
728bfa_pport_detach(struct bfa_s *bfa) 934bfa_fcport_detach(struct bfa_s *bfa)
729{ 935{
730} 936}
731 937
@@ -733,95 +939,97 @@ bfa_pport_detach(struct bfa_s *bfa)
733 * Called when IOC is ready. 939 * Called when IOC is ready.
734 */ 940 */
735static void 941static void
736bfa_pport_start(struct bfa_s *bfa) 942bfa_fcport_start(struct bfa_s *bfa)
737{ 943{
738 bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_START); 944 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
739} 945}
740 946
741/** 947/**
742 * Called before IOC is stopped. 948 * Called before IOC is stopped.
743 */ 949 */
744static void 950static void
745bfa_pport_stop(struct bfa_s *bfa) 951bfa_fcport_stop(struct bfa_s *bfa)
746{ 952{
747 bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_STOP); 953 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_STOP);
748} 954}
749 955
750/** 956/**
751 * Called when IOC failure is detected. 957 * Called when IOC failure is detected.
752 */ 958 */
753static void 959static void
754bfa_pport_iocdisable(struct bfa_s *bfa) 960bfa_fcport_iocdisable(struct bfa_s *bfa)
755{ 961{
756 bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_HWFAIL); 962 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_HWFAIL);
757} 963}
758 964
759static void 965static void
760bfa_pport_update_linkinfo(struct bfa_pport_s *pport) 966bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
761{ 967{
762 struct bfi_pport_event_s *pevent = pport->event_arg.i2hmsg.event; 968 struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
763 969
764 pport->speed = pevent->link_state.speed; 970 fcport->speed = pevent->link_state.speed;
765 pport->topology = pevent->link_state.topology; 971 fcport->topology = pevent->link_state.topology;
766 972
767 if (pport->topology == BFA_PPORT_TOPOLOGY_LOOP) 973 if (fcport->topology == BFA_PPORT_TOPOLOGY_LOOP)
768 pport->myalpa = pevent->link_state.tl.loop_info.myalpa; 974 fcport->myalpa =
975 pevent->link_state.tl.loop_info.myalpa;
769 976
770 /* 977 /*
771 * QoS Details 978 * QoS Details
772 */ 979 */
773 bfa_os_assign(pport->qos_attr, pevent->link_state.qos_attr); 980 bfa_os_assign(fcport->qos_attr, pevent->link_state.qos_attr);
774 bfa_os_assign(pport->qos_vc_attr, pevent->link_state.qos_vc_attr); 981 bfa_os_assign(fcport->qos_vc_attr, pevent->link_state.qos_vc_attr);
775 982
776 bfa_trc(pport->bfa, pport->speed); 983 bfa_trc(fcport->bfa, fcport->speed);
777 bfa_trc(pport->bfa, pport->topology); 984 bfa_trc(fcport->bfa, fcport->topology);
778} 985}
779 986
780static void 987static void
781bfa_pport_reset_linkinfo(struct bfa_pport_s *pport) 988bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
782{ 989{
783 pport->speed = BFA_PPORT_SPEED_UNKNOWN; 990 fcport->speed = BFA_PPORT_SPEED_UNKNOWN;
784 pport->topology = BFA_PPORT_TOPOLOGY_NONE; 991 fcport->topology = BFA_PPORT_TOPOLOGY_NONE;
785} 992}
786 993
787/** 994/**
788 * Send port enable message to firmware. 995 * Send port enable message to firmware.
789 */ 996 */
790static bfa_boolean_t 997static bfa_boolean_t
791bfa_pport_send_enable(struct bfa_pport_s *port) 998bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
792{ 999{
793 struct bfi_pport_enable_req_s *m; 1000 struct bfi_fcport_enable_req_s *m;
794 1001
795 /** 1002 /**
796 * Increment message tag before queue check, so that responses to old 1003 * Increment message tag before queue check, so that responses to old
797 * requests are discarded. 1004 * requests are discarded.
798 */ 1005 */
799 port->msgtag++; 1006 fcport->msgtag++;
800 1007
801 /** 1008 /**
802 * check for room in queue to send request now 1009 * check for room in queue to send request now
803 */ 1010 */
804 m = bfa_reqq_next(port->bfa, BFA_REQQ_PORT); 1011 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
805 if (!m) { 1012 if (!m) {
806 bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->reqq_wait); 1013 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
1014 &fcport->reqq_wait);
807 return BFA_FALSE; 1015 return BFA_FALSE;
808 } 1016 }
809 1017
810 bfi_h2i_set(m->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_ENABLE_REQ, 1018 bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
811 bfa_lpuid(port->bfa)); 1019 bfa_lpuid(fcport->bfa));
812 m->nwwn = port->nwwn; 1020 m->nwwn = fcport->nwwn;
813 m->pwwn = port->pwwn; 1021 m->pwwn = fcport->pwwn;
814 m->port_cfg = port->cfg; 1022 m->port_cfg = fcport->cfg;
815 m->msgtag = port->msgtag; 1023 m->msgtag = fcport->msgtag;
816 m->port_cfg.maxfrsize = bfa_os_htons(port->cfg.maxfrsize); 1024 m->port_cfg.maxfrsize = bfa_os_htons(fcport->cfg.maxfrsize);
817 bfa_dma_be_addr_set(m->stats_dma_addr, port->stats_pa); 1025 bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
818 bfa_trc(port->bfa, m->stats_dma_addr.a32.addr_lo); 1026 bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
819 bfa_trc(port->bfa, m->stats_dma_addr.a32.addr_hi); 1027 bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);
820 1028
821 /** 1029 /**
822 * queue I/O message to firmware 1030 * queue I/O message to firmware
823 */ 1031 */
824 bfa_reqq_produce(port->bfa, BFA_REQQ_PORT); 1032 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
825 return BFA_TRUE; 1033 return BFA_TRUE;
826} 1034}
827 1035
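The enable path above increments fcport->msgtag before checking for request-queue space, so a firmware response carrying a stale tag can be recognized and discarded even when the request had to wait for queue room. A minimal sketch of the idea, with hypothetical names rather than the driver's API:

/* Illustrative only: tag every request attempt so a late reply to a
 * superseded request can be ignored by comparing tags. */
struct port {
    unsigned int msgtag;    /* bumped once per request attempt */
};

static void start_request(struct port *p)
{
    p->msgtag++;    /* bump first, even if the send must wait:
                     * the tag already names this newest attempt */
    /* ... try to queue the request; on queue-full, retry later ... */
}

static int reply_is_current(const struct port *p, unsigned int reply_tag)
{
    /* Act only on a reply whose tag matches the latest request. */
    return reply_tag == p->msgtag;
}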
@@ -829,74 +1037,226 @@ bfa_pport_send_enable(struct bfa_pport_s *port)
829 * Send port disable message to firmware. 1037 * Send port disable message to firmware.
830 */ 1038 */
831static bfa_boolean_t 1039static bfa_boolean_t
832bfa_pport_send_disable(struct bfa_pport_s *port) 1040bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
833{ 1041{
834 bfi_pport_disable_req_t *m; 1042 struct bfi_fcport_req_s *m;
835 1043
836 /** 1044 /**
837 * Increment message tag before queue check, so that responses to old 1045 * Increment message tag before queue check, so that responses to old
838 * requests are discarded. 1046 * requests are discarded.
839 */ 1047 */
840 port->msgtag++; 1048 fcport->msgtag++;
841 1049
842 /** 1050 /**
843 * check for room in queue to send request now 1051 * check for room in queue to send request now
844 */ 1052 */
845 m = bfa_reqq_next(port->bfa, BFA_REQQ_PORT); 1053 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
846 if (!m) { 1054 if (!m) {
847 bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->reqq_wait); 1055 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
1056 &fcport->reqq_wait);
848 return BFA_FALSE; 1057 return BFA_FALSE;
849 } 1058 }
850 1059
851 bfi_h2i_set(m->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_DISABLE_REQ, 1060 bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
852 bfa_lpuid(port->bfa)); 1061 bfa_lpuid(fcport->bfa));
853 m->msgtag = port->msgtag; 1062 m->msgtag = fcport->msgtag;
854 1063
855 /** 1064 /**
856 * queue I/O message to firmware 1065 * queue I/O message to firmware
857 */ 1066 */
858 bfa_reqq_produce(port->bfa, BFA_REQQ_PORT); 1067 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
859 1068
860 return BFA_TRUE; 1069 return BFA_TRUE;
861} 1070}
862 1071
863static void 1072static void
864bfa_pport_set_wwns(struct bfa_pport_s *port) 1073bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
865{ 1074{
866 port->pwwn = bfa_ioc_get_pwwn(&port->bfa->ioc); 1075 fcport->pwwn = bfa_ioc_get_pwwn(&fcport->bfa->ioc);
867 port->nwwn = bfa_ioc_get_nwwn(&port->bfa->ioc); 1076 fcport->nwwn = bfa_ioc_get_nwwn(&fcport->bfa->ioc);
868 1077
869 bfa_trc(port->bfa, port->pwwn); 1078 bfa_trc(fcport->bfa, fcport->pwwn);
870 bfa_trc(port->bfa, port->nwwn); 1079 bfa_trc(fcport->bfa, fcport->nwwn);
871} 1080}
872 1081
873static void 1082static void
874bfa_port_send_txcredit(void *port_cbarg) 1083bfa_fcport_send_txcredit(void *port_cbarg)
875{ 1084{
876 1085
877 struct bfa_pport_s *port = port_cbarg; 1086 struct bfa_fcport_s *fcport = port_cbarg;
878 struct bfi_pport_set_svc_params_req_s *m; 1087 struct bfi_fcport_set_svc_params_req_s *m;
879 1088
880 /** 1089 /**
881 * check for room in queue to send request now 1090 * check for room in queue to send request now
882 */ 1091 */
883 m = bfa_reqq_next(port->bfa, BFA_REQQ_PORT); 1092 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
884 if (!m) { 1093 if (!m) {
885 bfa_trc(port->bfa, port->cfg.tx_bbcredit); 1094 bfa_trc(fcport->bfa, fcport->cfg.tx_bbcredit);
886 return; 1095 return;
887 } 1096 }
888 1097
889 bfi_h2i_set(m->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_SET_SVC_PARAMS_REQ, 1098 bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ,
890 bfa_lpuid(port->bfa)); 1099 bfa_lpuid(fcport->bfa));
891 m->tx_bbcredit = bfa_os_htons((u16) port->cfg.tx_bbcredit); 1100 m->tx_bbcredit = bfa_os_htons((u16) fcport->cfg.tx_bbcredit);
892 1101
893 /** 1102 /**
894 * queue I/O message to firmware 1103 * queue I/O message to firmware
895 */ 1104 */
896 bfa_reqq_produce(port->bfa, BFA_REQQ_PORT); 1105 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
1106}
1107
1108static void
1109bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
1110 struct bfa_qos_stats_s *s)
1111{
1112 u32 *dip = (u32 *) d;
1113 u32 *sip = (u32 *) s;
1114 int i;
1115
1116 /* Now swap the 32 bit fields */
1117 for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
1118 dip[i] = bfa_os_ntohl(sip[i]);
1119}
1120
1121static void
1122bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
1123 struct bfa_fcoe_stats_s *s)
1124{
1125 u32 *dip = (u32 *) d;
1126 u32 *sip = (u32 *) s;
1127 int i;
1128
1129 for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
1130 i = i + 2) {
1131#ifdef __BIGENDIAN
1132 dip[i] = bfa_os_ntohl(sip[i]);
1133 dip[i + 1] = bfa_os_ntohl(sip[i + 1]);
1134#else
1135 dip[i] = bfa_os_ntohl(sip[i + 1]);
1136 dip[i + 1] = bfa_os_ntohl(sip[i]);
1137#endif
1138 }
1139}
1140
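The two swap helpers above differ for a reason: firmware returns the statistics big-endian, the QoS counters are 32-bit so a plain per-word bfa_os_ntohl() suffices, but the FCoE counters are 64-bit, so on a little-endian host each counter's two 32-bit halves must be byte-swapped and exchanged. A standalone sketch of the 64-bit case, using ntohl() and a generic compiler macro in place of the driver's bfa_os_ntohl()/__BIGENDIAN:

#include <stddef.h>
#include <stdint.h>
#include <arpa/inet.h>

static void swap64_words(uint32_t *dst, const uint32_t *src, size_t nwords)
{
    size_t i;

    for (i = 0; i < nwords; i += 2) {
#ifdef __BIG_ENDIAN__
        dst[i]     = ntohl(src[i]);       /* halves already ordered;  */
        dst[i + 1] = ntohl(src[i + 1]);   /* ntohl() is a no-op here  */
#else
        dst[i]     = ntohl(src[i + 1]);   /* swap bytes and exchange  */
        dst[i + 1] = ntohl(src[i]);       /* the two 32-bit halves    */
#endif
    }
}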
1141static void
1142__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
1143{
1144 struct bfa_fcport_s *fcport = cbarg;
1145
1146 if (complete) {
1147 if (fcport->stats_status == BFA_STATUS_OK) {
1148
1149 /* Swap FC QoS or FCoE stats */
1150 if (bfa_ioc_get_fcmode(&fcport->bfa->ioc))
1151 bfa_fcport_qos_stats_swap(
1152 &fcport->stats_ret->fcqos,
1153 &fcport->stats->fcqos);
1154 else
1155 bfa_fcport_fcoe_stats_swap(
1156 &fcport->stats_ret->fcoe,
1157 &fcport->stats->fcoe);
1158 }
1159 fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
1160 } else {
1161 fcport->stats_busy = BFA_FALSE;
1162 fcport->stats_status = BFA_STATUS_OK;
1163 }
897} 1164}
898 1165
1166static void
1167bfa_fcport_stats_get_timeout(void *cbarg)
1168{
1169 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
1170
1171 bfa_trc(fcport->bfa, fcport->stats_qfull);
1172
1173 if (fcport->stats_qfull) {
1174 bfa_reqq_wcancel(&fcport->stats_reqq_wait);
1175 fcport->stats_qfull = BFA_FALSE;
1176 }
1177
1178 fcport->stats_status = BFA_STATUS_ETIMER;
1179 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_fcport_stats_get,
1180 fcport);
1181}
1182
1183static void
1184bfa_fcport_send_stats_get(void *cbarg)
1185{
1186 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
1187 struct bfi_fcport_req_s *msg;
1188
1189 msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
1190
1191 if (!msg) {
1192 fcport->stats_qfull = BFA_TRUE;
1193 bfa_reqq_winit(&fcport->stats_reqq_wait,
1194 bfa_fcport_send_stats_get, fcport);
1195 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
1196 &fcport->stats_reqq_wait);
1197 return;
1198 }
1199 fcport->stats_qfull = BFA_FALSE;
1200
1201 bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s));
1202 bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
1203 bfa_lpuid(fcport->bfa));
1204 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
1205}
1206
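bfa_fcport_send_stats_get() shows the driver's standard queue-full recovery: when bfa_reqq_next() returns NULL, a wait element is initialized with the very function that failed, queued with bfa_reqq_wait(), and re-invoked once space frees up (see bfa_reqq_resume() further down in bfa_intr.c). A self-contained sketch of the shape, with stand-in types and stubs rather than the real API:

/* Illustrative types and stubs -- not the driver's definitions. */
struct wait_elem { void (*qresume)(void *cbarg); void *cbarg; };
struct port { struct wait_elem wait; char slot[64]; int full; };

static void *reqq_next(struct port *p) { return p->full ? 0 : p->slot; }
static void reqq_wait(struct port *p, struct wait_elem *w) { (void)p; (void)w; }
static void reqq_produce(struct port *p) { (void)p; }

static void send_stats_req(void *cbarg)
{
    struct port *p = cbarg;
    void *msg = reqq_next(p);   /* NULL when the request queue is full */

    if (!msg) {
        p->wait.qresume = send_stats_req;   /* resume == retry myself */
        p->wait.cbarg = p;
        reqq_wait(p, &p->wait);             /* park until space frees */
        return;
    }
    /* ... build the request in 'msg' ... */
    reqq_produce(p);
}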
1207static void
1208__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
1209{
1210 struct bfa_fcport_s *fcport = cbarg;
1211
1212 if (complete) {
1213 fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
1214 } else {
1215 fcport->stats_busy = BFA_FALSE;
1216 fcport->stats_status = BFA_STATUS_OK;
1217 }
1218}
1219
1220static void
1221bfa_fcport_stats_clr_timeout(void *cbarg)
1222{
1223 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
1224
1225 bfa_trc(fcport->bfa, fcport->stats_qfull);
1226
1227 if (fcport->stats_qfull) {
1228 bfa_reqq_wcancel(&fcport->stats_reqq_wait);
1229 fcport->stats_qfull = BFA_FALSE;
1230 }
1231
1232 fcport->stats_status = BFA_STATUS_ETIMER;
1233 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
1234 __bfa_cb_fcport_stats_clr, fcport);
1235}
1236
1237static void
1238bfa_fcport_send_stats_clear(void *cbarg)
1239{
1240 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
1241 struct bfi_fcport_req_s *msg;
899 1242
1243 msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
1244
1245 if (!msg) {
1246 fcport->stats_qfull = BFA_TRUE;
1247 bfa_reqq_winit(&fcport->stats_reqq_wait,
1248 bfa_fcport_send_stats_clear, fcport);
1249 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
1250 &fcport->stats_reqq_wait);
1251 return;
1252 }
1253 fcport->stats_qfull = BFA_FALSE;
1254
1255 bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s));
1256 bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
1257 bfa_lpuid(fcport->bfa));
1258 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
1259}
900 1260
901/** 1261/**
902 * bfa_pport_public 1262 * bfa_pport_public
@@ -906,32 +1266,32 @@ bfa_port_send_txcredit(void *port_cbarg)
906 * Firmware message handler. 1266 * Firmware message handler.
907 */ 1267 */
908void 1268void
909bfa_pport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg) 1269bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
910{ 1270{
911 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1271 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
912 union bfi_pport_i2h_msg_u i2hmsg; 1272 union bfi_fcport_i2h_msg_u i2hmsg;
913 1273
914 i2hmsg.msg = msg; 1274 i2hmsg.msg = msg;
915 pport->event_arg.i2hmsg = i2hmsg; 1275 fcport->event_arg.i2hmsg = i2hmsg;
916 1276
917 switch (msg->mhdr.msg_id) { 1277 switch (msg->mhdr.msg_id) {
918 case BFI_PPORT_I2H_ENABLE_RSP: 1278 case BFI_FCPORT_I2H_ENABLE_RSP:
919 if (pport->msgtag == i2hmsg.enable_rsp->msgtag) 1279 if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
920 bfa_sm_send_event(pport, BFA_PPORT_SM_FWRSP); 1280 bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
921 break; 1281 break;
922 1282
923 case BFI_PPORT_I2H_DISABLE_RSP: 1283 case BFI_FCPORT_I2H_DISABLE_RSP:
924 if (pport->msgtag == i2hmsg.enable_rsp->msgtag) 1284 if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
925 bfa_sm_send_event(pport, BFA_PPORT_SM_FWRSP); 1285 bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
926 break; 1286 break;
927 1287
928 case BFI_PPORT_I2H_EVENT: 1288 case BFI_FCPORT_I2H_EVENT:
929 switch (i2hmsg.event->link_state.linkstate) { 1289 switch (i2hmsg.event->link_state.linkstate) {
930 case BFA_PPORT_LINKUP: 1290 case BFA_PPORT_LINKUP:
931 bfa_sm_send_event(pport, BFA_PPORT_SM_LINKUP); 1291 bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
932 break; 1292 break;
933 case BFA_PPORT_LINKDOWN: 1293 case BFA_PPORT_LINKDOWN:
934 bfa_sm_send_event(pport, BFA_PPORT_SM_LINKDOWN); 1294 bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKDOWN);
935 break; 1295 break;
936 case BFA_PPORT_TRUNK_LINKDOWN: 1296 case BFA_PPORT_TRUNK_LINKDOWN:
937 /** todo: event notification */ 1297 /** todo: event notification */
@@ -939,42 +1299,40 @@ bfa_pport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
939 } 1299 }
940 break; 1300 break;
941 1301
942 case BFI_PPORT_I2H_GET_STATS_RSP: 1302 case BFI_FCPORT_I2H_STATS_GET_RSP:
943 case BFI_PPORT_I2H_GET_QOS_STATS_RSP:
944 /* 1303 /*
945 * check for timer pop before processing the rsp 1304 * check for timer pop before processing the rsp
946 */ 1305 */
947 if (pport->stats_busy == BFA_FALSE 1306 if (fcport->stats_busy == BFA_FALSE ||
948 || pport->stats_status == BFA_STATUS_ETIMER) 1307 fcport->stats_status == BFA_STATUS_ETIMER)
949 break; 1308 break;
950 1309
951 bfa_timer_stop(&pport->timer); 1310 bfa_timer_stop(&fcport->timer);
952 pport->stats_status = i2hmsg.getstats_rsp->status; 1311 fcport->stats_status = i2hmsg.pstatsget_rsp->status;
953 bfa_cb_queue(pport->bfa, &pport->hcb_qe, __bfa_cb_port_stats, 1312 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
954 pport); 1313 __bfa_cb_fcport_stats_get, fcport);
955 break; 1314 break;
956 case BFI_PPORT_I2H_CLEAR_STATS_RSP: 1315
957 case BFI_PPORT_I2H_CLEAR_QOS_STATS_RSP: 1316 case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
958 /* 1317 /*
959 * check for timer pop before processing the rsp 1318 * check for timer pop before processing the rsp
960 */ 1319 */
961 if (pport->stats_busy == BFA_FALSE 1320 if (fcport->stats_busy == BFA_FALSE ||
962 || pport->stats_status == BFA_STATUS_ETIMER) 1321 fcport->stats_status == BFA_STATUS_ETIMER)
963 break; 1322 break;
964 1323
965 bfa_timer_stop(&pport->timer); 1324 bfa_timer_stop(&fcport->timer);
966 pport->stats_status = BFA_STATUS_OK; 1325 fcport->stats_status = BFA_STATUS_OK;
967 bfa_cb_queue(pport->bfa, &pport->hcb_qe, 1326 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
968 __bfa_cb_port_stats_clr, pport); 1327 __bfa_cb_fcport_stats_clr, fcport);
969 break; 1328 break;
970 1329
971 default: 1330 default:
972 bfa_assert(0); 1331 bfa_assert(0);
1332 break;
973 } 1333 }
974} 1334}
975 1335
976
977
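Both stats responses in the handler above are gated by the same race check: if the request already timed out (stats_status == BFA_STATUS_ETIMER) or nothing is outstanding (stats_busy is false), a late firmware reply must be dropped rather than completing a request the caller has already been told failed. The guard in miniature, with illustrative names:

enum status { ST_OK, ST_ETIMER };
struct port { int stats_busy; enum status stats_status; };

static int response_still_wanted(const struct port *p)
{
    /* Timed out or never requested: ignore the late reply. */
    if (!p->stats_busy || p->stats_status == ST_ETIMER)
        return 0;
    return 1;
}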
978/** 1336/**
979 * bfa_pport_api 1337 * bfa_pport_api
980 */ 1338 */
@@ -983,35 +1341,35 @@ bfa_pport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
983 * Registered callback for port events. 1341 * Registered callback for port events.
984 */ 1342 */
985void 1343void
986bfa_pport_event_register(struct bfa_s *bfa, 1344bfa_fcport_event_register(struct bfa_s *bfa,
987 void (*cbfn) (void *cbarg, bfa_pport_event_t event), 1345 void (*cbfn) (void *cbarg, bfa_pport_event_t event),
988 void *cbarg) 1346 void *cbarg)
989{ 1347{
990 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1348 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
991 1349
992 pport->event_cbfn = cbfn; 1350 fcport->event_cbfn = cbfn;
993 pport->event_cbarg = cbarg; 1351 fcport->event_cbarg = cbarg;
994} 1352}
995 1353
996bfa_status_t 1354bfa_status_t
997bfa_pport_enable(struct bfa_s *bfa) 1355bfa_fcport_enable(struct bfa_s *bfa)
998{ 1356{
999 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1357 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1000 1358
1001 if (pport->diag_busy) 1359 if (fcport->diag_busy)
1002 return (BFA_STATUS_DIAG_BUSY); 1360 return BFA_STATUS_DIAG_BUSY;
1003 else if (bfa_sm_cmp_state 1361 else if (bfa_sm_cmp_state
1004 (BFA_PORT_MOD(bfa), bfa_pport_sm_disabling_qwait)) 1362 (BFA_FCPORT_MOD(bfa), bfa_fcport_sm_disabling_qwait))
1005 return (BFA_STATUS_DEVBUSY); 1363 return BFA_STATUS_DEVBUSY;
1006 1364
1007 bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_ENABLE); 1365 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE);
1008 return BFA_STATUS_OK; 1366 return BFA_STATUS_OK;
1009} 1367}
1010 1368
1011bfa_status_t 1369bfa_status_t
1012bfa_pport_disable(struct bfa_s *bfa) 1370bfa_fcport_disable(struct bfa_s *bfa)
1013{ 1371{
1014 bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_DISABLE); 1372 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE);
1015 return BFA_STATUS_OK; 1373 return BFA_STATUS_OK;
1016} 1374}
1017 1375
@@ -1019,43 +1377,43 @@ bfa_pport_disable(struct bfa_s *bfa)
1019 * Configure port speed. 1377 * Configure port speed.
1020 */ 1378 */
1021bfa_status_t 1379bfa_status_t
1022bfa_pport_cfg_speed(struct bfa_s *bfa, enum bfa_pport_speed speed) 1380bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_pport_speed speed)
1023{ 1381{
1024 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1382 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1025 1383
1026 bfa_trc(bfa, speed); 1384 bfa_trc(bfa, speed);
1027 1385
1028 if ((speed != BFA_PPORT_SPEED_AUTO) && (speed > pport->speed_sup)) { 1386 if ((speed != BFA_PPORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
1029 bfa_trc(bfa, pport->speed_sup); 1387 bfa_trc(bfa, fcport->speed_sup);
1030 return BFA_STATUS_UNSUPP_SPEED; 1388 return BFA_STATUS_UNSUPP_SPEED;
1031 } 1389 }
1032 1390
1033 pport->cfg.speed = speed; 1391 fcport->cfg.speed = speed;
1034 1392
1035 return (BFA_STATUS_OK); 1393 return BFA_STATUS_OK;
1036} 1394}
1037 1395
1038/** 1396/**
1039 * Get current speed. 1397 * Get current speed.
1040 */ 1398 */
1041enum bfa_pport_speed 1399enum bfa_pport_speed
1042bfa_pport_get_speed(struct bfa_s *bfa) 1400bfa_fcport_get_speed(struct bfa_s *bfa)
1043{ 1401{
1044 struct bfa_pport_s *port = BFA_PORT_MOD(bfa); 1402 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1045 1403
1046 return port->speed; 1404 return fcport->speed;
1047} 1405}
1048 1406
1049/** 1407/**
1050 * Configure port topology. 1408 * Configure port topology.
1051 */ 1409 */
1052bfa_status_t 1410bfa_status_t
1053bfa_pport_cfg_topology(struct bfa_s *bfa, enum bfa_pport_topology topology) 1411bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_pport_topology topology)
1054{ 1412{
1055 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1413 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1056 1414
1057 bfa_trc(bfa, topology); 1415 bfa_trc(bfa, topology);
1058 bfa_trc(bfa, pport->cfg.topology); 1416 bfa_trc(bfa, fcport->cfg.topology);
1059 1417
1060 switch (topology) { 1418 switch (topology) {
1061 case BFA_PPORT_TOPOLOGY_P2P: 1419 case BFA_PPORT_TOPOLOGY_P2P:
@@ -1067,120 +1425,120 @@ bfa_pport_cfg_topology(struct bfa_s *bfa, enum bfa_pport_topology topology)
1067 return BFA_STATUS_EINVAL; 1425 return BFA_STATUS_EINVAL;
1068 } 1426 }
1069 1427
1070 pport->cfg.topology = topology; 1428 fcport->cfg.topology = topology;
1071 return (BFA_STATUS_OK); 1429 return BFA_STATUS_OK;
1072} 1430}
1073 1431
1074/** 1432/**
1075 * Get current topology. 1433 * Get current topology.
1076 */ 1434 */
1077enum bfa_pport_topology 1435enum bfa_pport_topology
1078bfa_pport_get_topology(struct bfa_s *bfa) 1436bfa_fcport_get_topology(struct bfa_s *bfa)
1079{ 1437{
1080 struct bfa_pport_s *port = BFA_PORT_MOD(bfa); 1438 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1081 1439
1082 return port->topology; 1440 return fcport->topology;
1083} 1441}
1084 1442
1085bfa_status_t 1443bfa_status_t
1086bfa_pport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa) 1444bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
1087{ 1445{
1088 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1446 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1089 1447
1090 bfa_trc(bfa, alpa); 1448 bfa_trc(bfa, alpa);
1091 bfa_trc(bfa, pport->cfg.cfg_hardalpa); 1449 bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
1092 bfa_trc(bfa, pport->cfg.hardalpa); 1450 bfa_trc(bfa, fcport->cfg.hardalpa);
1093 1451
1094 pport->cfg.cfg_hardalpa = BFA_TRUE; 1452 fcport->cfg.cfg_hardalpa = BFA_TRUE;
1095 pport->cfg.hardalpa = alpa; 1453 fcport->cfg.hardalpa = alpa;
1096 1454
1097 return (BFA_STATUS_OK); 1455 return BFA_STATUS_OK;
1098} 1456}
1099 1457
1100bfa_status_t 1458bfa_status_t
1101bfa_pport_clr_hardalpa(struct bfa_s *bfa) 1459bfa_fcport_clr_hardalpa(struct bfa_s *bfa)
1102{ 1460{
1103 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1461 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1104 1462
1105 bfa_trc(bfa, pport->cfg.cfg_hardalpa); 1463 bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
1106 bfa_trc(bfa, pport->cfg.hardalpa); 1464 bfa_trc(bfa, fcport->cfg.hardalpa);
1107 1465
1108 pport->cfg.cfg_hardalpa = BFA_FALSE; 1466 fcport->cfg.cfg_hardalpa = BFA_FALSE;
1109 return (BFA_STATUS_OK); 1467 return BFA_STATUS_OK;
1110} 1468}
1111 1469
1112bfa_boolean_t 1470bfa_boolean_t
1113bfa_pport_get_hardalpa(struct bfa_s *bfa, u8 *alpa) 1471bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
1114{ 1472{
1115 struct bfa_pport_s *port = BFA_PORT_MOD(bfa); 1473 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1116 1474
1117 *alpa = port->cfg.hardalpa; 1475 *alpa = fcport->cfg.hardalpa;
1118 return port->cfg.cfg_hardalpa; 1476 return fcport->cfg.cfg_hardalpa;
1119} 1477}
1120 1478
1121u8 1479u8
1122bfa_pport_get_myalpa(struct bfa_s *bfa) 1480bfa_fcport_get_myalpa(struct bfa_s *bfa)
1123{ 1481{
1124 struct bfa_pport_s *port = BFA_PORT_MOD(bfa); 1482 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1125 1483
1126 return port->myalpa; 1484 return fcport->myalpa;
1127} 1485}
1128 1486
1129bfa_status_t 1487bfa_status_t
1130bfa_pport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize) 1488bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
1131{ 1489{
1132 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1490 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1133 1491
1134 bfa_trc(bfa, maxfrsize); 1492 bfa_trc(bfa, maxfrsize);
1135 bfa_trc(bfa, pport->cfg.maxfrsize); 1493 bfa_trc(bfa, fcport->cfg.maxfrsize);
1136 1494
1137 /* 1495 /*
1138 * within range 1496 * within range
1139 */ 1497 */
1140 if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ)) 1498 if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
1141 return (BFA_STATUS_INVLD_DFSZ); 1499 return BFA_STATUS_INVLD_DFSZ;
1142 1500
1143 /* 1501 /*
1144 * power of 2, if not the max frame size of 2112 1502 * power of 2, if not the max frame size of 2112
1145 */ 1503 */
1146 if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1))) 1504 if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
1147 return (BFA_STATUS_INVLD_DFSZ); 1505 return BFA_STATUS_INVLD_DFSZ;
1148 1506
1149 pport->cfg.maxfrsize = maxfrsize; 1507 fcport->cfg.maxfrsize = maxfrsize;
1150 return (BFA_STATUS_OK); 1508 return BFA_STATUS_OK;
1151} 1509}
1152 1510
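The frame-size validation above leans on the classic bit trick: x is a power of two iff x is nonzero and (x & (x - 1)) is zero, because subtracting one flips the lowest set bit and everything below it. FC_MAX_PDUSZ (2112) is not a power of two, which is why it is special-cased first; the trunk-enable check further down uses the same expression to require exactly one bit set in the bitmap. A tiny standalone version:

#include <stdint.h>

static int is_pow2(uint16_t x)
{
    return x != 0 && (x & (x - 1)) == 0;
}
/* is_pow2(2048) == 1, is_pow2(2112) == 0 */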
1153u16 1511u16
1154bfa_pport_get_maxfrsize(struct bfa_s *bfa) 1512bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
1155{ 1513{
1156 struct bfa_pport_s *port = BFA_PORT_MOD(bfa); 1514 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1157 1515
1158 return port->cfg.maxfrsize; 1516 return fcport->cfg.maxfrsize;
1159} 1517}
1160 1518
1161u32 1519u32
1162bfa_pport_mypid(struct bfa_s *bfa) 1520bfa_fcport_mypid(struct bfa_s *bfa)
1163{ 1521{
1164 struct bfa_pport_s *port = BFA_PORT_MOD(bfa); 1522 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1165 1523
1166 return port->mypid; 1524 return fcport->mypid;
1167} 1525}
1168 1526
1169u8 1527u8
1170bfa_pport_get_rx_bbcredit(struct bfa_s *bfa) 1528bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
1171{ 1529{
1172 struct bfa_pport_s *port = BFA_PORT_MOD(bfa); 1530 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1173 1531
1174 return port->cfg.rx_bbcredit; 1532 return fcport->cfg.rx_bbcredit;
1175} 1533}
1176 1534
1177void 1535void
1178bfa_pport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit) 1536bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
1179{ 1537{
1180 struct bfa_pport_s *port = BFA_PORT_MOD(bfa); 1538 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1181 1539
1182 port->cfg.tx_bbcredit = (u8) tx_bbcredit; 1540 fcport->cfg.tx_bbcredit = (u8) tx_bbcredit;
1183 bfa_port_send_txcredit(port); 1541 bfa_fcport_send_txcredit(fcport);
1184} 1542}
1185 1543
1186/** 1544/**
@@ -1188,302 +1546,192 @@ bfa_pport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
1188 */ 1546 */
1189 1547
1190wwn_t 1548wwn_t
1191bfa_pport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node) 1549bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
1192{ 1550{
1193 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1551 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1194 if (node) 1552 if (node)
1195 return pport->nwwn; 1553 return fcport->nwwn;
1196 else 1554 else
1197 return pport->pwwn; 1555 return fcport->pwwn;
1198} 1556}
1199 1557
1200void 1558void
1201bfa_pport_get_attr(struct bfa_s *bfa, struct bfa_pport_attr_s *attr) 1559bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_pport_attr_s *attr)
1202{ 1560{
1203 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1561 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1204 1562
1205 bfa_os_memset(attr, 0, sizeof(struct bfa_pport_attr_s)); 1563 bfa_os_memset(attr, 0, sizeof(struct bfa_pport_attr_s));
1206 1564
1207 attr->nwwn = pport->nwwn; 1565 attr->nwwn = fcport->nwwn;
1208 attr->pwwn = pport->pwwn; 1566 attr->pwwn = fcport->pwwn;
1209 1567
1210 bfa_os_memcpy(&attr->pport_cfg, &pport->cfg, 1568 bfa_os_memcpy(&attr->pport_cfg, &fcport->cfg,
1211 sizeof(struct bfa_pport_cfg_s)); 1569 sizeof(struct bfa_pport_cfg_s));
1212 /* 1570 /*
1213 * speed attributes 1571 * speed attributes
1214 */ 1572 */
1215 attr->pport_cfg.speed = pport->cfg.speed; 1573 attr->pport_cfg.speed = fcport->cfg.speed;
1216 attr->speed_supported = pport->speed_sup; 1574 attr->speed_supported = fcport->speed_sup;
1217 attr->speed = pport->speed; 1575 attr->speed = fcport->speed;
1218 attr->cos_supported = FC_CLASS_3; 1576 attr->cos_supported = FC_CLASS_3;
1219 1577
1220 /* 1578 /*
1221 * topology attributes 1579 * topology attributes
1222 */ 1580 */
1223 attr->pport_cfg.topology = pport->cfg.topology; 1581 attr->pport_cfg.topology = fcport->cfg.topology;
1224 attr->topology = pport->topology; 1582 attr->topology = fcport->topology;
1225 1583
1226 /* 1584 /*
1227 * beacon attributes 1585 * beacon attributes
1228 */ 1586 */
1229 attr->beacon = pport->beacon; 1587 attr->beacon = fcport->beacon;
1230 attr->link_e2e_beacon = pport->link_e2e_beacon; 1588 attr->link_e2e_beacon = fcport->link_e2e_beacon;
1231 attr->plog_enabled = bfa_plog_get_setting(pport->bfa->plog); 1589 attr->plog_enabled = bfa_plog_get_setting(fcport->bfa->plog);
1232 1590
1233 attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa); 1591 attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa);
1234 attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa); 1592 attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa);
1235 attr->port_state = bfa_sm_to_state(hal_pport_sm_table, pport->sm); 1593 attr->port_state = bfa_sm_to_state(hal_pport_sm_table, fcport->sm);
1236 if (bfa_ioc_is_disabled(&pport->bfa->ioc)) 1594 if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
1237 attr->port_state = BFA_PPORT_ST_IOCDIS; 1595 attr->port_state = BFA_PPORT_ST_IOCDIS;
1238 else if (bfa_ioc_fw_mismatch(&pport->bfa->ioc)) 1596 else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
1239 attr->port_state = BFA_PPORT_ST_FWMISMATCH; 1597 attr->port_state = BFA_PPORT_ST_FWMISMATCH;
1240} 1598}
1241 1599
1242static void 1600#define BFA_FCPORT_STATS_TOV 1000
1243bfa_port_stats_query(void *cbarg)
1244{
1245 struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;
1246 bfi_pport_get_stats_req_t *msg;
1247
1248 msg = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
1249
1250 if (!msg) {
1251 port->stats_qfull = BFA_TRUE;
1252 bfa_reqq_winit(&port->stats_reqq_wait, bfa_port_stats_query,
1253 port);
1254 bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->stats_reqq_wait);
1255 return;
1256 }
1257 port->stats_qfull = BFA_FALSE;
1258
1259 bfa_os_memset(msg, 0, sizeof(bfi_pport_get_stats_req_t));
1260 bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_GET_STATS_REQ,
1261 bfa_lpuid(port->bfa));
1262 bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
1263 1601
1264 return; 1602/**
1265} 1603 * Fetch port attributes (FCQoS or FCoE).
1266 1604 */
1267static void 1605bfa_status_t
1268bfa_port_stats_clear(void *cbarg) 1606bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
1607 bfa_cb_pport_t cbfn, void *cbarg)
1269{ 1608{
1270 struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg; 1609 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1271 bfi_pport_clear_stats_req_t *msg;
1272
1273 msg = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
1274 1610
1275 if (!msg) { 1611 if (fcport->stats_busy) {
1276 port->stats_qfull = BFA_TRUE; 1612 bfa_trc(bfa, fcport->stats_busy);
1277 bfa_reqq_winit(&port->stats_reqq_wait, bfa_port_stats_clear, 1613 return BFA_STATUS_DEVBUSY;
1278 port);
1279 bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->stats_reqq_wait);
1280 return;
1281 } 1614 }
1282 port->stats_qfull = BFA_FALSE;
1283
1284 bfa_os_memset(msg, 0, sizeof(bfi_pport_clear_stats_req_t));
1285 bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_CLEAR_STATS_REQ,
1286 bfa_lpuid(port->bfa));
1287 bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
1288 return;
1289}
1290
1291static void
1292bfa_port_qos_stats_clear(void *cbarg)
1293{
1294 struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;
1295 bfi_pport_clear_qos_stats_req_t *msg;
1296 1615
1297 msg = bfa_reqq_next(port->bfa, BFA_REQQ_PORT); 1616 fcport->stats_busy = BFA_TRUE;
1617 fcport->stats_ret = stats;
1618 fcport->stats_cbfn = cbfn;
1619 fcport->stats_cbarg = cbarg;
1298 1620
1299 if (!msg) { 1621 bfa_fcport_send_stats_get(fcport);
1300 port->stats_qfull = BFA_TRUE;
1301 bfa_reqq_winit(&port->stats_reqq_wait, bfa_port_qos_stats_clear,
1302 port);
1303 bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->stats_reqq_wait);
1304 return;
1305 }
1306 port->stats_qfull = BFA_FALSE;
1307 1622
1308 bfa_os_memset(msg, 0, sizeof(bfi_pport_clear_qos_stats_req_t)); 1623 bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_get_timeout,
1309 bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_CLEAR_QOS_STATS_REQ, 1624 fcport, BFA_FCPORT_STATS_TOV);
1310 bfa_lpuid(port->bfa)); 1625 return BFA_STATUS_OK;
1311 bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
1312 return;
1313}
1314
1315static void
1316bfa_pport_stats_swap(union bfa_pport_stats_u *d, union bfa_pport_stats_u *s)
1317{
1318 u32 *dip = (u32 *) d;
1319 u32 *sip = (u32 *) s;
1320 int i;
1321
1322 /*
1323 * Do 64 bit fields swap first
1324 */
1325 for (i = 0;
1326 i <
1327 ((sizeof(union bfa_pport_stats_u) -
1328 sizeof(struct bfa_qos_stats_s)) / sizeof(u32)); i = i + 2) {
1329#ifdef __BIGENDIAN
1330 dip[i] = bfa_os_ntohl(sip[i]);
1331 dip[i + 1] = bfa_os_ntohl(sip[i + 1]);
1332#else
1333 dip[i] = bfa_os_ntohl(sip[i + 1]);
1334 dip[i + 1] = bfa_os_ntohl(sip[i]);
1335#endif
1336 }
1337
1338 /*
1339 * Now swap the 32 bit fields
1340 */
1341 for (; i < (sizeof(union bfa_pport_stats_u) / sizeof(u32)); ++i)
1342 dip[i] = bfa_os_ntohl(sip[i]);
1343} 1626}
1344 1627
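The rewritten bfa_fcport_get_stats() above fixes the async-fetch contract in one place: a single outstanding request at a time (stats_busy), completion through a caller-supplied callback, and a guard timer (BFA_FCPORT_STATS_TOV) so a lost firmware response still completes with an error. The contract in outline, with hypothetical names:

typedef void (*stats_cb)(void *cbarg, int status);

struct port { int busy; stats_cb cbfn; void *cbarg; };

static int stats_get(struct port *p, stats_cb cbfn, void *cbarg)
{
    if (p->busy)
        return -1;      /* DEVBUSY: one request at a time */

    p->busy = 1;        /* cleared again in the completion path */
    p->cbfn = cbfn;
    p->cbarg = cbarg;
    /* ... send the request to firmware and start the guard timer;
     * the timer handler completes with a timeout status ... */
    return 0;
}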
1345static void 1628/**
1346__bfa_cb_port_stats_clr(void *cbarg, bfa_boolean_t complete) 1629 * Reset port statistics (FCQoS or FCoE).
1630 */
1631bfa_status_t
1632bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg)
1347{ 1633{
1348 struct bfa_pport_s *port = cbarg; 1634 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1349 1635
1350 if (complete) { 1636 if (fcport->stats_busy) {
1351 port->stats_cbfn(port->stats_cbarg, port->stats_status); 1637 bfa_trc(bfa, fcport->stats_busy);
1352 } else { 1638 return BFA_STATUS_DEVBUSY;
1353 port->stats_busy = BFA_FALSE;
1354 port->stats_status = BFA_STATUS_OK;
1355 } 1639 }
1356}
1357
1358static void
1359bfa_port_stats_clr_timeout(void *cbarg)
1360{
1361 struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;
1362 1640
1363 bfa_trc(port->bfa, port->stats_qfull); 1641 fcport->stats_busy = BFA_TRUE;
1642 fcport->stats_cbfn = cbfn;
1643 fcport->stats_cbarg = cbarg;
1364 1644
1365 if (port->stats_qfull) { 1645 bfa_fcport_send_stats_clear(fcport);
1366 bfa_reqq_wcancel(&port->stats_reqq_wait);
1367 port->stats_qfull = BFA_FALSE;
1368 }
1369 1646
1370 port->stats_status = BFA_STATUS_ETIMER; 1647 bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_clr_timeout,
1371 bfa_cb_queue(port->bfa, &port->hcb_qe, __bfa_cb_port_stats_clr, port); 1648 fcport, BFA_FCPORT_STATS_TOV);
1649 return BFA_STATUS_OK;
1372} 1650}
1373 1651
1374static void 1652/**
1375__bfa_cb_port_stats(void *cbarg, bfa_boolean_t complete) 1653 * Fetch FCQoS port statistics
1654 */
1655bfa_status_t
1656bfa_fcport_get_qos_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
1657 bfa_cb_pport_t cbfn, void *cbarg)
1376{ 1658{
1377 struct bfa_pport_s *port = cbarg; 1659 /* Meaningful only for FC mode */
1660 bfa_assert(bfa_ioc_get_fcmode(&bfa->ioc));
1378 1661
1379 if (complete) { 1662 return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg);
1380 if (port->stats_status == BFA_STATUS_OK)
1381 bfa_pport_stats_swap(port->stats_ret, port->stats);
1382 port->stats_cbfn(port->stats_cbarg, port->stats_status);
1383 } else {
1384 port->stats_busy = BFA_FALSE;
1385 port->stats_status = BFA_STATUS_OK;
1386 }
1387} 1663}
1388 1664
1389static void 1665/**
1390bfa_port_stats_timeout(void *cbarg) 1666 * Reset FCQoS port statistics
1667 */
1668bfa_status_t
1669bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg)
1391{ 1670{
1392 struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg; 1671 /* Meaningful only for FC mode */
1393 1672 bfa_assert(bfa_ioc_get_fcmode(&bfa->ioc));
1394 bfa_trc(port->bfa, port->stats_qfull);
1395 1673
1396 if (port->stats_qfull) { 1674 return bfa_fcport_clear_stats(bfa, cbfn, cbarg);
1397 bfa_reqq_wcancel(&port->stats_reqq_wait);
1398 port->stats_qfull = BFA_FALSE;
1399 }
1400
1401 port->stats_status = BFA_STATUS_ETIMER;
1402 bfa_cb_queue(port->bfa, &port->hcb_qe, __bfa_cb_port_stats, port);
1403} 1675}
1404 1676
1405#define BFA_PORT_STATS_TOV 1000
1406
1407/** 1677/**
1408 * Fetch port attributes. 1678 * Fetch FCoE port statistics
1409 */ 1679 */
1410bfa_status_t 1680bfa_status_t
1411bfa_pport_get_stats(struct bfa_s *bfa, union bfa_pport_stats_u *stats, 1681bfa_fcport_get_fcoe_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
1412 bfa_cb_pport_t cbfn, void *cbarg) 1682 bfa_cb_pport_t cbfn, void *cbarg)
1413{ 1683{
1414 struct bfa_pport_s *port = BFA_PORT_MOD(bfa); 1684 /* Meaningful only for FCoE mode */
1685 bfa_assert(!bfa_ioc_get_fcmode(&bfa->ioc));
1415 1686
1416 if (port->stats_busy) { 1687 return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg);
1417 bfa_trc(bfa, port->stats_busy);
1418 return (BFA_STATUS_DEVBUSY);
1419 }
1420
1421 port->stats_busy = BFA_TRUE;
1422 port->stats_ret = stats;
1423 port->stats_cbfn = cbfn;
1424 port->stats_cbarg = cbarg;
1425
1426 bfa_port_stats_query(port);
1427
1428 bfa_timer_start(bfa, &port->timer, bfa_port_stats_timeout, port,
1429 BFA_PORT_STATS_TOV);
1430 return (BFA_STATUS_OK);
1431} 1688}
1432 1689
1690/**
1691 * Reset FCoE port statistics
1692 */
1433bfa_status_t 1693bfa_status_t
1434bfa_pport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg) 1694bfa_fcport_clear_fcoe_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg)
1435{ 1695{
1436 struct bfa_pport_s *port = BFA_PORT_MOD(bfa); 1696 /* Meaningful only for FCoE mode */
1437 1697 bfa_assert(!bfa_ioc_get_fcmode(&bfa->ioc));
1438 if (port->stats_busy) {
1439 bfa_trc(bfa, port->stats_busy);
1440 return (BFA_STATUS_DEVBUSY);
1441 }
1442
1443 port->stats_busy = BFA_TRUE;
1444 port->stats_cbfn = cbfn;
1445 port->stats_cbarg = cbarg;
1446
1447 bfa_port_stats_clear(port);
1448 1698
1449 bfa_timer_start(bfa, &port->timer, bfa_port_stats_clr_timeout, port, 1699 return bfa_fcport_clear_stats(bfa, cbfn, cbarg);
1450 BFA_PORT_STATS_TOV);
1451 return (BFA_STATUS_OK);
1452} 1700}
1453 1701
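The four QoS/FCoE entry points above are now thin facades: each asserts the IOC personality with bfa_ioc_get_fcmode() and delegates to the shared get/clear routine, since both variants travel through the same union bfa_fcport_stats_u. Roughly, with stand-in declarations:

#include <assert.h>

int ioc_is_fc(void);         /* stand-in for bfa_ioc_get_fcmode()   */
int common_get_stats(void);  /* stand-in for bfa_fcport_get_stats() */

int get_qos_stats(void)      /* meaningful on the FC personality only */
{
    assert(ioc_is_fc());
    return common_get_stats();
}

int get_fcoe_stats(void)     /* meaningful on the FCoE personality only */
{
    assert(!ioc_is_fc());
    return common_get_stats();
}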
1454bfa_status_t 1702bfa_status_t
1455bfa_pport_trunk_enable(struct bfa_s *bfa, u8 bitmap) 1703bfa_fcport_trunk_enable(struct bfa_s *bfa, u8 bitmap)
1456{ 1704{
1457 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1705 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1458 1706
1459 bfa_trc(bfa, bitmap); 1707 bfa_trc(bfa, bitmap);
1460 bfa_trc(bfa, pport->cfg.trunked); 1708 bfa_trc(bfa, fcport->cfg.trunked);
1461 bfa_trc(bfa, pport->cfg.trunk_ports); 1709 bfa_trc(bfa, fcport->cfg.trunk_ports);
1462 1710
1463 if (!bitmap || (bitmap & (bitmap - 1))) 1711 if (!bitmap || (bitmap & (bitmap - 1)))
1464 return BFA_STATUS_EINVAL; 1712 return BFA_STATUS_EINVAL;
1465 1713
1466 pport->cfg.trunked = BFA_TRUE; 1714 fcport->cfg.trunked = BFA_TRUE;
1467 pport->cfg.trunk_ports = bitmap; 1715 fcport->cfg.trunk_ports = bitmap;
1468 1716
1469 return BFA_STATUS_OK; 1717 return BFA_STATUS_OK;
1470} 1718}
1471 1719
1472void 1720void
1473bfa_pport_qos_get_attr(struct bfa_s *bfa, struct bfa_qos_attr_s *qos_attr) 1721bfa_fcport_qos_get_attr(struct bfa_s *bfa, struct bfa_qos_attr_s *qos_attr)
1474{ 1722{
1475 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1723 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1476 1724
1477 qos_attr->state = bfa_os_ntohl(pport->qos_attr.state); 1725 qos_attr->state = bfa_os_ntohl(fcport->qos_attr.state);
1478 qos_attr->total_bb_cr = bfa_os_ntohl(pport->qos_attr.total_bb_cr); 1726 qos_attr->total_bb_cr = bfa_os_ntohl(fcport->qos_attr.total_bb_cr);
1479} 1727}
1480 1728
1481void 1729void
1482bfa_pport_qos_get_vc_attr(struct bfa_s *bfa, 1730bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa,
1483 struct bfa_qos_vc_attr_s *qos_vc_attr) 1731 struct bfa_qos_vc_attr_s *qos_vc_attr)
1484{ 1732{
1485 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1733 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1486 struct bfa_qos_vc_attr_s *bfa_vc_attr = &pport->qos_vc_attr; 1734 struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
1487 u32 i = 0; 1735 u32 i = 0;
1488 1736
1489 qos_vc_attr->total_vc_count = bfa_os_ntohs(bfa_vc_attr->total_vc_count); 1737 qos_vc_attr->total_vc_count = bfa_os_ntohs(bfa_vc_attr->total_vc_count);
@@ -1506,166 +1754,136 @@ bfa_pport_qos_get_vc_attr(struct bfa_s *bfa,
1506} 1754}
1507 1755
1508/** 1756/**
1509 * Fetch QoS Stats.
1510 */
1511bfa_status_t
1512bfa_pport_get_qos_stats(struct bfa_s *bfa, union bfa_pport_stats_u *stats,
1513 bfa_cb_pport_t cbfn, void *cbarg)
1514{
1515 /*
1516 * QoS stats is embedded in port stats
1517 */
1518 return (bfa_pport_get_stats(bfa, stats, cbfn, cbarg));
1519}
1520
1521bfa_status_t
1522bfa_pport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg)
1523{
1524 struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
1525
1526 if (port->stats_busy) {
1527 bfa_trc(bfa, port->stats_busy);
1528 return (BFA_STATUS_DEVBUSY);
1529 }
1530
1531 port->stats_busy = BFA_TRUE;
1532 port->stats_cbfn = cbfn;
1533 port->stats_cbarg = cbarg;
1534
1535 bfa_port_qos_stats_clear(port);
1536
1537 bfa_timer_start(bfa, &port->timer, bfa_port_stats_clr_timeout, port,
1538 BFA_PORT_STATS_TOV);
1539 return (BFA_STATUS_OK);
1540}
1541
1542/**
1543 * Disable port trunking. 1757 * Disable port trunking.
1544 */ 1758 */
1545bfa_status_t 1759bfa_status_t
1546bfa_pport_trunk_disable(struct bfa_s *bfa) 1760bfa_fcport_trunk_disable(struct bfa_s *bfa)
1547{ 1761{
1548 return (BFA_STATUS_OK); 1762 return BFA_STATUS_OK;
1549} 1763}
1550 1764
1551bfa_boolean_t 1765bfa_boolean_t
1552bfa_pport_trunk_query(struct bfa_s *bfa, u32 *bitmap) 1766bfa_fcport_trunk_query(struct bfa_s *bfa, u32 *bitmap)
1553{ 1767{
1554 struct bfa_pport_s *port = BFA_PORT_MOD(bfa); 1768 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1555 1769
1556 *bitmap = port->cfg.trunk_ports; 1770 *bitmap = fcport->cfg.trunk_ports;
1557 return port->cfg.trunked; 1771 return fcport->cfg.trunked;
1558} 1772}
1559 1773
1560bfa_boolean_t 1774bfa_boolean_t
1561bfa_pport_is_disabled(struct bfa_s *bfa) 1775bfa_fcport_is_disabled(struct bfa_s *bfa)
1562{ 1776{
1563 struct bfa_pport_s *port = BFA_PORT_MOD(bfa); 1777 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1564 1778
1565 return (bfa_sm_to_state(hal_pport_sm_table, port->sm) == 1779 return bfa_sm_to_state(hal_pport_sm_table, fcport->sm) ==
1566 BFA_PPORT_ST_DISABLED); 1780 BFA_PPORT_ST_DISABLED;
1567 1781
1568} 1782}
1569 1783
1570bfa_boolean_t 1784bfa_boolean_t
1571bfa_pport_is_ratelim(struct bfa_s *bfa) 1785bfa_fcport_is_ratelim(struct bfa_s *bfa)
1572{ 1786{
1573 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1787 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1574 1788
1575return (pport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE); 1789 return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
1576 1790
1577} 1791}
1578 1792
1579void 1793void
1580bfa_pport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off) 1794bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off)
1581{ 1795{
1582 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1796 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1797 enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa);
1583 1798
1584 bfa_trc(bfa, on_off); 1799 bfa_trc(bfa, on_off);
1585 bfa_trc(bfa, pport->cfg.qos_enabled); 1800 bfa_trc(bfa, fcport->cfg.qos_enabled);
1586 1801
1587 pport->cfg.qos_enabled = on_off; 1802 bfa_trc(bfa, ioc_type);
1803
1804 if (ioc_type == BFA_IOC_TYPE_FC)
1805 fcport->cfg.qos_enabled = on_off;
1588} 1806}
1589 1807
1590void 1808void
1591bfa_pport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off) 1809bfa_fcport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off)
1592{ 1810{
1593 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1811 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1594 1812
1595 bfa_trc(bfa, on_off); 1813 bfa_trc(bfa, on_off);
1596 bfa_trc(bfa, pport->cfg.ratelimit); 1814 bfa_trc(bfa, fcport->cfg.ratelimit);
1597 1815
1598 pport->cfg.ratelimit = on_off; 1816 fcport->cfg.ratelimit = on_off;
1599 if (pport->cfg.trl_def_speed == BFA_PPORT_SPEED_UNKNOWN) 1817 if (fcport->cfg.trl_def_speed == BFA_PPORT_SPEED_UNKNOWN)
1600 pport->cfg.trl_def_speed = BFA_PPORT_SPEED_1GBPS; 1818 fcport->cfg.trl_def_speed = BFA_PPORT_SPEED_1GBPS;
1601} 1819}
1602 1820
1603/** 1821/**
1604 * Configure default minimum ratelim speed 1822 * Configure default minimum ratelim speed
1605 */ 1823 */
1606bfa_status_t 1824bfa_status_t
1607bfa_pport_cfg_ratelim_speed(struct bfa_s *bfa, enum bfa_pport_speed speed) 1825bfa_fcport_cfg_ratelim_speed(struct bfa_s *bfa, enum bfa_pport_speed speed)
1608{ 1826{
1609 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1827 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1610 1828
1611 bfa_trc(bfa, speed); 1829 bfa_trc(bfa, speed);
1612 1830
1613 /* 1831 /*
1614 * Auto and speeds greater than the supported speed are invalid 1832 * Auto and speeds greater than the supported speed are invalid
1615 */ 1833 */
1616 if ((speed == BFA_PPORT_SPEED_AUTO) || (speed > pport->speed_sup)) { 1834 if ((speed == BFA_PPORT_SPEED_AUTO) || (speed > fcport->speed_sup)) {
1617 bfa_trc(bfa, pport->speed_sup); 1835 bfa_trc(bfa, fcport->speed_sup);
1618 return BFA_STATUS_UNSUPP_SPEED; 1836 return BFA_STATUS_UNSUPP_SPEED;
1619 } 1837 }
1620 1838
1621 pport->cfg.trl_def_speed = speed; 1839 fcport->cfg.trl_def_speed = speed;
1622 1840
1623 return (BFA_STATUS_OK); 1841 return BFA_STATUS_OK;
1624} 1842}
1625 1843
1626/** 1844/**
1627 * Get default minimum ratelim speed 1845 * Get default minimum ratelim speed
1628 */ 1846 */
1629enum bfa_pport_speed 1847enum bfa_pport_speed
1630bfa_pport_get_ratelim_speed(struct bfa_s *bfa) 1848bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
1631{ 1849{
1632 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1850 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1633 1851
1634 bfa_trc(bfa, pport->cfg.trl_def_speed); 1852 bfa_trc(bfa, fcport->cfg.trl_def_speed);
1635 return (pport->cfg.trl_def_speed); 1853 return fcport->cfg.trl_def_speed;
1636 1854
1637} 1855}
1638 1856
1639void 1857void
1640bfa_pport_busy(struct bfa_s *bfa, bfa_boolean_t status) 1858bfa_fcport_busy(struct bfa_s *bfa, bfa_boolean_t status)
1641{ 1859{
1642 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1860 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1643 1861
1644 bfa_trc(bfa, status); 1862 bfa_trc(bfa, status);
1645 bfa_trc(bfa, pport->diag_busy); 1863 bfa_trc(bfa, fcport->diag_busy);
1646 1864
1647 pport->diag_busy = status; 1865 fcport->diag_busy = status;
1648} 1866}
1649 1867
1650void 1868void
1651bfa_pport_beacon(struct bfa_s *bfa, bfa_boolean_t beacon, 1869bfa_fcport_beacon(struct bfa_s *bfa, bfa_boolean_t beacon,
1652 bfa_boolean_t link_e2e_beacon) 1870 bfa_boolean_t link_e2e_beacon)
1653{ 1871{
1654 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1872 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1655 1873
1656 bfa_trc(bfa, beacon); 1874 bfa_trc(bfa, beacon);
1657 bfa_trc(bfa, link_e2e_beacon); 1875 bfa_trc(bfa, link_e2e_beacon);
1658 bfa_trc(bfa, pport->beacon); 1876 bfa_trc(bfa, fcport->beacon);
1659 bfa_trc(bfa, pport->link_e2e_beacon); 1877 bfa_trc(bfa, fcport->link_e2e_beacon);
1660 1878
1661 pport->beacon = beacon; 1879 fcport->beacon = beacon;
1662 pport->link_e2e_beacon = link_e2e_beacon; 1880 fcport->link_e2e_beacon = link_e2e_beacon;
1663} 1881}
1664 1882
1665bfa_boolean_t 1883bfa_boolean_t
1666bfa_pport_is_linkup(struct bfa_s *bfa) 1884bfa_fcport_is_linkup(struct bfa_s *bfa)
1667{ 1885{
1668 return bfa_sm_cmp_state(BFA_PORT_MOD(bfa), bfa_pport_sm_linkup); 1886 return bfa_sm_cmp_state(BFA_FCPORT_MOD(bfa), bfa_fcport_sm_linkup);
1669} 1887}
1670 1888
1671 1889
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
index 7cb39a306ea9..3516172c597c 100644
--- a/drivers/scsi/bfa/bfa_fcs.c
+++ b/drivers/scsi/bfa/bfa_fcs.c
@@ -36,6 +36,7 @@
36 * FCS sub-modules 36 * FCS sub-modules
37 */ 37 */
38struct bfa_fcs_mod_s { 38struct bfa_fcs_mod_s {
39 void (*attach) (struct bfa_fcs_s *fcs);
39 void (*modinit) (struct bfa_fcs_s *fcs); 40 void (*modinit) (struct bfa_fcs_s *fcs);
40 void (*modexit) (struct bfa_fcs_s *fcs); 41 void (*modexit) (struct bfa_fcs_s *fcs);
41}; 42};
@@ -43,12 +44,10 @@ struct bfa_fcs_mod_s {
43#define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit } 44#define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit }
44 45
45static struct bfa_fcs_mod_s fcs_modules[] = { 46static struct bfa_fcs_mod_s fcs_modules[] = {
46 BFA_FCS_MODULE(bfa_fcs_pport), 47 { bfa_fcs_pport_attach, NULL, NULL },
47 BFA_FCS_MODULE(bfa_fcs_uf), 48 { bfa_fcs_uf_attach, NULL, NULL },
48 BFA_FCS_MODULE(bfa_fcs_fabric), 49 { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit,
49 BFA_FCS_MODULE(bfa_fcs_vport), 50 bfa_fcs_fabric_modexit },
50 BFA_FCS_MODULE(bfa_fcs_rport),
51 BFA_FCS_MODULE(bfa_fcs_fcpim),
52}; 51};
53 52
54/** 53/**
@@ -71,16 +70,10 @@ bfa_fcs_exit_comp(void *fcs_cbarg)
71 */ 70 */
72 71
73/** 72/**
74 * FCS instance initialization. 73 * fcs attach -- called once to initialize data structures at driver attach time
75 *
76 * param[in] fcs FCS instance
77 * param[in] bfa BFA instance
78 * param[in] bfad BFA driver instance
79 *
80 * return None
81 */ 74 */
82void 75void
83bfa_fcs_init(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad, 76bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
84 bfa_boolean_t min_cfg) 77 bfa_boolean_t min_cfg)
85{ 78{
86 int i; 79 int i;
@@ -95,7 +88,24 @@ bfa_fcs_init(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
95 88
96 for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) { 89 for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) {
97 mod = &fcs_modules[i]; 90 mod = &fcs_modules[i];
98 mod->modinit(fcs); 91 if (mod->attach)
92 mod->attach(fcs);
93 }
94}
95
96/**
97 * fcs initialization, called once after bfa initialization is complete
98 */
99void
100bfa_fcs_init(struct bfa_fcs_s *fcs)
101{
102 int i;
103 struct bfa_fcs_mod_s *mod;
104
105 for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) {
106 mod = &fcs_modules[i];
107 if (mod->modinit)
108 mod->modinit(fcs);
99 } 109 }
100} 110}
101 111
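The module table change above splits one-shot setup (attach) from post-init work (modinit) and makes every phase optional: pport and uf supply only an attach hook, and the dispatch loops skip NULL slots. The pattern, reduced to its essentials:

struct fcs;     /* opaque here */

struct fcs_mod {
    void (*attach)(struct fcs *fcs);    /* at driver attach time  */
    void (*modinit)(struct fcs *fcs);   /* after BFA init is done */
    void (*modexit)(struct fcs *fcs);   /* at teardown            */
};

static void attach_all(struct fcs *fcs, const struct fcs_mod *mods, int n)
{
    int i;

    for (i = 0; i < n; i++)
        if (mods[i].attach)     /* every phase is optional */
            mods[i].attach(fcs);
}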
@@ -127,6 +137,23 @@ bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
127} 137}
128 138
129/** 139/**
140 * @brief
141 * FCS FDMI Driver Parameter Initialization
142 *
143 * @param[in] fcs FCS instance
144 * @param[in] fdmi_enable TRUE/FALSE
145 *
146 * @return None
147 */
148void
149bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable)
150{
151
152 fcs->fdmi_enabled = fdmi_enable;
153
154}
155
156/**
130 * FCS instance cleanup and exit. 157 * FCS instance cleanup and exit.
131 * 158 *
132 * param[in] fcs FCS instance 159 * param[in] fcs FCS instance
@@ -143,10 +170,12 @@ bfa_fcs_exit(struct bfa_fcs_s *fcs)
143 nmods = sizeof(fcs_modules) / sizeof(fcs_modules[0]); 170 nmods = sizeof(fcs_modules) / sizeof(fcs_modules[0]);
144 171
145 for (i = 0; i < nmods; i++) { 172 for (i = 0; i < nmods; i++) {
146 bfa_wc_up(&fcs->wc);
147 173
148 mod = &fcs_modules[i]; 174 mod = &fcs_modules[i];
149 mod->modexit(fcs); 175 if (mod->modexit) {
176 bfa_wc_up(&fcs->wc);
177 mod->modexit(fcs);
178 }
150 } 179 }
151 180
152 bfa_wc_wait(&fcs->wc); 181 bfa_wc_wait(&fcs->wc);
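The exit loop above also fixes the waitgroup accounting: bfa_wc_up() is now called only for modules that actually have a modexit, so the count raised matches the number of completions that will eventually arrive and bfa_wc_wait() cannot hang waiting on modules that never complete. The invariant in miniature:

struct wc { int count; };
struct mod { void (*modexit)(void *arg); };

static void wc_up(struct wc *wc) { wc->count++; }

static void exit_all(struct wc *wc, const struct mod *mods, int n, void *arg)
{
    int i;

    for (i = 0; i < n; i++) {
        if (mods[i].modexit) {
            wc_up(wc);          /* one count per pending completion */
            mods[i].modexit(arg);
        }
    }
    /* ... then wait until each completion has counted back down. */
}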
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index 8975ed041dc0..7c1251c682d8 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -114,7 +114,7 @@ bfa_fcs_port_sm_uninit(struct bfa_fcs_port_s *port,
114 break; 114 break;
115 115
116 default: 116 default:
117 bfa_assert(0); 117 bfa_sm_fault(port->fcs, event);
118 } 118 }
119} 119}
120 120
@@ -136,7 +136,7 @@ bfa_fcs_port_sm_init(struct bfa_fcs_port_s *port, enum bfa_fcs_port_event event)
136 break; 136 break;
137 137
138 default: 138 default:
139 bfa_assert(0); 139 bfa_sm_fault(port->fcs, event);
140 } 140 }
141} 141}
142 142
@@ -176,7 +176,7 @@ bfa_fcs_port_sm_online(struct bfa_fcs_port_s *port,
176 break; 176 break;
177 177
178 default: 178 default:
179 bfa_assert(0); 179 bfa_sm_fault(port->fcs, event);
180 } 180 }
181} 181}
182 182
@@ -214,7 +214,7 @@ bfa_fcs_port_sm_offline(struct bfa_fcs_port_s *port,
214 break; 214 break;
215 215
216 default: 216 default:
217 bfa_assert(0); 217 bfa_sm_fault(port->fcs, event);
218 } 218 }
219} 219}
220 220
@@ -234,7 +234,7 @@ bfa_fcs_port_sm_deleting(struct bfa_fcs_port_s *port,
234 break; 234 break;
235 235
236 default: 236 default:
237 bfa_assert(0); 237 bfa_sm_fault(port->fcs, event);
238 } 238 }
239} 239}
240 240
@@ -263,30 +263,8 @@ bfa_fcs_port_aen_post(struct bfa_fcs_port_s *port,
263 263
264 bfa_assert(role <= BFA_PORT_ROLE_FCP_MAX); 264 bfa_assert(role <= BFA_PORT_ROLE_FCP_MAX);
265 265
266 switch (event) { 266 bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, event), lpwwn_ptr,
267 case BFA_LPORT_AEN_ONLINE: 267 role_str[role/2]);
268 bfa_log(logmod, BFA_AEN_LPORT_ONLINE, lpwwn_ptr,
269 role_str[role / 2]);
270 break;
271 case BFA_LPORT_AEN_OFFLINE:
272 bfa_log(logmod, BFA_AEN_LPORT_OFFLINE, lpwwn_ptr,
273 role_str[role / 2]);
274 break;
275 case BFA_LPORT_AEN_NEW:
276 bfa_log(logmod, BFA_AEN_LPORT_NEW, lpwwn_ptr,
277 role_str[role / 2]);
278 break;
279 case BFA_LPORT_AEN_DELETE:
280 bfa_log(logmod, BFA_AEN_LPORT_DELETE, lpwwn_ptr,
281 role_str[role / 2]);
282 break;
283 case BFA_LPORT_AEN_DISCONNECT:
284 bfa_log(logmod, BFA_AEN_LPORT_DISCONNECT, lpwwn_ptr,
285 role_str[role / 2]);
286 break;
287 default:
288 break;
289 }
290 268
291 aen_data.lport.vf_id = port->fabric->vf_id; 269 aen_data.lport.vf_id = port->fabric->vf_id;
292 aen_data.lport.roles = role; 270 aen_data.lport.roles = role;
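The AEN change above collapses a five-way switch into one bfa_log() call by computing the message ID from category and event with BFA_LOG_CREATE_ID(); this works only because log IDs are laid out arithmetically per category. A sketch of that layout -- the 16-bit split and the constants here are assumptions for illustration, not the driver's actual encoding:

#include <stdint.h>

#define LOG_CREATE_ID(cat, evt) (((uint32_t)(cat) << 16) | (uint32_t)(evt))

enum { CAT_LPORT = 3 };                        /* made-up category value */
enum { LPORT_ONLINE = 1, LPORT_OFFLINE = 2 };  /* made-up event values   */

void log_event(uint32_t msg_id);               /* stand-in for bfa_log() */

void post_aen(int event)
{
    log_event(LOG_CREATE_ID(CAT_LPORT, event)); /* ID computed, not switched */
}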
@@ -568,11 +546,10 @@ bfa_fcs_port_offline_actions(struct bfa_fcs_port_s *port)
568 546
569 __port_action[port->fabric->fab_type].offline(port); 547 __port_action[port->fabric->fab_type].offline(port);
570 548
571 if (bfa_fcs_fabric_is_online(port->fabric) == BFA_TRUE) { 549 if (bfa_fcs_fabric_is_online(port->fabric) == BFA_TRUE)
572 bfa_fcs_port_aen_post(port, BFA_LPORT_AEN_DISCONNECT); 550 bfa_fcs_port_aen_post(port, BFA_LPORT_AEN_DISCONNECT);
573 } else { 551 else
574 bfa_fcs_port_aen_post(port, BFA_LPORT_AEN_OFFLINE); 552 bfa_fcs_port_aen_post(port, BFA_LPORT_AEN_OFFLINE);
575 }
576 bfa_fcb_port_offline(port->fcs->bfad, port->port_cfg.roles, 553 bfa_fcb_port_offline(port->fcs->bfad, port->port_cfg.roles,
577 port->fabric->vf_drv, 554 port->fabric->vf_drv,
578 (port->vport == NULL) ? NULL : port->vport->vport_drv); 555 (port->vport == NULL) ? NULL : port->vport->vport_drv);
@@ -777,7 +754,7 @@ bfa_fcs_port_get_rport_by_pwwn(struct bfa_fcs_port_s *port, wwn_t pwwn)
777 } 754 }
778 755
779 bfa_trc(port->fcs, pwwn); 756 bfa_trc(port->fcs, pwwn);
780 return (NULL); 757 return NULL;
781} 758}
782 759
783/** 760/**
@@ -796,7 +773,7 @@ bfa_fcs_port_get_rport_by_nwwn(struct bfa_fcs_port_s *port, wwn_t nwwn)
796 } 773 }
797 774
798 bfa_trc(port->fcs, nwwn); 775 bfa_trc(port->fcs, nwwn);
799 return (NULL); 776 return NULL;
800} 777}
801 778
802/** 779/**
@@ -870,40 +847,50 @@ bfa_fcs_port_lip(struct bfa_fcs_port_s *port)
870bfa_boolean_t 847bfa_boolean_t
871bfa_fcs_port_is_online(struct bfa_fcs_port_s *port) 848bfa_fcs_port_is_online(struct bfa_fcs_port_s *port)
872{ 849{
873 return (bfa_sm_cmp_state(port, bfa_fcs_port_sm_online)); 850 return bfa_sm_cmp_state(port, bfa_fcs_port_sm_online);
874} 851}
875 852
876/** 853/**
877 * Logical port initialization of base or virtual port. 854 * Attach time initialization of logical ports.
878 * Called by fabric for base port or by vport for virtual ports.
879 */ 855 */
880void 856void
881bfa_fcs_lport_init(struct bfa_fcs_port_s *lport, struct bfa_fcs_s *fcs, 857bfa_fcs_lport_attach(struct bfa_fcs_port_s *lport, struct bfa_fcs_s *fcs,
882 u16 vf_id, struct bfa_port_cfg_s *port_cfg, 858 uint16_t vf_id, struct bfa_fcs_vport_s *vport)
883 struct bfa_fcs_vport_s *vport)
884{ 859{
885 lport->fcs = fcs; 860 lport->fcs = fcs;
886 lport->fabric = bfa_fcs_vf_lookup(fcs, vf_id); 861 lport->fabric = bfa_fcs_vf_lookup(fcs, vf_id);
887 bfa_os_assign(lport->port_cfg, *port_cfg);
888 lport->vport = vport; 862 lport->vport = vport;
889 lport->lp_tag = (vport) ? bfa_lps_get_tag(vport->lps) : 863 lport->lp_tag = (vport) ? bfa_lps_get_tag(vport->lps) :
890 bfa_lps_get_tag(lport->fabric->lps); 864 bfa_lps_get_tag(lport->fabric->lps);
891 865
892 INIT_LIST_HEAD(&lport->rport_q); 866 INIT_LIST_HEAD(&lport->rport_q);
893 lport->num_rports = 0; 867 lport->num_rports = 0;
868}
869
870/**
871 * Logical port initialization of base or virtual port.
872 * Called by fabric for base port or by vport for virtual ports.
873 */
894 874
895 lport->bfad_port = 875void
896 bfa_fcb_port_new(fcs->bfad, lport, lport->port_cfg.roles, 876bfa_fcs_lport_init(struct bfa_fcs_port_s *lport,
877 struct bfa_port_cfg_s *port_cfg)
878{
879 struct bfa_fcs_vport_s *vport = lport->vport;
880
881 bfa_os_assign(lport->port_cfg, *port_cfg);
882
883 lport->bfad_port = bfa_fcb_port_new(lport->fcs->bfad, lport,
884 lport->port_cfg.roles,
897 lport->fabric->vf_drv, 885 lport->fabric->vf_drv,
898 vport ? vport->vport_drv : NULL); 886 vport ? vport->vport_drv : NULL);
887
899 bfa_fcs_port_aen_post(lport, BFA_LPORT_AEN_NEW); 888 bfa_fcs_port_aen_post(lport, BFA_LPORT_AEN_NEW);
900 889
901 bfa_sm_set_state(lport, bfa_fcs_port_sm_uninit); 890 bfa_sm_set_state(lport, bfa_fcs_port_sm_uninit);
902 bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE); 891 bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE);
903} 892}
904 893
905
906
907/** 894/**
908 * fcs_lport_api 895 * fcs_lport_api
909 */ 896 */
@@ -922,13 +909,20 @@ bfa_fcs_port_get_attr(struct bfa_fcs_port_s *port,
922 if (port->fabric) { 909 if (port->fabric) {
923 port_attr->port_type = bfa_fcs_fabric_port_type(port->fabric); 910 port_attr->port_type = bfa_fcs_fabric_port_type(port->fabric);
924 port_attr->loopback = bfa_fcs_fabric_is_loopback(port->fabric); 911 port_attr->loopback = bfa_fcs_fabric_is_loopback(port->fabric);
912 port_attr->authfail =
913 bfa_fcs_fabric_is_auth_failed(port->fabric);
925 port_attr->fabric_name = bfa_fcs_port_get_fabric_name(port); 914 port_attr->fabric_name = bfa_fcs_port_get_fabric_name(port);
926 memcpy(port_attr->fabric_ip_addr, 915 memcpy(port_attr->fabric_ip_addr,
927 bfa_fcs_port_get_fabric_ipaddr(port), 916 bfa_fcs_port_get_fabric_ipaddr(port),
928 BFA_FCS_FABRIC_IPADDR_SZ); 917 BFA_FCS_FABRIC_IPADDR_SZ);
929 918
930 if (port->vport != NULL) 919 if (port->vport != NULL) {
931 port_attr->port_type = BFA_PPORT_TYPE_VPORT; 920 port_attr->port_type = BFA_PPORT_TYPE_VPORT;
921 port_attr->fpma_mac =
922 bfa_lps_get_lp_mac(port->vport->lps);
923 } else
924 port_attr->fpma_mac =
925 bfa_lps_get_lp_mac(port->fabric->lps);
932 926
933 } else { 927 } else {
934 port_attr->port_type = BFA_PPORT_TYPE_UNKNOWN; 928 port_attr->port_type = BFA_PPORT_TYPE_UNKNOWN;
diff --git a/drivers/scsi/bfa/bfa_fcs_port.c b/drivers/scsi/bfa/bfa_fcs_port.c
index 9c4b24e62de1..3c27788cd527 100644
--- a/drivers/scsi/bfa/bfa_fcs_port.c
+++ b/drivers/scsi/bfa/bfa_fcs_port.c
@@ -55,14 +55,7 @@ bfa_fcs_pport_event_handler(void *cbarg, bfa_pport_event_t event)
55} 55}
56 56
57void 57void
58bfa_fcs_pport_modinit(struct bfa_fcs_s *fcs) 58bfa_fcs_pport_attach(struct bfa_fcs_s *fcs)
59{ 59{
60 bfa_pport_event_register(fcs->bfa, bfa_fcs_pport_event_handler, 60 bfa_fcport_event_register(fcs->bfa, bfa_fcs_pport_event_handler, fcs);
61 fcs);
62}
63
64void
65bfa_fcs_pport_modexit(struct bfa_fcs_s *fcs)
66{
67 bfa_fcs_modexit_comp(fcs);
68} 61}
diff --git a/drivers/scsi/bfa/bfa_fcs_uf.c b/drivers/scsi/bfa/bfa_fcs_uf.c
index ad01db6444b2..3d57d48bbae4 100644
--- a/drivers/scsi/bfa/bfa_fcs_uf.c
+++ b/drivers/scsi/bfa/bfa_fcs_uf.c
@@ -93,13 +93,7 @@ bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf)
93} 93}
94 94
95void 95void
96bfa_fcs_uf_modinit(struct bfa_fcs_s *fcs) 96bfa_fcs_uf_attach(struct bfa_fcs_s *fcs)
97{ 97{
98 bfa_uf_recv_register(fcs->bfa, bfa_fcs_uf_recv, fcs); 98 bfa_uf_recv_register(fcs->bfa, bfa_fcs_uf_recv, fcs);
99} 99}
100
101void
102bfa_fcs_uf_modexit(struct bfa_fcs_s *fcs)
103{
104 bfa_fcs_modexit_comp(fcs);
105}
diff --git a/drivers/scsi/bfa/bfa_fcxp.c b/drivers/scsi/bfa/bfa_fcxp.c
index 4754a0e9006a..cf0ad6782686 100644
--- a/drivers/scsi/bfa/bfa_fcxp.c
+++ b/drivers/scsi/bfa/bfa_fcxp.c
@@ -199,7 +199,7 @@ bfa_fcxp_get(struct bfa_fcxp_mod_s *fm)
199 if (fcxp) 199 if (fcxp)
200 list_add_tail(&fcxp->qe, &fm->fcxp_active_q); 200 list_add_tail(&fcxp->qe, &fm->fcxp_active_q);
201 201
202 return (fcxp); 202 return fcxp;
203} 203}
204 204
205static void 205static void
@@ -503,7 +503,7 @@ bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
503 503
504 fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa)); 504 fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa));
505 if (fcxp == NULL) 505 if (fcxp == NULL)
506 return (NULL); 506 return NULL;
507 507
508 bfa_trc(bfa, fcxp->fcxp_tag); 508 bfa_trc(bfa, fcxp->fcxp_tag);
509 509
@@ -568,7 +568,7 @@ bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
568 } 568 }
569 } 569 }
570 570
571 return (fcxp); 571 return fcxp;
572} 572}
573 573
574/** 574/**
@@ -709,7 +709,7 @@ bfa_status_t
709bfa_fcxp_abort(struct bfa_fcxp_s *fcxp) 709bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
710{ 710{
711 bfa_assert(0); 711 bfa_assert(0);
712 return (BFA_STATUS_OK); 712 return BFA_STATUS_OK;
713} 713}
714 714
715void 715void
diff --git a/drivers/scsi/bfa/bfa_hw_cb.c b/drivers/scsi/bfa/bfa_hw_cb.c
index ede1438619e2..871a4e28575c 100644
--- a/drivers/scsi/bfa/bfa_hw_cb.c
+++ b/drivers/scsi/bfa/bfa_hw_cb.c
@@ -53,6 +53,18 @@ bfa_hwcb_reginit(struct bfa_s *bfa)
53} 53}
54 54
55void 55void
56bfa_hwcb_reqq_ack(struct bfa_s *bfa, int reqq)
57{
58}
59
60static void
61bfa_hwcb_reqq_ack_msix(struct bfa_s *bfa, int reqq)
62{
63 bfa_reg_write(bfa->iocfc.bfa_regs.intr_status,
64 __HFN_INT_CPE_Q0 << CPE_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), reqq));
65}
66
67void
56bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq) 68bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq)
57{ 69{
58} 70}
@@ -136,6 +148,7 @@ bfa_hwcb_msix_uninstall(struct bfa_s *bfa)
136void 148void
137bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix) 149bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix)
138{ 150{
151 bfa->iocfc.hwif.hw_reqq_ack = bfa_hwcb_reqq_ack_msix;
139 bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack_msix; 152 bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack_msix;
140} 153}
141 154
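
The Crossbow hook above illustrates the dispatch pattern used throughout this series: the INTx ack is a no-op, and enabling MSI-X swaps a real ack handler into the hw-if table at runtime. A minimal userspace sketch of that swap (struct and function names are illustrative, not the driver's):

```c
#include <stdio.h>

/* Illustrative stand-in for the hw_reqq_ack hook in the iocfc hw-if. */
struct hwif {
	void (*reqq_ack)(int qid);
};

static void ack_noop(int qid) { (void)qid; }     /* INTx: nothing to ack */
static void ack_msix(int qid) { printf("clear CPE_Q%d status bit\n", qid); }

static struct hwif hwif = { .reqq_ack = ack_noop };

/* Analogue of bfa_hwcb_isr_mode_set(): swap the hook for MSI-X. */
static void isr_mode_set(int msix)
{
	hwif.reqq_ack = msix ? ack_msix : ack_noop;
}

int main(void)
{
	hwif.reqq_ack(0);     /* INTx mode: no-op */
	isr_mode_set(1);
	hwif.reqq_ack(0);     /* MSI-X mode: clears the status bit */
	return 0;
}
```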
diff --git a/drivers/scsi/bfa/bfa_hw_ct.c b/drivers/scsi/bfa/bfa_hw_ct.c
index 51ae5740e6e9..76ceb9a4bf2f 100644
--- a/drivers/scsi/bfa/bfa_hw_ct.c
+++ b/drivers/scsi/bfa/bfa_hw_ct.c
@@ -85,6 +85,15 @@ bfa_hwct_reginit(struct bfa_s *bfa)
85} 85}
86 86
87void 87void
88bfa_hwct_reqq_ack(struct bfa_s *bfa, int reqq)
89{
90 u32 r32;
91
92 r32 = bfa_reg_read(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]);
93 bfa_reg_write(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq], r32);
94}
95
96void
88bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq) 97bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq)
89{ 98{
90 u32 r32; 99 u32 r32;
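
On Catapult the ack is a read of the queue control register written straight back: rewriting the current value acknowledges the queue interrupt without disturbing the other control bits. A hedged sketch of that read-then-write-back idiom (mmio_read/mmio_write stand in for bfa_reg_read/bfa_reg_write):

```c
#include <stdint.h>

/* Illustrative MMIO accessors; the driver uses bfa_reg_read/bfa_reg_write. */
static inline uint32_t mmio_read(volatile uint32_t *reg) { return *reg; }
static inline void mmio_write(volatile uint32_t *reg, uint32_t v) { *reg = v; }

/* Read-then-write-back ack, as in bfa_hwct_reqq_ack(): writing the value
 * just read re-arms the queue interrupt and leaves all control bits intact. */
static void reqq_ack(volatile uint32_t *cpe_q_ctrl)
{
	uint32_t r32 = mmio_read(cpe_q_ctrl);

	mmio_write(cpe_q_ctrl, r32);
}

int main(void)
{
	uint32_t fake_reg = 0x5a;          /* stand-in for a device register */

	reqq_ack(&fake_reg);
	return fake_reg == 0x5a ? 0 : 1;   /* contents unchanged */
}
```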
diff --git a/drivers/scsi/bfa/bfa_intr.c b/drivers/scsi/bfa/bfa_intr.c
index 0ca125712a04..0eba3f930d5b 100644
--- a/drivers/scsi/bfa/bfa_intr.c
+++ b/drivers/scsi/bfa/bfa_intr.c
@@ -15,7 +15,7 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17#include <bfa.h> 17#include <bfa.h>
18#include <bfi/bfi_cbreg.h> 18#include <bfi/bfi_ctreg.h>
19#include <bfa_port_priv.h> 19#include <bfa_port_priv.h>
20#include <bfa_intr_priv.h> 20#include <bfa_intr_priv.h>
21#include <cs/bfa_debug.h> 21#include <cs/bfa_debug.h>
@@ -34,6 +34,26 @@ bfa_msix_lpu(struct bfa_s *bfa)
34 bfa_ioc_mbox_isr(&bfa->ioc); 34 bfa_ioc_mbox_isr(&bfa->ioc);
35} 35}
36 36
37static void
38bfa_reqq_resume(struct bfa_s *bfa, int qid)
39{
40 struct list_head *waitq, *qe, *qen;
41 struct bfa_reqq_wait_s *wqe;
42
43 waitq = bfa_reqq(bfa, qid);
44 list_for_each_safe(qe, qen, waitq) {
45 /**
46 * Callback only as long as there is room in request queue
47 */
48 if (bfa_reqq_full(bfa, qid))
49 break;
50
51 list_del(qe);
52 wqe = (struct bfa_reqq_wait_s *) qe;
53 wqe->qresume(wqe->cbarg);
54 }
55}
56
37void 57void
38bfa_msix_all(struct bfa_s *bfa, int vec) 58bfa_msix_all(struct bfa_s *bfa, int vec)
39{ 59{
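
bfa_reqq_resume() is hoisted into a helper so both the reqq and rspq interrupt paths can drain waiters in FIFO order, stopping as soon as the request queue fills up again. A self-contained sketch of the same drain loop over a simple singly linked wait list (the real driver uses the kernel's intrusive list_head):

```c
#include <stdio.h>
#include <stddef.h>

/* Minimal stand-in for struct bfa_reqq_wait_s: a FIFO of resume callbacks. */
struct wqe {
	struct wqe *next;
	void (*qresume)(void *cbarg);
	void *cbarg;
};

static int room = 2;                     /* pretend request-queue depth */
static int reqq_full(void) { return room == 0; }

static void say(void *arg) { room--; printf("resumed %s\n", (char *)arg); }

/* Analogue of bfa_reqq_resume(): pop waiters while the queue has room. */
static void reqq_resume(struct wqe **waitq)
{
	while (*waitq && !reqq_full()) {
		struct wqe *wqe = *waitq;

		*waitq = wqe->next;      /* list_del() before the callback */
		wqe->qresume(wqe->cbarg);
	}
}

int main(void)
{
	struct wqe c = { NULL, say, "c" }, b = { &c, say, "b" },
		   a = { &b, say, "a" };
	struct wqe *waitq = &a;

	reqq_resume(&waitq);             /* resumes a and b; c still waits */
	return waitq != &c;
}
```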
@@ -59,7 +79,7 @@ bfa_intx(struct bfa_s *bfa)
59 qintr = intr & __HFN_INT_RME_MASK; 79 qintr = intr & __HFN_INT_RME_MASK;
60 bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr); 80 bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr);
61 81
62 for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue ++) { 82 for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
63 if (intr & (__HFN_INT_RME_Q0 << queue)) 83 if (intr & (__HFN_INT_RME_Q0 << queue))
64 bfa_msix_rspq(bfa, queue & (BFI_IOC_MAX_CQS - 1)); 84 bfa_msix_rspq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
65 } 85 }
@@ -96,7 +116,8 @@ bfa_isr_enable(struct bfa_s *bfa)
96 116
97 bfa_msix_install(bfa); 117 bfa_msix_install(bfa);
98 intr_unmask = (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 | 118 intr_unmask = (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
99 __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS); 119 __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS |
120 __HFN_INT_LL_HALT);
100 121
101 if (pci_func == 0) 122 if (pci_func == 0)
102 intr_unmask |= (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 | 123 intr_unmask |= (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 |
@@ -127,23 +148,18 @@ bfa_isr_disable(struct bfa_s *bfa)
127void 148void
128bfa_msix_reqq(struct bfa_s *bfa, int qid) 149bfa_msix_reqq(struct bfa_s *bfa, int qid)
129{ 150{
130 struct list_head *waitq, *qe, *qen; 151 struct list_head *waitq;
131 struct bfa_reqq_wait_s *wqe;
132 152
133 qid &= (BFI_IOC_MAX_CQS - 1); 153 qid &= (BFI_IOC_MAX_CQS - 1);
134 154
135 waitq = bfa_reqq(bfa, qid); 155 bfa->iocfc.hwif.hw_reqq_ack(bfa, qid);
136 list_for_each_safe(qe, qen, waitq) {
137 /**
138 * Callback only as long as there is room in request queue
139 */
140 if (bfa_reqq_full(bfa, qid))
141 break;
142 156
143 list_del(qe); 157 /**
144 wqe = (struct bfa_reqq_wait_s *) qe; 158 * Resume any pending requests in the corresponding reqq.
145 wqe->qresume(wqe->cbarg); 159 */
146 } 160 waitq = bfa_reqq(bfa, qid);
161 if (!list_empty(waitq))
162 bfa_reqq_resume(bfa, qid);
147} 163}
148 164
149void 165void
@@ -157,26 +173,27 @@ bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
157} 173}
158 174
159void 175void
160bfa_msix_rspq(struct bfa_s *bfa, int rsp_qid) 176bfa_msix_rspq(struct bfa_s *bfa, int qid)
161{ 177{
162 struct bfi_msg_s *m; 178 struct bfi_msg_s *m;
163 u32 pi, ci; 179 u32 pi, ci;
180 struct list_head *waitq;
164 181
165 bfa_trc_fp(bfa, rsp_qid); 182 bfa_trc_fp(bfa, qid);
166 183
167 rsp_qid &= (BFI_IOC_MAX_CQS - 1); 184 qid &= (BFI_IOC_MAX_CQS - 1);
168 185
169 bfa->iocfc.hwif.hw_rspq_ack(bfa, rsp_qid); 186 bfa->iocfc.hwif.hw_rspq_ack(bfa, qid);
170 187
171 ci = bfa_rspq_ci(bfa, rsp_qid); 188 ci = bfa_rspq_ci(bfa, qid);
172 pi = bfa_rspq_pi(bfa, rsp_qid); 189 pi = bfa_rspq_pi(bfa, qid);
173 190
174 bfa_trc_fp(bfa, ci); 191 bfa_trc_fp(bfa, ci);
175 bfa_trc_fp(bfa, pi); 192 bfa_trc_fp(bfa, pi);
176 193
177 if (bfa->rme_process) { 194 if (bfa->rme_process) {
178 while (ci != pi) { 195 while (ci != pi) {
179 m = bfa_rspq_elem(bfa, rsp_qid, ci); 196 m = bfa_rspq_elem(bfa, qid, ci);
180 bfa_assert_fp(m->mhdr.msg_class < BFI_MC_MAX); 197 bfa_assert_fp(m->mhdr.msg_class < BFI_MC_MAX);
181 198
182 bfa_isrs[m->mhdr.msg_class] (bfa, m); 199 bfa_isrs[m->mhdr.msg_class] (bfa, m);
@@ -188,25 +205,59 @@ bfa_msix_rspq(struct bfa_s *bfa, int rsp_qid)
188 /** 205 /**
189 * update CI 206 * update CI
190 */ 207 */
191 bfa_rspq_ci(bfa, rsp_qid) = pi; 208 bfa_rspq_ci(bfa, qid) = pi;
192 bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ci[rsp_qid], pi); 209 bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ci[qid], pi);
193 bfa_os_mmiowb(); 210 bfa_os_mmiowb();
211
212 /**
213 * Resume any pending requests in the corresponding reqq.
214 */
215 waitq = bfa_reqq(bfa, qid);
216 if (!list_empty(waitq))
217 bfa_reqq_resume(bfa, qid);
194} 218}
195 219
196void 220void
197bfa_msix_lpu_err(struct bfa_s *bfa, int vec) 221bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
198{ 222{
199 u32 intr; 223 u32 intr, curr_value;
200 224
201 intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status); 225 intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status);
202 226
203 if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1)) 227 if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1))
204 bfa_msix_lpu(bfa); 228 bfa_msix_lpu(bfa);
205 229
206 if (intr & (__HFN_INT_ERR_EMC | 230 intr &= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
207 __HFN_INT_ERR_LPU0 | __HFN_INT_ERR_LPU1 | 231 __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT);
208 __HFN_INT_ERR_PSS)) 232
233 if (intr) {
234 if (intr & __HFN_INT_LL_HALT) {
235 /**
236 * If LL_HALT bit is set then FW Init Halt LL Port
237 * Register needs to be cleared as well so Interrupt
238 * Status Register will be cleared.
239 */
240 curr_value = bfa_reg_read(bfa->ioc.ioc_regs.ll_halt);
241 curr_value &= ~__FW_INIT_HALT_P;
242 bfa_reg_write(bfa->ioc.ioc_regs.ll_halt, curr_value);
243 }
244
245 if (intr & __HFN_INT_ERR_PSS) {
246 /**
247 * ERR_PSS bit needs to be cleared as well in case
248 * interrups are shared so driver's interrupt handler is
249 * still called eventhough it is already masked out.
250 */
251 curr_value = bfa_reg_read(
252 bfa->ioc.ioc_regs.pss_err_status_reg);
253 curr_value &= __PSS_ERR_STATUS_SET;
254 bfa_reg_write(bfa->ioc.ioc_regs.pss_err_status_reg,
255 curr_value);
256 }
257
258 bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr);
209 bfa_msix_errint(bfa, intr); 259 bfa_msix_errint(bfa, intr);
260 }
210} 261}
211 262
212void 263void
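
The response path above is a classic single-producer/single-consumer ring: firmware advances the shadow producer index (PI), the driver walks its consumer index (CI) up to PI dispatching each message, then publishes the new CI back to hardware. A minimal sketch of the drain loop over a power-of-two ring (sizes and names are illustrative):

```c
#include <stdio.h>
#include <stdint.h>

#define RING_SZ 8                        /* must be a power of two */
#define RING_MASK (RING_SZ - 1)

static int ring[RING_SZ];
static uint32_t ci, pi;                  /* consumer / producer indices */

/* Analogue of the bfa_msix_rspq() drain loop: walk CI up to PI,
 * dispatching each element, then publish the new CI. */
static void rspq_drain(void)
{
	while (ci != pi) {
		printf("dispatch msg %d\n", ring[ci]);
		ci = (ci + 1) & RING_MASK;   /* CQ_INCR equivalent */
	}
	/* real driver: write CI to rme_q_ci[qid], then bfa_os_mmiowb() */
}

int main(void)
{
	for (int i = 0; i < 3; i++) {        /* firmware-side production */
		ring[pi] = i;
		pi = (pi + 1) & RING_MASK;
	}
	rspq_drain();
	return 0;
}
```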
diff --git a/drivers/scsi/bfa/bfa_intr_priv.h b/drivers/scsi/bfa/bfa_intr_priv.h
index 8ce6e6b105c8..5fc301cf4d1b 100644
--- a/drivers/scsi/bfa/bfa_intr_priv.h
+++ b/drivers/scsi/bfa/bfa_intr_priv.h
@@ -26,9 +26,9 @@ void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m);
26void bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func); 26void bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func);
27 27
28 28
29#define bfa_reqq_pi(__bfa, __reqq) (__bfa)->iocfc.req_cq_pi[__reqq] 29#define bfa_reqq_pi(__bfa, __reqq) ((__bfa)->iocfc.req_cq_pi[__reqq])
30#define bfa_reqq_ci(__bfa, __reqq) \ 30#define bfa_reqq_ci(__bfa, __reqq) \
31 *(u32 *)((__bfa)->iocfc.req_cq_shadow_ci[__reqq].kva) 31 (*(u32 *)((__bfa)->iocfc.req_cq_shadow_ci[__reqq].kva))
32 32
33#define bfa_reqq_full(__bfa, __reqq) \ 33#define bfa_reqq_full(__bfa, __reqq) \
34 (((bfa_reqq_pi(__bfa, __reqq) + 1) & \ 34 (((bfa_reqq_pi(__bfa, __reqq) + 1) & \
@@ -50,14 +50,16 @@ void bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func);
50} while (0) 50} while (0)
51 51
52#define bfa_rspq_pi(__bfa, __rspq) \ 52#define bfa_rspq_pi(__bfa, __rspq) \
53 *(u32 *)((__bfa)->iocfc.rsp_cq_shadow_pi[__rspq].kva) 53 (*(u32 *)((__bfa)->iocfc.rsp_cq_shadow_pi[__rspq].kva))
54 54
55#define bfa_rspq_ci(__bfa, __rspq) (__bfa)->iocfc.rsp_cq_ci[__rspq] 55#define bfa_rspq_ci(__bfa, __rspq) ((__bfa)->iocfc.rsp_cq_ci[__rspq])
56#define bfa_rspq_elem(__bfa, __rspq, __ci) \ 56#define bfa_rspq_elem(__bfa, __rspq, __ci) \
57 &((struct bfi_msg_s *)((__bfa)->iocfc.rsp_cq_ba[__rspq].kva))[__ci] 57 (&((struct bfi_msg_s *)((__bfa)->iocfc.rsp_cq_ba[__rspq].kva))[__ci])
58 58
59#define CQ_INCR(__index, __size) \ 59#define CQ_INCR(__index, __size) do { \
60 (__index)++; (__index) &= ((__size) - 1) 60 (__index)++; \
61 (__index) &= ((__size) - 1); \
62} while (0)
61 63
62/** 64/**
63 * Queue element to wait for room in request queue. FIFO order is 65 * Queue element to wait for room in request queue. FIFO order is
@@ -94,7 +96,7 @@ bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg),
94 wqe->cbarg = cbarg; 96 wqe->cbarg = cbarg;
95} 97}
96 98
97#define bfa_reqq(__bfa, __reqq) &(__bfa)->reqq_waitq[__reqq] 99#define bfa_reqq(__bfa, __reqq) (&(__bfa)->reqq_waitq[__reqq])
98 100
99/** 101/**
100 * static inline void 102 * static inline void
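
The CQ_INCR rewrite is the standard multi-statement-macro fix: the old two-statement expansion silently breaks under an unbraced if, because only the first statement ends up guarded. A small program demonstrating exactly that hazard:

```c
#include <assert.h>

#define SZ 8

/* Unsafe version: two statements, as CQ_INCR was before the patch. */
#define CQ_INCR_BAD(i)  (i)++; (i) &= (SZ - 1)

/* Safe version: a single statement, as in the patched macro. */
#define CQ_INCR(i)  do { (i)++; (i) &= (SZ - 1); } while (0)

int main(void)
{
	int i = 7, j = 8;

	if (1)
		CQ_INCR(i);      /* wraps: 7 -> 0 */
	if (0)
		CQ_INCR_BAD(j);  /* j++ is skipped, but the mask still runs! */

	assert(i == 0);
	assert(j == 0);          /* surprising: 8 was masked to 0 anyway */
	return 0;
}
```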
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 149348934ce3..e038bc9769f6 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -18,7 +18,7 @@
18#include <bfa.h> 18#include <bfa.h>
19#include <bfa_ioc.h> 19#include <bfa_ioc.h>
20#include <bfa_fwimg_priv.h> 20#include <bfa_fwimg_priv.h>
21#include <bfa_trcmod_priv.h> 21#include <cna/bfa_cna_trcmod.h>
22#include <cs/bfa_debug.h> 22#include <cs/bfa_debug.h>
23#include <bfi/bfi_ioc.h> 23#include <bfi/bfi_ioc.h>
24#include <bfi/bfi_ctreg.h> 24#include <bfi/bfi_ctreg.h>
@@ -27,18 +27,17 @@
27#include <log/bfa_log_hal.h> 27#include <log/bfa_log_hal.h>
28#include <defs/bfa_defs_pci.h> 28#include <defs/bfa_defs_pci.h>
29 29
30BFA_TRC_FILE(HAL, IOC); 30BFA_TRC_FILE(CNA, IOC);
31 31
32/** 32/**
33 * IOC local definitions 33 * IOC local definitions
34 */ 34 */
35#define BFA_IOC_TOV 2000 /* msecs */ 35#define BFA_IOC_TOV 2000 /* msecs */
36#define BFA_IOC_HB_TOV 1000 /* msecs */ 36#define BFA_IOC_HWSEM_TOV 500 /* msecs */
37#define BFA_IOC_HB_FAIL_MAX 4 37#define BFA_IOC_HB_TOV 500 /* msecs */
38#define BFA_IOC_HWINIT_MAX 2 38#define BFA_IOC_HWINIT_MAX 2
39#define BFA_IOC_FWIMG_MINSZ (16 * 1024) 39#define BFA_IOC_FWIMG_MINSZ (16 * 1024)
40#define BFA_IOC_TOV_RECOVER (BFA_IOC_HB_FAIL_MAX * BFA_IOC_HB_TOV \ 40#define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV
41 + BFA_IOC_TOV)
42 41
43#define bfa_ioc_timer_start(__ioc) \ 42#define bfa_ioc_timer_start(__ioc) \
44 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \ 43 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
@@ -51,12 +50,25 @@ BFA_TRC_FILE(HAL, IOC);
51 (sizeof(struct bfa_trc_mod_s) - \ 50 (sizeof(struct bfa_trc_mod_s) - \
52 BFA_TRC_MAX * sizeof(struct bfa_trc_s))) 51 BFA_TRC_MAX * sizeof(struct bfa_trc_s)))
53#define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn)) 52#define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
54#define bfa_ioc_stats(_ioc, _stats) (_ioc)->stats._stats ++
55 53
56#define BFA_FLASH_CHUNK_NO(off) (off / BFI_FLASH_CHUNK_SZ_WORDS) 54/**
57#define BFA_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS) 55 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
58#define BFA_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS) 56 */
59bfa_boolean_t bfa_auto_recover = BFA_FALSE; 57
58#define bfa_ioc_firmware_lock(__ioc) \
59 ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
60#define bfa_ioc_firmware_unlock(__ioc) \
61 ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
62#define bfa_ioc_fwimg_get_chunk(__ioc, __off) \
63 ((__ioc)->ioc_hwif->ioc_fwimg_get_chunk(__ioc, __off))
64#define bfa_ioc_fwimg_get_size(__ioc) \
65 ((__ioc)->ioc_hwif->ioc_fwimg_get_size(__ioc))
66#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
67#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
68#define bfa_ioc_notify_hbfail(__ioc) \
69 ((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc))
70
71bfa_boolean_t bfa_auto_recover = BFA_TRUE;
60 72
61/* 73/*
62 * forward declarations 74 * forward declarations
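
This macro block replaces the removed static helpers with per-ASIC operations resolved through the new ioc_hwif function-pointer table, while the wrapper macros keep every call site unchanged. A compact sketch of the scheme (the ops shown are illustrative subsets of bfa_ioc_hwif_s):

```c
#include <stdio.h>

/* Hypothetical slice of bfa_ioc_hwif_s: per-ASIC ops behind one table. */
struct ioc_hwif {
	unsigned int (*fwimg_get_size)(void);
	void (*reg_init)(void);
};

struct ioc {
	const struct ioc_hwif *ioc_hwif;
	int ctdev;                           /* Catapult vs. Crossbow */
};

/* Call sites keep their old shape via wrapper macros, as in the patch. */
#define ioc_fwimg_get_size(__ioc) ((__ioc)->ioc_hwif->fwimg_get_size())
#define ioc_reg_init(__ioc)       ((__ioc)->ioc_hwif->reg_init())

static unsigned int ct_size(void) { return 2048; }
static void ct_regs(void) { puts("ct reg init"); }
static const struct ioc_hwif hwif_ct = { ct_size, ct_regs };

static unsigned int cb_size(void) { return 1024; }
static void cb_regs(void) { puts("cb reg init"); }
static const struct ioc_hwif hwif_cb = { cb_size, cb_regs };

int main(void)
{
	struct ioc ioc = { .ctdev = 1 };

	/* Analogue of bfa_ioc_set_ct_hwif()/bfa_ioc_set_cb_hwif(). */
	ioc.ioc_hwif = ioc.ctdev ? &hwif_ct : &hwif_cb;
	ioc_reg_init(&ioc);
	return ioc_fwimg_get_size(&ioc) == 2048 ? 0 : 1;
}
```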
@@ -64,7 +76,6 @@ bfa_boolean_t bfa_auto_recover = BFA_FALSE;
64static void bfa_ioc_aen_post(struct bfa_ioc_s *bfa, 76static void bfa_ioc_aen_post(struct bfa_ioc_s *bfa,
65 enum bfa_ioc_aen_event event); 77 enum bfa_ioc_aen_event event);
66static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc); 78static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
67static void bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc);
68static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc); 79static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc);
69static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force); 80static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
70static void bfa_ioc_timeout(void *ioc); 81static void bfa_ioc_timeout(void *ioc);
@@ -77,8 +88,6 @@ static void bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force);
77static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc); 88static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
78static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc); 89static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc);
79static void bfa_ioc_recover(struct bfa_ioc_s *ioc); 90static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
80static bfa_boolean_t bfa_ioc_firmware_lock(struct bfa_ioc_s *ioc);
81static void bfa_ioc_firmware_unlock(struct bfa_ioc_s *ioc);
82static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc); 91static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
83static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc); 92static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
84 93
@@ -508,14 +517,19 @@ bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
508 bfa_trc(ioc, event); 517 bfa_trc(ioc, event);
509 518
510 switch (event) { 519 switch (event) {
511 case IOC_E_HWERROR:
512 case IOC_E_FWRSP_DISABLE: 520 case IOC_E_FWRSP_DISABLE:
513 bfa_ioc_timer_stop(ioc); 521 bfa_ioc_timer_stop(ioc);
522 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
523 break;
524
525 case IOC_E_HWERROR:
526 bfa_ioc_timer_stop(ioc);
514 /* 527 /*
515 * !!! fall through !!! 528 * !!! fall through !!!
516 */ 529 */
517 530
518 case IOC_E_TIMEOUT: 531 case IOC_E_TIMEOUT:
532 bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL);
519 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); 533 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
520 break; 534 break;
521 535
@@ -608,15 +622,12 @@ bfa_ioc_sm_hbfail_entry(struct bfa_ioc_s *ioc)
608 * Mark IOC as failed in hardware and stop firmware. 622 * Mark IOC as failed in hardware and stop firmware.
609 */ 623 */
610 bfa_ioc_lpu_stop(ioc); 624 bfa_ioc_lpu_stop(ioc);
611 bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_HBFAIL); 625 bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL);
612 626
613 if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT) { 627 /**
614 bfa_reg_write(ioc->ioc_regs.ll_halt, __FW_INIT_HALT_P); 628 * Notify other functions on HB failure.
615 /* 629 */
616 * Wait for halt to take effect 630 bfa_ioc_notify_hbfail(ioc);
617 */
618 bfa_reg_read(ioc->ioc_regs.ll_halt);
619 }
620 631
621 /** 632 /**
622 * Notify driver and common modules registered for notification. 633 * Notify driver and common modules registered for notification.
@@ -672,6 +683,12 @@ bfa_ioc_sm_hbfail(struct bfa_ioc_s *ioc, enum ioc_event event)
672 */ 683 */
673 break; 684 break;
674 685
686 case IOC_E_HWERROR:
687 /*
688 * HB failure notification, ignore.
689 */
690 break;
691
675 default: 692 default:
676 bfa_sm_fault(ioc, event); 693 bfa_sm_fault(ioc, event);
677 } 694 }
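
Both state-machine hunks follow the driver's FSM convention: the current state is a function pointer, and each state function switches on the incoming event. The disabling state now splits HWERROR from FWRSP_DISABLE so that only the error and timeout paths force the fwstate register to BFI_IOC_FAIL. A minimal sketch of that convention, including the deliberate fall-through:

```c
#include <stdio.h>

enum ioc_event { E_FWRSP_DISABLE, E_HWERROR, E_TIMEOUT };

struct ioc;
typedef void (*ioc_state_fn)(struct ioc *, enum ioc_event);
struct ioc { ioc_state_fn fsm; };

static void sm_disabled(struct ioc *ioc, enum ioc_event ev)
{
	(void)ioc; (void)ev;
}

/* Analogue of the patched bfa_ioc_sm_disabling(): HWERROR stops the
 * timer, then deliberately falls through to the TIMEOUT handling. */
static void sm_disabling(struct ioc *ioc, enum ioc_event ev)
{
	switch (ev) {
	case E_FWRSP_DISABLE:
		puts("stop timer");
		ioc->fsm = sm_disabled;
		break;
	case E_HWERROR:
		puts("stop timer");
		/* fall through */
	case E_TIMEOUT:
		puts("fwstate <- FAIL");
		ioc->fsm = sm_disabled;
		break;
	}
}

int main(void)
{
	struct ioc ioc = { sm_disabling };

	ioc.fsm(&ioc, E_HWERROR);        /* stop timer + mark FAIL */
	return ioc.fsm == sm_disabled ? 0 : 1;
}
```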
@@ -700,7 +717,7 @@ bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
700 } 717 }
701} 718}
702 719
703static void 720void
704bfa_ioc_sem_timeout(void *ioc_arg) 721bfa_ioc_sem_timeout(void *ioc_arg)
705{ 722{
706 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg; 723 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg;
@@ -708,26 +725,32 @@ bfa_ioc_sem_timeout(void *ioc_arg)
708 bfa_ioc_hw_sem_get(ioc); 725 bfa_ioc_hw_sem_get(ioc);
709} 726}
710 727
711static void 728bfa_boolean_t
712bfa_ioc_usage_sem_get(struct bfa_ioc_s *ioc) 729bfa_ioc_sem_get(bfa_os_addr_t sem_reg)
713{ 730{
714 u32 r32; 731 u32 r32;
715 int cnt = 0; 732 int cnt = 0;
716#define BFA_SEM_SPINCNT 1000 733#define BFA_SEM_SPINCNT 3000
717 734
718 do { 735 r32 = bfa_reg_read(sem_reg);
719 r32 = bfa_reg_read(ioc->ioc_regs.ioc_usage_sem_reg); 736
737 while (r32 && (cnt < BFA_SEM_SPINCNT)) {
720 cnt++; 738 cnt++;
721 if (cnt > BFA_SEM_SPINCNT) 739 bfa_os_udelay(2);
722 break; 740 r32 = bfa_reg_read(sem_reg);
723 } while (r32 != 0); 741 }
742
743 if (r32 == 0)
744 return BFA_TRUE;
745
724 bfa_assert(cnt < BFA_SEM_SPINCNT); 746 bfa_assert(cnt < BFA_SEM_SPINCNT);
747 return BFA_FALSE;
725} 748}
726 749
727static void 750void
728bfa_ioc_usage_sem_release(struct bfa_ioc_s *ioc) 751bfa_ioc_sem_release(bfa_os_addr_t sem_reg)
729{ 752{
730 bfa_reg_write(ioc->ioc_regs.ioc_usage_sem_reg, 1); 753 bfa_reg_write(sem_reg, 1);
731} 754}
732 755
733static void 756static void
@@ -737,7 +760,7 @@ bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
737 760
738 /** 761 /**
739 * First read to the semaphore register will return 0, subsequent reads 762 * First read to the semaphore register will return 0, subsequent reads
740 * will return 1. Semaphore is released by writing 0 to the register 763 * will return 1. Semaphore is released by writing 1 to the register
741 */ 764 */
742 r32 = bfa_reg_read(ioc->ioc_regs.ioc_sem_reg); 765 r32 = bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
743 if (r32 == 0) { 766 if (r32 == 0) {
@@ -746,10 +769,10 @@ bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
746 } 769 }
747 770
748 bfa_timer_begin(ioc->timer_mod, &ioc->sem_timer, bfa_ioc_sem_timeout, 771 bfa_timer_begin(ioc->timer_mod, &ioc->sem_timer, bfa_ioc_sem_timeout,
749 ioc, BFA_IOC_TOV); 772 ioc, BFA_IOC_HWSEM_TOV);
750} 773}
751 774
752static void 775void
753bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc) 776bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc)
754{ 777{
755 bfa_reg_write(ioc->ioc_regs.ioc_sem_reg, 1); 778 bfa_reg_write(ioc->ioc_regs.ioc_sem_reg, 1);
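
The semaphore protocol is spelled out in the corrected comment: the first read returns 0 and thereby claims the semaphore, later reads return 1, and writing 1 releases it; the new bfa_ioc_sem_get() adds a bounded spin (up to BFA_SEM_SPINCNT reads, 2 µs apart) before giving up. A userspace sketch with the register replaced by a flag:

```c
#include <stdio.h>

#define SEM_SPINCNT 3000

/* Stand-in for the semaphore register: reading it returns 0 exactly once
 * (the claim), then 1 until it is released by writing 1. */
static int sem_claimed;
static int sem_read(void)     { return sem_claimed ? 1 : (sem_claimed = 1, 0); }
static void sem_release(void) { sem_claimed = 0; }

/* Analogue of bfa_ioc_sem_get(): bounded spin, true on success. */
static int sem_get(void)
{
	int cnt = 0, r32 = sem_read();

	while (r32 && cnt < SEM_SPINCNT) {
		cnt++;                   /* real code: bfa_os_udelay(2) here */
		r32 = sem_read();
	}
	return r32 == 0;
}

int main(void)
{
	printf("first get:  %d\n", sem_get());   /* 1: granted */
	printf("second get: %d\n", sem_get());   /* 0: spins out, still held */
	sem_release();
	printf("after rel:  %d\n", sem_get());   /* 1: granted again */
	return 0;
}
```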
@@ -828,7 +851,7 @@ bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
828/** 851/**
829 * Get driver and firmware versions. 852 * Get driver and firmware versions.
830 */ 853 */
831static void 854void
832bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr) 855bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
833{ 856{
834 u32 pgnum, pgoff; 857 u32 pgnum, pgoff;
@@ -847,24 +870,10 @@ bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
847 } 870 }
848} 871}
849 872
850static u32 *
851bfa_ioc_fwimg_get_chunk(struct bfa_ioc_s *ioc, u32 off)
852{
853 if (ioc->ctdev)
854 return bfi_image_ct_get_chunk(off);
855 return bfi_image_cb_get_chunk(off);
856}
857
858static u32
859bfa_ioc_fwimg_get_size(struct bfa_ioc_s *ioc)
860{
861return (ioc->ctdev) ? bfi_image_ct_size : bfi_image_cb_size;
862}
863
864/** 873/**
865 * Returns TRUE if same. 874 * Returns TRUE if same.
866 */ 875 */
867static bfa_boolean_t 876bfa_boolean_t
868bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr) 877bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
869{ 878{
870 struct bfi_ioc_image_hdr_s *drv_fwhdr; 879 struct bfi_ioc_image_hdr_s *drv_fwhdr;
@@ -921,95 +930,6 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc)
921} 930}
922 931
923/** 932/**
924 * Return true if firmware of current driver matches the running firmware.
925 */
926static bfa_boolean_t
927bfa_ioc_firmware_lock(struct bfa_ioc_s *ioc)
928{
929 enum bfi_ioc_state ioc_fwstate;
930 u32 usecnt;
931 struct bfi_ioc_image_hdr_s fwhdr;
932
933 /**
934 * Firmware match check is relevant only for CNA.
935 */
936 if (!ioc->cna)
937 return BFA_TRUE;
938
939 /**
940 * If bios boot (flash based) -- do not increment usage count
941 */
942 if (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ)
943 return BFA_TRUE;
944
945 bfa_ioc_usage_sem_get(ioc);
946 usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg);
947
948 /**
949 * If usage count is 0, always return TRUE.
950 */
951 if (usecnt == 0) {
952 bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 1);
953 bfa_ioc_usage_sem_release(ioc);
954 bfa_trc(ioc, usecnt);
955 return BFA_TRUE;
956 }
957
958 ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);
959 bfa_trc(ioc, ioc_fwstate);
960
961 /**
962 * Use count cannot be non-zero and chip in uninitialized state.
963 */
964 bfa_assert(ioc_fwstate != BFI_IOC_UNINIT);
965
966 /**
967 * Check if another driver with a different firmware is active
968 */
969 bfa_ioc_fwver_get(ioc, &fwhdr);
970 if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
971 bfa_ioc_usage_sem_release(ioc);
972 bfa_trc(ioc, usecnt);
973 return BFA_FALSE;
974 }
975
976 /**
977 * Same firmware version. Increment the reference count.
978 */
979 usecnt++;
980 bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt);
981 bfa_ioc_usage_sem_release(ioc);
982 bfa_trc(ioc, usecnt);
983 return BFA_TRUE;
984}
985
986static void
987bfa_ioc_firmware_unlock(struct bfa_ioc_s *ioc)
988{
989 u32 usecnt;
990
991 /**
992 * Firmware lock is relevant only for CNA.
993 * If bios boot (flash based) -- do not decrement usage count
994 */
995 if (!ioc->cna || (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ))
996 return;
997
998 /**
999 * decrement usage count
1000 */
1001 bfa_ioc_usage_sem_get(ioc);
1002 usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg);
1003 bfa_assert(usecnt > 0);
1004
1005 usecnt--;
1006 bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt);
1007 bfa_trc(ioc, usecnt);
1008
1009 bfa_ioc_usage_sem_release(ioc);
1010}
1011
1012/**
1013 * Conditionally flush any pending message from firmware at start. 933 * Conditionally flush any pending message from firmware at start.
1014 */ 934 */
1015static void 935static void
@@ -1152,33 +1072,27 @@ bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
1152static void 1072static void
1153bfa_ioc_hb_check(void *cbarg) 1073bfa_ioc_hb_check(void *cbarg)
1154{ 1074{
1155 struct bfa_ioc_s *ioc = cbarg; 1075 struct bfa_ioc_s *ioc = cbarg;
1156 u32 hb_count; 1076 u32 hb_count;
1157 1077
1158 hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat); 1078 hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
1159 if (ioc->hb_count == hb_count) { 1079 if (ioc->hb_count == hb_count) {
1160 ioc->hb_fail++; 1080 bfa_log(ioc->logm, BFA_LOG_HAL_HEARTBEAT_FAILURE,
1161 } else { 1081 hb_count);
1162 ioc->hb_count = hb_count;
1163 ioc->hb_fail = 0;
1164 }
1165
1166 if (ioc->hb_fail >= BFA_IOC_HB_FAIL_MAX) {
1167 bfa_log(ioc->logm, BFA_LOG_HAL_HEARTBEAT_FAILURE, hb_count);
1168 ioc->hb_fail = 0;
1169 bfa_ioc_recover(ioc); 1082 bfa_ioc_recover(ioc);
1170 return; 1083 return;
1084 } else {
1085 ioc->hb_count = hb_count;
1171 } 1086 }
1172 1087
1173 bfa_ioc_mbox_poll(ioc); 1088 bfa_ioc_mbox_poll(ioc);
1174 bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, ioc, 1089 bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check,
1175 BFA_IOC_HB_TOV); 1090 ioc, BFA_IOC_HB_TOV);
1176} 1091}
1177 1092
1178static void 1093static void
1179bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc) 1094bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
1180{ 1095{
1181 ioc->hb_fail = 0;
1182 ioc->hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat); 1096 ioc->hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
1183 bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, ioc, 1097 bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, ioc,
1184 BFA_IOC_HB_TOV); 1098 BFA_IOC_HB_TOV);
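
The heartbeat rework drops the old miss counter (BFA_IOC_HB_FAIL_MAX ticks of 1 s each) in favor of one decision per 500 ms tick: if the firmware heartbeat counter has not advanced since the last tick, recovery starts immediately. The core check, sketched:

```c
#include <stdio.h>
#include <stdint.h>

static uint32_t hw_heartbeat;            /* advanced by "firmware" */
static uint32_t hb_count;                /* last value seen by the driver */

/* Analogue of the patched bfa_ioc_hb_check(): one missed tick is fatal. */
static int hb_check(void)
{
	uint32_t now = hw_heartbeat;

	if (hb_count == now) {
		puts("heartbeat failure -> recover");
		return -1;
	}
	hb_count = now;                  /* real code: re-arm the 500 ms timer */
	return 0;
}

int main(void)
{
	hb_count = hw_heartbeat;         /* bfa_ioc_hb_monitor() equivalent */
	hw_heartbeat++;                  /* firmware is alive */
	if (hb_check())
		return 1;
	/* firmware stalls: counter does not move before the next tick */
	return hb_check() ? 0 : 1;
}
```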
@@ -1191,112 +1105,6 @@ bfa_ioc_hb_stop(struct bfa_ioc_s *ioc)
1191} 1105}
1192 1106
1193/** 1107/**
1194 * Host to LPU mailbox message addresses
1195 */
1196static struct {
1197 u32 hfn_mbox, lpu_mbox, hfn_pgn;
1198} iocreg_fnreg[] = {
1199 {
1200 HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0}, {
1201 HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1}, {
1202 HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2}, {
1203 HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3}
1204};
1205
1206/**
1207 * Host <-> LPU mailbox command/status registers - port 0
1208 */
1209static struct {
1210 u32 hfn, lpu;
1211} iocreg_mbcmd_p0[] = {
1212 {
1213 HOSTFN0_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN0_MBOX0_CMD_STAT}, {
1214 HOSTFN1_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN1_MBOX0_CMD_STAT}, {
1215 HOSTFN2_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN2_MBOX0_CMD_STAT}, {
1216 HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT}
1217};
1218
1219/**
1220 * Host <-> LPU mailbox command/status registers - port 1
1221 */
1222static struct {
1223 u32 hfn, lpu;
1224} iocreg_mbcmd_p1[] = {
1225 {
1226 HOSTFN0_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN0_MBOX0_CMD_STAT}, {
1227 HOSTFN1_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN1_MBOX0_CMD_STAT}, {
1228 HOSTFN2_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN2_MBOX0_CMD_STAT}, {
1229 HOSTFN3_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN3_MBOX0_CMD_STAT}
1230};
1231
1232/**
1233 * Shared IRQ handling in INTX mode
1234 */
1235static struct {
1236 u32 isr, msk;
1237} iocreg_shirq_next[] = {
1238 {
1239 HOSTFN1_INT_STATUS, HOSTFN1_INT_MSK}, {
1240 HOSTFN2_INT_STATUS, HOSTFN2_INT_MSK}, {
1241 HOSTFN3_INT_STATUS, HOSTFN3_INT_MSK}, {
1242HOSTFN0_INT_STATUS, HOSTFN0_INT_MSK},};
1243
1244static void
1245bfa_ioc_reg_init(struct bfa_ioc_s *ioc)
1246{
1247 bfa_os_addr_t rb;
1248 int pcifn = bfa_ioc_pcifn(ioc);
1249
1250 rb = bfa_ioc_bar0(ioc);
1251
1252 ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
1253 ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
1254 ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;
1255
1256 if (ioc->port_id == 0) {
1257 ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
1258 ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
1259 ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn;
1260 ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu;
1261 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
1262 } else {
1263 ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
1264 ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
1265 ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn;
1266 ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu;
1267 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
1268 }
1269
1270 /**
1271 * Shared IRQ handling in INTX mode
1272 */
1273 ioc->ioc_regs.shirq_isr_next = rb + iocreg_shirq_next[pcifn].isr;
1274 ioc->ioc_regs.shirq_msk_next = rb + iocreg_shirq_next[pcifn].msk;
1275
1276 /*
1277 * PSS control registers
1278 */
1279 ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
1280 ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_425_CTL_REG);
1281 ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_312_CTL_REG);
1282
1283 /*
1284 * IOC semaphore registers and serialization
1285 */
1286 ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
1287 ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
1288 ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
1289
1290 /**
1291 * sram memory access
1292 */
1293 ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
1294 ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CB;
1295 if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT)
1296 ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
1297}
1298
1299/**
1300 * Initiate a full firmware download. 1108 * Initiate a full firmware download.
1301 */ 1109 */
1302static void 1110static void
@@ -1321,9 +1129,6 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
1321 if (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ) 1129 if (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ)
1322 boot_type = BFI_BOOT_TYPE_FLASH; 1130 boot_type = BFI_BOOT_TYPE_FLASH;
1323 fwimg = bfa_ioc_fwimg_get_chunk(ioc, chunkno); 1131 fwimg = bfa_ioc_fwimg_get_chunk(ioc, chunkno);
1324 fwimg[BFI_BOOT_TYPE_OFF / sizeof(u32)] = bfa_os_swap32(boot_type);
1325 fwimg[BFI_BOOT_PARAM_OFF / sizeof(u32)] =
1326 bfa_os_swap32(boot_param);
1327 1132
1328 pgnum = bfa_ioc_smem_pgnum(ioc, loff); 1133 pgnum = bfa_ioc_smem_pgnum(ioc, loff);
1329 pgoff = bfa_ioc_smem_pgoff(ioc, loff); 1134 pgoff = bfa_ioc_smem_pgoff(ioc, loff);
@@ -1332,17 +1137,17 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
1332 1137
1333 for (i = 0; i < bfa_ioc_fwimg_get_size(ioc); i++) { 1138 for (i = 0; i < bfa_ioc_fwimg_get_size(ioc); i++) {
1334 1139
1335 if (BFA_FLASH_CHUNK_NO(i) != chunkno) { 1140 if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
1336 chunkno = BFA_FLASH_CHUNK_NO(i); 1141 chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
1337 fwimg = bfa_ioc_fwimg_get_chunk(ioc, 1142 fwimg = bfa_ioc_fwimg_get_chunk(ioc,
1338 BFA_FLASH_CHUNK_ADDR(chunkno)); 1143 BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
1339 } 1144 }
1340 1145
1341 /** 1146 /**
1342 * write smem 1147 * write smem
1343 */ 1148 */
1344 bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 1149 bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
1345 fwimg[BFA_FLASH_OFFSET_IN_CHUNK(i)]); 1150 fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);
1346 1151
1347 loff += sizeof(u32); 1152 loff += sizeof(u32);
1348 1153
@@ -1358,6 +1163,14 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
1358 1163
1359 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, 1164 bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
1360 bfa_ioc_smem_pgnum(ioc, 0)); 1165 bfa_ioc_smem_pgnum(ioc, 0));
1166
1167 /*
1168 * Set boot type and boot param at the end.
1169 */
1170 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF,
1171 bfa_os_swap32(boot_type));
1172 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_PARAM_OFF,
1173 bfa_os_swap32(boot_param));
1361} 1174}
1362 1175
1363static void 1176static void
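
The download hunk stops patching boot type/param into the in-memory image array and instead writes them to SMEM with dedicated bfa_mem_write() calls after the full image copy; one plausible benefit of this ordering is that the boot words only become valid once the image body is complete. A sketch of the payload-first, control-words-last ordering (the layout is hypothetical):

```c
#include <stdint.h>

#define IMG_WORDS 4
#define BOOT_TYPE_OFF IMG_WORDS          /* hypothetical SMEM layout */

static volatile uint32_t smem[IMG_WORDS + 1];

/* Analogue of the reordered bfa_ioc_download_fw(): the image body is
 * written first; the boot-type word the firmware keys on goes last. */
static void download_fw(const uint32_t *img, uint32_t boot_type)
{
	for (int i = 0; i < IMG_WORDS; i++)
		smem[i] = img[i];
	/* commit: only now does the other side see a valid boot request */
	smem[BOOT_TYPE_OFF] = boot_type;
}

int main(void)
{
	const uint32_t img[IMG_WORDS] = { 1, 2, 3, 4 };

	download_fw(img, 0xb007);
	return smem[BOOT_TYPE_OFF] == 0xb007 ? 0 : 1;
}
```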
@@ -1440,168 +1253,10 @@ bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc)
1440} 1253}
1441 1254
1442/** 1255/**
1443 * Initialize IOC to port mapping.
1444 */
1445
1446#define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8)
1447static void
1448bfa_ioc_map_port(struct bfa_ioc_s *ioc)
1449{
1450 bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;
1451 u32 r32;
1452
1453 /**
1454 * For crossbow, port id is same as pci function.
1455 */
1456 if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_CT) {
1457 ioc->port_id = bfa_ioc_pcifn(ioc);
1458 return;
1459 }
1460
1461 /**
1462 * For catapult, base port id on personality register and IOC type
1463 */
1464 r32 = bfa_reg_read(rb + FNC_PERS_REG);
1465 r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
1466 ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
1467
1468 bfa_trc(ioc, bfa_ioc_pcifn(ioc));
1469 bfa_trc(ioc, ioc->port_id);
1470}
1471
1472
1473
1474/**
1475 * bfa_ioc_public 1256 * bfa_ioc_public
1476 */ 1257 */
1477 1258
1478/** 1259/**
1479* Set interrupt mode for a function: INTX or MSIX
1480 */
1481void
1482bfa_ioc_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
1483{
1484 bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;
1485 u32 r32, mode;
1486
1487 r32 = bfa_reg_read(rb + FNC_PERS_REG);
1488 bfa_trc(ioc, r32);
1489
1490 mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
1491 __F0_INTX_STATUS;
1492
1493 /**
1494 * If already in desired mode, do not change anything
1495 */
1496 if (!msix && mode)
1497 return;
1498
1499 if (msix)
1500 mode = __F0_INTX_STATUS_MSIX;
1501 else
1502 mode = __F0_INTX_STATUS_INTA;
1503
1504 r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
1505 r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
1506 bfa_trc(ioc, r32);
1507
1508 bfa_reg_write(rb + FNC_PERS_REG, r32);
1509}
1510
1511bfa_status_t
1512bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
1513{
1514 bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;
1515 u32 pll_sclk, pll_fclk, r32;
1516
1517 if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT) {
1518 pll_sclk =
1519 __APP_PLL_312_ENABLE | __APP_PLL_312_LRESETN |
1520 __APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(0U) |
1521 __APP_PLL_312_JITLMT0_1(3U) |
1522 __APP_PLL_312_CNTLMT0_1(1U);
1523 pll_fclk =
1524 __APP_PLL_425_ENABLE | __APP_PLL_425_LRESETN |
1525 __APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(0U) |
1526 __APP_PLL_425_JITLMT0_1(3U) |
1527 __APP_PLL_425_CNTLMT0_1(1U);
1528
1529 /**
1530 * For catapult, choose operational mode FC/FCoE
1531 */
1532 if (ioc->fcmode) {
1533 bfa_reg_write((rb + OP_MODE), 0);
1534 bfa_reg_write((rb + ETH_MAC_SER_REG),
1535 __APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2
1536 | __APP_EMS_CHANNEL_SEL);
1537 } else {
1538 ioc->pllinit = BFA_TRUE;
1539 bfa_reg_write((rb + OP_MODE), __GLOBAL_FCOE_MODE);
1540 bfa_reg_write((rb + ETH_MAC_SER_REG),
1541 __APP_EMS_REFCKBUFEN1);
1542 }
1543 } else {
1544 pll_sclk =
1545 __APP_PLL_312_ENABLE | __APP_PLL_312_LRESETN |
1546 __APP_PLL_312_P0_1(3U) | __APP_PLL_312_JITLMT0_1(3U) |
1547 __APP_PLL_312_CNTLMT0_1(3U);
1548 pll_fclk =
1549 __APP_PLL_425_ENABLE | __APP_PLL_425_LRESETN |
1550 __APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) |
1551 __APP_PLL_425_JITLMT0_1(3U) |
1552 __APP_PLL_425_CNTLMT0_1(3U);
1553 }
1554
1555 bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT);
1556 bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT);
1557
1558 bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
1559 bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
1560 bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
1561 bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
1562 bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
1563 bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
1564
1565 bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
1566 __APP_PLL_312_LOGIC_SOFT_RESET);
1567 bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
1568 __APP_PLL_312_BYPASS | __APP_PLL_312_LOGIC_SOFT_RESET);
1569 bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
1570 __APP_PLL_425_LOGIC_SOFT_RESET);
1571 bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
1572 __APP_PLL_425_BYPASS | __APP_PLL_425_LOGIC_SOFT_RESET);
1573 bfa_os_udelay(2);
1574 bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
1575 __APP_PLL_312_LOGIC_SOFT_RESET);
1576 bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
1577 __APP_PLL_425_LOGIC_SOFT_RESET);
1578
1579 bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
1580 pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET);
1581 bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
1582 pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET);
1583
1584 /**
1585 * Wait for PLLs to lock.
1586 */
1587 bfa_os_udelay(2000);
1588 bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
1589 bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
1590
1591 bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk);
1592 bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk);
1593
1594 if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT) {
1595 bfa_reg_write((rb + MBIST_CTL_REG), __EDRAM_BISTR_START);
1596 bfa_os_udelay(1000);
1597 r32 = bfa_reg_read((rb + MBIST_STAT_REG));
1598 bfa_trc(ioc, r32);
1599 }
1600
1601 return BFA_STATUS_OK;
1602}
1603
1604/**
1605 * Interface used by diag module to do firmware boot with memory test 1260 * Interface used by diag module to do firmware boot with memory test
1606 * as the entry vector. 1261 * as the entry vector.
1607 */ 1262 */
@@ -1642,7 +1297,7 @@ bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param)
1642void 1297void
1643bfa_ioc_auto_recover(bfa_boolean_t auto_recover) 1298bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
1644{ 1299{
1645 bfa_auto_recover = BFA_FALSE; 1300 bfa_auto_recover = auto_recover;
1646} 1301}
1647 1302
1648 1303
@@ -1764,6 +1419,14 @@ bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
1764 ioc->ctdev = (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT); 1419 ioc->ctdev = (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT);
1765 ioc->cna = ioc->ctdev && !ioc->fcmode; 1420 ioc->cna = ioc->ctdev && !ioc->fcmode;
1766 1421
1422 /**
1423 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
1424 */
1425 if (ioc->ctdev)
1426 bfa_ioc_set_ct_hwif(ioc);
1427 else
1428 bfa_ioc_set_cb_hwif(ioc);
1429
1767 bfa_ioc_map_port(ioc); 1430 bfa_ioc_map_port(ioc);
1768 bfa_ioc_reg_init(ioc); 1431 bfa_ioc_reg_init(ioc);
1769} 1432}
@@ -1830,7 +1493,6 @@ return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
1830void 1493void
1831bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave) 1494bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
1832{ 1495{
1833 bfa_assert(ioc->auto_recover);
1834 ioc->dbg_fwsave = dbg_fwsave; 1496 ioc->dbg_fwsave = dbg_fwsave;
1835 ioc->dbg_fwsave_len = bfa_ioc_debug_trcsz(ioc->auto_recover); 1497 ioc->dbg_fwsave_len = bfa_ioc_debug_trcsz(ioc->auto_recover);
1836} 1498}
@@ -1953,8 +1615,8 @@ bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
1953bfa_boolean_t 1615bfa_boolean_t
1954bfa_ioc_is_disabled(struct bfa_ioc_s *ioc) 1616bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
1955{ 1617{
1956 return (bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) 1618 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling)
1957 || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled)); 1619 || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
1958} 1620}
1959 1621
1960/** 1622/**
@@ -1963,9 +1625,9 @@ bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
1963bfa_boolean_t 1625bfa_boolean_t
1964bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc) 1626bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
1965{ 1627{
1966 return (bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) 1628 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset)
1967 || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_fwcheck) 1629 || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_fwcheck)
1968 || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_mismatch)); 1630 || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_mismatch);
1969} 1631}
1970 1632
1971#define bfa_ioc_state_disabled(__sm) \ 1633#define bfa_ioc_state_disabled(__sm) \
@@ -1973,7 +1635,7 @@ bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
1973 ((__sm) == BFI_IOC_INITING) || \ 1635 ((__sm) == BFI_IOC_INITING) || \
1974 ((__sm) == BFI_IOC_HWINIT) || \ 1636 ((__sm) == BFI_IOC_HWINIT) || \
1975 ((__sm) == BFI_IOC_DISABLED) || \ 1637 ((__sm) == BFI_IOC_DISABLED) || \
1976 ((__sm) == BFI_IOC_HBFAIL) || \ 1638 ((__sm) == BFI_IOC_FAIL) || \
1977 ((__sm) == BFI_IOC_CFG_DISABLED)) 1639 ((__sm) == BFI_IOC_CFG_DISABLED))
1978 1640
1979/** 1641/**
@@ -2017,46 +1679,28 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
2017 struct bfa_adapter_attr_s *ad_attr) 1679 struct bfa_adapter_attr_s *ad_attr)
2018{ 1680{
2019 struct bfi_ioc_attr_s *ioc_attr; 1681 struct bfi_ioc_attr_s *ioc_attr;
2020 char model[BFA_ADAPTER_MODEL_NAME_LEN];
2021 1682
2022 ioc_attr = ioc->attr; 1683 ioc_attr = ioc->attr;
2023 bfa_os_memcpy((void *)&ad_attr->serial_num, 1684
2024 (void *)ioc_attr->brcd_serialnum, 1685 bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
2025 BFA_ADAPTER_SERIAL_NUM_LEN); 1686 bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
2026 1687 bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
2027 bfa_os_memcpy(&ad_attr->fw_ver, ioc_attr->fw_version, BFA_VERSION_LEN); 1688 bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
2028 bfa_os_memcpy(&ad_attr->optrom_ver, ioc_attr->optrom_version,
2029 BFA_VERSION_LEN);
2030 bfa_os_memcpy(&ad_attr->manufacturer, BFA_MFG_NAME,
2031 BFA_ADAPTER_MFG_NAME_LEN);
2032 bfa_os_memcpy(&ad_attr->vpd, &ioc_attr->vpd, 1689 bfa_os_memcpy(&ad_attr->vpd, &ioc_attr->vpd,
2033 sizeof(struct bfa_mfg_vpd_s)); 1690 sizeof(struct bfa_mfg_vpd_s));
2034 1691
2035 ad_attr->nports = BFI_ADAPTER_GETP(NPORTS, ioc_attr->adapter_prop); 1692 ad_attr->nports = bfa_ioc_get_nports(ioc);
2036 ad_attr->max_speed = BFI_ADAPTER_GETP(SPEED, ioc_attr->adapter_prop); 1693 ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
2037 1694
2038 /** 1695 bfa_ioc_get_adapter_model(ioc, ad_attr->model);
2039 * model name 1696 /* For now, model descr uses same model string */
2040 */ 1697 bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
2041 if (BFI_ADAPTER_GETP(SPEED, ioc_attr->adapter_prop) == 10) {
2042 strcpy(model, "BR-10?0");
2043 model[5] = '0' + ad_attr->nports;
2044 } else {
2045 strcpy(model, "Brocade-??5");
2046 model[8] =
2047 '0' + BFI_ADAPTER_GETP(SPEED, ioc_attr->adapter_prop);
2048 model[9] = '0' + ad_attr->nports;
2049 }
2050 1698
2051 if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop)) 1699 if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
2052 ad_attr->prototype = 1; 1700 ad_attr->prototype = 1;
2053 else 1701 else
2054 ad_attr->prototype = 0; 1702 ad_attr->prototype = 0;
2055 1703
2056 bfa_os_memcpy(&ad_attr->model, model, BFA_ADAPTER_MODEL_NAME_LEN);
2057 bfa_os_memcpy(&ad_attr->model_descr, &ad_attr->model,
2058 BFA_ADAPTER_MODEL_NAME_LEN);
2059
2060 ad_attr->pwwn = bfa_ioc_get_pwwn(ioc); 1704 ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
2061 ad_attr->mac = bfa_ioc_get_mac(ioc); 1705 ad_attr->mac = bfa_ioc_get_mac(ioc);
2062 1706
@@ -2064,41 +1708,122 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
2064 ad_attr->pcie_lanes = ioc_attr->pcie_lanes; 1708 ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
2065 ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig; 1709 ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
2066 ad_attr->asic_rev = ioc_attr->asic_rev; 1710 ad_attr->asic_rev = ioc_attr->asic_rev;
2067 ad_attr->hw_ver[0] = 'R'; 1711
2068 ad_attr->hw_ver[1] = 'e'; 1712 bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
2069 ad_attr->hw_ver[2] = 'v';
2070 ad_attr->hw_ver[3] = '-';
2071 ad_attr->hw_ver[4] = ioc_attr->asic_rev;
2072 ad_attr->hw_ver[5] = '\0';
2073 1713
2074 ad_attr->cna_capable = ioc->cna; 1714 ad_attr->cna_capable = ioc->cna;
2075} 1715}
2076 1716
1717enum bfa_ioc_type_e
1718bfa_ioc_get_type(struct bfa_ioc_s *ioc)
1719{
1720 if (!ioc->ctdev || ioc->fcmode)
1721 return BFA_IOC_TYPE_FC;
1722 else if (ioc->ioc_mc == BFI_MC_IOCFC)
1723 return BFA_IOC_TYPE_FCoE;
1724 else if (ioc->ioc_mc == BFI_MC_LL)
1725 return BFA_IOC_TYPE_LL;
1726 else {
1727 bfa_assert(ioc->ioc_mc == BFI_MC_LL);
1728 return BFA_IOC_TYPE_LL;
1729 }
1730}
1731
1732void
1733bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
1734{
1735 bfa_os_memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
1736 bfa_os_memcpy((void *)serial_num,
1737 (void *)ioc->attr->brcd_serialnum,
1738 BFA_ADAPTER_SERIAL_NUM_LEN);
1739}
1740
1741void
1742bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
1743{
1744 bfa_os_memset((void *)fw_ver, 0, BFA_VERSION_LEN);
1745 bfa_os_memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
1746}
1747
1748void
1749bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
1750{
1751 bfa_assert(chip_rev);
1752
1753 bfa_os_memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
1754
1755 chip_rev[0] = 'R';
1756 chip_rev[1] = 'e';
1757 chip_rev[2] = 'v';
1758 chip_rev[3] = '-';
1759 chip_rev[4] = ioc->attr->asic_rev;
1760 chip_rev[5] = '\0';
1761}
1762
1763void
1764bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
1765{
1766 bfa_os_memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
1767 bfa_os_memcpy(optrom_ver, ioc->attr->optrom_version,
1768 BFA_VERSION_LEN);
1769}
1770
1771void
1772bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
1773{
1774 bfa_os_memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
1775 bfa_os_memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
1776}
1777
1778void
1779bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
1780{
1781 struct bfi_ioc_attr_s *ioc_attr;
1782 u8 nports;
1783 u8 max_speed;
1784
1785 bfa_assert(model);
1786 bfa_os_memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
1787
1788 ioc_attr = ioc->attr;
1789
1790 nports = bfa_ioc_get_nports(ioc);
1791 max_speed = bfa_ioc_speed_sup(ioc);
1792
1793 /**
1794 * model name
1795 */
1796 if (max_speed == 10) {
1797 strcpy(model, "BR-10?0");
1798 model[5] = '0' + nports;
1799 } else {
1800 strcpy(model, "Brocade-??5");
1801 model[8] = '0' + max_speed;
1802 model[9] = '0' + nports;
1803 }
1804}
1805
1806enum bfa_ioc_state
1807bfa_ioc_get_state(struct bfa_ioc_s *ioc)
1808{
1809 return bfa_sm_to_state(ioc_sm_table, ioc->fsm);
1810}
1811
2077void 1812void
2078bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr) 1813bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
2079{ 1814{
2080 bfa_os_memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s)); 1815 bfa_os_memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));
2081 1816
2082 ioc_attr->state = bfa_sm_to_state(ioc_sm_table, ioc->fsm); 1817 ioc_attr->state = bfa_ioc_get_state(ioc);
2083 ioc_attr->port_id = ioc->port_id; 1818 ioc_attr->port_id = ioc->port_id;
2084 1819
2085 if (!ioc->ctdev) 1820 ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
2086 ioc_attr->ioc_type = BFA_IOC_TYPE_FC;
2087 else if (ioc->ioc_mc == BFI_MC_IOCFC)
2088 ioc_attr->ioc_type = BFA_IOC_TYPE_FCoE;
2089 else if (ioc->ioc_mc == BFI_MC_LL)
2090 ioc_attr->ioc_type = BFA_IOC_TYPE_LL;
2091 1821
2092 bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr); 1822 bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
2093 1823
2094 ioc_attr->pci_attr.device_id = ioc->pcidev.device_id; 1824 ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
2095 ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func; 1825 ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
2096 ioc_attr->pci_attr.chip_rev[0] = 'R'; 1826 bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
2097 ioc_attr->pci_attr.chip_rev[1] = 'e';
2098 ioc_attr->pci_attr.chip_rev[2] = 'v';
2099 ioc_attr->pci_attr.chip_rev[3] = '-';
2100 ioc_attr->pci_attr.chip_rev[4] = ioc_attr->adapter_attr.asic_rev;
2101 ioc_attr->pci_attr.chip_rev[5] = '\0';
2102} 1827}
2103 1828
2104/** 1829/**
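
The adapter-attribute refactor moves the inline model-name construction into bfa_ioc_get_adapter_model(), where single digits for port count and speed are patched into fixed templates. An equivalent, easier-to-read formulation with snprintf (valid under the same single-digit assumption; the buffer size is illustrative):

```c
#include <stdio.h>

#define MODEL_NAME_LEN 16

/* Equivalent of the digit patching in bfa_ioc_get_adapter_model():
 * "BR-10?0" has the port count patched into index 5; "Brocade-??5"
 * gets the speed at index 8 and the port count at index 9.
 * Both assume single-digit nports and speed, as the original does. */
static void get_adapter_model(char *model, unsigned nports, unsigned speed)
{
	if (speed == 10)
		snprintf(model, MODEL_NAME_LEN, "BR-10%u0", nports);
	else
		snprintf(model, MODEL_NAME_LEN, "Brocade-%u%u5", speed, nports);
}

int main(void)
{
	char model[MODEL_NAME_LEN];

	get_adapter_model(model, 2, 10);
	puts(model);                     /* BR-1020 */
	get_adapter_model(model, 2, 8);
	puts(model);                     /* Brocade-825 */
	return 0;
}
```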
@@ -2195,29 +1920,6 @@ bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc)
2195} 1920}
2196 1921
2197/** 1922/**
2198 * Return true if interrupt should be claimed.
2199 */
2200bfa_boolean_t
2201bfa_ioc_intx_claim(struct bfa_ioc_s *ioc)
2202{
2203 u32 isr, msk;
2204
2205 /**
2206 * Always claim if not catapult.
2207 */
2208 if (!ioc->ctdev)
2209 return BFA_TRUE;
2210
2211 /**
2212 * FALSE if next device is claiming interrupt.
2213 * TRUE if next device is not interrupting or not present.
2214 */
2215 msk = bfa_reg_read(ioc->ioc_regs.shirq_msk_next);
2216 isr = bfa_reg_read(ioc->ioc_regs.shirq_isr_next);
2217 return !(isr & ~msk);
2218}
2219
2220/**
2221 * Send AEN notification 1923 * Send AEN notification
2222 */ 1924 */
2223static void 1925static void
@@ -2226,32 +1928,14 @@ bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
2226 union bfa_aen_data_u aen_data; 1928 union bfa_aen_data_u aen_data;
2227 struct bfa_log_mod_s *logmod = ioc->logm; 1929 struct bfa_log_mod_s *logmod = ioc->logm;
2228 s32 inst_num = 0; 1930 s32 inst_num = 0;
2229 struct bfa_ioc_attr_s ioc_attr; 1931 enum bfa_ioc_type_e ioc_type;
2230 1932
2231 switch (event) { 1933 bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, event), inst_num);
2232 case BFA_IOC_AEN_HBGOOD:
2233 bfa_log(logmod, BFA_AEN_IOC_HBGOOD, inst_num);
2234 break;
2235 case BFA_IOC_AEN_HBFAIL:
2236 bfa_log(logmod, BFA_AEN_IOC_HBFAIL, inst_num);
2237 break;
2238 case BFA_IOC_AEN_ENABLE:
2239 bfa_log(logmod, BFA_AEN_IOC_ENABLE, inst_num);
2240 break;
2241 case BFA_IOC_AEN_DISABLE:
2242 bfa_log(logmod, BFA_AEN_IOC_DISABLE, inst_num);
2243 break;
2244 case BFA_IOC_AEN_FWMISMATCH:
2245 bfa_log(logmod, BFA_AEN_IOC_FWMISMATCH, inst_num);
2246 break;
2247 default:
2248 break;
2249 }
2250 1934
2251 memset(&aen_data.ioc.pwwn, 0, sizeof(aen_data.ioc.pwwn)); 1935 memset(&aen_data.ioc.pwwn, 0, sizeof(aen_data.ioc.pwwn));
2252 memset(&aen_data.ioc.mac, 0, sizeof(aen_data.ioc.mac)); 1936 memset(&aen_data.ioc.mac, 0, sizeof(aen_data.ioc.mac));
2253 bfa_ioc_get_attr(ioc, &ioc_attr); 1937 ioc_type = bfa_ioc_get_type(ioc);
2254 switch (ioc_attr.ioc_type) { 1938 switch (ioc_type) {
2255 case BFA_IOC_TYPE_FC: 1939 case BFA_IOC_TYPE_FC:
2256 aen_data.ioc.pwwn = bfa_ioc_get_pwwn(ioc); 1940 aen_data.ioc.pwwn = bfa_ioc_get_pwwn(ioc);
2257 break; 1941 break;
@@ -2263,10 +1947,10 @@ bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
2263 aen_data.ioc.mac = bfa_ioc_get_mac(ioc); 1947 aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2264 break; 1948 break;
2265 default: 1949 default:
2266 bfa_assert(ioc_attr.ioc_type == BFA_IOC_TYPE_FC); 1950 bfa_assert(ioc_type == BFA_IOC_TYPE_FC);
2267 break; 1951 break;
2268 } 1952 }
2269 aen_data.ioc.ioc_type = ioc_attr.ioc_type; 1953 aen_data.ioc.ioc_type = ioc_type;
2270} 1954}
2271 1955
2272/** 1956/**
@@ -2290,6 +1974,15 @@ bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2290} 1974}
2291 1975
2292/** 1976/**
1977 * Clear saved firmware trace
1978 */
1979void
1980bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc)
1981{
1982 ioc->dbg_fwsave_once = BFA_TRUE;
1983}
1984
1985/**
2293 * Retrieve saved firmware trace from a prior IOC failure. 1986 * Retrieve saved firmware trace from a prior IOC failure.
2294 */ 1987 */
2295bfa_status_t 1988bfa_status_t
@@ -2304,6 +1997,13 @@ bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2304 1997
2305 pgnum = bfa_ioc_smem_pgnum(ioc, loff); 1998 pgnum = bfa_ioc_smem_pgnum(ioc, loff);
2306 loff = bfa_ioc_smem_pgoff(ioc, loff); 1999 loff = bfa_ioc_smem_pgoff(ioc, loff);
2000
2001 /*
2002 * Hold semaphore to serialize pll init and fwtrc.
2003 */
2004 if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg))
2005 return BFA_STATUS_FAILED;
2006
2307 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); 2007 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
2308 2008
2309 tlen = *trclen; 2009 tlen = *trclen;
@@ -2329,6 +2029,12 @@ bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2329 } 2029 }
2330 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, 2030 bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
2331 bfa_ioc_smem_pgnum(ioc, 0)); 2031 bfa_ioc_smem_pgnum(ioc, 0));
2032
2033 /*
2034 * release semaphore.
2035 */
2036 bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
2037
2332 bfa_trc(ioc, pgnum); 2038 bfa_trc(ioc, pgnum);
2333 2039
2334 *trclen = tlen * sizeof(u32); 2040 *trclen = tlen * sizeof(u32);
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index 58efd4b13143..d0804406ea1a 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -74,15 +74,18 @@ struct bfa_ioc_regs_s {
74 bfa_os_addr_t lpu_mbox_cmd; 74 bfa_os_addr_t lpu_mbox_cmd;
75 bfa_os_addr_t lpu_mbox; 75 bfa_os_addr_t lpu_mbox;
76 bfa_os_addr_t pss_ctl_reg; 76 bfa_os_addr_t pss_ctl_reg;
77 bfa_os_addr_t pss_err_status_reg;
77 bfa_os_addr_t app_pll_fast_ctl_reg; 78 bfa_os_addr_t app_pll_fast_ctl_reg;
78 bfa_os_addr_t app_pll_slow_ctl_reg; 79 bfa_os_addr_t app_pll_slow_ctl_reg;
79 bfa_os_addr_t ioc_sem_reg; 80 bfa_os_addr_t ioc_sem_reg;
80 bfa_os_addr_t ioc_usage_sem_reg; 81 bfa_os_addr_t ioc_usage_sem_reg;
82 bfa_os_addr_t ioc_init_sem_reg;
81 bfa_os_addr_t ioc_usage_reg; 83 bfa_os_addr_t ioc_usage_reg;
82 bfa_os_addr_t host_page_num_fn; 84 bfa_os_addr_t host_page_num_fn;
83 bfa_os_addr_t heartbeat; 85 bfa_os_addr_t heartbeat;
84 bfa_os_addr_t ioc_fwstate; 86 bfa_os_addr_t ioc_fwstate;
85 bfa_os_addr_t ll_halt; 87 bfa_os_addr_t ll_halt;
88 bfa_os_addr_t err_set;
86 bfa_os_addr_t shirq_isr_next; 89 bfa_os_addr_t shirq_isr_next;
87 bfa_os_addr_t shirq_msk_next; 90 bfa_os_addr_t shirq_msk_next;
88 bfa_os_addr_t smem_page_start; 91 bfa_os_addr_t smem_page_start;
@@ -154,7 +157,6 @@ struct bfa_ioc_s {
154 struct bfa_timer_s ioc_timer; 157 struct bfa_timer_s ioc_timer;
155 struct bfa_timer_s sem_timer; 158 struct bfa_timer_s sem_timer;
156 u32 hb_count; 159 u32 hb_count;
157 u32 hb_fail;
158 u32 retry_count; 160 u32 retry_count;
159 struct list_head hb_notify_q; 161 struct list_head hb_notify_q;
160 void *dbg_fwsave; 162 void *dbg_fwsave;
@@ -177,20 +179,45 @@ struct bfa_ioc_s {
177 struct bfi_ioc_attr_s *attr; 179 struct bfi_ioc_attr_s *attr;
178 struct bfa_ioc_cbfn_s *cbfn; 180 struct bfa_ioc_cbfn_s *cbfn;
179 struct bfa_ioc_mbox_mod_s mbox_mod; 181 struct bfa_ioc_mbox_mod_s mbox_mod;
182 struct bfa_ioc_hwif_s *ioc_hwif;
180}; 183};
181 184
182#define bfa_ioc_pcifn(__ioc) (__ioc)->pcidev.pci_func 185struct bfa_ioc_hwif_s {
183#define bfa_ioc_devid(__ioc) (__ioc)->pcidev.device_id 186 bfa_status_t (*ioc_pll_init) (struct bfa_ioc_s *ioc);
184#define bfa_ioc_bar0(__ioc) (__ioc)->pcidev.pci_bar_kva 187 bfa_boolean_t (*ioc_firmware_lock) (struct bfa_ioc_s *ioc);
188 void (*ioc_firmware_unlock) (struct bfa_ioc_s *ioc);
189 u32 * (*ioc_fwimg_get_chunk) (struct bfa_ioc_s *ioc,
190 u32 off);
191 u32 (*ioc_fwimg_get_size) (struct bfa_ioc_s *ioc);
192 void (*ioc_reg_init) (struct bfa_ioc_s *ioc);
193 void (*ioc_map_port) (struct bfa_ioc_s *ioc);
194 void (*ioc_isr_mode_set) (struct bfa_ioc_s *ioc,
195 bfa_boolean_t msix);
196 void (*ioc_notify_hbfail) (struct bfa_ioc_s *ioc);
197 void (*ioc_ownership_reset) (struct bfa_ioc_s *ioc);
198};
199
200#define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
201#define bfa_ioc_devid(__ioc) ((__ioc)->pcidev.device_id)
202#define bfa_ioc_bar0(__ioc) ((__ioc)->pcidev.pci_bar_kva)
185#define bfa_ioc_portid(__ioc) ((__ioc)->port_id) 203#define bfa_ioc_portid(__ioc) ((__ioc)->port_id)
186#define bfa_ioc_fetch_stats(__ioc, __stats) \ 204#define bfa_ioc_fetch_stats(__ioc, __stats) \
187 ((__stats)->drv_stats) = (__ioc)->stats 205 (((__stats)->drv_stats) = (__ioc)->stats)
188#define bfa_ioc_clr_stats(__ioc) \ 206#define bfa_ioc_clr_stats(__ioc) \
189 bfa_os_memset(&(__ioc)->stats, 0, sizeof((__ioc)->stats)) 207 bfa_os_memset(&(__ioc)->stats, 0, sizeof((__ioc)->stats))
190#define bfa_ioc_maxfrsize(__ioc) (__ioc)->attr->maxfrsize 208#define bfa_ioc_maxfrsize(__ioc) ((__ioc)->attr->maxfrsize)
191#define bfa_ioc_rx_bbcredit(__ioc) (__ioc)->attr->rx_bbcredit 209#define bfa_ioc_rx_bbcredit(__ioc) ((__ioc)->attr->rx_bbcredit)
192#define bfa_ioc_speed_sup(__ioc) \ 210#define bfa_ioc_speed_sup(__ioc) \
193 BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop) 211 BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop)
212#define bfa_ioc_get_nports(__ioc) \
213 BFI_ADAPTER_GETP(NPORTS, (__ioc)->attr->adapter_prop)
214
215#define bfa_ioc_stats(_ioc, _stats) ((_ioc)->stats._stats++)
216#define BFA_IOC_FWIMG_MINSZ (16 * 1024)
217
218#define BFA_IOC_FLASH_CHUNK_NO(off) (off / BFI_FLASH_CHUNK_SZ_WORDS)
219#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS)
220#define BFA_IOC_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS)
194 221
195/** 222/**
196 * IOC mailbox interface 223 * IOC mailbox interface
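
The BFA_IOC_FLASH_* macros split a flat word offset into a chunk number and an offset within that chunk, both measured in BFI_FLASH_CHUNK_SZ_WORDS. A worked round-trip with a hypothetical chunk size (the sketch also parenthesizes the macro arguments, in the same spirit as the other parenthesization fixes in this series):

```c
#include <stdio.h>

#define FLASH_CHUNK_SZ_WORDS 256U        /* hypothetical chunk size */

#define FLASH_CHUNK_NO(off)        ((off) / FLASH_CHUNK_SZ_WORDS)
#define FLASH_OFFSET_IN_CHUNK(off) ((off) % FLASH_CHUNK_SZ_WORDS)
#define FLASH_CHUNK_ADDR(chunkno)  ((chunkno) * FLASH_CHUNK_SZ_WORDS)

int main(void)
{
	unsigned int off = 1000;         /* flat word offset into the image */
	unsigned int chunk = FLASH_CHUNK_NO(off);           /* 3 */
	unsigned int in_chunk = FLASH_OFFSET_IN_CHUNK(off); /* 232 */

	/* Round-trip: chunk base + in-chunk offset gives the flat offset. */
	printf("chunk %u, offset %u, base %u\n",
	       chunk, in_chunk, FLASH_CHUNK_ADDR(chunk));
	return FLASH_CHUNK_ADDR(chunk) + in_chunk == off ? 0 : 1;
}
```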
@@ -207,6 +234,14 @@ void bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
207/** 234/**
208 * IOC interfaces 235 * IOC interfaces
209 */ 236 */
237#define bfa_ioc_pll_init(__ioc) ((__ioc)->ioc_hwif->ioc_pll_init(__ioc))
238#define bfa_ioc_isr_mode_set(__ioc, __msix) \
239 ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix))
240#define bfa_ioc_ownership_reset(__ioc) \
241 ((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc))
242
243void bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc);
244void bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc);
210void bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, 245void bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa,
211 struct bfa_ioc_cbfn_s *cbfn, struct bfa_timer_mod_s *timer_mod, 246 struct bfa_ioc_cbfn_s *cbfn, struct bfa_timer_mod_s *timer_mod,
212 struct bfa_trc_mod_s *trcmod, 247 struct bfa_trc_mod_s *trcmod,
@@ -223,13 +258,21 @@ bfa_boolean_t bfa_ioc_intx_claim(struct bfa_ioc_s *ioc);
223void bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param); 258void bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param);
224void bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *msg); 259void bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *msg);
225void bfa_ioc_error_isr(struct bfa_ioc_s *ioc); 260void bfa_ioc_error_isr(struct bfa_ioc_s *ioc);
226void bfa_ioc_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t intx);
227bfa_status_t bfa_ioc_pll_init(struct bfa_ioc_s *ioc);
228bfa_boolean_t bfa_ioc_is_operational(struct bfa_ioc_s *ioc); 261bfa_boolean_t bfa_ioc_is_operational(struct bfa_ioc_s *ioc);
229bfa_boolean_t bfa_ioc_is_disabled(struct bfa_ioc_s *ioc); 262bfa_boolean_t bfa_ioc_is_disabled(struct bfa_ioc_s *ioc);
230bfa_boolean_t bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc); 263bfa_boolean_t bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc);
231bfa_boolean_t bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc); 264bfa_boolean_t bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc);
232void bfa_ioc_cfg_complete(struct bfa_ioc_s *ioc); 265void bfa_ioc_cfg_complete(struct bfa_ioc_s *ioc);
266enum bfa_ioc_type_e bfa_ioc_get_type(struct bfa_ioc_s *ioc);
267void bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num);
268void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver);
269void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver);
270void bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model);
271void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc,
272 char *manufacturer);
273void bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev);
274enum bfa_ioc_state bfa_ioc_get_state(struct bfa_ioc_s *ioc);
275
233void bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr); 276void bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr);
234void bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc, 277void bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
235 struct bfa_adapter_attr_s *ad_attr); 278 struct bfa_adapter_attr_s *ad_attr);
@@ -237,6 +280,7 @@ int bfa_ioc_debug_trcsz(bfa_boolean_t auto_recover);
237void bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave); 280void bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave);
238bfa_status_t bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, 281bfa_status_t bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata,
239 int *trclen); 282 int *trclen);
283void bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc);
240bfa_status_t bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, 284bfa_status_t bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata,
241 int *trclen); 285 int *trclen);
242u32 bfa_ioc_smem_pgnum(struct bfa_ioc_s *ioc, u32 fmaddr); 286u32 bfa_ioc_smem_pgnum(struct bfa_ioc_s *ioc, u32 fmaddr);
@@ -245,6 +289,13 @@ void bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc);
245bfa_boolean_t bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc); 289bfa_boolean_t bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc);
246void bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc, 290void bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc,
247 struct bfa_ioc_hbfail_notify_s *notify); 291 struct bfa_ioc_hbfail_notify_s *notify);
292bfa_boolean_t bfa_ioc_sem_get(bfa_os_addr_t sem_reg);
293void bfa_ioc_sem_release(bfa_os_addr_t sem_reg);
294void bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc);
295void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc,
296 struct bfi_ioc_image_hdr_s *fwhdr);
297bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc,
298 struct bfi_ioc_image_hdr_s *fwhdr);
248 299
249/* 300/*
250 * bfa mfg wwn API functions 301 * bfa mfg wwn API functions
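
The bfa_ioc.h hunks above replace direct pll-init/ISR-mode prototypes with a per-ASIC ops table (struct bfa_ioc_hwif_s) installed by bfa_ioc_set_cb_hwif()/bfa_ioc_set_ct_hwif(); the bfa_ioc_pll_init() and bfa_ioc_isr_mode_set() macros then dispatch through it. A minimal standalone sketch of that dispatch shape -- the names here (ioc_dev, dev_hwif_s) are illustrative, not the driver's own:

#include <stdio.h>

struct ioc_dev;

/* Per-ASIC ops table, mirroring the shape of struct bfa_ioc_hwif_s. */
struct dev_hwif_s {
	int  (*pll_init)(struct ioc_dev *dev);
	void (*isr_mode_set)(struct ioc_dev *dev, int msix);
};

struct ioc_dev {
	const struct dev_hwif_s *hwif;	/* installed at attach time */
};

static int cb_pll_init(struct ioc_dev *dev) { (void)dev; puts("cb pll"); return 0; }
static void cb_isr_mode_set(struct ioc_dev *dev, int msix) { (void)dev; (void)msix; }
static int ct_pll_init(struct ioc_dev *dev) { (void)dev; puts("ct pll"); return 0; }
static void ct_isr_mode_set(struct ioc_dev *dev, int msix)
{
	(void)dev;
	printf("ct isr mode: %s\n", msix ? "msix" : "intx");
}

static const struct dev_hwif_s hwif_cb = { cb_pll_init, cb_isr_mode_set };
static const struct dev_hwif_s hwif_ct = { ct_pll_init, ct_isr_mode_set };

/* Same pattern as the bfa_ioc_pll_init()/bfa_ioc_isr_mode_set() macros. */
#define dev_pll_init(__d)		((__d)->hwif->pll_init(__d))
#define dev_isr_mode_set(__d, __m)	((__d)->hwif->isr_mode_set(__d, __m))

int main(void)
{
	struct ioc_dev dev = { .hwif = &hwif_ct };	/* set_ct_hwif() analogue */

	dev_pll_init(&dev);
	dev_isr_mode_set(&dev, 1);
	return 0;
}
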
diff --git a/drivers/scsi/bfa/bfa_ioc_cb.c b/drivers/scsi/bfa/bfa_ioc_cb.c
new file mode 100644
index 000000000000..3ce85319f739
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_ioc_cb.c
@@ -0,0 +1,274 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa.h>
19#include <bfa_ioc.h>
20#include <bfa_fwimg_priv.h>
21#include <cna/bfa_cna_trcmod.h>
22#include <cs/bfa_debug.h>
23#include <bfi/bfi_ioc.h>
24#include <bfi/bfi_cbreg.h>
25#include <log/bfa_log_hal.h>
26#include <defs/bfa_defs_pci.h>
27
28BFA_TRC_FILE(CNA, IOC_CB);
29
30/*
31 * forward declarations
32 */
33static bfa_status_t bfa_ioc_cb_pll_init(struct bfa_ioc_s *ioc);
34static bfa_boolean_t bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc);
35static void bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc);
36static u32 *bfa_ioc_cb_fwimg_get_chunk(struct bfa_ioc_s *ioc, u32 off);
37static u32 bfa_ioc_cb_fwimg_get_size(struct bfa_ioc_s *ioc);
38static void bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc);
39static void bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc);
40static void bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
41static void bfa_ioc_cb_notify_hbfail(struct bfa_ioc_s *ioc);
42static void bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc);
43
44struct bfa_ioc_hwif_s hwif_cb = {
45 bfa_ioc_cb_pll_init,
46 bfa_ioc_cb_firmware_lock,
47 bfa_ioc_cb_firmware_unlock,
48 bfa_ioc_cb_fwimg_get_chunk,
49 bfa_ioc_cb_fwimg_get_size,
50 bfa_ioc_cb_reg_init,
51 bfa_ioc_cb_map_port,
52 bfa_ioc_cb_isr_mode_set,
53 bfa_ioc_cb_notify_hbfail,
54 bfa_ioc_cb_ownership_reset,
55};
56
57/**
58 * Called from bfa_ioc_attach() to map asic specific calls.
59 */
60void
61bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc)
62{
63 ioc->ioc_hwif = &hwif_cb;
64}
65
66static u32 *
67bfa_ioc_cb_fwimg_get_chunk(struct bfa_ioc_s *ioc, u32 off)
68{
69 return bfi_image_cb_get_chunk(off);
70}
71
72static u32
73bfa_ioc_cb_fwimg_get_size(struct bfa_ioc_s *ioc)
74{
75 return bfi_image_cb_size;
76}
77
78/**
79 * Return true if firmware of current driver matches the running firmware.
80 */
81static bfa_boolean_t
82bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc)
83{
84 return BFA_TRUE;
85}
86
87static void
88bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc)
89{
90}
91
92/**
93 * Notify other functions on HB failure.
94 */
95static void
96bfa_ioc_cb_notify_hbfail(struct bfa_ioc_s *ioc)
97{
98 bfa_reg_write(ioc->ioc_regs.err_set, __PSS_ERR_STATUS_SET);
99 bfa_reg_read(ioc->ioc_regs.err_set);
100}
101
102/**
103 * Host to LPU mailbox message addresses
104 */
105static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
106 { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
107 { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 }
108};
109
110/**
111 * Host <-> LPU mailbox command/status registers
112 */
113static struct { u32 hfn, lpu; } iocreg_mbcmd[] = {
114 { HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
115 { HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT }
116};
117
118static void
119bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc)
120{
121 bfa_os_addr_t rb;
122 int pcifn = bfa_ioc_pcifn(ioc);
123
124 rb = bfa_ioc_bar0(ioc);
125
126 ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
127 ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
128 ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;
129
130 if (ioc->port_id == 0) {
131 ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
132 ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
133 } else {
134 ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
135 ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
136 }
137
138 /**
139 * Host <-> LPU mailbox command/status registers
140 */
141 ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd[pcifn].hfn;
142 ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd[pcifn].lpu;
143
144 /*
145 * PSS control registers
146 */
147 ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
148 ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
149 ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_400_CTL_REG);
150 ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_212_CTL_REG);
151
152 /*
153 * IOC semaphore registers and serialization
154 */
155 ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
156 ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
157
158 /**
159 * sram memory access
160 */
161 ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
162 ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CB;
163
164 /*
165 * err set reg : for notification of hb failure
166 */
167 ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
168}
169
170/**
171 * Initialize IOC to port mapping.
172 */
173static void
174bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc)
175{
176 /**
177 * For crossbow, port id is same as pci function.
178 */
179 ioc->port_id = bfa_ioc_pcifn(ioc);
180 bfa_trc(ioc, ioc->port_id);
181}
182
183/**
184 * Set interrupt mode for a function: INTX or MSIX
185 */
186static void
187bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
188{
189}
190
191static bfa_status_t
192bfa_ioc_cb_pll_init(struct bfa_ioc_s *ioc)
193{
194 bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;
195 u32 pll_sclk, pll_fclk;
196
197 /*
198 * Hold semaphore so that nobody can access the chip during init.
199 */
200 bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
201
202 pll_sclk = __APP_PLL_212_ENABLE | __APP_PLL_212_LRESETN |
203 __APP_PLL_212_P0_1(3U) |
204 __APP_PLL_212_JITLMT0_1(3U) |
205 __APP_PLL_212_CNTLMT0_1(3U);
206 pll_fclk = __APP_PLL_400_ENABLE | __APP_PLL_400_LRESETN |
207 __APP_PLL_400_RSEL200500 | __APP_PLL_400_P0_1(3U) |
208 __APP_PLL_400_JITLMT0_1(3U) |
209 __APP_PLL_400_CNTLMT0_1(3U);
210
211 bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT);
212 bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT);
213
214 bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
215 bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
216 bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
217 bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
218 bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
219 bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
220
221 bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
222 __APP_PLL_212_LOGIC_SOFT_RESET);
223 bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
224 __APP_PLL_212_BYPASS |
225 __APP_PLL_212_LOGIC_SOFT_RESET);
226 bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
227 __APP_PLL_400_LOGIC_SOFT_RESET);
228 bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
229 __APP_PLL_400_BYPASS |
230 __APP_PLL_400_LOGIC_SOFT_RESET);
231 bfa_os_udelay(2);
232 bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
233 __APP_PLL_212_LOGIC_SOFT_RESET);
234 bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
235 __APP_PLL_400_LOGIC_SOFT_RESET);
236
237 bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
238 pll_sclk | __APP_PLL_212_LOGIC_SOFT_RESET);
239 bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
240 pll_fclk | __APP_PLL_400_LOGIC_SOFT_RESET);
241
242 /**
243 * Wait for PLLs to lock.
244 */
245 bfa_os_udelay(2000);
246 bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
247 bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
248
249 bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk);
250 bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk);
251
252 /*
253 * release semaphore.
254 */
255 bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
256
257 return BFA_STATUS_OK;
258}
259
260/**
261 * Cleanup hw semaphore and usecnt registers
262 */
263static void
264bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc)
265{
266
267 /*
268 * Read the hw sem reg to make sure that it is locked
269 * before we clear it. If it is not locked, writing 1
270 * will lock it instead of clearing it.
271 */
272 bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
273 bfa_ioc_hw_sem_release(ioc);
274}
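
bfa_ioc_cb_ownership_reset() above reads the hardware semaphore register before releasing it: per the in-code comment, a read acquires the semaphore, while writing 1 to a free semaphore would lock it rather than clear it. A toy model of that semantics, assuming the read-acquires/write-1-flips behavior the comment describes (the real register names and behavior belong to the chip):

#include <stdio.h>

/* Toy semaphore: a read returns the previous state and leaves it locked;
 * writing 1 flips it -- clears when held, locks when free. */
struct hw_sem { int locked; };

static int hw_sem_read(struct hw_sem *s)
{
	int was = s->locked;

	s->locked = 1;			/* a read always acquires */
	return was;
}

static void hw_sem_write1(struct hw_sem *s)
{
	s->locked = !s->locked;		/* write-1 toggles: clear or lock */
}

static void ownership_reset(struct hw_sem *s)
{
	/* Read first so the semaphore is guaranteed held; a bare write-1
	 * on a free semaphore would lock it instead of clearing it. */
	(void)hw_sem_read(s);
	hw_sem_write1(s);
}

int main(void)
{
	struct hw_sem sem = { 0 };

	ownership_reset(&sem);
	printf("locked=%d\n", sem.locked);	/* 0: released cleanly */
	return 0;
}
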
diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c
new file mode 100644
index 000000000000..20b58ad5f95c
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_ioc_ct.c
@@ -0,0 +1,423 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa.h>
19#include <bfa_ioc.h>
20#include <bfa_fwimg_priv.h>
21#include <cna/bfa_cna_trcmod.h>
22#include <cs/bfa_debug.h>
23#include <bfi/bfi_ioc.h>
24#include <bfi/bfi_ctreg.h>
25#include <log/bfa_log_hal.h>
26#include <defs/bfa_defs_pci.h>
27
28BFA_TRC_FILE(CNA, IOC_CT);
29
30/*
31 * forward declarations
32 */
33static bfa_status_t bfa_ioc_ct_pll_init(struct bfa_ioc_s *ioc);
34static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc);
35static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc);
36static u32* bfa_ioc_ct_fwimg_get_chunk(struct bfa_ioc_s *ioc,
37 u32 off);
38static u32 bfa_ioc_ct_fwimg_get_size(struct bfa_ioc_s *ioc);
39static void bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc);
40static void bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc);
41static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
42static void bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc);
43static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
44
45struct bfa_ioc_hwif_s hwif_ct = {
46 bfa_ioc_ct_pll_init,
47 bfa_ioc_ct_firmware_lock,
48 bfa_ioc_ct_firmware_unlock,
49 bfa_ioc_ct_fwimg_get_chunk,
50 bfa_ioc_ct_fwimg_get_size,
51 bfa_ioc_ct_reg_init,
52 bfa_ioc_ct_map_port,
53 bfa_ioc_ct_isr_mode_set,
54 bfa_ioc_ct_notify_hbfail,
55 bfa_ioc_ct_ownership_reset,
56};
57
58/**
59 * Called from bfa_ioc_attach() to map asic specific calls.
60 */
61void
62bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
63{
64 ioc->ioc_hwif = &hwif_ct;
65}
66
67static u32*
68bfa_ioc_ct_fwimg_get_chunk(struct bfa_ioc_s *ioc, u32 off)
69{
70 return bfi_image_ct_get_chunk(off);
71}
72
73static u32
74bfa_ioc_ct_fwimg_get_size(struct bfa_ioc_s *ioc)
75{
76 return bfi_image_ct_size;
77}
78
79/**
80 * Return true if firmware of current driver matches the running firmware.
81 */
82static bfa_boolean_t
83bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
84{
85 enum bfi_ioc_state ioc_fwstate;
86 u32 usecnt;
87 struct bfi_ioc_image_hdr_s fwhdr;
88
89 /**
90 * Firmware match check is relevant only for CNA.
91 */
92 if (!ioc->cna)
93 return BFA_TRUE;
94
95 /**
96 * If bios boot (flash based) -- do not increment usage count
97 */
98 if (bfa_ioc_ct_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ)
99 return BFA_TRUE;
100
101 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
102 usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg);
103
104 /**
105 * If usage count is 0, always return TRUE.
106 */
107 if (usecnt == 0) {
108 bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 1);
109 bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
110 bfa_trc(ioc, usecnt);
111 return BFA_TRUE;
112 }
113
114 ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);
115 bfa_trc(ioc, ioc_fwstate);
116
117 /**
118 * Use count cannot be non-zero while the chip is in uninitialized state.
119 */
120 bfa_assert(ioc_fwstate != BFI_IOC_UNINIT);
121
122 /**
123 * Check if another driver with a different firmware is active
124 */
125 bfa_ioc_fwver_get(ioc, &fwhdr);
126 if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
127 bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
128 bfa_trc(ioc, usecnt);
129 return BFA_FALSE;
130 }
131
132 /**
133 * Same firmware version. Increment the reference count.
134 */
135 usecnt++;
136 bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt);
137 bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
138 bfa_trc(ioc, usecnt);
139 return BFA_TRUE;
140}
141
142static void
143bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
144{
145 u32 usecnt;
146
147 /**
148 * Firmware lock is relevant only for CNA.
149 * If bios boot (flash based) -- do not decrement usage count
150 */
151 if (!ioc->cna || bfa_ioc_ct_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ)
152 return;
153
154 /**
155 * decrement usage count
156 */
157 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
158 usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg);
159 bfa_assert(usecnt > 0);
160
161 usecnt--;
162 bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt);
163 bfa_trc(ioc, usecnt);
164
165 bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
166}
167
168/**
169 * Notify other functions on HB failure.
170 */
171static void
172bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc)
173{
174 if (ioc->cna) {
175 bfa_reg_write(ioc->ioc_regs.ll_halt, __FW_INIT_HALT_P);
176 /* Wait for halt to take effect */
177 bfa_reg_read(ioc->ioc_regs.ll_halt);
178 } else {
179 bfa_reg_write(ioc->ioc_regs.err_set, __PSS_ERR_STATUS_SET);
180 bfa_reg_read(ioc->ioc_regs.err_set);
181 }
182}
183
184/**
185 * Host to LPU mailbox message addresses
186 */
187static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
188 { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
189 { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
190 { HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
191 { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
192};
193
194/**
195 * Host <-> LPU mailbox command/status registers - port 0
196 */
197static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = {
198 { HOSTFN0_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN0_MBOX0_CMD_STAT },
199 { HOSTFN1_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN1_MBOX0_CMD_STAT },
200 { HOSTFN2_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN2_MBOX0_CMD_STAT },
201 { HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT }
202};
203
204/**
205 * Host <-> LPU mailbox command/status registers - port 1
206 */
207static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = {
208 { HOSTFN0_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN0_MBOX0_CMD_STAT },
209 { HOSTFN1_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN1_MBOX0_CMD_STAT },
210 { HOSTFN2_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN2_MBOX0_CMD_STAT },
211 { HOSTFN3_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN3_MBOX0_CMD_STAT }
212};
213
214static void
215bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
216{
217 bfa_os_addr_t rb;
218 int pcifn = bfa_ioc_pcifn(ioc);
219
220 rb = bfa_ioc_bar0(ioc);
221
222 ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
223 ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
224 ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;
225
226 if (ioc->port_id == 0) {
227 ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
228 ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
229 ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn;
230 ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu;
231 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
232 } else {
233 ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
234 ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
235 ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn;
236 ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu;
237 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
238 }
239
240 /*
241 * PSS control registers
242 */
243 ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
244 ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
245 ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_425_CTL_REG);
246 ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_312_CTL_REG);
247
248 /*
249 * IOC semaphore registers and serialization
250 */
251 ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
252 ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
253 ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
254 ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
255
256 /**
257 * sram memory access
258 */
259 ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
260 ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
261
262 /*
263 * err set reg : for notification of hb failure in fcmode
264 */
265 ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
266}
267
268/**
269 * Initialize IOC to port mapping.
270 */
271
272#define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8)
273static void
274bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc)
275{
276 bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;
277 u32 r32;
278
279 /**
280 * For catapult, base port id on personality register and IOC type
281 */
282 r32 = bfa_reg_read(rb + FNC_PERS_REG);
283 r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
284 ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
285
286 bfa_trc(ioc, bfa_ioc_pcifn(ioc));
287 bfa_trc(ioc, ioc->port_id);
288}
289
290/**
291 * Set interrupt mode for a function: INTX or MSIX
292 */
293static void
294bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
295{
296 bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;
297 u32 r32, mode;
298
299 r32 = bfa_reg_read(rb + FNC_PERS_REG);
300 bfa_trc(ioc, r32);
301
302 mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
303 __F0_INTX_STATUS;
304
305 /**
306 * If already in desired mode, do not change anything
307 */
308 if (!msix && mode)
309 return;
310
311 if (msix)
312 mode = __F0_INTX_STATUS_MSIX;
313 else
314 mode = __F0_INTX_STATUS_INTA;
315
316 r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
317 r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
318 bfa_trc(ioc, r32);
319
320 bfa_reg_write(rb + FNC_PERS_REG, r32);
321}
322
323static bfa_status_t
324bfa_ioc_ct_pll_init(struct bfa_ioc_s *ioc)
325{
326 bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;
327 u32 pll_sclk, pll_fclk, r32;
328
329 /*
330 * Hold semaphore so that nobody can access the chip during init.
331 */
332 bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
333
334 pll_sclk = __APP_PLL_312_LRESETN | __APP_PLL_312_ENARST |
335 __APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(3U) |
336 __APP_PLL_312_JITLMT0_1(3U) |
337 __APP_PLL_312_CNTLMT0_1(1U);
338 pll_fclk = __APP_PLL_425_LRESETN | __APP_PLL_425_ENARST |
339 __APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) |
340 __APP_PLL_425_JITLMT0_1(3U) |
341 __APP_PLL_425_CNTLMT0_1(1U);
342
343 /**
344 * For catapult, choose operational mode FC/FCoE
345 */
346 if (ioc->fcmode) {
347 bfa_reg_write((rb + OP_MODE), 0);
348 bfa_reg_write((rb + ETH_MAC_SER_REG),
349 __APP_EMS_CMLCKSEL |
350 __APP_EMS_REFCKBUFEN2 |
351 __APP_EMS_CHANNEL_SEL);
352 } else {
353 ioc->pllinit = BFA_TRUE;
354 bfa_reg_write((rb + OP_MODE), __GLOBAL_FCOE_MODE);
355 bfa_reg_write((rb + ETH_MAC_SER_REG),
356 __APP_EMS_REFCKBUFEN1);
357 }
358
359 bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT);
360 bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT);
361
362 bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
363 bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
364 bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
365 bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
366 bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
367 bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
368
369 bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk |
370 __APP_PLL_312_LOGIC_SOFT_RESET);
371 bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk |
372 __APP_PLL_425_LOGIC_SOFT_RESET);
373 bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk |
374 __APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE);
375 bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk |
376 __APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE);
377
378 /**
379 * Wait for PLLs to lock.
380 */
381 bfa_reg_read(rb + HOSTFN0_INT_MSK);
382 bfa_os_udelay(2000);
383 bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
384 bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
385
386 bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk |
387 __APP_PLL_312_ENABLE);
388 bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk |
389 __APP_PLL_425_ENABLE);
390
391 bfa_reg_write((rb + MBIST_CTL_REG), __EDRAM_BISTR_START);
392 bfa_os_udelay(1000);
393 r32 = bfa_reg_read((rb + MBIST_STAT_REG));
394 bfa_trc(ioc, r32);
395 /*
396 * release semaphore.
397 */
398 bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
399
400 return BFA_STATUS_OK;
401}
402
403/**
404 * Cleanup hw semaphore and usecnt registers
405 */
406static void
407bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
408{
409
410 if (ioc->cna) {
411 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
412 bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 0);
413 bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
414 }
415
416 /*
417 * Read the hw sem reg to make sure that it is locked
418 * before we clear it. If it is not locked, writing 1
419 * will lock it instead of clearing it.
420 */
421 bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
422 bfa_ioc_hw_sem_release(ioc);
423}
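
bfa_ioc_ct_firmware_lock()/..._unlock() above keep a usage count in a semaphore-protected register: the first function claims the firmware and sets the count to 1, later functions join only when their driver carries the same firmware version, and unlock decrements. A user-space sketch of that policy, with a pthread mutex standing in for the HOST_SEM1 register and a string for the firmware version words:

#include <pthread.h>
#include <stdbool.h>
#include <string.h>

static pthread_mutex_t usage_sem = PTHREAD_MUTEX_INITIALIZER; /* ~HOST_SEM1 */
static unsigned int usecnt;			/* ~ioc_usage_reg */
static char running_fwver[16];			/* ~running fw version */

static bool firmware_lock(const char *drv_fwver)
{
	bool ok = true;

	pthread_mutex_lock(&usage_sem);
	if (usecnt == 0) {
		/* First owner: record our version and claim the device. */
		strncpy(running_fwver, drv_fwver, sizeof(running_fwver) - 1);
		usecnt = 1;
	} else if (strcmp(running_fwver, drv_fwver) != 0) {
		/* Another function is active with different firmware. */
		ok = false;
	} else {
		usecnt++;		/* same firmware: join it */
	}
	pthread_mutex_unlock(&usage_sem);
	return ok;
}

static void firmware_unlock(void)
{
	pthread_mutex_lock(&usage_sem);
	if (usecnt > 0)
		usecnt--;
	pthread_mutex_unlock(&usage_sem);
}

int main(void)
{
	bool a = firmware_lock("2.1.0.0");	/* claims, usecnt = 1 */
	bool b = firmware_lock("2.2.0.0");	/* rejected: version mismatch */

	firmware_unlock();
	return (a && !b) ? 0 : 1;
}
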
diff --git a/drivers/scsi/bfa/bfa_iocfc.c b/drivers/scsi/bfa/bfa_iocfc.c
index 12350b022d63..a76de2669bfc 100644
--- a/drivers/scsi/bfa/bfa_iocfc.c
+++ b/drivers/scsi/bfa/bfa_iocfc.c
@@ -172,6 +172,7 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
172 */ 172 */
173 if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT) { 173 if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT) {
174 iocfc->hwif.hw_reginit = bfa_hwct_reginit; 174 iocfc->hwif.hw_reginit = bfa_hwct_reginit;
175 iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
175 iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack; 176 iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
176 iocfc->hwif.hw_msix_init = bfa_hwct_msix_init; 177 iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
177 iocfc->hwif.hw_msix_install = bfa_hwct_msix_install; 178 iocfc->hwif.hw_msix_install = bfa_hwct_msix_install;
@@ -180,6 +181,7 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
180 iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs; 181 iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
181 } else { 182 } else {
182 iocfc->hwif.hw_reginit = bfa_hwcb_reginit; 183 iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
184 iocfc->hwif.hw_reqq_ack = bfa_hwcb_reqq_ack;
183 iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack; 185 iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
184 iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init; 186 iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
185 iocfc->hwif.hw_msix_install = bfa_hwcb_msix_install; 187 iocfc->hwif.hw_msix_install = bfa_hwcb_msix_install;
@@ -336,8 +338,10 @@ bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
336 bfa_cb_init(bfa->bfad, BFA_STATUS_OK); 338 bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
337 else 339 else
338 bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED); 340 bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
339 } else 341 } else {
340 bfa->iocfc.action = BFA_IOCFC_ACT_NONE; 342 if (bfa->iocfc.cfgdone)
343 bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
344 }
341} 345}
342 346
343static void 347static void
@@ -619,8 +623,6 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
619 623
620 bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod, 624 bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod,
621 bfa->trcmod, bfa->aen, bfa->logm); 625 bfa->trcmod, bfa->aen, bfa->logm);
622 bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_MC_IOCFC);
623 bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);
624 626
625 /** 627 /**
626 * Choose FC (ssid: 0x1C) v/s FCoE (ssid: 0x14) mode. 628 * Choose FC (ssid: 0x1C) v/s FCoE (ssid: 0x14) mode.
@@ -628,6 +630,9 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
628 if (0) 630 if (0)
629 bfa_ioc_set_fcmode(&bfa->ioc); 631 bfa_ioc_set_fcmode(&bfa->ioc);
630 632
633 bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_MC_IOCFC);
634 bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);
635
631 bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev); 636 bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
632 bfa_iocfc_mem_claim(bfa, cfg, meminfo); 637 bfa_iocfc_mem_claim(bfa, cfg, meminfo);
633 bfa_timer_init(&bfa->timer_mod); 638 bfa_timer_init(&bfa->timer_mod);
@@ -654,7 +659,6 @@ bfa_iocfc_init(struct bfa_s *bfa)
654{ 659{
655 bfa->iocfc.action = BFA_IOCFC_ACT_INIT; 660 bfa->iocfc.action = BFA_IOCFC_ACT_INIT;
656 bfa_ioc_enable(&bfa->ioc); 661 bfa_ioc_enable(&bfa->ioc);
657 bfa_msix_install(bfa);
658} 662}
659 663
660/** 664/**
@@ -794,7 +798,12 @@ bfa_iocfc_get_stats(struct bfa_s *bfa, struct bfa_iocfc_stats_s *stats,
794 798
795 if (iocfc->stats_busy) { 799 if (iocfc->stats_busy) {
796 bfa_trc(bfa, iocfc->stats_busy); 800 bfa_trc(bfa, iocfc->stats_busy);
797 return (BFA_STATUS_DEVBUSY); 801 return BFA_STATUS_DEVBUSY;
802 }
803
804 if (!bfa_iocfc_is_operational(bfa)) {
805 bfa_trc(bfa, 0);
806 return BFA_STATUS_IOC_NON_OP;
798 } 807 }
799 808
800 iocfc->stats_busy = BFA_TRUE; 809 iocfc->stats_busy = BFA_TRUE;
@@ -804,7 +813,7 @@ bfa_iocfc_get_stats(struct bfa_s *bfa, struct bfa_iocfc_stats_s *stats,
804 813
805 bfa_iocfc_stats_query(bfa); 814 bfa_iocfc_stats_query(bfa);
806 815
807 return (BFA_STATUS_OK); 816 return BFA_STATUS_OK;
808} 817}
809 818
810bfa_status_t 819bfa_status_t
@@ -814,7 +823,12 @@ bfa_iocfc_clear_stats(struct bfa_s *bfa, bfa_cb_ioc_t cbfn, void *cbarg)
814 823
815 if (iocfc->stats_busy) { 824 if (iocfc->stats_busy) {
816 bfa_trc(bfa, iocfc->stats_busy); 825 bfa_trc(bfa, iocfc->stats_busy);
817 return (BFA_STATUS_DEVBUSY); 826 return BFA_STATUS_DEVBUSY;
827 }
828
829 if (!bfa_iocfc_is_operational(bfa)) {
830 bfa_trc(bfa, 0);
831 return BFA_STATUS_IOC_NON_OP;
818 } 832 }
819 833
820 iocfc->stats_busy = BFA_TRUE; 834 iocfc->stats_busy = BFA_TRUE;
@@ -822,7 +836,7 @@ bfa_iocfc_clear_stats(struct bfa_s *bfa, bfa_cb_ioc_t cbfn, void *cbarg)
822 iocfc->stats_cbarg = cbarg; 836 iocfc->stats_cbarg = cbarg;
823 837
824 bfa_iocfc_stats_clear(bfa); 838 bfa_iocfc_stats_clear(bfa);
825 return (BFA_STATUS_OK); 839 return BFA_STATUS_OK;
826} 840}
827 841
828/** 842/**
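
The bfa_iocfc.c hunks above add an operational check ahead of the busy flag in both stats paths, so callers get BFA_STATUS_IOC_NON_OP back instead of queueing work on a dead IOC. A condensed sketch of that guard ordering, with the status names and operational flag as stand-ins:

#include <stdbool.h>

enum status { STATUS_OK, STATUS_DEVBUSY, STATUS_IOC_NON_OP };

struct stats_ctx {
	bool busy;		/* an earlier query is still in flight */
	bool operational;	/* IOC is up and configured */
};

static enum status stats_get(struct stats_ctx *ctx)
{
	if (ctx->busy)
		return STATUS_DEVBUSY;

	if (!ctx->operational)
		return STATUS_IOC_NON_OP;	/* refuse to touch a dead IOC */

	ctx->busy = true;
	/* ... issue the mailbox query; the completion clears ctx->busy ... */
	return STATUS_OK;
}

int main(void)
{
	struct stats_ctx ctx = { .busy = false, .operational = false };

	return stats_get(&ctx) == STATUS_IOC_NON_OP ? 0 : 1;
}
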
diff --git a/drivers/scsi/bfa/bfa_iocfc.h b/drivers/scsi/bfa/bfa_iocfc.h
index 7ad177ed4cfc..fbb4bdc9d600 100644
--- a/drivers/scsi/bfa/bfa_iocfc.h
+++ b/drivers/scsi/bfa/bfa_iocfc.h
@@ -54,6 +54,7 @@ struct bfa_msix_s {
54 */ 54 */
55struct bfa_hwif_s { 55struct bfa_hwif_s {
56 void (*hw_reginit)(struct bfa_s *bfa); 56 void (*hw_reginit)(struct bfa_s *bfa);
57 void (*hw_reqq_ack)(struct bfa_s *bfa, int reqq);
57 void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq); 58 void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq);
58 void (*hw_msix_init)(struct bfa_s *bfa, int nvecs); 59 void (*hw_msix_init)(struct bfa_s *bfa, int nvecs);
59 void (*hw_msix_install)(struct bfa_s *bfa); 60 void (*hw_msix_install)(struct bfa_s *bfa);
@@ -107,13 +108,13 @@ struct bfa_iocfc_s {
107 108
108#define bfa_lpuid(__bfa) bfa_ioc_portid(&(__bfa)->ioc) 109#define bfa_lpuid(__bfa) bfa_ioc_portid(&(__bfa)->ioc)
109#define bfa_msix_init(__bfa, __nvecs) \ 110#define bfa_msix_init(__bfa, __nvecs) \
110 (__bfa)->iocfc.hwif.hw_msix_init(__bfa, __nvecs) 111 ((__bfa)->iocfc.hwif.hw_msix_init(__bfa, __nvecs))
111#define bfa_msix_install(__bfa) \ 112#define bfa_msix_install(__bfa) \
112 (__bfa)->iocfc.hwif.hw_msix_install(__bfa) 113 ((__bfa)->iocfc.hwif.hw_msix_install(__bfa))
113#define bfa_msix_uninstall(__bfa) \ 114#define bfa_msix_uninstall(__bfa) \
114 (__bfa)->iocfc.hwif.hw_msix_uninstall(__bfa) 115 ((__bfa)->iocfc.hwif.hw_msix_uninstall(__bfa))
115#define bfa_isr_mode_set(__bfa, __msix) \ 116#define bfa_isr_mode_set(__bfa, __msix) \
116 (__bfa)->iocfc.hwif.hw_isr_mode_set(__bfa, __msix) 117 ((__bfa)->iocfc.hwif.hw_isr_mode_set(__bfa, __msix))
117#define bfa_msix_getvecs(__bfa, __vecmap, __nvecs, __maxvec) \ 118#define bfa_msix_getvecs(__bfa, __vecmap, __nvecs, __maxvec) \
118 (__bfa)->iocfc.hwif.hw_msix_getvecs(__bfa, __vecmap, __nvecs, __maxvec) 119 (__bfa)->iocfc.hwif.hw_msix_getvecs(__bfa, __vecmap, __nvecs, __maxvec)
119 120
@@ -143,6 +144,7 @@ void bfa_msix_rspq(struct bfa_s *bfa, int vec);
143void bfa_msix_lpu_err(struct bfa_s *bfa, int vec); 144void bfa_msix_lpu_err(struct bfa_s *bfa, int vec);
144 145
145void bfa_hwcb_reginit(struct bfa_s *bfa); 146void bfa_hwcb_reginit(struct bfa_s *bfa);
147void bfa_hwcb_reqq_ack(struct bfa_s *bfa, int rspq);
146void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq); 148void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq);
147void bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs); 149void bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs);
148void bfa_hwcb_msix_install(struct bfa_s *bfa); 150void bfa_hwcb_msix_install(struct bfa_s *bfa);
@@ -151,6 +153,7 @@ void bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix);
151void bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, 153void bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *vecmap,
152 u32 *nvecs, u32 *maxvec); 154 u32 *nvecs, u32 *maxvec);
153void bfa_hwct_reginit(struct bfa_s *bfa); 155void bfa_hwct_reginit(struct bfa_s *bfa);
156void bfa_hwct_reqq_ack(struct bfa_s *bfa, int rspq);
154void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq); 157void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq);
155void bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs); 158void bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs);
156void bfa_hwct_msix_install(struct bfa_s *bfa); 159void bfa_hwct_msix_install(struct bfa_s *bfa);
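
Several macro hunks above (bfa_msix_init, bfa_msix_install, and the bfa_ioc_* accessors earlier) wrap their expansions in an outer set of parentheses. That matters whenever the macro is used inside a larger expression; a small illustration with a hypothetical accessor macro:

#include <stdio.h>

struct obj { int val; };

#define OBJ_VAL_BAD(__o)	(__o)->val + 1		/* body not parenthesized */
#define OBJ_VAL_GOOD(__o)	((__o)->val + 1)	/* safe in any expression */

int main(void)
{
	struct obj o = { .val = 3 };

	printf("%d\n", 2 * OBJ_VAL_BAD(&o));	/* 2*3 + 1 = 7: surprising */
	printf("%d\n", 2 * OBJ_VAL_GOOD(&o));	/* 2*(3+1) = 8: intended */
	return 0;
}
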
diff --git a/drivers/scsi/bfa/bfa_ioim.c b/drivers/scsi/bfa/bfa_ioim.c
index 7ae2552e1e14..5b107abe46e5 100644
--- a/drivers/scsi/bfa/bfa_ioim.c
+++ b/drivers/scsi/bfa/bfa_ioim.c
@@ -105,13 +105,13 @@ bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
105 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); 105 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
106 list_del(&ioim->qe); 106 list_del(&ioim->qe);
107 list_add_tail(&ioim->qe, 107 list_add_tail(&ioim->qe,
108 &ioim->fcpim->ioim_comp_q); 108 &ioim->fcpim->ioim_comp_q);
109 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, 109 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
110 __bfa_cb_ioim_pathtov, ioim); 110 __bfa_cb_ioim_pathtov, ioim);
111 } else { 111 } else {
112 list_del(&ioim->qe); 112 list_del(&ioim->qe);
113 list_add_tail(&ioim->qe, 113 list_add_tail(&ioim->qe,
114 &ioim->itnim->pending_q); 114 &ioim->itnim->pending_q);
115 } 115 }
116 break; 116 break;
117 } 117 }
@@ -149,7 +149,7 @@ bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
149 break; 149 break;
150 150
151 default: 151 default:
152 bfa_assert(0); 152 bfa_sm_fault(ioim->bfa, event);
153 } 153 }
154} 154}
155 155
@@ -194,7 +194,7 @@ bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
194 break; 194 break;
195 195
196 default: 196 default:
197 bfa_assert(0); 197 bfa_sm_fault(ioim->bfa, event);
198 } 198 }
199} 199}
200 200
@@ -259,7 +259,7 @@ bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
259 break; 259 break;
260 260
261 default: 261 default:
262 bfa_assert(0); 262 bfa_sm_fault(ioim->bfa, event);
263 } 263 }
264} 264}
265 265
@@ -317,7 +317,7 @@ bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
317 break; 317 break;
318 318
319 default: 319 default:
320 bfa_assert(0); 320 bfa_sm_fault(ioim->bfa, event);
321 } 321 }
322} 322}
323 323
@@ -377,7 +377,7 @@ bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
377 break; 377 break;
378 378
379 default: 379 default:
380 bfa_assert(0); 380 bfa_sm_fault(ioim->bfa, event);
381 } 381 }
382} 382}
383 383
@@ -419,7 +419,7 @@ bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
419 break; 419 break;
420 420
421 default: 421 default:
422 bfa_assert(0); 422 bfa_sm_fault(ioim->bfa, event);
423 } 423 }
424} 424}
425 425
@@ -467,7 +467,7 @@ bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
467 break; 467 break;
468 468
469 default: 469 default:
470 bfa_assert(0); 470 bfa_sm_fault(ioim->bfa, event);
471 } 471 }
472} 472}
473 473
@@ -516,7 +516,7 @@ bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
516 break; 516 break;
517 517
518 default: 518 default:
519 bfa_assert(0); 519 bfa_sm_fault(ioim->bfa, event);
520 } 520 }
521} 521}
522 522
@@ -544,7 +544,7 @@ bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
544 break; 544 break;
545 545
546 default: 546 default:
547 bfa_assert(0); 547 bfa_sm_fault(ioim->bfa, event);
548 } 548 }
549} 549}
550 550
@@ -577,7 +577,7 @@ bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
577 break; 577 break;
578 578
579 default: 579 default:
580 bfa_assert(0); 580 bfa_sm_fault(ioim->bfa, event);
581 } 581 }
582} 582}
583 583
@@ -605,7 +605,7 @@ bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
605 break; 605 break;
606 606
607 default: 607 default:
608 bfa_assert(0); 608 bfa_sm_fault(ioim->bfa, event);
609 } 609 }
610} 610}
611 611
diff --git a/drivers/scsi/bfa/bfa_itnim.c b/drivers/scsi/bfa/bfa_itnim.c
index 4d5c61a4f85c..a914ff255135 100644
--- a/drivers/scsi/bfa/bfa_itnim.c
+++ b/drivers/scsi/bfa/bfa_itnim.c
@@ -144,7 +144,7 @@ bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
144 break; 144 break;
145 145
146 default: 146 default:
147 bfa_assert(0); 147 bfa_sm_fault(itnim->bfa, event);
148 } 148 }
149} 149}
150 150
@@ -175,7 +175,7 @@ bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
175 break; 175 break;
176 176
177 default: 177 default:
178 bfa_assert(0); 178 bfa_sm_fault(itnim->bfa, event);
179 } 179 }
180} 180}
181 181
@@ -212,7 +212,7 @@ bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
212 break; 212 break;
213 213
214 default: 214 default:
215 bfa_assert(0); 215 bfa_sm_fault(itnim->bfa, event);
216 } 216 }
217} 217}
218 218
@@ -247,7 +247,7 @@ bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
247 break; 247 break;
248 248
249 default: 249 default:
250 bfa_assert(0); 250 bfa_sm_fault(itnim->bfa, event);
251 } 251 }
252} 252}
253 253
@@ -275,7 +275,7 @@ bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
275 break; 275 break;
276 276
277 default: 277 default:
278 bfa_assert(0); 278 bfa_sm_fault(itnim->bfa, event);
279 } 279 }
280} 280}
281 281
@@ -317,7 +317,7 @@ bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
317 break; 317 break;
318 318
319 default: 319 default:
320 bfa_assert(0); 320 bfa_sm_fault(itnim->bfa, event);
321 } 321 }
322} 322}
323 323
@@ -348,7 +348,7 @@ bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
348 break; 348 break;
349 349
350 default: 350 default:
351 bfa_assert(0); 351 bfa_sm_fault(itnim->bfa, event);
352 } 352 }
353} 353}
354 354
@@ -385,7 +385,7 @@ bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
385 break; 385 break;
386 386
387 default: 387 default:
388 bfa_assert(0); 388 bfa_sm_fault(itnim->bfa, event);
389 } 389 }
390} 390}
391 391
@@ -413,7 +413,7 @@ bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
413 break; 413 break;
414 414
415 default: 415 default:
416 bfa_assert(0); 416 bfa_sm_fault(itnim->bfa, event);
417 } 417 }
418} 418}
419 419
@@ -442,7 +442,7 @@ bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
442 break; 442 break;
443 443
444 default: 444 default:
445 bfa_assert(0); 445 bfa_sm_fault(itnim->bfa, event);
446 } 446 }
447} 447}
448 448
@@ -470,7 +470,7 @@ bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
470 break; 470 break;
471 471
472 default: 472 default:
473 bfa_assert(0); 473 bfa_sm_fault(itnim->bfa, event);
474 } 474 }
475} 475}
476 476
@@ -502,7 +502,7 @@ bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
502 break; 502 break;
503 503
504 default: 504 default:
505 bfa_assert(0); 505 bfa_sm_fault(itnim->bfa, event);
506 } 506 }
507} 507}
508 508
@@ -538,7 +538,7 @@ bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
538 break; 538 break;
539 539
540 default: 540 default:
541 bfa_assert(0); 541 bfa_sm_fault(itnim->bfa, event);
542 } 542 }
543} 543}
544 544
@@ -559,7 +559,7 @@ bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
559 break; 559 break;
560 560
561 default: 561 default:
562 bfa_assert(0); 562 bfa_sm_fault(itnim->bfa, event);
563 } 563 }
564} 564}
565 565
@@ -583,7 +583,7 @@ bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
583 break; 583 break;
584 584
585 default: 585 default:
586 bfa_assert(0); 586 bfa_sm_fault(itnim->bfa, event);
587 } 587 }
588} 588}
589 589
@@ -1029,7 +1029,7 @@ bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn)
1029 bfa_stats(itnim, creates); 1029 bfa_stats(itnim, creates);
1030 bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE); 1030 bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE);
1031 1031
1032 return (itnim); 1032 return itnim;
1033} 1033}
1034 1034
1035void 1035void
@@ -1061,7 +1061,7 @@ bfa_itnim_offline(struct bfa_itnim_s *itnim)
1061bfa_boolean_t 1061bfa_boolean_t
1062bfa_itnim_hold_io(struct bfa_itnim_s *itnim) 1062bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
1063{ 1063{
1064 return ( 1064 return
1065 itnim->fcpim->path_tov && itnim->iotov_active && 1065 itnim->fcpim->path_tov && itnim->iotov_active &&
1066 (bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) || 1066 (bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) ||
1067 bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) || 1067 bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) ||
@@ -1069,7 +1069,7 @@ bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
1069 bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) || 1069 bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) ||
1070 bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) || 1070 bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) ||
1071 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable)) 1071 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable))
1072); 1072 ;
1073} 1073}
1074 1074
1075void 1075void
diff --git a/drivers/scsi/bfa/bfa_log.c b/drivers/scsi/bfa/bfa_log.c
index c2735e55cf03..e7514016c9c6 100644
--- a/drivers/scsi/bfa/bfa_log.c
+++ b/drivers/scsi/bfa/bfa_log.c
@@ -231,9 +231,9 @@ bfa_log_get_level(struct bfa_log_mod_s *log_mod, int mod_id)
231 return BFA_LOG_INVALID; 231 return BFA_LOG_INVALID;
232 232
233 if (log_mod) 233 if (log_mod)
234 return (log_mod->log_level[mod_id]); 234 return log_mod->log_level[mod_id];
235 else 235 else
236 return (bfa_log_info[mod_id].level); 236 return bfa_log_info[mod_id].level;
237} 237}
238 238
239enum bfa_log_severity 239enum bfa_log_severity
diff --git a/drivers/scsi/bfa/bfa_lps.c b/drivers/scsi/bfa/bfa_lps.c
index 9844b45412b6..ad06f6189092 100644
--- a/drivers/scsi/bfa/bfa_lps.c
+++ b/drivers/scsi/bfa/bfa_lps.c
@@ -18,6 +18,7 @@
18#include <bfa.h> 18#include <bfa.h>
19#include <bfi/bfi_lps.h> 19#include <bfi/bfi_lps.h>
20#include <cs/bfa_debug.h> 20#include <cs/bfa_debug.h>
21#include <defs/bfa_defs_pci.h>
21 22
22BFA_TRC_FILE(HAL, LPS); 23BFA_TRC_FILE(HAL, LPS);
23BFA_MODULE(lps); 24BFA_MODULE(lps);
@@ -25,6 +26,12 @@ BFA_MODULE(lps);
25#define BFA_LPS_MIN_LPORTS (1) 26#define BFA_LPS_MIN_LPORTS (1)
26#define BFA_LPS_MAX_LPORTS (256) 27#define BFA_LPS_MAX_LPORTS (256)
27 28
29/*
30 * Maximum Vports supported per physical port or vf.
31 */
32#define BFA_LPS_MAX_VPORTS_SUPP_CB 255
33#define BFA_LPS_MAX_VPORTS_SUPP_CT 190
34
28/** 35/**
29 * forward declarations 36 * forward declarations
30 */ 37 */
@@ -49,7 +56,7 @@ static void bfa_lps_send_login(struct bfa_lps_s *lps);
49static void bfa_lps_send_logout(struct bfa_lps_s *lps); 56static void bfa_lps_send_logout(struct bfa_lps_s *lps);
50static void bfa_lps_login_comp(struct bfa_lps_s *lps); 57static void bfa_lps_login_comp(struct bfa_lps_s *lps);
51static void bfa_lps_logout_comp(struct bfa_lps_s *lps); 58static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
52 59static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
53 60
54/** 61/**
55 * lps_pvt BFA LPS private functions 62 * lps_pvt BFA LPS private functions
@@ -62,6 +69,7 @@ enum bfa_lps_event {
62 BFA_LPS_SM_RESUME = 4, /* space present in reqq queue */ 69 BFA_LPS_SM_RESUME = 4, /* space present in reqq queue */
63 BFA_LPS_SM_DELETE = 5, /* lps delete from user */ 70 BFA_LPS_SM_DELETE = 5, /* lps delete from user */
64 BFA_LPS_SM_OFFLINE = 6, /* Link is offline */ 71 BFA_LPS_SM_OFFLINE = 6, /* Link is offline */
72 BFA_LPS_SM_RX_CVL = 7, /* Rx clear virtual link */
65}; 73};
66 74
67static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event); 75static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
@@ -91,6 +99,12 @@ bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
91 bfa_sm_set_state(lps, bfa_lps_sm_login); 99 bfa_sm_set_state(lps, bfa_lps_sm_login);
92 bfa_lps_send_login(lps); 100 bfa_lps_send_login(lps);
93 } 101 }
102 if (lps->fdisc)
103 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
104 BFA_PL_EID_LOGIN, 0, "FDISC Request");
105 else
106 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
107 BFA_PL_EID_LOGIN, 0, "FLOGI Request");
94 break; 108 break;
95 109
96 case BFA_LPS_SM_LOGOUT: 110 case BFA_LPS_SM_LOGOUT:
@@ -101,6 +115,7 @@ bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
101 bfa_lps_free(lps); 115 bfa_lps_free(lps);
102 break; 116 break;
103 117
118 case BFA_LPS_SM_RX_CVL:
104 case BFA_LPS_SM_OFFLINE: 119 case BFA_LPS_SM_OFFLINE:
105 break; 120 break;
106 121
@@ -112,7 +127,7 @@ bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
112 break; 127 break;
113 128
114 default: 129 default:
115 bfa_assert(0); 130 bfa_sm_fault(lps->bfa, event);
116 } 131 }
117} 132}
118 133
@@ -127,10 +142,25 @@ bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
127 142
128 switch (event) { 143 switch (event) {
129 case BFA_LPS_SM_FWRSP: 144 case BFA_LPS_SM_FWRSP:
130 if (lps->status == BFA_STATUS_OK) 145 if (lps->status == BFA_STATUS_OK) {
131 bfa_sm_set_state(lps, bfa_lps_sm_online); 146 bfa_sm_set_state(lps, bfa_lps_sm_online);
132 else 147 if (lps->fdisc)
148 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
149 BFA_PL_EID_LOGIN, 0, "FDISC Accept");
150 else
151 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
152 BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
153 } else {
133 bfa_sm_set_state(lps, bfa_lps_sm_init); 154 bfa_sm_set_state(lps, bfa_lps_sm_init);
155 if (lps->fdisc)
156 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
157 BFA_PL_EID_LOGIN, 0,
158 "FDISC Fail (RJT or timeout)");
159 else
160 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
161 BFA_PL_EID_LOGIN, 0,
162 "FLOGI Fail (RJT or timeout)");
163 }
134 bfa_lps_login_comp(lps); 164 bfa_lps_login_comp(lps);
135 break; 165 break;
136 166
@@ -139,7 +169,7 @@ bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
139 break; 169 break;
140 170
141 default: 171 default:
142 bfa_assert(0); 172 bfa_sm_fault(lps->bfa, event);
143 } 173 }
144} 174}
145 175
@@ -162,8 +192,16 @@ bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
162 bfa_reqq_wcancel(&lps->wqe); 192 bfa_reqq_wcancel(&lps->wqe);
163 break; 193 break;
164 194
195 case BFA_LPS_SM_RX_CVL:
196 /*
197 * Login was not even sent out; so when getting out
198 * of this state, it will appear like a login retry
199 * after Clear virtual link
200 */
201 break;
202
165 default: 203 default:
166 bfa_assert(0); 204 bfa_sm_fault(lps->bfa, event);
167 } 205 }
168} 206}
169 207
@@ -185,6 +223,17 @@ bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
185 bfa_sm_set_state(lps, bfa_lps_sm_logout); 223 bfa_sm_set_state(lps, bfa_lps_sm_logout);
186 bfa_lps_send_logout(lps); 224 bfa_lps_send_logout(lps);
187 } 225 }
226 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
227 BFA_PL_EID_LOGO, 0, "Logout");
228 break;
229
230 case BFA_LPS_SM_RX_CVL:
231 bfa_sm_set_state(lps, bfa_lps_sm_init);
232
233 /* Let the vport module know about this event */
234 bfa_lps_cvl_event(lps);
235 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
236 BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
188 break; 237 break;
189 238
190 case BFA_LPS_SM_OFFLINE: 239 case BFA_LPS_SM_OFFLINE:
@@ -193,7 +242,7 @@ bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
193 break; 242 break;
194 243
195 default: 244 default:
196 bfa_assert(0); 245 bfa_sm_fault(lps->bfa, event);
197 } 246 }
198} 247}
199 248
@@ -217,7 +266,7 @@ bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
217 break; 266 break;
218 267
219 default: 268 default:
220 bfa_assert(0); 269 bfa_sm_fault(lps->bfa, event);
221 } 270 }
222} 271}
223 272
@@ -242,7 +291,7 @@ bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
242 break; 291 break;
243 292
244 default: 293 default:
245 bfa_assert(0); 294 bfa_sm_fault(lps->bfa, event);
246 } 295 }
247} 296}
248 297
@@ -396,6 +445,20 @@ bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
396} 445}
397 446
398/** 447/**
448 * Firmware received a Clear virtual link request (for FCoE)
449 */
450static void
451bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
452{
453 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
454 struct bfa_lps_s *lps;
455
456 lps = BFA_LPS_FROM_TAG(mod, cvl->lp_tag);
457
458 bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
459}
460
461/**
399 * Space is available in request queue, resume queueing request to firmware. 462 * Space is available in request queue, resume queueing request to firmware.
400 */ 463 */
401static void 464static void
@@ -531,7 +594,48 @@ bfa_lps_logout_comp(struct bfa_lps_s *lps)
531 bfa_cb_lps_flogo_comp(lps->bfa->bfad, lps->uarg); 594 bfa_cb_lps_flogo_comp(lps->bfa->bfad, lps->uarg);
532} 595}
533 596
597/**
598 * Clear virtual link completion handler for non-fcs
599 */
600static void
601bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
602{
603 struct bfa_lps_s *lps = arg;
604
605 if (!complete)
606 return;
607
608 /* Clear virtual link to base port will result in link down */
609 if (lps->fdisc)
610 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
611}
612
613/**
614 * Received Clear virtual link event: direct call for fcs,
615 * queue for others
616 */
617static void
618bfa_lps_cvl_event(struct bfa_lps_s *lps)
619{
620 if (!lps->bfa->fcs) {
621 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb,
622 lps);
623 return;
624 }
625
626 /* Clear virtual link to base port will result in link down */
627 if (lps->fdisc)
628 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
629}
534 630
631u32
632bfa_lps_get_max_vport(struct bfa_s *bfa)
633{
634 if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT)
635 return BFA_LPS_MAX_VPORTS_SUPP_CT;
636 else
637 return BFA_LPS_MAX_VPORTS_SUPP_CB;
638}
535 639
536/** 640/**
537 * lps_public BFA LPS public functions 641 * lps_public BFA LPS public functions
@@ -752,6 +856,14 @@ bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps)
752 return lps->lsrjt_expl; 856 return lps->lsrjt_expl;
753} 857}
754 858
859/**
860 * Return fpma/spma MAC for lport
861 */
862struct mac_s
863bfa_lps_get_lp_mac(struct bfa_lps_s *lps)
864{
865 return lps->lp_mac;
866}
755 867
756/** 868/**
757 * LPS firmware message class handler. 869 * LPS firmware message class handler.
@@ -773,6 +885,10 @@ bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
773 bfa_lps_logout_rsp(bfa, msg.logout_rsp); 885 bfa_lps_logout_rsp(bfa, msg.logout_rsp);
774 break; 886 break;
775 887
888 case BFI_LPS_H2I_CVL_EVENT:
889 bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
890 break;
891
776 default: 892 default:
777 bfa_trc(bfa, m->mhdr.msg_id); 893 bfa_trc(bfa, m->mhdr.msg_id);
778 bfa_assert(0); 894 bfa_assert(0);
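
bfa_lps_cvl_event() above delivers the clear-virtual-link notification inline when running in FCS context and defers it through bfa_cb_queue() otherwise, and only FDISC-based (vport) logins are notified. A compact sketch of that dispatch split, with a one-slot queue standing in for bfa_cb_queue():

#include <stdbool.h>
#include <stdio.h>

struct lps {
	bool fcs_context;	/* caller runs in FCS context */
	bool fdisc;		/* only FDISC (vport) logins get CVL */
};

static void notify_cvl(struct lps *lps)
{
	if (lps->fdisc)
		puts("clear-virtual-link delivered to vport");
}

static void cvl_event_cb(void *arg, bool complete)
{
	if (complete)
		notify_cvl(arg);
}

/* One-slot deferred-callback queue, standing in for bfa_cb_queue(). */
static void (*queued_cb)(void *arg, bool complete);
static void *queued_arg;

static void cvl_event(struct lps *lps)
{
	if (!lps->fcs_context) {
		queued_cb = cvl_event_cb;	/* defer to completion path */
		queued_arg = lps;
		return;
	}
	notify_cvl(lps);			/* FCS context: call inline */
}

int main(void)
{
	struct lps vport = { .fcs_context = false, .fdisc = true };

	cvl_event(&vport);
	if (queued_cb)
		queued_cb(queued_arg, true);	/* later, on completion */
	return 0;
}
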
diff --git a/drivers/scsi/bfa/bfa_module.c b/drivers/scsi/bfa/bfa_module.c
index 32eda8e1ec65..a7fcc80c177e 100644
--- a/drivers/scsi/bfa/bfa_module.c
+++ b/drivers/scsi/bfa/bfa_module.c
@@ -24,7 +24,7 @@
24 */ 24 */
25struct bfa_module_s *hal_mods[] = { 25struct bfa_module_s *hal_mods[] = {
26 &hal_mod_sgpg, 26 &hal_mod_sgpg,
27 &hal_mod_pport, 27 &hal_mod_fcport,
28 &hal_mod_fcxp, 28 &hal_mod_fcxp,
29 &hal_mod_lps, 29 &hal_mod_lps,
30 &hal_mod_uf, 30 &hal_mod_uf,
@@ -45,7 +45,7 @@ bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = {
45 bfa_isr_unhandled, /* BFI_MC_DIAG */ 45 bfa_isr_unhandled, /* BFI_MC_DIAG */
46 bfa_isr_unhandled, /* BFI_MC_FLASH */ 46 bfa_isr_unhandled, /* BFI_MC_FLASH */
47 bfa_isr_unhandled, /* BFI_MC_CEE */ 47 bfa_isr_unhandled, /* BFI_MC_CEE */
48 bfa_pport_isr, /* BFI_MC_PORT */ 48 bfa_fcport_isr, /* BFI_MC_FCPORT */
49 bfa_isr_unhandled, /* BFI_MC_IOCFC */ 49 bfa_isr_unhandled, /* BFI_MC_IOCFC */
50 bfa_isr_unhandled, /* BFI_MC_LL */ 50 bfa_isr_unhandled, /* BFI_MC_LL */
51 bfa_uf_isr, /* BFI_MC_UF */ 51 bfa_uf_isr, /* BFI_MC_UF */
diff --git a/drivers/scsi/bfa/bfa_modules_priv.h b/drivers/scsi/bfa/bfa_modules_priv.h
index 96f70534593c..f554c2fad6a9 100644
--- a/drivers/scsi/bfa/bfa_modules_priv.h
+++ b/drivers/scsi/bfa/bfa_modules_priv.h
@@ -29,7 +29,7 @@
29 29
30 30
31struct bfa_modules_s { 31struct bfa_modules_s {
32 struct bfa_pport_s pport; /* physical port module */ 32 struct bfa_fcport_s fcport; /* fc port module */
33 struct bfa_fcxp_mod_s fcxp_mod; /* fcxp module */ 33 struct bfa_fcxp_mod_s fcxp_mod; /* fcxp module */
34 struct bfa_lps_mod_s lps_mod; /* fcxp module */ 34 struct bfa_lps_mod_s lps_mod; /* fcxp module */
35 struct bfa_uf_mod_s uf_mod; /* unsolicited frame module */ 35 struct bfa_uf_mod_s uf_mod; /* unsolicited frame module */
diff --git a/drivers/scsi/bfa/bfa_port_priv.h b/drivers/scsi/bfa/bfa_port_priv.h
index 4b97e2759908..40e256ec67ff 100644
--- a/drivers/scsi/bfa/bfa_port_priv.h
+++ b/drivers/scsi/bfa/bfa_port_priv.h
@@ -23,9 +23,19 @@
23#include "bfa_intr_priv.h" 23#include "bfa_intr_priv.h"
24 24
25/** 25/**
26 * BFA physical port data structure 26 * Link notification data structure
27 */ 27 */
28struct bfa_pport_s { 28struct bfa_fcport_ln_s {
29 struct bfa_fcport_s *fcport;
30 bfa_sm_t sm;
31 struct bfa_cb_qe_s ln_qe; /* BFA callback queue elem for ln */
32 enum bfa_pport_linkstate ln_event; /* ln event for callback */
33};
34
35/**
36 * BFA FC port data structure
37 */
38struct bfa_fcport_s {
29 struct bfa_s *bfa; /* parent BFA instance */ 39 struct bfa_s *bfa; /* parent BFA instance */
30 bfa_sm_t sm; /* port state machine */ 40 bfa_sm_t sm; /* port state machine */
31 wwn_t nwwn; /* node wwn of physical port */ 41 wwn_t nwwn; /* node wwn of physical port */
@@ -36,6 +46,8 @@ struct bfa_pport_s {
36 enum bfa_pport_topology topology; /* current topology */ 46 enum bfa_pport_topology topology; /* current topology */
37 u8 myalpa; /* my ALPA in LOOP topology */ 47 u8 myalpa; /* my ALPA in LOOP topology */
38 u8 rsvd[3]; 48 u8 rsvd[3];
49 u32 mypid:24;
50 u32 rsvd_b:8;
39 struct bfa_pport_cfg_s cfg; /* current port configuration */ 51 struct bfa_pport_cfg_s cfg; /* current port configuration */
40 struct bfa_qos_attr_s qos_attr; /* QoS Attributes */ 52 struct bfa_qos_attr_s qos_attr; /* QoS Attributes */
41 struct bfa_qos_vc_attr_s qos_vc_attr; /* VC info from ELP */ 53 struct bfa_qos_vc_attr_s qos_vc_attr; /* VC info from ELP */
@@ -49,42 +61,31 @@ struct bfa_pport_s {
49 void (*event_cbfn) (void *cbarg, 61 void (*event_cbfn) (void *cbarg,
50 bfa_pport_event_t event); 62 bfa_pport_event_t event);
51 union { 63 union {
52 union bfi_pport_i2h_msg_u i2hmsg; 64 union bfi_fcport_i2h_msg_u i2hmsg;
53 } event_arg; 65 } event_arg;
54 void *bfad; /* BFA driver handle */ 66 void *bfad; /* BFA driver handle */
67 struct bfa_fcport_ln_s ln; /* Link Notification */
55 struct bfa_cb_qe_s hcb_qe; /* BFA callback queue elem */ 68 struct bfa_cb_qe_s hcb_qe; /* BFA callback queue elem */
56 enum bfa_pport_linkstate hcb_event; 69 struct bfa_timer_s timer; /* timer */
57 /* link event for callback */
58 u32 msgtag; /* firmware msg tag for reply */ 70
59 u8 *stats_kva; 71 u8 *stats_kva;
60 u64 stats_pa; 72 u64 stats_pa;
61 union bfa_pport_stats_u *stats; /* pport stats */ 73 union bfa_fcport_stats_u *stats;
62 u32 mypid : 24; 74 union bfa_fcport_stats_u *stats_ret; /* driver stats location */
63 u32 rsvd_b : 8; 75 bfa_status_t stats_status; /* stats/statsclr status */
64 struct bfa_timer_s timer; /* timer */ 76 bfa_boolean_t stats_busy; /* outstanding stats/statsclr */
65 union bfa_pport_stats_u *stats_ret; 77 bfa_boolean_t stats_qfull;
66 /* driver stats location */ 78 bfa_cb_pport_t stats_cbfn; /* driver callback function */
67 bfa_status_t stats_status; 79 void *stats_cbarg; /* *!< user callback arg */
68 /* stats/statsclr status */ 80 bfa_boolean_t diag_busy; /* diag busy status */
69 bfa_boolean_t stats_busy; 81 bfa_boolean_t beacon; /* port beacon status */
70 /* outstanding stats/statsclr */ 82 bfa_boolean_t link_e2e_beacon; /* link beacon status */
71 bfa_boolean_t stats_qfull;
72 bfa_boolean_t diag_busy;
73 /* diag busy status */
74 bfa_boolean_t beacon;
75 /* port beacon status */
76 bfa_boolean_t link_e2e_beacon;
77 /* link beacon status */
78 bfa_cb_pport_t stats_cbfn;
79 /* driver callback function */
80 void *stats_cbarg;
81 /* *!< user callback arg */
82}; 83};
83 84
84#define BFA_PORT_MOD(__bfa) (&(__bfa)->modules.pport) 85#define BFA_FCPORT_MOD(__bfa) (&(__bfa)->modules.fcport)
85 86
86/* 87/*
87 * public functions 88 * public functions
88 */ 89 */
89void bfa_pport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); 90void bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
90#endif /* __BFA_PORT_PRIV_H__ */ 91#endif /* __BFA_PORT_PRIV_H__ */
diff --git a/drivers/scsi/bfa/bfa_priv.h b/drivers/scsi/bfa/bfa_priv.h
index 0747a6b26f7b..be80fc7e1b0e 100644
--- a/drivers/scsi/bfa/bfa_priv.h
+++ b/drivers/scsi/bfa/bfa_priv.h
@@ -101,7 +101,7 @@ extern bfa_boolean_t bfa_auto_recover;
 extern struct bfa_module_s hal_mod_flash;
 extern struct bfa_module_s hal_mod_fcdiag;
 extern struct bfa_module_s hal_mod_sgpg;
-extern struct bfa_module_s hal_mod_pport;
+extern struct bfa_module_s hal_mod_fcport;
 extern struct bfa_module_s hal_mod_fcxp;
 extern struct bfa_module_s hal_mod_lps;
 extern struct bfa_module_s hal_mod_uf;
diff --git a/drivers/scsi/bfa/bfa_rport.c b/drivers/scsi/bfa/bfa_rport.c
index 16da77a8db28..7c509fa244e4 100644
--- a/drivers/scsi/bfa/bfa_rport.c
+++ b/drivers/scsi/bfa/bfa_rport.c
@@ -114,7 +114,7 @@ bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
 
 	default:
 		bfa_stats(rp, sm_un_unexp);
-		bfa_assert(0);
+		bfa_sm_fault(rp->bfa, event);
 	}
 }
 
@@ -146,7 +146,7 @@ bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
 
 	default:
 		bfa_stats(rp, sm_cr_unexp);
-		bfa_assert(0);
+		bfa_sm_fault(rp->bfa, event);
 	}
 }
 
@@ -183,7 +183,7 @@ bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
 
 	default:
 		bfa_stats(rp, sm_fwc_unexp);
-		bfa_assert(0);
+		bfa_sm_fault(rp->bfa, event);
 	}
 }
 
@@ -224,7 +224,7 @@ bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
 
 	default:
 		bfa_stats(rp, sm_fwc_unexp);
-		bfa_assert(0);
+		bfa_sm_fault(rp->bfa, event);
 	}
 }
 
@@ -296,7 +296,7 @@ bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
 
 	default:
 		bfa_stats(rp, sm_on_unexp);
-		bfa_assert(0);
+		bfa_sm_fault(rp->bfa, event);
 	}
 }
 
@@ -329,7 +329,7 @@ bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
 
 	default:
 		bfa_stats(rp, sm_fwd_unexp);
-		bfa_assert(0);
+		bfa_sm_fault(rp->bfa, event);
 	}
 }
 
@@ -359,7 +359,7 @@ bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
 
 	default:
 		bfa_stats(rp, sm_fwd_unexp);
-		bfa_assert(0);
+		bfa_sm_fault(rp->bfa, event);
 	}
 }
 
@@ -394,7 +394,7 @@ bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
 
 	default:
 		bfa_stats(rp, sm_off_unexp);
-		bfa_assert(0);
+		bfa_sm_fault(rp->bfa, event);
 	}
 }
 
@@ -421,7 +421,7 @@ bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(rp->bfa, event);
 	}
 }
 
@@ -446,7 +446,7 @@ bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(rp->bfa, event);
 	}
 }
 
@@ -477,7 +477,7 @@ bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
 
 	default:
 		bfa_stats(rp, sm_delp_unexp);
-		bfa_assert(0);
+		bfa_sm_fault(rp->bfa, event);
 	}
 }
 
@@ -512,7 +512,7 @@ bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
 
 	default:
 		bfa_stats(rp, sm_offp_unexp);
-		bfa_assert(0);
+		bfa_sm_fault(rp->bfa, event);
 	}
 }
 
@@ -550,7 +550,7 @@ bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
 
 	default:
 		bfa_stats(rp, sm_iocd_unexp);
-		bfa_assert(0);
+		bfa_sm_fault(rp->bfa, event);
 	}
 }
 
@@ -677,7 +677,7 @@ bfa_rport_alloc(struct bfa_rport_mod_s *mod)
 	if (rport)
 		list_add_tail(&rport->qe, &mod->rp_active_q);
 
-	return (rport);
+	return rport;
 }
 
 static void
@@ -834,7 +834,7 @@ bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
 	rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));
 
 	if (rp == NULL)
-		return (NULL);
+		return NULL;
 
 	rp->bfa = bfa;
 	rp->rport_drv = rport_drv;
@@ -843,7 +843,7 @@ bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
 	bfa_assert(bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
 	bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);
 
-	return (rp);
+	return rp;
 }
 
 void
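Every hunk in bfa_rport.c above makes the same substitution: the unexpected-event arm of each state-machine handler stops calling bfa_assert(0) and instead calls bfa_sm_fault(rp->bfa, event), recording the offending event against the owning instance rather than halting outright. Below is a minimal standalone sketch of that pattern; the names and the logging inside sm_fault() are illustrative stand-ins, not the driver's own code.

#include <stdio.h>

enum rport_event { RP_CREATE, RP_ONLINE, RP_DELETE };

struct rport {
	int id;
	void (*sm)(struct rport *rp, enum rport_event event);
};

/* Stand-in for bfa_sm_fault(): report the bad event, keep running. */
static void sm_fault(struct rport *rp, enum rport_event event)
{
	fprintf(stderr, "rport %d: unexpected event %d\n", rp->id, event);
}

static void rport_sm_uninit(struct rport *rp, enum rport_event event)
{
	switch (event) {
	case RP_CREATE:
		printf("rport %d: created\n", rp->id);
		break;
	default:
		sm_fault(rp, event);	/* previously a fatal assert(0) */
	}
}

int main(void)
{
	struct rport rp = { .id = 1, .sm = rport_sm_uninit };

	rp.sm(&rp, RP_CREATE);
	rp.sm(&rp, RP_DELETE);	/* unexpected in uninit: logged, not fatal */
	return 0;
}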
diff --git a/drivers/scsi/bfa/bfa_trcmod_priv.h b/drivers/scsi/bfa/bfa_trcmod_priv.h
index b3562dce7e9f..a7a82610db85 100644
--- a/drivers/scsi/bfa/bfa_trcmod_priv.h
+++ b/drivers/scsi/bfa/bfa_trcmod_priv.h
@@ -29,38 +29,36 @@
  * !!! needed between trace utility and driver version
  */
 enum {
-	BFA_TRC_HAL_IOC		= 1,
-	BFA_TRC_HAL_INTR	= 2,
-	BFA_TRC_HAL_FCXP	= 3,
-	BFA_TRC_HAL_UF		= 4,
-	BFA_TRC_HAL_DIAG	= 5,
-	BFA_TRC_HAL_RPORT	= 6,
-	BFA_TRC_HAL_FCPIM	= 7,
-	BFA_TRC_HAL_IOIM	= 8,
-	BFA_TRC_HAL_TSKIM	= 9,
-	BFA_TRC_HAL_ITNIM	= 10,
-	BFA_TRC_HAL_PPORT	= 11,
-	BFA_TRC_HAL_SGPG	= 12,
-	BFA_TRC_HAL_FLASH	= 13,
-	BFA_TRC_HAL_DEBUG	= 14,
-	BFA_TRC_HAL_WWN		= 15,
-	BFA_TRC_HAL_FLASH_RAW	= 16,
-	BFA_TRC_HAL_SBOOT	= 17,
-	BFA_TRC_HAL_SBOOT_IO	= 18,
-	BFA_TRC_HAL_SBOOT_INTR	= 19,
-	BFA_TRC_HAL_SBTEST	= 20,
-	BFA_TRC_HAL_IPFC	= 21,
-	BFA_TRC_HAL_IOCFC	= 22,
-	BFA_TRC_HAL_FCPTM	= 23,
-	BFA_TRC_HAL_IOTM	= 24,
-	BFA_TRC_HAL_TSKTM	= 25,
-	BFA_TRC_HAL_TIN		= 26,
-	BFA_TRC_HAL_LPS		= 27,
-	BFA_TRC_HAL_FCDIAG	= 28,
-	BFA_TRC_HAL_PBIND	= 29,
-	BFA_TRC_HAL_IOCFC_CT	= 30,
-	BFA_TRC_HAL_IOCFC_CB	= 31,
-	BFA_TRC_HAL_IOCFC_Q	= 32,
+	BFA_TRC_HAL_INTR	= 1,
+	BFA_TRC_HAL_FCXP	= 2,
+	BFA_TRC_HAL_UF		= 3,
+	BFA_TRC_HAL_RPORT	= 4,
+	BFA_TRC_HAL_FCPIM	= 5,
+	BFA_TRC_HAL_IOIM	= 6,
+	BFA_TRC_HAL_TSKIM	= 7,
+	BFA_TRC_HAL_ITNIM	= 8,
+	BFA_TRC_HAL_FCPORT	= 9,
+	BFA_TRC_HAL_SGPG	= 10,
+	BFA_TRC_HAL_FLASH	= 11,
+	BFA_TRC_HAL_DEBUG	= 12,
+	BFA_TRC_HAL_WWN		= 13,
+	BFA_TRC_HAL_FLASH_RAW	= 14,
+	BFA_TRC_HAL_SBOOT	= 15,
+	BFA_TRC_HAL_SBOOT_IO	= 16,
+	BFA_TRC_HAL_SBOOT_INTR	= 17,
+	BFA_TRC_HAL_SBTEST	= 18,
+	BFA_TRC_HAL_IPFC	= 19,
+	BFA_TRC_HAL_IOCFC	= 20,
+	BFA_TRC_HAL_FCPTM	= 21,
+	BFA_TRC_HAL_IOTM	= 22,
+	BFA_TRC_HAL_TSKTM	= 23,
+	BFA_TRC_HAL_TIN		= 24,
+	BFA_TRC_HAL_LPS		= 25,
+	BFA_TRC_HAL_FCDIAG	= 26,
+	BFA_TRC_HAL_PBIND	= 27,
+	BFA_TRC_HAL_IOCFC_CT	= 28,
+	BFA_TRC_HAL_IOCFC_CB	= 29,
+	BFA_TRC_HAL_IOCFC_Q	= 30,
 };
 
 #endif /* __BFA_TRCMOD_PRIV_H__ */
diff --git a/drivers/scsi/bfa/bfa_tskim.c b/drivers/scsi/bfa/bfa_tskim.c
index 010d40d1e5d3..ad9aaaedd3f1 100644
--- a/drivers/scsi/bfa/bfa_tskim.c
+++ b/drivers/scsi/bfa/bfa_tskim.c
@@ -23,13 +23,14 @@ BFA_TRC_FILE(HAL, TSKIM);
 /**
  * task management completion handling
  */
 #define bfa_tskim_qcomp(__tskim, __cbfn) do {				\
-	bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, __cbfn, (__tskim)); \
+	bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe,		\
+			 __cbfn, (__tskim));				\
 	bfa_tskim_notify_comp(__tskim);					\
 } while (0)
 
 #define bfa_tskim_notify_comp(__tskim) do {				\
 	if ((__tskim)->notify)						\
 		bfa_itnim_tskdone((__tskim)->itnim);			\
 } while (0)
 
@@ -109,7 +110,7 @@ bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(tskim->bfa, event);
 	}
 }
 
@@ -145,7 +146,7 @@ bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(tskim->bfa, event);
 	}
 }
 
@@ -177,7 +178,7 @@ bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(tskim->bfa, event);
 	}
 }
 
@@ -206,7 +207,7 @@ bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(tskim->bfa, event);
 	}
 }
 
@@ -241,7 +242,7 @@ bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(tskim->bfa, event);
 	}
 }
 
@@ -276,7 +277,7 @@ bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(tskim->bfa, event);
 	}
 }
 
@@ -302,7 +303,7 @@ bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(tskim->bfa, event);
 	}
 }
 
diff --git a/drivers/scsi/bfa/bfa_uf.c b/drivers/scsi/bfa/bfa_uf.c
index ff5f9deb1b22..4b3c2417d180 100644
--- a/drivers/scsi/bfa/bfa_uf.c
+++ b/drivers/scsi/bfa/bfa_uf.c
@@ -185,7 +185,7 @@ bfa_uf_get(struct bfa_uf_mod_s *uf_mod)
 	struct bfa_uf_s *uf;
 
 	bfa_q_deq(&uf_mod->uf_free_q, &uf);
-	return (uf);
+	return uf;
 }
 
 static void
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 6f2be5abf561..13f5feb308c2 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -19,7 +19,9 @@
  *  bfad.c Linux driver PCI interface module.
  */
 
+#include <linux/slab.h>
 #include <linux/module.h>
+#include <linux/kthread.h>
 #include "bfad_drv.h"
 #include "bfad_im.h"
 #include "bfad_tm.h"
@@ -53,6 +55,7 @@ static int log_level = BFA_LOG_WARNING;
 static int ioc_auto_recover = BFA_TRUE;
 static int ipfc_enable = BFA_FALSE;
 static int ipfc_mtu = -1;
+static int fdmi_enable = BFA_TRUE;
 int bfa_lun_queue_depth = BFAD_LUN_QUEUE_DEPTH;
 int bfa_linkup_delay = -1;
 
@@ -74,6 +77,7 @@ module_param(log_level, int, S_IRUGO | S_IWUSR);
 module_param(ioc_auto_recover, int, S_IRUGO | S_IWUSR);
 module_param(ipfc_enable, int, S_IRUGO | S_IWUSR);
 module_param(ipfc_mtu, int, S_IRUGO | S_IWUSR);
+module_param(fdmi_enable, int, S_IRUGO | S_IWUSR);
 module_param(bfa_linkup_delay, int, S_IRUGO | S_IWUSR);
 
 /*
@@ -95,6 +99,8 @@ bfad_fc4_probe(struct bfad_s *bfad)
 
 	if (ipfc_enable)
 		bfad_ipfc_probe(bfad);
+
+	bfad->bfad_flags |= BFAD_FC4_PROBE_DONE;
 ext:
 	return rc;
 }
@@ -106,6 +112,7 @@ bfad_fc4_probe_undo(struct bfad_s *bfad)
 	bfad_tm_probe_undo(bfad);
 	if (ipfc_enable)
 		bfad_ipfc_probe_undo(bfad);
+	bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
 }
 
 static void
@@ -173,9 +180,19 @@ bfa_cb_init(void *drv, bfa_status_t init_status)
 {
 	struct bfad_s *bfad = drv;
 
-	if (init_status == BFA_STATUS_OK)
+	if (init_status == BFA_STATUS_OK) {
 		bfad->bfad_flags |= BFAD_HAL_INIT_DONE;
 
+		/* If BFAD_HAL_INIT_FAIL flag is set:
+		 * Wake up the kernel thread to start
+		 * the bfad operations after HAL init done
+		 */
+		if ((bfad->bfad_flags & BFAD_HAL_INIT_FAIL)) {
+			bfad->bfad_flags &= ~BFAD_HAL_INIT_FAIL;
+			wake_up_process(bfad->bfad_tsk);
+		}
+	}
+
 	complete(&bfad->comp);
 }
 
@@ -188,8 +205,8 @@ static struct bfad_port_s *
 bfad_get_drv_port(struct bfad_s *bfad, struct bfad_vf_s *vf_drv,
 		  struct bfad_vport_s *vp_drv)
 {
-	return ((vp_drv) ? (&(vp_drv)->drv_port)
-		: ((vf_drv) ? (&(vf_drv)->base_port) : (&(bfad)->pport)));
+	return (vp_drv) ? (&(vp_drv)->drv_port)
+		: ((vf_drv) ? (&(vf_drv)->base_port) : (&(bfad)->pport));
 }
 
 struct bfad_port_s *
@@ -648,7 +665,7 @@ bfad_fcs_port_cfg(struct bfad_s *bfad)
 
 	sprintf(symname, "%s-%d", BFAD_DRIVER_NAME, bfad->inst_no);
 	memcpy(port_cfg.sym_name.symname, symname, strlen(symname));
-	bfa_pport_get_attr(&bfad->bfa, &attr);
+	bfa_fcport_get_attr(&bfad->bfa, &attr);
 	port_cfg.nwwn = attr.nwwn;
 	port_cfg.pwwn = attr.pwwn;
 
@@ -661,7 +678,6 @@ bfad_drv_init(struct bfad_s *bfad)
 	bfa_status_t rc;
 	unsigned long flags;
 	struct bfa_fcs_driver_info_s driver_info;
-	int i;
 
 	bfad->cfg_data.rport_del_timeout = rport_del_timeout;
 	bfad->cfg_data.lun_queue_depth = bfa_lun_queue_depth;
@@ -681,12 +697,7 @@ bfad_drv_init(struct bfad_s *bfad)
 	bfa_init_log(&bfad->bfa, bfad->logmod);
 	bfa_init_trc(&bfad->bfa, bfad->trcmod);
 	bfa_init_aen(&bfad->bfa, bfad->aen);
-	INIT_LIST_HEAD(&bfad->file_q);
-	INIT_LIST_HEAD(&bfad->file_free_q);
-	for (i = 0; i < BFAD_AEN_MAX_APPS; i++) {
-		bfa_q_qe_init(&bfad->file_buf[i].qe);
-		list_add_tail(&bfad->file_buf[i].qe, &bfad->file_free_q);
-	}
+	memset(bfad->file_map, 0, sizeof(bfad->file_map));
 	bfa_init_plog(&bfad->bfa, &bfad->plog_buf);
 	bfa_plog_init(&bfad->plog_buf);
 	bfa_plog_str(&bfad->plog_buf, BFA_PL_MID_DRVR, BFA_PL_EID_DRIVER_START,
@@ -716,7 +727,7 @@ bfad_drv_init(struct bfad_s *bfad)
 	if ((bfad->bfad_flags & BFAD_MSIX_ON)
 	    && bfad_install_msix_handler(bfad)) {
 		printk(KERN_WARNING "%s: install_msix failed, bfad%d\n",
-		       __FUNCTION__, bfad->inst_no);
+		       __func__, bfad->inst_no);
 	}
 
 	bfad_init_timer(bfad);
@@ -746,8 +757,16 @@ bfad_drv_init(struct bfad_s *bfad)
 	bfa_fcs_log_init(&bfad->bfa_fcs, bfad->logmod);
 	bfa_fcs_trc_init(&bfad->bfa_fcs, bfad->trcmod);
 	bfa_fcs_aen_init(&bfad->bfa_fcs, bfad->aen);
-	bfa_fcs_init(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE);
+	bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE);
+
+	/* Do FCS init only when HAL init is done */
+	if ((bfad->bfad_flags & BFAD_HAL_INIT_DONE)) {
+		bfa_fcs_init(&bfad->bfa_fcs);
+		bfad->bfad_flags |= BFAD_FCS_INIT_DONE;
+	}
+
 	bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info);
+	bfa_fcs_set_fdmi_param(&bfad->bfa_fcs, fdmi_enable);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
 	bfad->bfad_flags |= BFAD_DRV_INIT_DONE;
@@ -763,12 +782,21 @@ out_hal_mem_alloc_failure:
 void
 bfad_drv_uninit(struct bfad_s *bfad)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	init_completion(&bfad->comp);
+	bfa_stop(&bfad->bfa);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	wait_for_completion(&bfad->comp);
+
 	del_timer_sync(&bfad->hal_tmo);
 	bfa_isr_disable(&bfad->bfa);
 	bfa_detach(&bfad->bfa);
 	bfad_remove_intr(bfad);
-	bfa_assert(list_empty(&bfad->file_q));
 	bfad_hal_mem_release(bfad);
+
+	bfad->bfad_flags &= ~BFAD_DRV_INIT_DONE;
 }
 
 void
@@ -859,6 +887,86 @@ bfad_drv_log_level_set(struct bfad_s *bfad)
 	bfa_log_set_level_all(&bfad->log_data, log_level);
 }
 
+bfa_status_t
+bfad_start_ops(struct bfad_s *bfad)
+{
+	int retval;
+
+	/* PPORT FCS config */
+	bfad_fcs_port_cfg(bfad);
+
+	retval = bfad_cfg_pport(bfad, BFA_PORT_ROLE_FCP_IM);
+	if (retval != BFA_STATUS_OK)
+		goto out_cfg_pport_failure;
+
+	/* BFAD level FC4 (IM/TM/IPFC) specific resource allocation */
+	retval = bfad_fc4_probe(bfad);
+	if (retval != BFA_STATUS_OK) {
+		printk(KERN_WARNING "bfad_fc4_probe failed\n");
+		goto out_fc4_probe_failure;
+	}
+
+	bfad_drv_start(bfad);
+
+	/*
+	 * If bfa_linkup_delay is set to -1 default; try to retrive the
+	 * value using the bfad_os_get_linkup_delay(); else use the
+	 * passed in module param value as the bfa_linkup_delay.
+	 */
+	if (bfa_linkup_delay < 0) {
+
+		bfa_linkup_delay = bfad_os_get_linkup_delay(bfad);
+		bfad_os_rport_online_wait(bfad);
+		bfa_linkup_delay = -1;
+
+	} else {
+		bfad_os_rport_online_wait(bfad);
+	}
+
+	bfa_log(bfad->logmod, BFA_LOG_LINUX_DEVICE_CLAIMED, bfad->pci_name);
+
+	return BFA_STATUS_OK;
+
+out_fc4_probe_failure:
+	bfad_fc4_probe_undo(bfad);
+	bfad_uncfg_pport(bfad);
+out_cfg_pport_failure:
+	return BFA_STATUS_FAILED;
+}
+
+int
+bfad_worker (void *ptr)
+{
+	struct bfad_s *bfad;
+	unsigned long flags;
+
+	bfad = (struct bfad_s *)ptr;
+
+	while (!kthread_should_stop()) {
+
+		/* Check if the FCS init is done from bfad_drv_init;
+		 * if not done do FCS init and set the flag.
+		 */
+		if (!(bfad->bfad_flags & BFAD_FCS_INIT_DONE)) {
+			spin_lock_irqsave(&bfad->bfad_lock, flags);
+			bfa_fcs_init(&bfad->bfa_fcs);
+			bfad->bfad_flags |= BFAD_FCS_INIT_DONE;
+			spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		}
+
+		/* Start the bfad operations after HAL init done */
+		bfad_start_ops(bfad);
+
+		spin_lock_irqsave(&bfad->bfad_lock, flags);
+		bfad->bfad_tsk = NULL;
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+		break;
+	}
+
+	return 0;
+}
+
 /*
  *  PCI_entry PCI driver entries * {
  */
@@ -871,7 +979,6 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
 {
 	struct bfad_s *bfad;
 	int error = -ENODEV, retval;
-	char buf[16];
 
 	/*
	 * For single port cards - only claim function 0
@@ -902,8 +1009,7 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
 	bfa_trc(bfad, bfad_inst);
 
 	bfad->logmod = &bfad->log_data;
-	sprintf(buf, "%d", bfad_inst);
-	bfa_log_init(bfad->logmod, buf, bfa_os_printf);
+	bfa_log_init(bfad->logmod, (char *)pci_name(pdev), bfa_os_printf);
 
 	bfad_drv_log_level_set(bfad);
 
@@ -933,57 +1039,39 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
 	bfad->ref_count = 0;
 	bfad->pport.bfad = bfad;
 
+	bfad->bfad_tsk = kthread_create(bfad_worker, (void *) bfad, "%s",
+					"bfad_worker");
+	if (IS_ERR(bfad->bfad_tsk)) {
+		printk(KERN_INFO "bfad[%d]: Kernel thread"
+			" creation failed!\n",
+			bfad->inst_no);
+		goto out_kthread_create_failure;
+	}
+
 	retval = bfad_drv_init(bfad);
 	if (retval != BFA_STATUS_OK)
 		goto out_drv_init_failure;
 	if (!(bfad->bfad_flags & BFAD_HAL_INIT_DONE)) {
+		bfad->bfad_flags |= BFAD_HAL_INIT_FAIL;
 		printk(KERN_WARNING "bfad%d: hal init failed\n", bfad->inst_no);
 		goto ok;
 	}
 
-	/*
-	 * PPORT FCS config
-	 */
-	bfad_fcs_port_cfg(bfad);
-
-	retval = bfad_cfg_pport(bfad, BFA_PORT_ROLE_FCP_IM);
+	retval = bfad_start_ops(bfad);
 	if (retval != BFA_STATUS_OK)
-		goto out_cfg_pport_failure;
-
-	/*
-	 * BFAD level FC4 (IM/TM/IPFC) specific resource allocation
-	 */
-	retval = bfad_fc4_probe(bfad);
-	if (retval != BFA_STATUS_OK) {
-		printk(KERN_WARNING "bfad_fc4_probe failed\n");
-		goto out_fc4_probe_failure;
-	}
+		goto out_start_ops_failure;
 
-	bfad_drv_start(bfad);
-
-	/*
-	 * If bfa_linkup_delay is set to -1 default; try to retrive the
-	 * value using the bfad_os_get_linkup_delay(); else use the
-	 * passed in module param value as the bfa_linkup_delay.
-	 */
-	if (bfa_linkup_delay < 0) {
-		bfa_linkup_delay = bfad_os_get_linkup_delay(bfad);
-		bfad_os_rport_online_wait(bfad);
-		bfa_linkup_delay = -1;
-	} else {
-		bfad_os_rport_online_wait(bfad);
-	}
+	kthread_stop(bfad->bfad_tsk);
+	bfad->bfad_tsk = NULL;
 
-	bfa_log(bfad->logmod, BFA_LOG_LINUX_DEVICE_CLAIMED, bfad->pci_name);
 ok:
 	return 0;
 
-out_fc4_probe_failure:
-	bfad_fc4_probe_undo(bfad);
-	bfad_uncfg_pport(bfad);
-out_cfg_pport_failure:
+out_start_ops_failure:
 	bfad_drv_uninit(bfad);
 out_drv_init_failure:
+	kthread_stop(bfad->bfad_tsk);
+out_kthread_create_failure:
 	mutex_lock(&bfad_mutex);
 	bfad_inst--;
 	list_del(&bfad->list_entry);
@@ -1008,6 +1096,11 @@ bfad_pci_remove(struct pci_dev *pdev)
 
 	bfa_trc(bfad, bfad->inst_no);
 
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	if (bfad->bfad_tsk != NULL)
+		kthread_stop(bfad->bfad_tsk);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
 	if ((bfad->bfad_flags & BFAD_DRV_INIT_DONE)
 	    && !(bfad->bfad_flags & BFAD_HAL_INIT_DONE)) {
 
@@ -1024,13 +1117,25 @@ bfad_pci_remove(struct pci_dev *pdev)
 		goto remove_sysfs;
 	}
 
-	if (bfad->bfad_flags & BFAD_HAL_START_DONE)
+	if (bfad->bfad_flags & BFAD_HAL_START_DONE) {
 		bfad_drv_stop(bfad);
+	} else if (bfad->bfad_flags & BFAD_DRV_INIT_DONE) {
+		/* Invoking bfa_stop() before bfa_detach
+		 * when HAL and DRV init are success
+		 * but HAL start did not occur.
+		 */
+		spin_lock_irqsave(&bfad->bfad_lock, flags);
+		init_completion(&bfad->comp);
+		bfa_stop(&bfad->bfa);
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		wait_for_completion(&bfad->comp);
+	}
 
 	bfad_remove_intr(bfad);
-
 	del_timer_sync(&bfad->hal_tmo);
-	bfad_fc4_probe_undo(bfad);
+
+	if (bfad->bfad_flags & BFAD_FC4_PROBE_DONE)
+		bfad_fc4_probe_undo(bfad);
 
 	if (bfad->bfad_flags & BFAD_CFG_PPORT_DONE)
 		bfad_uncfg_pport(bfad);
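The bfad.c hunks above hang together as one mechanism: bfad_pci_probe() creates bfad_worker with kthread_create() but does not wake it; if HAL init fails, BFAD_HAL_INIT_FAIL is set and bfa_cb_init() later wakes the thread so bfa_fcs_init() and bfad_start_ops() can run from process context. The sketch below shows the create-then-wake kthread lifecycle with stock kernel APIs. It is a simplified, hypothetical stand-in, not the bfad code: it parks the thread until kthread_stop() rather than letting it exit on its own (bfad instead clears bfad_tsk under bfad_lock, which is why its remove path checks for NULL before stopping).

#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static struct task_struct *demo_tsk;

static int demo_worker(void *arg)
{
	if (!kthread_should_stop())
		pr_info("demo: running deferred bring-up\n");

	/* Park until kthread_stop() so module unload cannot race our exit. */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static int __init demo_init(void)
{
	demo_tsk = kthread_create(demo_worker, NULL, "demo_worker");
	if (IS_ERR(demo_tsk))
		return PTR_ERR(demo_tsk);
	/* bfad defers this wake to bfa_cb_init(), and only on HAL_INIT_FAIL. */
	wake_up_process(demo_tsk);
	return 0;
}

static void __exit demo_exit(void)
{
	kthread_stop(demo_tsk);	/* sleeps until the worker has exited */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");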
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 9129ae3040ff..6a2efdd5ef24 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -19,6 +19,7 @@
  *  bfa_attr.c Linux driver configuration interface module.
  */
 
+#include <linux/slab.h>
 #include "bfad_drv.h"
 #include "bfad_im.h"
 #include "bfad_trcmod.h"
@@ -141,7 +142,7 @@ bfad_im_get_host_port_type(struct Scsi_Host *shost)
 	struct bfad_s *bfad = im_port->bfad;
 	struct bfa_pport_attr_s attr;
 
-	bfa_pport_get_attr(&bfad->bfa, &attr);
+	bfa_fcport_get_attr(&bfad->bfa, &attr);
 
 	switch (attr.port_type) {
 	case BFA_PPORT_TYPE_NPORT:
@@ -173,7 +174,7 @@ bfad_im_get_host_port_state(struct Scsi_Host *shost)
 	struct bfad_s *bfad = im_port->bfad;
 	struct bfa_pport_attr_s attr;
 
-	bfa_pport_get_attr(&bfad->bfa, &attr);
+	bfa_fcport_get_attr(&bfad->bfa, &attr);
 
 	switch (attr.port_state) {
 	case BFA_PPORT_ST_LINKDOWN:
@@ -229,8 +230,10 @@ bfad_im_get_host_speed(struct Scsi_Host *shost)
 		(struct bfad_im_port_s *) shost->hostdata[0];
 	struct bfad_s *bfad = im_port->bfad;
 	struct bfa_pport_attr_s attr;
+	unsigned long flags;
 
-	bfa_pport_get_attr(&bfad->bfa, &attr);
+	spin_lock_irqsave(shost->host_lock, flags);
+	bfa_fcport_get_attr(&bfad->bfa, &attr);
 	switch (attr.speed) {
 	case BFA_PPORT_SPEED_8GBPS:
 		fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
@@ -248,6 +251,7 @@ bfad_im_get_host_speed(struct Scsi_Host *shost)
 		fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
 		break;
 	}
+	spin_unlock_irqrestore(shost->host_lock, flags);
 }
 
 /**
@@ -285,7 +289,7 @@ bfad_im_get_stats(struct Scsi_Host *shost)
 	init_completion(&fcomp.comp);
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 	memset(hstats, 0, sizeof(struct fc_host_statistics));
-	rc = bfa_pport_get_stats(&bfad->bfa,
+	rc = bfa_port_get_stats(BFA_FCPORT(&bfad->bfa),
 		      (union bfa_pport_stats_u *) hstats,
 		      bfad_hcb_comp, &fcomp);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
@@ -312,7 +316,8 @@ bfad_im_reset_stats(struct Scsi_Host *shost)
 
 	init_completion(&fcomp.comp);
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
-	rc = bfa_pport_clear_stats(&bfad->bfa, bfad_hcb_comp, &fcomp);
+	rc = bfa_port_clear_stats(BFA_FCPORT(&bfad->bfa), bfad_hcb_comp,
+			 &fcomp);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
 	if (rc != BFA_STATUS_OK)
@@ -421,12 +426,10 @@ bfad_im_serial_num_show(struct device *dev, struct device_attribute *attr,
 	struct bfad_im_port_s *im_port =
 		(struct bfad_im_port_s *) shost->hostdata[0];
 	struct bfad_s *bfad = im_port->bfad;
-	struct bfa_ioc_attr_s ioc_attr;
+	char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
 
-	memset(&ioc_attr, 0, sizeof(ioc_attr));
-	bfa_get_attr(&bfad->bfa, &ioc_attr);
-	return snprintf(buf, PAGE_SIZE, "%s\n",
-			ioc_attr.adapter_attr.serial_num);
+	bfa_get_adapter_serial_num(&bfad->bfa, serial_num);
+	return snprintf(buf, PAGE_SIZE, "%s\n", serial_num);
 }
 
 static ssize_t
@@ -437,11 +440,10 @@ bfad_im_model_show(struct device *dev, struct device_attribute *attr,
 	struct bfad_im_port_s *im_port =
 		(struct bfad_im_port_s *) shost->hostdata[0];
 	struct bfad_s *bfad = im_port->bfad;
-	struct bfa_ioc_attr_s ioc_attr;
+	char model[BFA_ADAPTER_MODEL_NAME_LEN];
 
-	memset(&ioc_attr, 0, sizeof(ioc_attr));
-	bfa_get_attr(&bfad->bfa, &ioc_attr);
-	return snprintf(buf, PAGE_SIZE, "%s\n", ioc_attr.adapter_attr.model);
+	bfa_get_adapter_model(&bfad->bfa, model);
+	return snprintf(buf, PAGE_SIZE, "%s\n", model);
 }
 
 static ssize_t
@@ -452,12 +454,10 @@ bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr,
 	struct bfad_im_port_s *im_port =
 		(struct bfad_im_port_s *) shost->hostdata[0];
 	struct bfad_s *bfad = im_port->bfad;
-	struct bfa_ioc_attr_s ioc_attr;
+	char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN];
 
-	memset(&ioc_attr, 0, sizeof(ioc_attr));
-	bfa_get_attr(&bfad->bfa, &ioc_attr);
-	return snprintf(buf, PAGE_SIZE, "%s\n",
-			ioc_attr.adapter_attr.model_descr);
+	bfa_get_adapter_model(&bfad->bfa, model_descr);
+	return snprintf(buf, PAGE_SIZE, "%s\n", model_descr);
 }
 
 static ssize_t
@@ -482,14 +482,13 @@ bfad_im_symbolic_name_show(struct device *dev, struct device_attribute *attr,
 	struct bfad_im_port_s *im_port =
 		(struct bfad_im_port_s *) shost->hostdata[0];
 	struct bfad_s *bfad = im_port->bfad;
-	struct bfa_ioc_attr_s ioc_attr;
-
-	memset(&ioc_attr, 0, sizeof(ioc_attr));
-	bfa_get_attr(&bfad->bfa, &ioc_attr);
+	char model[BFA_ADAPTER_MODEL_NAME_LEN];
+	char fw_ver[BFA_VERSION_LEN];
 
+	bfa_get_adapter_model(&bfad->bfa, model);
+	bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver);
 	return snprintf(buf, PAGE_SIZE, "Brocade %s FV%s DV%s\n",
-			ioc_attr.adapter_attr.model,
-			ioc_attr.adapter_attr.fw_ver, BFAD_DRIVER_VERSION);
+			model, fw_ver, BFAD_DRIVER_VERSION);
 }
 
 static ssize_t
@@ -500,11 +499,10 @@ bfad_im_hw_version_show(struct device *dev, struct device_attribute *attr,
 	struct bfad_im_port_s *im_port =
 		(struct bfad_im_port_s *) shost->hostdata[0];
 	struct bfad_s *bfad = im_port->bfad;
-	struct bfa_ioc_attr_s ioc_attr;
+	char hw_ver[BFA_VERSION_LEN];
 
-	memset(&ioc_attr, 0, sizeof(ioc_attr));
-	bfa_get_attr(&bfad->bfa, &ioc_attr);
-	return snprintf(buf, PAGE_SIZE, "%s\n", ioc_attr.adapter_attr.hw_ver);
+	bfa_get_pci_chip_rev(&bfad->bfa, hw_ver);
+	return snprintf(buf, PAGE_SIZE, "%s\n", hw_ver);
 }
 
 static ssize_t
@@ -522,12 +520,10 @@ bfad_im_optionrom_version_show(struct device *dev,
 	struct bfad_im_port_s *im_port =
 		(struct bfad_im_port_s *) shost->hostdata[0];
 	struct bfad_s *bfad = im_port->bfad;
-	struct bfa_ioc_attr_s ioc_attr;
+	char optrom_ver[BFA_VERSION_LEN];
 
-	memset(&ioc_attr, 0, sizeof(ioc_attr));
-	bfa_get_attr(&bfad->bfa, &ioc_attr);
-	return snprintf(buf, PAGE_SIZE, "%s\n",
-			ioc_attr.adapter_attr.optrom_ver);
+	bfa_get_adapter_optrom_ver(&bfad->bfa, optrom_ver);
+	return snprintf(buf, PAGE_SIZE, "%s\n", optrom_ver);
 }
 
 static ssize_t
@@ -538,11 +534,10 @@ bfad_im_fw_version_show(struct device *dev, struct device_attribute *attr,
 	struct bfad_im_port_s *im_port =
 		(struct bfad_im_port_s *) shost->hostdata[0];
 	struct bfad_s *bfad = im_port->bfad;
-	struct bfa_ioc_attr_s ioc_attr;
+	char fw_ver[BFA_VERSION_LEN];
 
-	memset(&ioc_attr, 0, sizeof(ioc_attr));
-	bfa_get_attr(&bfad->bfa, &ioc_attr);
-	return snprintf(buf, PAGE_SIZE, "%s\n", ioc_attr.adapter_attr.fw_ver);
+	bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver);
+	return snprintf(buf, PAGE_SIZE, "%s\n", fw_ver);
 }
 
 static ssize_t
@@ -553,11 +548,9 @@ bfad_im_num_of_ports_show(struct device *dev, struct device_attribute *attr,
 	struct bfad_im_port_s *im_port =
 		(struct bfad_im_port_s *) shost->hostdata[0];
 	struct bfad_s *bfad = im_port->bfad;
-	struct bfa_ioc_attr_s ioc_attr;
 
-	memset(&ioc_attr, 0, sizeof(ioc_attr));
-	bfa_get_attr(&bfad->bfa, &ioc_attr);
-	return snprintf(buf, PAGE_SIZE, "%d\n", ioc_attr.adapter_attr.nports);
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			bfa_get_nports(&bfad->bfa));
 }
 
 static ssize_t
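The bfad_attr.c conversion is uniform: each sysfs *_show() routine stops copying a whole bfa_ioc_attr_s and instead fills a small stack buffer through a dedicated getter. A sketch of that shape, assuming a hypothetical demo_get_model() in place of the bfa_get_adapter_*() family:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mm.h>

#define DEMO_MODEL_LEN 16

/* Hypothetical stand-in for the per-field getters. */
static void demo_get_model(char *model)
{
	snprintf(model, DEMO_MODEL_LEN, "Brocade-425");
}

static ssize_t demo_model_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	char model[DEMO_MODEL_LEN];	/* small, per-call scratch */

	demo_get_model(model);
	return snprintf(buf, PAGE_SIZE, "%s\n", model);
}

static DEVICE_ATTR(model, S_IRUGO, demo_model_show, NULL);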
diff --git a/drivers/scsi/bfa/bfad_attr.h b/drivers/scsi/bfa/bfad_attr.h
index 4d3312da6a81..bf0102076508 100644
--- a/drivers/scsi/bfa/bfad_attr.h
+++ b/drivers/scsi/bfa/bfad_attr.h
@@ -17,9 +17,6 @@
 
 #ifndef __BFAD_ATTR_H__
 #define __BFAD_ATTR_H__
-/**
- *  bfad_attr.h VMware driver configuration interface module.
- */
 
 /**
  *  FC_transport_template FC transport template
@@ -52,12 +49,6 @@ bfad_im_get_starget_port_name(struct scsi_target *starget);
 void
 bfad_im_get_host_port_id(struct Scsi_Host *shost);
 
-/**
- * FC transport template entry, issue a LIP.
- */
-int
-bfad_im_issue_fc_host_lip(struct Scsi_Host *shost);
-
 struct Scsi_Host*
 bfad_os_starget_to_shost(struct scsi_target *starget);
 
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index 172c81e25c1c..107848cd3b6d 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -46,7 +46,7 @@
 #ifdef BFA_DRIVER_VERSION
 #define BFAD_DRIVER_VERSION	BFA_DRIVER_VERSION
 #else
-#define BFAD_DRIVER_VERSION	"2.0.0.0"
+#define BFAD_DRIVER_VERSION	"2.1.2.1"
 #endif
 
 
@@ -62,7 +62,9 @@
 #define BFAD_HAL_START_DONE			0x00000010
 #define BFAD_PORT_ONLINE			0x00000020
 #define BFAD_RPORT_ONLINE			0x00000040
-
+#define BFAD_FCS_INIT_DONE			0x00000080
+#define BFAD_HAL_INIT_FAIL			0x00000100
+#define BFAD_FC4_PROBE_DONE			0x00000200
 #define BFAD_PORT_DELETE			0x00000001
 
 /*
@@ -137,12 +139,16 @@ struct bfad_cfg_param_s {
 	u32 binding_method;
 };
 
-#define BFAD_AEN_MAX_APPS 8
-struct bfad_aen_file_s {
-	struct list_head qe;
-	struct bfad_s *bfad;
-	s32 ri;
-	s32 app_id;
+union bfad_tmp_buf {
+	/* From struct bfa_adapter_attr_s */
+	char manufacturer[BFA_ADAPTER_MFG_NAME_LEN];
+	char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
+	char model[BFA_ADAPTER_MODEL_NAME_LEN];
+	char fw_ver[BFA_VERSION_LEN];
+	char optrom_ver[BFA_VERSION_LEN];
+
+	/* From struct bfa_ioc_pci_attr_s */
+	u8 chip_rev[BFA_IOC_CHIP_REV_LEN];	/* chip revision */
 };
 
 /*
@@ -168,6 +174,7 @@ struct bfad_s {
 	u32 inst_no;		/* BFAD instance number */
 	u32 bfad_flags;
 	spinlock_t bfad_lock;
+	struct task_struct *bfad_tsk;
 	struct bfad_cfg_param_s cfg_data;
 	struct bfad_msix_s msix_tab[MAX_MSIX_ENTRY];
 	int nvec;
@@ -183,18 +190,12 @@ struct bfad_s {
 	struct bfa_log_mod_s *logmod;
 	struct bfa_aen_s *aen;
 	struct bfa_aen_s aen_buf;
-	struct bfad_aen_file_s file_buf[BFAD_AEN_MAX_APPS];
-	struct list_head file_q;
-	struct list_head file_free_q;
+	void *file_map[BFA_AEN_MAX_APP];
 	struct bfa_plog_s plog_buf;
 	int ref_count;
 	bfa_boolean_t ipfc_enabled;
+	union bfad_tmp_buf tmp_buf;
 	struct fc_host_statistics link_stats;
-
-	struct kobject *bfa_kobj;
-	struct kobject *ioc_kobj;
-	struct kobject *pport_kobj;
-	struct kobject *lport_kobj;
 };
 
 /*
@@ -258,6 +259,7 @@ bfa_status_t bfad_vf_create(struct bfad_s *bfad, u16 vf_id,
 			struct bfa_port_cfg_s *port_cfg);
 bfa_status_t bfad_cfg_pport(struct bfad_s *bfad, enum bfa_port_role role);
 bfa_status_t bfad_drv_init(struct bfad_s *bfad);
+bfa_status_t bfad_start_ops(struct bfad_s *bfad);
 void bfad_drv_start(struct bfad_s *bfad);
 void bfad_uncfg_pport(struct bfad_s *bfad);
 void bfad_drv_stop(struct bfad_s *bfad);
@@ -279,6 +281,7 @@ void bfad_drv_uninit(struct bfad_s *bfad);
 void bfad_drv_log_level_set(struct bfad_s *bfad);
 bfa_status_t bfad_fc4_module_init(void);
 void bfad_fc4_module_exit(void);
+int bfad_worker (void *ptr);
 
 void bfad_pci_remove(struct pci_dev *pdev);
 int bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid);
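The new union bfad_tmp_buf replaces several single-purpose string buffers with one scratch area sized by its largest member; only one field is live at a time. A standalone illustration of the idea (array sizes here are made up, not the driver's constants):

#include <stdio.h>

union tmp_buf {
	char serial_num[64];
	char model[32];
	char fw_ver[16];
};

int main(void)
{
	union tmp_buf t;

	/* Only one member is in use at any moment. */
	snprintf(t.model, sizeof(t.model), "Brocade-425");
	printf("model=%s (scratch holds up to %zu bytes)\n",
	       t.model, sizeof(t));
	return 0;
}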
diff --git a/drivers/scsi/bfa/bfad_fwimg.c b/drivers/scsi/bfa/bfad_fwimg.c
index bd34b0db2d6b..2ad65f275a92 100644
--- a/drivers/scsi/bfa/bfad_fwimg.c
+++ b/drivers/scsi/bfa/bfad_fwimg.c
@@ -65,10 +65,10 @@ bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
 	memcpy(*bfi_image, fw->data, fw->size);
 	*bfi_image_size = fw->size/sizeof(u32);
 
-	return(*bfi_image);
+	return *bfi_image;
 
 error:
-	return(NULL);
+	return NULL;
 }
 
 u32 *
@@ -78,12 +78,12 @@ bfad_get_firmware_buf(struct pci_dev *pdev)
 		if (bfi_image_ct_size == 0)
 			bfad_read_firmware(pdev, &bfi_image_ct,
 				&bfi_image_ct_size, BFAD_FW_FILE_CT);
-		return(bfi_image_ct);
+		return bfi_image_ct;
 	} else {
 		if (bfi_image_cb_size == 0)
 			bfad_read_firmware(pdev, &bfi_image_cb,
 				&bfi_image_cb_size, BFAD_FW_FILE_CB);
-		return(bfi_image_cb);
+		return bfi_image_cb;
 	}
 }
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 55d012a9a668..78f42aa57369 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -19,6 +19,7 @@
  *  bfad_im.c Linux driver IM module.
 */
 
+#include <linux/slab.h>
 #include "bfad_drv.h"
 #include "bfad_im.h"
 #include "bfad_trcmod.h"
@@ -43,11 +44,11 @@ bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio,
 	struct bfad_s *bfad = drv;
 	struct bfad_itnim_data_s *itnim_data;
 	struct bfad_itnim_s *itnim;
+	u8 host_status = DID_OK;
 
 	switch (io_status) {
 	case BFI_IOIM_STS_OK:
 		bfa_trc(bfad, scsi_status);
-		cmnd->result = ScsiResult(DID_OK, scsi_status);
 		scsi_set_resid(cmnd, 0);
 
 		if (sns_len > 0) {
@@ -56,8 +57,18 @@ bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio,
 				sns_len = SCSI_SENSE_BUFFERSIZE;
 			memcpy(cmnd->sense_buffer, sns_info, sns_len);
 		}
-		if (residue > 0)
+		if (residue > 0) {
+			bfa_trc(bfad, residue);
 			scsi_set_resid(cmnd, residue);
+			if (!sns_len && (scsi_status == SAM_STAT_GOOD) &&
+				(scsi_bufflen(cmnd) - residue) <
+					cmnd->underflow) {
+				bfa_trc(bfad, 0);
+				host_status = DID_ERROR;
+			}
+		}
+		cmnd->result = ScsiResult(host_status, scsi_status);
+
 		break;
 
 	case BFI_IOIM_STS_ABORTED:
@@ -167,17 +178,15 @@ bfad_im_info(struct Scsi_Host *shost)
 	static char bfa_buf[256];
 	struct bfad_im_port_s *im_port =
 			(struct bfad_im_port_s *) shost->hostdata[0];
-	struct bfa_ioc_attr_s ioc_attr;
 	struct bfad_s *bfad = im_port->bfad;
+	char model[BFA_ADAPTER_MODEL_NAME_LEN];
 
-	memset(&ioc_attr, 0, sizeof(ioc_attr));
-	bfa_get_attr(&bfad->bfa, &ioc_attr);
+	bfa_get_adapter_model(&bfad->bfa, model);
 
 	memset(bfa_buf, 0, sizeof(bfa_buf));
 	snprintf(bfa_buf, sizeof(bfa_buf),
		"Brocade FC/FCOE Adapter, " "model: %s hwpath: %s driver: %s",
-		ioc_attr.adapter_attr.model, bfad->pci_name,
-		BFAD_DRIVER_VERSION);
+		model, bfad->pci_name, BFAD_DRIVER_VERSION);
 	return bfa_buf;
 }
 
@@ -501,16 +510,6 @@ void bfa_fcb_itnim_tov(struct bfad_itnim_s *itnim)
 }
 
 /**
- * Path TOV processing begin notification -- dummy for linux
- */
-void
-bfa_fcb_itnim_tov_begin(struct bfad_itnim_s *itnim)
-{
-}
-
-
-
-/**
  *  Allocate a Scsi_Host for a port.
  */
 int
@@ -931,10 +930,9 @@ bfad_os_fc_host_init(struct bfad_im_port_s *im_port)
 	struct Scsi_Host *host = im_port->shost;
 	struct bfad_s *bfad = im_port->bfad;
 	struct bfad_port_s *port = im_port->port;
-	union attr {
-		struct bfa_pport_attr_s pattr;
-		struct bfa_ioc_attr_s ioc_attr;
-	} attr;
+	struct bfa_pport_attr_s pattr;
+	char model[BFA_ADAPTER_MODEL_NAME_LEN];
+	char fw_ver[BFA_VERSION_LEN];
 
 	fc_host_node_name(host) =
 		bfa_os_htonll((bfa_fcs_port_get_nwwn(port->fcs_port)));
@@ -954,20 +952,18 @@ bfad_os_fc_host_init(struct bfad_im_port_s *im_port)
 	/* For fibre channel services type 0x20 */
 	fc_host_supported_fc4s(host)[7] = 1;
 
-	memset(&attr.ioc_attr, 0, sizeof(attr.ioc_attr));
-	bfa_get_attr(&bfad->bfa, &attr.ioc_attr);
+	bfa_get_adapter_model(&bfad->bfa, model);
+	bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver);
 	sprintf(fc_host_symbolic_name(host), "Brocade %s FV%s DV%s",
-		attr.ioc_attr.adapter_attr.model,
-		attr.ioc_attr.adapter_attr.fw_ver, BFAD_DRIVER_VERSION);
+		model, fw_ver, BFAD_DRIVER_VERSION);
 
 	fc_host_supported_speeds(host) = 0;
 	fc_host_supported_speeds(host) |=
 		FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
 		FC_PORTSPEED_1GBIT;
 
-	memset(&attr.pattr, 0, sizeof(attr.pattr));
-	bfa_pport_get_attr(&bfad->bfa, &attr.pattr);
-	fc_host_maxframe_size(host) = attr.pattr.pport_cfg.maxfrsize;
+	bfa_fcport_get_attr(&bfad->bfa, &pattr);
+	fc_host_maxframe_size(host) = pattr.pport_cfg.maxfrsize;
 }
 
 static void
@@ -1050,7 +1046,7 @@ bfad_im_itnim_work_handler(struct work_struct *work)
 	} else {
 		printk(KERN_WARNING
 			"%s: itnim %llx is already in online state\n",
-			__FUNCTION__,
+			__func__,
 			bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim));
 	}
 
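The bfa_cb_ioim_done() change above adds an underflow check: a command that completed with GOOD status and no sense data, but transferred fewer bytes than cmnd->underflow requires, is now reported as a host-side error instead of DID_OK. A plain-C restatement of just that condition, with made-up numbers:

#include <stdio.h>

/* GOOD status, no sense data, and fewer bytes transferred than the
 * command's underflow mark => host error (DID_ERROR in the driver). */
static int underflow_error(int sns_len, int status_good,
			   int bufflen, int residue, int underflow)
{
	return residue > 0 && !sns_len && status_good &&
	       (bufflen - residue) < underflow;
}

int main(void)
{
	/* 4096-byte read, only 96 bytes arrived, 512 required */
	printf("%d\n", underflow_error(0, 1, 4096, 4000, 512)); /* 1 */
	/* 4096-byte read, 4088 bytes arrived */
	printf("%d\n", underflow_error(0, 1, 4096, 8, 512));    /* 0 */
	return 0;
}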
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index 189a5b29e21a..85ab2da21321 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -23,7 +23,6 @@
 
 #define FCPI_NAME " fcpim"
 
-void bfad_flags_set(struct bfad_s *bfad, u32 flags);
 bfa_status_t bfad_im_module_init(void);
 void bfad_im_module_exit(void);
 bfa_status_t bfad_im_probe(struct bfad_s *bfad);
@@ -126,7 +125,6 @@ bfa_status_t bfad_os_thread_workq(struct bfad_s *bfad);
 void bfad_os_destroy_workq(struct bfad_im_s *im);
 void bfad_os_itnim_process(struct bfad_itnim_s *itnim_drv);
 void bfad_os_fc_host_init(struct bfad_im_port_s *im_port);
-void bfad_os_init_work(struct bfad_im_port_s *im_port);
 void bfad_os_scsi_host_free(struct bfad_s *bfad,
 				 struct bfad_im_port_s *im_port);
 void bfad_os_ramp_up_qdepth(struct bfad_itnim_s *itnim,
@@ -136,9 +134,6 @@ struct bfad_itnim_s *bfad_os_get_itnim(struct bfad_im_port_s *im_port, int id);
 int bfad_os_scsi_add_host(struct Scsi_Host *shost,
 		struct bfad_im_port_s *im_port, struct bfad_s *bfad);
 
-/*
- * scsi_host_template entries
- */
 void bfad_im_itnim_unmap(struct bfad_im_port_s *im_port,
 		struct bfad_itnim_s *itnim);
 
diff --git a/drivers/scsi/bfa/bfad_im_compat.h b/drivers/scsi/bfa/bfad_im_compat.h
index 1d3e74ec338c..b36be15044a4 100644
--- a/drivers/scsi/bfa/bfad_im_compat.h
+++ b/drivers/scsi/bfa/bfad_im_compat.h
@@ -31,7 +31,7 @@ u32 *bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
 static inline u32 *
 bfad_load_fwimg(struct pci_dev *pdev)
 {
-	return(bfad_get_firmware_buf(pdev));
+	return bfad_get_firmware_buf(pdev);
 }
 
 static inline void
diff --git a/drivers/scsi/bfa/bfad_intr.c b/drivers/scsi/bfa/bfad_intr.c
index f104e029cac9..2b7dbecbebca 100644
--- a/drivers/scsi/bfa/bfad_intr.c
+++ b/drivers/scsi/bfa/bfad_intr.c
@@ -23,13 +23,14 @@ BFA_TRC_FILE(LDRV, INTR);
 /**
 *  bfa_isr BFA driver interrupt functions
 */
-irqreturn_t bfad_intx(int irq, void *dev_id);
-static int msix_disable;
-module_param(msix_disable, int, S_IRUGO | S_IWUSR);
+static int msix_disable_cb;
+static int msix_disable_ct;
+module_param(msix_disable_cb, int, S_IRUGO | S_IWUSR);
+module_param(msix_disable_ct, int, S_IRUGO | S_IWUSR);
 /**
  * Line based interrupt handler.
  */
-irqreturn_t
+static irqreturn_t
 bfad_intx(int irq, void *dev_id)
 {
 	struct bfad_s *bfad = dev_id;
@@ -142,6 +143,7 @@ bfad_setup_intr(struct bfad_s *bfad)
 	int error = 0;
 	u32 mask = 0, i, num_bit = 0, max_bit = 0;
 	struct msix_entry msix_entries[MAX_MSIX_ENTRY];
+	struct pci_dev *pdev = bfad->pcidev;
 
 	/* Call BFA to get the msix map for this PCI function. */
 	bfa_msix_getvecs(&bfad->bfa, &mask, &num_bit, &max_bit);
@@ -149,7 +151,9 @@ bfad_setup_intr(struct bfad_s *bfad)
 	/* Set up the msix entry table */
 	bfad_init_msix_entry(bfad, msix_entries, mask, max_bit);
 
-	if (!msix_disable) {
+	if ((pdev->device == BFA_PCI_DEVICE_ID_CT && !msix_disable_ct) ||
+	    (pdev->device != BFA_PCI_DEVICE_ID_CT && !msix_disable_cb)) {
+
 		error = pci_enable_msix(bfad->pcidev, msix_entries, bfad->nvec);
 		if (error) {
 			/*
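bfad_intr.c splits the single msix_disable switch into one kill switch per ASIC family, selected by PCI device ID at interrupt-setup time. A sketch of that selection logic; the device ID value below is a placeholder standing in for BFA_PCI_DEVICE_ID_CT, not a verified constant:

#include <linux/module.h>
#include <linux/pci.h>

#define DEMO_DEVICE_ID_CT 0x0014	/* placeholder device ID */

static int msix_disable_cb;	/* CB-family (FC) boards */
static int msix_disable_ct;	/* CT-family (FCoE) boards */
module_param(msix_disable_cb, int, S_IRUGO | S_IWUSR);
module_param(msix_disable_ct, int, S_IRUGO | S_IWUSR);

/* Decide per device whether MSI-X should be attempted. */
static bool demo_want_msix(struct pci_dev *pdev)
{
	if (pdev->device == DEMO_DEVICE_ID_CT)
		return !msix_disable_ct;
	return !msix_disable_cb;
}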
diff --git a/drivers/scsi/bfa/fabric.c b/drivers/scsi/bfa/fabric.c
index a8b14c47b009..8166e9745ec0 100644
--- a/drivers/scsi/bfa/fabric.c
+++ b/drivers/scsi/bfa/fabric.c
@@ -36,12 +36,12 @@ BFA_TRC_FILE(FCS, FABRIC);
 #define BFA_FCS_FABRIC_RETRY_DELAY	(2000)	/* Milliseconds */
 #define BFA_FCS_FABRIC_CLEANUP_DELAY	(10000)	/* Milliseconds */
 
 #define bfa_fcs_fabric_set_opertype(__fabric) do {		\
-	if (bfa_pport_get_topology((__fabric)->fcs->bfa)	\
+	if (bfa_fcport_get_topology((__fabric)->fcs->bfa)	\
 	    == BFA_PPORT_TOPOLOGY_P2P)				\
 		(__fabric)->oper_type = BFA_PPORT_TYPE_NPORT;	\
 	else							\
 		(__fabric)->oper_type = BFA_PPORT_TYPE_NLPORT;	\
 } while (0)
 
 /*
@@ -136,8 +136,7 @@ bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric,
 	case BFA_FCS_FABRIC_SM_CREATE:
 		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created);
 		bfa_fcs_fabric_init(fabric);
-		bfa_fcs_lport_init(&fabric->bport, fabric->fcs, FC_VF_ID_NULL,
-				&fabric->bport.port_cfg, NULL);
+		bfa_fcs_lport_init(&fabric->bport, &fabric->bport.port_cfg);
 		break;
 
 	case BFA_FCS_FABRIC_SM_LINK_UP:
@@ -161,7 +160,7 @@ bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric,
 
 	switch (event) {
 	case BFA_FCS_FABRIC_SM_START:
-		if (bfa_pport_is_linkup(fabric->fcs->bfa)) {
+		if (bfa_fcport_is_linkup(fabric->fcs->bfa)) {
 			bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi);
 			bfa_fcs_fabric_login(fabric);
 		} else
@@ -225,7 +224,7 @@ bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric,
 	switch (event) {
 	case BFA_FCS_FABRIC_SM_CONT_OP:
 
-		bfa_pport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit);
+		bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit);
 		fabric->fab_type = BFA_FCS_FABRIC_SWITCHED;
 
 		if (fabric->auth_reqd && fabric->is_auth) {
@@ -252,7 +251,7 @@ bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric,
 
 	case BFA_FCS_FABRIC_SM_NO_FABRIC:
 		fabric->fab_type = BFA_FCS_FABRIC_N2N;
-		bfa_pport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit);
+		bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit);
 		bfa_fcs_fabric_notify_online(fabric);
 		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_nofabric);
 		break;
@@ -419,7 +418,7 @@ bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric,
 
 	case BFA_FCS_FABRIC_SM_NO_FABRIC:
 		bfa_trc(fabric->fcs, fabric->bb_credit);
-		bfa_pport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit);
+		bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit);
 		break;
 
 	default:
@@ -563,17 +562,15 @@ void
 bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric)
 {
 	struct bfa_port_cfg_s *port_cfg = &fabric->bport.port_cfg;
-	struct bfa_adapter_attr_s adapter_attr;
+	char model[BFA_ADAPTER_MODEL_NAME_LEN] = {0};
 	struct bfa_fcs_driver_info_s *driver_info = &fabric->fcs->driver_info;
 
-	bfa_os_memset((void *)&adapter_attr, 0,
-		      sizeof(struct bfa_adapter_attr_s));
-	bfa_ioc_get_adapter_attr(&fabric->fcs->bfa->ioc, &adapter_attr);
+	bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model);
 
 	/*
 	 * Model name/number
 	 */
-	strncpy((char *)&port_cfg->sym_name, adapter_attr.model,
+	strncpy((char *)&port_cfg->sym_name, model,
 		BFA_FCS_PORT_SYMBNAME_MODEL_SZ);
 	strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
 		sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
@@ -719,10 +716,10 @@ bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric)
 	struct bfa_port_cfg_s *pcfg = &fabric->bport.port_cfg;
 	u8 alpa = 0;
 
-	if (bfa_pport_get_topology(bfa) == BFA_PPORT_TOPOLOGY_LOOP)
-		alpa = bfa_pport_get_myalpa(bfa);
+	if (bfa_fcport_get_topology(bfa) == BFA_PPORT_TOPOLOGY_LOOP)
+		alpa = bfa_fcport_get_myalpa(bfa);
 
-	bfa_lps_flogi(fabric->lps, fabric, alpa, bfa_pport_get_maxfrsize(bfa),
+	bfa_lps_flogi(fabric->lps, fabric, alpa, bfa_fcport_get_maxfrsize(bfa),
 		      pcfg->pwwn, pcfg->nwwn, fabric->auth_reqd);
 
 	fabric->stats.flogi_sent++;
@@ -814,10 +811,10 @@ bfa_fcs_fabric_delete_comp(void *cbarg)
  */
 
 /**
- *   Module initialization
+ *   Attach time initialization
  */
 void
-bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs)
+bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs)
 {
 	struct bfa_fcs_fabric_s *fabric;
 
@@ -841,7 +838,13 @@ bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs)
 	bfa_wc_up(&fabric->wc);	/* For the base port */
 
 	bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit);
-	bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_CREATE);
+	bfa_fcs_lport_attach(&fabric->bport, fabric->fcs, FC_VF_ID_NULL, NULL);
+}
+
+void
+bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs)
+{
+	bfa_sm_send_event(&fcs->fabric, BFA_FCS_FABRIC_SM_CREATE);
 	bfa_trc(fcs, 0);
 }
 
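
This hunk splits the old single-phase bring-up in two: bfa_fcs_fabric_attach() now does the one-time setup (base-port attach included) without firing state-machine events, and a slimmed-down bfa_fcs_fabric_modinit() only sends SM_CREATE. A hypothetical caller, just to show the intended ordering (the wrapper name is invented):

    /* Hypothetical wrapper illustrating the two-phase bring-up. */
    void example_fcs_bringup(struct bfa_fcs_s *fcs)
    {
            bfa_fcs_fabric_attach(fcs);     /* one-time wiring, no SM events */
            bfa_fcs_fabric_modinit(fcs);    /* now safe to send SM_CREATE */
    }
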
@@ -887,7 +890,13 @@ bfa_fcs_fabric_modsusp(struct bfa_fcs_s *fcs)
 bfa_boolean_t
 bfa_fcs_fabric_is_loopback(struct bfa_fcs_fabric_s *fabric)
 {
-	return (bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_loopback));
+	return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_loopback);
+}
+
+bfa_boolean_t
+bfa_fcs_fabric_is_auth_failed(struct bfa_fcs_fabric_s *fabric)
+{
+	return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_auth_failed);
 }
 
 enum bfa_pport_type
@@ -974,7 +983,7 @@ bfa_fcs_fabric_port_delete_comp(struct bfa_fcs_fabric_s *fabric)
 int
 bfa_fcs_fabric_is_online(struct bfa_fcs_fabric_s *fabric)
 {
-	return (bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_online));
+	return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_online);
 }
 
 
@@ -1015,7 +1024,7 @@ bfa_fcs_fabric_vport_lookup(struct bfa_fcs_fabric_s *fabric, wwn_t pwwn)
 u16
 bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric)
 {
-	return (fabric->num_vports);
+	return fabric->num_vports;
 }
 
 /**
@@ -1165,8 +1174,8 @@ bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric)
 	reqlen = fc_flogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
 				    bfa_os_hton3b(FC_FABRIC_PORT),
 				    n2n_port->reply_oxid, pcfg->pwwn,
-				    pcfg->nwwn, bfa_pport_get_maxfrsize(bfa),
-				    bfa_pport_get_rx_bbcredit(bfa));
+				    pcfg->nwwn, bfa_fcport_get_maxfrsize(bfa),
+				    bfa_fcport_get_rx_bbcredit(bfa));
 
 	bfa_fcxp_send(fcxp, NULL, fabric->vf_id, bfa_lps_get_tag(fabric->lps),
 		      BFA_FALSE, FC_CLASS_3, reqlen, &fchs,
@@ -1224,14 +1233,8 @@ bfa_fcs_fabric_aen_post(struct bfa_fcs_port_s *port,
 	wwn2str(pwwn_ptr, pwwn);
 	wwn2str(fwwn_ptr, fwwn);
 
-	switch (event) {
-	case BFA_PORT_AEN_FABRIC_NAME_CHANGE:
-		bfa_log(logmod, BFA_AEN_PORT_FABRIC_NAME_CHANGE, pwwn_ptr,
-			fwwn_ptr);
-		break;
-	default:
-		break;
-	}
+	bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, event),
+		pwwn_ptr, fwwn_ptr);
 
 	aen_data.port.pwwn = pwwn;
 	aen_data.port.fwwn = fwwn;
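
The per-event switch collapses into a single bfa_log() call because BFA_LOG_CREATE_ID() derives the message id from the AEN category and the event number. A plausible expansion, assuming the usual pack-two-fields-into-one-id scheme (this is a guess; the real macro lives in the bfa log headers and its shift width may differ):

    /* Assumed shape of BFA_LOG_CREATE_ID(); not taken from this patch. */
    #define BFA_LOG_CREATE_ID(msgid, msgnum) \
            (((u32)(msgid) << 16) | (msgnum))
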
diff --git a/drivers/scsi/bfa/fcbuild.c b/drivers/scsi/bfa/fcbuild.c
index d174706b9caa..fee5456451cb 100644
--- a/drivers/scsi/bfa/fcbuild.c
+++ b/drivers/scsi/bfa/fcbuild.c
@@ -188,14 +188,14 @@ fc_els_rsp_parse(struct fchs_s *fchs, int len)
 	switch (els_cmd->els_code) {
 	case FC_ELS_LS_RJT:
 		if (ls_rjt->reason_code == FC_LS_RJT_RSN_LOGICAL_BUSY)
-			return (FC_PARSE_BUSY);
+			return FC_PARSE_BUSY;
 		else
-			return (FC_PARSE_FAILURE);
+			return FC_PARSE_FAILURE;
 
 	case FC_ELS_ACC:
-		return (FC_PARSE_OK);
+		return FC_PARSE_OK;
 	}
-	return (FC_PARSE_OK);
+	return FC_PARSE_OK;
 }
 
 static void
@@ -228,7 +228,7 @@ fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
 	bfa_os_memcpy(&plogi->port_name, &port_name, sizeof(wwn_t));
 	bfa_os_memcpy(&plogi->node_name, &node_name, sizeof(wwn_t));
 
-	return (sizeof(struct fc_logi_s));
+	return sizeof(struct fc_logi_s);
 }
 
 u16
@@ -267,7 +267,7 @@ fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
 	flogi->csp.npiv_supp = 1;	/* @todo. field name is not correct */
 	vvl_info[0] = bfa_os_htonl(FLOGI_VVL_BRCD);
 
-	return (sizeof(struct fc_logi_s));
+	return sizeof(struct fc_logi_s);
 }
 
 u16
@@ -287,7 +287,7 @@ fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
 
 	flogi->csp.bbcred = bfa_os_htons(local_bb_credits);
 
-	return (sizeof(struct fc_logi_s));
+	return sizeof(struct fc_logi_s);
 }
 
 u16
@@ -306,7 +306,7 @@ fc_fdisc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
 	flogi->port_name = port_name;
 	flogi->node_name = node_name;
 
-	return (sizeof(struct fc_logi_s));
+	return sizeof(struct fc_logi_s);
 }
 
 u16
@@ -338,26 +338,26 @@ fc_plogi_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name)
 	case FC_ELS_LS_RJT:
 		ls_rjt = (struct fc_ls_rjt_s *) (fchs + 1);
 		if (ls_rjt->reason_code == FC_LS_RJT_RSN_LOGICAL_BUSY)
-			return (FC_PARSE_BUSY);
+			return FC_PARSE_BUSY;
 		else
-			return (FC_PARSE_FAILURE);
+			return FC_PARSE_FAILURE;
 	case FC_ELS_ACC:
 		plogi = (struct fc_logi_s *) (fchs + 1);
 		if (len < sizeof(struct fc_logi_s))
-			return (FC_PARSE_FAILURE);
+			return FC_PARSE_FAILURE;
 
 		if (!wwn_is_equal(plogi->port_name, port_name))
-			return (FC_PARSE_FAILURE);
+			return FC_PARSE_FAILURE;
 
 		if (!plogi->class3.class_valid)
-			return (FC_PARSE_FAILURE);
+			return FC_PARSE_FAILURE;
 
 		if (bfa_os_ntohs(plogi->class3.rxsz) < (FC_MIN_PDUSZ))
-			return (FC_PARSE_FAILURE);
+			return FC_PARSE_FAILURE;
 
-		return (FC_PARSE_OK);
+		return FC_PARSE_OK;
 	default:
-		return (FC_PARSE_FAILURE);
+		return FC_PARSE_FAILURE;
 	}
 }
 
@@ -372,7 +372,7 @@ fc_plogi_parse(struct fchs_s *fchs)
 	if ((bfa_os_ntohs(plogi->class3.rxsz) < FC_MIN_PDUSZ)
 	    || (bfa_os_ntohs(plogi->class3.rxsz) > FC_MAX_PDUSZ)
 	    || (plogi->class3.rxsz == 0))
-		return (FC_PARSE_FAILURE);
+		return FC_PARSE_FAILURE;
 
 	return FC_PARSE_OK;
 }
@@ -393,7 +393,7 @@ fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
 	prli->parampage.servparams.task_retry_id = 0;
 	prli->parampage.servparams.confirm = 1;
 
-	return (sizeof(struct fc_prli_s));
+	return sizeof(struct fc_prli_s);
 }
 
 u16
@@ -414,41 +414,41 @@ fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
 
 	prli->parampage.rspcode = FC_PRLI_ACC_XQTD;
 
-	return (sizeof(struct fc_prli_s));
+	return sizeof(struct fc_prli_s);
 }
 
 enum fc_parse_status
 fc_prli_rsp_parse(struct fc_prli_s *prli, int len)
 {
 	if (len < sizeof(struct fc_prli_s))
-		return (FC_PARSE_FAILURE);
+		return FC_PARSE_FAILURE;
 
 	if (prli->command != FC_ELS_ACC)
-		return (FC_PARSE_FAILURE);
+		return FC_PARSE_FAILURE;
 
 	if ((prli->parampage.rspcode != FC_PRLI_ACC_XQTD)
 	    && (prli->parampage.rspcode != FC_PRLI_ACC_PREDEF_IMG))
-		return (FC_PARSE_FAILURE);
+		return FC_PARSE_FAILURE;
 
 	if (prli->parampage.servparams.target != 1)
-		return (FC_PARSE_FAILURE);
+		return FC_PARSE_FAILURE;
 
-	return (FC_PARSE_OK);
+	return FC_PARSE_OK;
 }
 
 enum fc_parse_status
 fc_prli_parse(struct fc_prli_s *prli)
 {
 	if (prli->parampage.type != FC_TYPE_FCP)
-		return (FC_PARSE_FAILURE);
+		return FC_PARSE_FAILURE;
 
 	if (!prli->parampage.imagepair)
-		return (FC_PARSE_FAILURE);
+		return FC_PARSE_FAILURE;
 
 	if (!prli->parampage.servparams.initiator)
-		return (FC_PARSE_FAILURE);
+		return FC_PARSE_FAILURE;
 
-	return (FC_PARSE_OK);
+	return FC_PARSE_OK;
 }
 
 u16
@@ -462,7 +462,7 @@ fc_logo_build(struct fchs_s *fchs, struct fc_logo_s *logo, u32 d_id,
 	logo->nport_id = (s_id);
 	logo->orig_port_name = port_name;
 
-	return (sizeof(struct fc_logo_s));
+	return sizeof(struct fc_logo_s);
 }
 
 static u16
@@ -484,7 +484,7 @@ fc_adisc_x_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id,
 	adisc->orig_node_name = node_name;
 	adisc->nport_id = (s_id);
 
-	return (sizeof(struct fc_adisc_s));
+	return sizeof(struct fc_adisc_s);
 }
 
 u16
@@ -511,15 +511,15 @@ fc_adisc_rsp_parse(struct fc_adisc_s *adisc, int len, wwn_t port_name,
 {
 
 	if (len < sizeof(struct fc_adisc_s))
-		return (FC_PARSE_FAILURE);
+		return FC_PARSE_FAILURE;
 
 	if (adisc->els_cmd.els_code != FC_ELS_ACC)
-		return (FC_PARSE_FAILURE);
+		return FC_PARSE_FAILURE;
 
 	if (!wwn_is_equal(adisc->orig_port_name, port_name))
-		return (FC_PARSE_FAILURE);
+		return FC_PARSE_FAILURE;
 
-	return (FC_PARSE_OK);
+	return FC_PARSE_OK;
 }
 
 enum fc_parse_status
@@ -529,14 +529,14 @@ fc_adisc_parse(struct fchs_s *fchs, void *pld, u32 host_dap,
 	struct fc_adisc_s *adisc = (struct fc_adisc_s *) pld;
 
 	if (adisc->els_cmd.els_code != FC_ELS_ACC)
-		return (FC_PARSE_FAILURE);
+		return FC_PARSE_FAILURE;
 
 	if ((adisc->nport_id == (host_dap))
 	    && wwn_is_equal(adisc->orig_port_name, port_name)
 	    && wwn_is_equal(adisc->orig_node_name, node_name))
-		return (FC_PARSE_OK);
+		return FC_PARSE_OK;
 
-	return (FC_PARSE_FAILURE);
+	return FC_PARSE_FAILURE;
 }
 
 enum fc_parse_status
@@ -550,13 +550,13 @@ fc_pdisc_parse(struct fchs_s *fchs, wwn_t node_name, wwn_t port_name)
 	if ((bfa_os_ntohs(pdisc->class3.rxsz) <
 	     (FC_MIN_PDUSZ - sizeof(struct fchs_s)))
 	    || (pdisc->class3.rxsz == 0))
-		return (FC_PARSE_FAILURE);
+		return FC_PARSE_FAILURE;
 
 	if (!wwn_is_equal(pdisc->port_name, port_name))
-		return (FC_PARSE_FAILURE);
+		return FC_PARSE_FAILURE;
 
 	if (!wwn_is_equal(pdisc->node_name, node_name))
-		return (FC_PARSE_FAILURE);
+		return FC_PARSE_FAILURE;
 
 	return FC_PARSE_OK;
 }
@@ -570,7 +570,7 @@ fc_abts_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
 	fchs->s_id = (s_id);
 	fchs->ox_id = bfa_os_htons(ox_id);
 
-	return (sizeof(struct fchs_s));
+	return sizeof(struct fchs_s);
 }
 
 enum fc_parse_status
@@ -578,9 +578,9 @@ fc_abts_rsp_parse(struct fchs_s *fchs, int len)
 {
 	if ((fchs->cat_info == FC_CAT_BA_ACC)
 	    || (fchs->cat_info == FC_CAT_BA_RJT))
-		return (FC_PARSE_OK);
+		return FC_PARSE_OK;
 
-	return (FC_PARSE_FAILURE);
+	return FC_PARSE_FAILURE;
 }
 
 u16
@@ -597,7 +597,7 @@ fc_rrq_build(struct fchs_s *fchs, struct fc_rrq_s *rrq, u32 d_id,
 	rrq->ox_id = bfa_os_htons(rrq_oxid);
 	rrq->rx_id = FC_RXID_ANY;
 
-	return (sizeof(struct fc_rrq_s));
+	return sizeof(struct fc_rrq_s);
 }
 
 u16
@@ -611,7 +611,7 @@ fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
 	memset(acc, 0, sizeof(struct fc_els_cmd_s));
 	acc->els_code = FC_ELS_ACC;
 
-	return (sizeof(struct fc_els_cmd_s));
+	return sizeof(struct fc_els_cmd_s);
 }
 
 u16
@@ -627,7 +627,7 @@ fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt, u32 d_id,
 	ls_rjt->reason_code_expl = reason_code_expl;
 	ls_rjt->vendor_unique = 0x00;
 
-	return (sizeof(struct fc_ls_rjt_s));
+	return sizeof(struct fc_ls_rjt_s);
 }
 
 u16
@@ -643,7 +643,7 @@ fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id,
 	ba_acc->ox_id = fchs->ox_id;
 	ba_acc->rx_id = fchs->rx_id;
 
-	return (sizeof(struct fc_ba_acc_s));
+	return sizeof(struct fc_ba_acc_s);
 }
 
 u16
@@ -654,7 +654,7 @@ fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd,
 	memset(els_cmd, 0, sizeof(struct fc_els_cmd_s));
 	els_cmd->els_code = FC_ELS_ACC;
 
-	return (sizeof(struct fc_els_cmd_s));
+	return sizeof(struct fc_els_cmd_s);
 }
 
 int
@@ -696,7 +696,7 @@ fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc,
 		tprlo_acc->tprlo_acc_params[page].orig_process_assc = 0;
 		tprlo_acc->tprlo_acc_params[page].resp_process_assc = 0;
 	}
-	return (bfa_os_ntohs(tprlo_acc->payload_len));
+	return bfa_os_ntohs(tprlo_acc->payload_len);
 }
 
 u16
@@ -721,7 +721,7 @@ fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc,
 		prlo_acc->prlo_acc_params[page].resp_process_assc = 0;
 	}
 
-	return (bfa_os_ntohs(prlo_acc->payload_len));
+	return bfa_os_ntohs(prlo_acc->payload_len);
 }
 
 u16
@@ -735,7 +735,7 @@ fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid, u32 d_id,
 	rnid->els_cmd.els_code = FC_ELS_RNID;
 	rnid->node_id_data_format = data_format;
 
-	return (sizeof(struct fc_rnid_cmd_s));
+	return sizeof(struct fc_rnid_cmd_s);
 }
 
 u16
@@ -759,10 +759,10 @@ fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc,
 		rnid_acc->specific_id_data_length =
 			sizeof(struct fc_rnid_general_topology_data_s);
 		bfa_os_assign(rnid_acc->gen_topology_data, *gen_topo_data);
-		return (sizeof(struct fc_rnid_acc_s));
+		return sizeof(struct fc_rnid_acc_s);
 	} else {
-		return (sizeof(struct fc_rnid_acc_s) -
-			sizeof(struct fc_rnid_general_topology_data_s));
+		return sizeof(struct fc_rnid_acc_s) -
+			sizeof(struct fc_rnid_general_topology_data_s);
 	}
 
 }
@@ -776,7 +776,7 @@ fc_rpsc_build(struct fchs_s *fchs, struct fc_rpsc_cmd_s *rpsc, u32 d_id,
 	memset(rpsc, 0, sizeof(struct fc_rpsc_cmd_s));
 
 	rpsc->els_cmd.els_code = FC_ELS_RPSC;
-	return (sizeof(struct fc_rpsc_cmd_s));
+	return sizeof(struct fc_rpsc_cmd_s);
 }
 
 u16
@@ -797,8 +797,8 @@ fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rpsc2,
 	for (i = 0; i < npids; i++)
 		rpsc2->pid_list[i].pid = pid_list[i];
 
-	return (sizeof(struct fc_rpsc2_cmd_s) + ((npids - 1) *
-			(sizeof(u32))));
+	return sizeof(struct fc_rpsc2_cmd_s) + ((npids - 1) *
+			(sizeof(u32)));
 }
 
 u16
@@ -819,7 +819,7 @@ fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc,
 	rpsc_acc->speed_info[0].port_op_speed =
 		bfa_os_htons(oper_speed->port_op_speed);
 
-	return (sizeof(struct fc_rpsc_acc_s));
+	return sizeof(struct fc_rpsc_acc_s);
 
 }
 
@@ -856,7 +856,7 @@ fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
 	pdisc->port_name = port_name;
 	pdisc->node_name = node_name;
 
-	return (sizeof(struct fc_logi_s));
+	return sizeof(struct fc_logi_s);
 }
 
 u16
@@ -865,21 +865,21 @@ fc_pdisc_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name)
 	struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1);
 
 	if (len < sizeof(struct fc_logi_s))
-		return (FC_PARSE_LEN_INVAL);
+		return FC_PARSE_LEN_INVAL;
 
 	if (pdisc->els_cmd.els_code != FC_ELS_ACC)
-		return (FC_PARSE_ACC_INVAL);
+		return FC_PARSE_ACC_INVAL;
 
 	if (!wwn_is_equal(pdisc->port_name, port_name))
-		return (FC_PARSE_PWWN_NOT_EQUAL);
+		return FC_PARSE_PWWN_NOT_EQUAL;
 
 	if (!pdisc->class3.class_valid)
-		return (FC_PARSE_NWWN_NOT_EQUAL);
+		return FC_PARSE_NWWN_NOT_EQUAL;
 
 	if (bfa_os_ntohs(pdisc->class3.rxsz) < (FC_MIN_PDUSZ))
-		return (FC_PARSE_RXSZ_INVAL);
+		return FC_PARSE_RXSZ_INVAL;
 
-	return (FC_PARSE_OK);
+	return FC_PARSE_OK;
 }
 
 u16
@@ -903,7 +903,7 @@ fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
 		prlo->prlo_params[page].resp_process_assc = 0;
 	}
 
-	return (bfa_os_ntohs(prlo->payload_len));
+	return bfa_os_ntohs(prlo->payload_len);
 }
 
 u16
@@ -916,7 +916,7 @@ fc_prlo_rsp_parse(struct fchs_s *fchs, int len)
 	len = len;
 
 	if (prlo->command != FC_ELS_ACC)
-		return (FC_PARSE_FAILURE);
+		return FC_PARSE_FAILURE;
 
 	num_pages = ((bfa_os_ntohs(prlo->payload_len)) - 4) / 16;
 
@@ -936,7 +936,7 @@ fc_prlo_rsp_parse(struct fchs_s *fchs, int len)
 		if (prlo->prlo_acc_params[page].resp_process_assc != 0)
 			return FC_PARSE_FAILURE;
 	}
-	return (FC_PARSE_OK);
+	return FC_PARSE_OK;
 
 }
 
@@ -968,7 +968,7 @@ fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
 		}
 	}
 
-	return (bfa_os_ntohs(tprlo->payload_len));
+	return bfa_os_ntohs(tprlo->payload_len);
 }
 
 u16
@@ -981,23 +981,23 @@ fc_tprlo_rsp_parse(struct fchs_s *fchs, int len)
 	len = len;
 
 	if (tprlo->command != FC_ELS_ACC)
-		return (FC_PARSE_ACC_INVAL);
+		return FC_PARSE_ACC_INVAL;
 
 	num_pages = (bfa_os_ntohs(tprlo->payload_len) - 4) / 16;
 
 	for (page = 0; page < num_pages; page++) {
 		if (tprlo->tprlo_acc_params[page].type != FC_TYPE_FCP)
-			return (FC_PARSE_NOT_FCP);
+			return FC_PARSE_NOT_FCP;
 		if (tprlo->tprlo_acc_params[page].opa_valid != 0)
-			return (FC_PARSE_OPAFLAG_INVAL);
+			return FC_PARSE_OPAFLAG_INVAL;
 		if (tprlo->tprlo_acc_params[page].rpa_valid != 0)
-			return (FC_PARSE_RPAFLAG_INVAL);
+			return FC_PARSE_RPAFLAG_INVAL;
 		if (tprlo->tprlo_acc_params[page].orig_process_assc != 0)
-			return (FC_PARSE_OPA_INVAL);
+			return FC_PARSE_OPA_INVAL;
 		if (tprlo->tprlo_acc_params[page].resp_process_assc != 0)
-			return (FC_PARSE_RPA_INVAL);
+			return FC_PARSE_RPA_INVAL;
 	}
-	return (FC_PARSE_OK);
+	return FC_PARSE_OK;
 }
 
 enum fc_parse_status
@@ -1024,7 +1024,7 @@ fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
 	fchs->cat_info = FC_CAT_BA_RJT;
 	ba_rjt->reason_code = reason_code;
 	ba_rjt->reason_expl = reason_expl;
-	return (sizeof(struct fc_ba_rjt_s));
+	return sizeof(struct fc_ba_rjt_s);
 }
 
 static void
@@ -1073,7 +1073,7 @@ fc_gidpn_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
 
 	bfa_os_memset(gidpn, 0, sizeof(struct fcgs_gidpn_req_s));
 	gidpn->port_name = port_name;
-	return (sizeof(struct fcgs_gidpn_req_s) + sizeof(struct ct_hdr_s));
+	return sizeof(struct fcgs_gidpn_req_s) + sizeof(struct ct_hdr_s);
 }
 
 u16
@@ -1090,7 +1090,7 @@ fc_gpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
 
 	bfa_os_memset(gpnid, 0, sizeof(fcgs_gpnid_req_t));
 	gpnid->dap = port_id;
-	return (sizeof(fcgs_gpnid_req_t) + sizeof(struct ct_hdr_s));
+	return sizeof(fcgs_gpnid_req_t) + sizeof(struct ct_hdr_s);
 }
 
 u16
@@ -1107,7 +1107,7 @@ fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
 
 	bfa_os_memset(gnnid, 0, sizeof(fcgs_gnnid_req_t));
 	gnnid->dap = port_id;
-	return (sizeof(fcgs_gnnid_req_t) + sizeof(struct ct_hdr_s));
+	return sizeof(fcgs_gnnid_req_t) + sizeof(struct ct_hdr_s);
 }
 
 u16
@@ -1137,7 +1137,7 @@ fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr, u8 set_br_reg,
 	if (set_br_reg)
 		scr->vu_reg_func = FC_VU_SCR_REG_FUNC_FABRIC_NAME_CHANGE;
 
-	return (sizeof(struct fc_scr_s));
+	return sizeof(struct fc_scr_s);
 }
 
 u16
@@ -1157,7 +1157,7 @@ fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn, u32 s_id,
 	rscn->event[0].format = FC_RSCN_FORMAT_PORTID;
 	rscn->event[0].portid = s_id;
 
-	return (sizeof(struct fc_rscn_pl_s));
+	return sizeof(struct fc_rscn_pl_s);
 }
 
 u16
@@ -1188,7 +1188,7 @@ fc_rftid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
 		rftid->fc4_type[index] |= bfa_os_htonl(type_value);
 	}
 
-	return (sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s));
+	return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s);
 }
 
 u16
@@ -1210,7 +1210,7 @@ fc_rftid_build_sol(struct fchs_s *fchs, void *pyld, u32 s_id,
 	bfa_os_memcpy((void *)rftid->fc4_type, (void *)fc4_bitmap,
 		      (bitmap_size < 32 ? bitmap_size : 32));
 
-	return (sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s));
+	return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s);
 }
 
 u16
@@ -1231,7 +1231,7 @@ fc_rffid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
 	rffid->fc4ftr_bits = fc4_ftrs;
 	rffid->fc4_type = fc4_type;
 
-	return (sizeof(struct fcgs_rffid_req_s) + sizeof(struct ct_hdr_s));
+	return sizeof(struct fcgs_rffid_req_s) + sizeof(struct ct_hdr_s);
 }
 
 u16
@@ -1253,7 +1253,7 @@ fc_rspnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
 	rspnid->spn_len = (u8) strlen((char *)name);
 	strncpy((char *)rspnid->spn, (char *)name, rspnid->spn_len);
 
-	return (sizeof(struct fcgs_rspnid_req_s) + sizeof(struct ct_hdr_s));
+	return sizeof(struct fcgs_rspnid_req_s) + sizeof(struct ct_hdr_s);
 }
 
 u16
@@ -1275,7 +1275,7 @@ fc_gid_ft_build(struct fchs_s *fchs, void *pyld, u32 s_id,
 	gidft->domain_id = 0;
 	gidft->area_id = 0;
 
-	return (sizeof(struct fcgs_gidft_req_s) + sizeof(struct ct_hdr_s));
+	return sizeof(struct fcgs_gidft_req_s) + sizeof(struct ct_hdr_s);
 }
 
 u16
@@ -1294,7 +1294,7 @@ fc_rpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
 	rpnid->port_id = port_id;
 	rpnid->port_name = port_name;
 
-	return (sizeof(struct fcgs_rpnid_req_s) + sizeof(struct ct_hdr_s));
+	return sizeof(struct fcgs_rpnid_req_s) + sizeof(struct ct_hdr_s);
 }
 
 u16
@@ -1313,7 +1313,7 @@ fc_rnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
 	rnnid->port_id = port_id;
 	rnnid->node_name = node_name;
 
-	return (sizeof(struct fcgs_rnnid_req_s) + sizeof(struct ct_hdr_s));
+	return sizeof(struct fcgs_rnnid_req_s) + sizeof(struct ct_hdr_s);
 }
 
 u16
@@ -1332,7 +1332,7 @@ fc_rcsid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
 	rcsid->port_id = port_id;
 	rcsid->cos = cos;
 
-	return (sizeof(struct fcgs_rcsid_req_s) + sizeof(struct ct_hdr_s));
+	return sizeof(struct fcgs_rcsid_req_s) + sizeof(struct ct_hdr_s);
 }
 
 u16
@@ -1351,7 +1351,7 @@ fc_rptid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
 	rptid->port_id = port_id;
 	rptid->port_type = port_type;
 
-	return (sizeof(struct fcgs_rptid_req_s) + sizeof(struct ct_hdr_s));
+	return sizeof(struct fcgs_rptid_req_s) + sizeof(struct ct_hdr_s);
 }
 
 u16
@@ -1368,7 +1368,7 @@ fc_ganxt_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id)
 	bfa_os_memset(ganxt, 0, sizeof(struct fcgs_ganxt_req_s));
 	ganxt->port_id = port_id;
 
-	return (sizeof(struct ct_hdr_s) + sizeof(struct fcgs_ganxt_req_s));
+	return sizeof(struct ct_hdr_s) + sizeof(struct fcgs_ganxt_req_s);
 }
 
 /*
@@ -1385,7 +1385,7 @@ fc_fdmi_reqhdr_build(struct fchs_s *fchs, void *pyld, u32 s_id,
 	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
 	fc_gs_fdmi_cthdr_build(cthdr, s_id, cmd_code);
 
-	return (sizeof(struct ct_hdr_s));
+	return sizeof(struct ct_hdr_s);
 }
 
 /*
@@ -1425,7 +1425,7 @@ fc_gmal_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn)
 	bfa_os_memset(gmal, 0, sizeof(fcgs_gmal_req_t));
 	gmal->wwn = wwn;
 
-	return (sizeof(struct ct_hdr_s) + sizeof(fcgs_gmal_req_t));
+	return sizeof(struct ct_hdr_s) + sizeof(fcgs_gmal_req_t);
 }
 
 /*
@@ -1445,5 +1445,5 @@ fc_gfn_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn)
 	bfa_os_memset(gfn, 0, sizeof(fcgs_gfn_req_t));
 	gfn->wwn = wwn;
 
-	return (sizeof(struct ct_hdr_s) + sizeof(fcgs_gfn_req_t));
+	return sizeof(struct ct_hdr_s) + sizeof(fcgs_gfn_req_t);
 }
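
All of the fcbuild.c hunks are the same mechanical cleanup: checkpatch's "return is not a function" warning, i.e. dropping the redundant parentheses around return values; behavior is unchanged. One spot worth a second look is fc_rpsc2_build(), whose length math survives intact: sizeof(struct fc_rpsc2_cmd_s) + (npids - 1) * sizeof(u32) only makes sense if the command struct ends in a one-element pid array. A self-contained sketch of that sizing idiom, with invented names:

    /* Sizing idiom sketch; struct and typedef are illustrative only. */
    typedef unsigned int u32;

    struct example_cmd {
            u32 header;
            u32 pid_list[1];        /* really variable length */
    };

    /* One entry is already counted by sizeof(), so add (npids - 1). */
    static inline u32 example_cmd_len(int npids)
    {
            return sizeof(struct example_cmd) + (npids - 1) * sizeof(u32);
    }
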
diff --git a/drivers/scsi/bfa/fcbuild.h b/drivers/scsi/bfa/fcbuild.h
index 4d248424f7b3..981d98d542b9 100644
--- a/drivers/scsi/bfa/fcbuild.h
+++ b/drivers/scsi/bfa/fcbuild.h
@@ -32,8 +32,8 @@
  * Utility Macros/functions
  */
 
-#define fcif_sof_set(_ifhdr, _sof)	(_ifhdr)->sof = FC_ ## _sof
-#define fcif_eof_set(_ifhdr, _eof)	(_ifhdr)->eof = FC_ ## _eof
+#define fcif_sof_set(_ifhdr, _sof)	((_ifhdr)->sof = FC_ ## _sof)
+#define fcif_eof_set(_ifhdr, _eof)	((_ifhdr)->eof = FC_ ## _eof)
 
 #define wwn_is_equal(_wwn1, _wwn2)	\
 	(memcmp(&(_wwn1), &(_wwn2), sizeof(wwn_t)) == 0)
@@ -49,7 +49,7 @@
 static inline u32
 fc_get_ctresp_pyld_len(u32 resp_len)
 {
-	return (resp_len - sizeof(struct ct_hdr_s));
+	return resp_len - sizeof(struct ct_hdr_s);
 }
 
 /*
@@ -72,6 +72,9 @@ fc_rpsc_operspeed_to_bfa_speed(enum fc_rpsc_op_speed_s speed)
 	case RPSC_OP_SPEED_8G:
 		return BFA_PPORT_SPEED_8GBPS;
 
+	case RPSC_OP_SPEED_10G:
+		return BFA_PPORT_SPEED_10GBPS;
+
 	default:
 		return BFA_PPORT_SPEED_UNKNOWN;
 	}
@@ -97,6 +100,9 @@ fc_bfa_speed_to_rpsc_operspeed(enum bfa_pport_speed op_speed)
 	case BFA_PPORT_SPEED_8GBPS:
 		return RPSC_OP_SPEED_8G;
 
+	case BFA_PPORT_SPEED_10GBPS:
+		return RPSC_OP_SPEED_10G;
+
 	default:
 		return RPSC_OP_SPEED_NOT_EST;
 	}
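
With the 10G cases added to both converters, every speed that appears in both enums now survives a round trip instead of collapsing to BFA_PPORT_SPEED_UNKNOWN / RPSC_OP_SPEED_NOT_EST. A host-side sanity sketch (not kernel code; it only uses enum values visible in these hunks):

    #include <assert.h>

    static void check_speed_roundtrip(void)
    {
            /* 8G already round-tripped; 10G does too after this hunk. */
            assert(fc_bfa_speed_to_rpsc_operspeed(
                    fc_rpsc_operspeed_to_bfa_speed(RPSC_OP_SPEED_8G))
                    == RPSC_OP_SPEED_8G);
            assert(fc_bfa_speed_to_rpsc_operspeed(
                    fc_rpsc_operspeed_to_bfa_speed(RPSC_OP_SPEED_10G))
                    == RPSC_OP_SPEED_10G);
    }
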
diff --git a/drivers/scsi/bfa/fcpim.c b/drivers/scsi/bfa/fcpim.c
index 8ce5d8934677..8ae4a2cfa85b 100644
--- a/drivers/scsi/bfa/fcpim.c
+++ b/drivers/scsi/bfa/fcpim.c
@@ -126,7 +126,7 @@ bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim,
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(itnim->fcs, event);
 	}
 
 }
@@ -161,7 +161,7 @@ bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim,
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(itnim->fcs, event);
 	}
 }
 
@@ -205,7 +205,7 @@ bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim,
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(itnim->fcs, event);
 	}
 }
 
@@ -240,7 +240,7 @@ bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim,
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(itnim->fcs, event);
 	}
 }
 
@@ -270,7 +270,7 @@ bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(itnim->fcs, event);
 	}
 }
 
@@ -286,11 +285,10 @@ bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim,
 		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_offline);
 		bfa_fcb_itnim_offline(itnim->itnim_drv);
 		bfa_itnim_offline(itnim->bfa_itnim);
-		if (bfa_fcs_port_is_online(itnim->rport->port) == BFA_TRUE) {
+		if (bfa_fcs_port_is_online(itnim->rport->port) == BFA_TRUE)
 			bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_DISCONNECT);
-		} else {
+		else
 			bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_OFFLINE);
-		}
 		break;
 
 	case BFA_FCS_ITNIM_SM_DELETE:
@@ -299,7 +298,7 @@ bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim,
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(itnim->fcs, event);
 	}
 }
 
@@ -322,7 +321,7 @@ bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim,
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(itnim->fcs, event);
 	}
 }
 
@@ -355,7 +354,7 @@ bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(itnim->fcs, event);
 	}
 }
 
@@ -386,19 +385,8 @@ bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim,
 	wwn2str(lpwwn_ptr, lpwwn);
 	wwn2str(rpwwn_ptr, rpwwn);
 
-	switch (event) {
-	case BFA_ITNIM_AEN_ONLINE:
-		bfa_log(logmod, BFA_AEN_ITNIM_ONLINE, rpwwn_ptr, lpwwn_ptr);
-		break;
-	case BFA_ITNIM_AEN_OFFLINE:
-		bfa_log(logmod, BFA_AEN_ITNIM_OFFLINE, rpwwn_ptr, lpwwn_ptr);
-		break;
-	case BFA_ITNIM_AEN_DISCONNECT:
-		bfa_log(logmod, BFA_AEN_ITNIM_DISCONNECT, rpwwn_ptr, lpwwn_ptr);
-		break;
-	default:
-		break;
-	}
+	bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_ITNIM, event),
+		rpwwn_ptr, lpwwn_ptr);
 
 	aen_data.itnim.vf_id = rport->port->fabric->vf_id;
 	aen_data.itnim.ppwwn =
@@ -690,7 +678,6 @@ bfa_cb_itnim_tov_begin(void *cb_arg)
 	struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)cb_arg;
 
 	bfa_trc(itnim->fcs, itnim->rport->pwwn);
-	bfa_fcb_itnim_tov_begin(itnim->itnim_drv);
 }
 
 /**
@@ -732,7 +719,7 @@ bfa_fcs_itnim_lookup(struct bfa_fcs_port_s *port, wwn_t rpwwn)
 		return NULL;
 
 	bfa_assert(rport->itnim != NULL);
-	return (rport->itnim);
+	return rport->itnim;
 }
 
 bfa_status_t
@@ -823,22 +810,3 @@ void
 bfa_fcs_itnim_resume(struct bfa_fcs_itnim_s *itnim)
 {
 }
-
-/**
- *   Module initialization
- */
-void
-bfa_fcs_fcpim_modinit(struct bfa_fcs_s *fcs)
-{
-}
-
-/**
- *   Module cleanup
- */
-void
-bfa_fcs_fcpim_modexit(struct bfa_fcs_s *fcs)
-{
-	bfa_fcs_modexit_comp(fcs);
-}
-
-
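
Throughout fcpim.c (and fdmi.c below) the bare bfa_assert(0) on an unexpected state-machine event becomes bfa_sm_fault(fcs, event), so the offending event id gets recorded before the driver complains. A plausible expansion, purely illustrative (the real definition lives in the bfa cs/ headers and may differ):

    /* Assumed shape of bfa_sm_fault(); not taken from this patch. */
    #define bfa_sm_fault(__fcs, __event)    do {                    \
            bfa_trc(__fcs, (__event));      /* record the event */  \
            bfa_assert(0);                                          \
    } while (0)
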
diff --git a/drivers/scsi/bfa/fcs.h b/drivers/scsi/bfa/fcs.h
index deee685e8478..8d08230e6295 100644
--- a/drivers/scsi/bfa/fcs.h
+++ b/drivers/scsi/bfa/fcs.h
@@ -23,7 +23,7 @@
 #ifndef __FCS_H__
 #define __FCS_H__
 
-#define __fcs_min_cfg(__fcs)	(__fcs)->min_cfg
+#define __fcs_min_cfg(__fcs)	((__fcs)->min_cfg)
 
 void bfa_fcs_modexit_comp(struct bfa_fcs_s *fcs);
 
diff --git a/drivers/scsi/bfa/fcs_fabric.h b/drivers/scsi/bfa/fcs_fabric.h
index eee960820f86..244c3f00c50c 100644
--- a/drivers/scsi/bfa/fcs_fabric.h
+++ b/drivers/scsi/bfa/fcs_fabric.h
@@ -29,6 +29,7 @@
 /*
 * fcs friend functions: only between fcs modules
 */
+void bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs);
 void bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs);
 void bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs);
 void bfa_fcs_fabric_modsusp(struct bfa_fcs_s *fcs);
@@ -46,6 +47,7 @@ void bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric,
 			struct fchs_s *fchs, u16 len);
 u16 bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric);
 bfa_boolean_t bfa_fcs_fabric_is_loopback(struct bfa_fcs_fabric_s *fabric);
+bfa_boolean_t bfa_fcs_fabric_is_auth_failed(struct bfa_fcs_fabric_s *fabric);
 enum bfa_pport_type bfa_fcs_fabric_port_type(struct bfa_fcs_fabric_s *fabric);
 void bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric);
 void bfa_fcs_fabric_port_delete_comp(struct bfa_fcs_fabric_s *fabric);
diff --git a/drivers/scsi/bfa/fcs_fcpim.h b/drivers/scsi/bfa/fcs_fcpim.h
index 61e9e2687de3..11e6e7bce9f6 100644
--- a/drivers/scsi/bfa/fcs_fcpim.h
+++ b/drivers/scsi/bfa/fcs_fcpim.h
@@ -34,11 +34,6 @@ void bfa_fcs_itnim_is_initiator(struct bfa_fcs_itnim_s *itnim);
 void bfa_fcs_itnim_pause(struct bfa_fcs_itnim_s *itnim);
 void bfa_fcs_itnim_resume(struct bfa_fcs_itnim_s *itnim);
 
-/*
- * Modudle init/cleanup routines.
- */
-void bfa_fcs_fcpim_modinit(struct bfa_fcs_s *fcs);
-void bfa_fcs_fcpim_modexit(struct bfa_fcs_s *fcs);
 void bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim, struct fchs_s *fchs,
 			u16 len);
 #endif /* __FCS_FCPIM_H__ */
diff --git a/drivers/scsi/bfa/fcs_lport.h b/drivers/scsi/bfa/fcs_lport.h
index ae744ba35671..a6508c8ab184 100644
--- a/drivers/scsi/bfa/fcs_lport.h
+++ b/drivers/scsi/bfa/fcs_lport.h
@@ -84,9 +84,10 @@ void bfa_fcs_port_uf_recv(struct bfa_fcs_port_s *lport, struct fchs_s *fchs,
  * Following routines will be called by Fabric to indicate port
  * online/offline to vport.
  */
-void bfa_fcs_lport_init(struct bfa_fcs_port_s *lport, struct bfa_fcs_s *fcs,
-			u16 vf_id, struct bfa_port_cfg_s *port_cfg,
-			struct bfa_fcs_vport_s *vport);
+void bfa_fcs_lport_attach(struct bfa_fcs_port_s *lport, struct bfa_fcs_s *fcs,
+			uint16_t vf_id, struct bfa_fcs_vport_s *vport);
+void bfa_fcs_lport_init(struct bfa_fcs_port_s *lport,
+			struct bfa_port_cfg_s *port_cfg);
 void bfa_fcs_port_online(struct bfa_fcs_port_s *port);
 void bfa_fcs_port_offline(struct bfa_fcs_port_s *port);
 void bfa_fcs_port_delete(struct bfa_fcs_port_s *port);
diff --git a/drivers/scsi/bfa/fcs_port.h b/drivers/scsi/bfa/fcs_port.h
index abb65191dd27..408c06a7d164 100644
--- a/drivers/scsi/bfa/fcs_port.h
+++ b/drivers/scsi/bfa/fcs_port.h
@@ -26,7 +26,6 @@
 /*
  * fcs friend functions: only between fcs modules
  */
-void bfa_fcs_pport_modinit(struct bfa_fcs_s *fcs);
-void bfa_fcs_pport_modexit(struct bfa_fcs_s *fcs);
+void bfa_fcs_pport_attach(struct bfa_fcs_s *fcs);
 
 #endif /* __FCS_PPORT_H__ */
diff --git a/drivers/scsi/bfa/fcs_rport.h b/drivers/scsi/bfa/fcs_rport.h
index f601e9d74236..9c8d1d292380 100644
--- a/drivers/scsi/bfa/fcs_rport.h
+++ b/drivers/scsi/bfa/fcs_rport.h
@@ -24,9 +24,6 @@
 
 #include <fcs/bfa_fcs_rport.h>
 
-void bfa_fcs_rport_modinit(struct bfa_fcs_s *fcs);
-void bfa_fcs_rport_modexit(struct bfa_fcs_s *fcs);
-
 void bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs,
 			u16 len);
 void bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport);
diff --git a/drivers/scsi/bfa/fcs_uf.h b/drivers/scsi/bfa/fcs_uf.h
index 96f1bdcb31ed..f591072214fe 100644
--- a/drivers/scsi/bfa/fcs_uf.h
+++ b/drivers/scsi/bfa/fcs_uf.h
@@ -26,7 +26,6 @@
 /*
  * fcs friend functions: only between fcs modules
  */
-void bfa_fcs_uf_modinit(struct bfa_fcs_s *fcs);
-void bfa_fcs_uf_modexit(struct bfa_fcs_s *fcs);
+void bfa_fcs_uf_attach(struct bfa_fcs_s *fcs);
 
 #endif /* __FCS_UF_H__ */
diff --git a/drivers/scsi/bfa/fcs_vport.h b/drivers/scsi/bfa/fcs_vport.h
index 9e80b6a97b7f..13c32ebf946c 100644
--- a/drivers/scsi/bfa/fcs_vport.h
+++ b/drivers/scsi/bfa/fcs_vport.h
@@ -22,18 +22,10 @@
 #include <fcs/bfa_fcs_vport.h>
 #include <defs/bfa_defs_pci.h>
 
-/*
- * Modudle init/cleanup routines.
- */
-
-void bfa_fcs_vport_modinit(struct bfa_fcs_s *fcs);
-void bfa_fcs_vport_modexit(struct bfa_fcs_s *fcs);
-
 void bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport);
 void bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport);
 void bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport);
 void bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport);
-u32 bfa_fcs_vport_get_max(struct bfa_fcs_s *fcs);
 
 #endif /* __FCS_VPORT_H__ */
 
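
Taken together, these header hunks retire the per-module modinit/modexit pairs for pport, rport, uf and vport and expose attach-time entry points instead, matching the fabric/lport split earlier in the patch. A hypothetical fan-out of the new attach phase, using only the names these headers declare (the ordering is a guess):

    /* Invented helper showing how the attach entry points might be
     * driven from a single FCS-level attach. */
    static void example_fcs_attach_mods(struct bfa_fcs_s *fcs)
    {
            bfa_fcs_fabric_attach(fcs);
            bfa_fcs_pport_attach(fcs);
            bfa_fcs_uf_attach(fcs);
    }
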
diff --git a/drivers/scsi/bfa/fdmi.c b/drivers/scsi/bfa/fdmi.c
index b845eb272c78..8f17076d1a87 100644
--- a/drivers/scsi/bfa/fdmi.c
+++ b/drivers/scsi/bfa/fdmi.c
@@ -72,9 +72,9 @@ static u16 bfa_fcs_port_fdmi_build_rpa_pyld(
 			struct bfa_fcs_port_fdmi_s *fdmi, u8 *pyld);
 static u16 bfa_fcs_port_fdmi_build_portattr_block(
 			struct bfa_fcs_port_fdmi_s *fdmi, u8 *pyld);
-void bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_port_fdmi_s *fdmi,
+static void bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_port_fdmi_s *fdmi,
 			struct bfa_fcs_fdmi_hba_attr_s *hba_attr);
-void bfa_fcs_fdmi_get_portattr(struct bfa_fcs_port_fdmi_s *fdmi,
+static void bfa_fcs_fdmi_get_portattr(struct bfa_fcs_port_fdmi_s *fdmi,
 			struct bfa_fcs_fdmi_port_attr_s *port_attr);
 /**
  * fcs_fdmi_sm FCS FDMI state machine
@@ -116,6 +116,9 @@ static void bfa_fcs_port_fdmi_sm_rpa_retry(struct bfa_fcs_port_fdmi_s *fdmi,
 			enum port_fdmi_event event);
 static void bfa_fcs_port_fdmi_sm_online(struct bfa_fcs_port_fdmi_s *fdmi,
 			enum port_fdmi_event event);
+static void bfa_fcs_port_fdmi_sm_disabled(struct bfa_fcs_port_fdmi_s *fdmi,
+			enum port_fdmi_event event);
+
 /**
  * Start in offline state - awaiting MS to send start.
  */
@@ -155,7 +158,7 @@ bfa_fcs_port_fdmi_sm_offline(struct bfa_fcs_port_fdmi_s *fdmi,
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(port->fcs, event);
 	}
 }
 
@@ -180,7 +183,7 @@ bfa_fcs_port_fdmi_sm_sending_rhba(struct bfa_fcs_port_fdmi_s *fdmi,
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(port->fcs, event);
 	}
 }
 
@@ -227,7 +230,7 @@ bfa_fcs_port_fdmi_sm_rhba(struct bfa_fcs_port_fdmi_s *fdmi,
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(port->fcs, event);
 	}
 }
 
@@ -255,7 +258,7 @@ bfa_fcs_port_fdmi_sm_rhba_retry(struct bfa_fcs_port_fdmi_s *fdmi,
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(port->fcs, event);
 	}
 }
 
@@ -283,7 +286,7 @@ bfa_fcs_port_fdmi_sm_sending_rprt(struct bfa_fcs_port_fdmi_s *fdmi,
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(port->fcs, event);
 	}
 }
 
@@ -328,7 +331,7 @@ bfa_fcs_port_fdmi_sm_rprt(struct bfa_fcs_port_fdmi_s *fdmi,
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(port->fcs, event);
 	}
 }
 
@@ -356,7 +359,7 @@ bfa_fcs_port_fdmi_sm_rprt_retry(struct bfa_fcs_port_fdmi_s *fdmi,
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(port->fcs, event);
 	}
 }
 
@@ -384,7 +387,7 @@ bfa_fcs_port_fdmi_sm_sending_rpa(struct bfa_fcs_port_fdmi_s *fdmi,
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(port->fcs, event);
 	}
 }
 
@@ -428,7 +431,7 @@ bfa_fcs_port_fdmi_sm_rpa(struct bfa_fcs_port_fdmi_s *fdmi,
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(port->fcs, event);
 	}
 }
 
@@ -456,7 +459,7 @@ bfa_fcs_port_fdmi_sm_rpa_retry(struct bfa_fcs_port_fdmi_s *fdmi,
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(port->fcs, event);
 	}
 }
 
@@ -475,10 +478,24 @@ bfa_fcs_port_fdmi_sm_online(struct bfa_fcs_port_fdmi_s *fdmi,
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(port->fcs, event);
 	}
 }
 
+/**
+ * FDMI is disabled state.
+ */
+static void
+bfa_fcs_port_fdmi_sm_disabled(struct bfa_fcs_port_fdmi_s *fdmi,
+			enum port_fdmi_event event)
+{
+	struct bfa_fcs_port_s *port = fdmi->ms->port;
+
+	bfa_trc(port->fcs, port->port_cfg.pwwn);
+	bfa_trc(port->fcs, event);
+
+	/* No op State. It can only be enabled at Driver Init. */
+}
 
 /**
 * RHBA : Register HBA Attributes.
@@ -1091,42 +1108,29 @@ bfa_fcs_port_fdmi_timeout(void *arg)
 	bfa_sm_send_event(fdmi, FDMISM_EVENT_TIMEOUT);
 }
 
-void
+static void
 bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_port_fdmi_s *fdmi,
 			struct bfa_fcs_fdmi_hba_attr_s *hba_attr)
 {
 	struct bfa_fcs_port_s *port = fdmi->ms->port;
 	struct bfa_fcs_driver_info_s *driver_info = &port->fcs->driver_info;
-	struct bfa_adapter_attr_s adapter_attr;
 
 	bfa_os_memset(hba_attr, 0, sizeof(struct bfa_fcs_fdmi_hba_attr_s));
-	bfa_os_memset(&adapter_attr, 0, sizeof(struct bfa_adapter_attr_s));
-
-	bfa_ioc_get_adapter_attr(&port->fcs->bfa->ioc, &adapter_attr);
-
-	strncpy(hba_attr->manufacturer, adapter_attr.manufacturer,
-		sizeof(adapter_attr.manufacturer));
-
-	strncpy(hba_attr->serial_num, adapter_attr.serial_num,
-		sizeof(adapter_attr.serial_num));
-
-	strncpy(hba_attr->model, adapter_attr.model, sizeof(hba_attr->model));
 
-	strncpy(hba_attr->model_desc, adapter_attr.model_descr,
-		sizeof(hba_attr->model_desc));
-
-	strncpy(hba_attr->hw_version, adapter_attr.hw_ver,
-		sizeof(hba_attr->hw_version));
+	bfa_ioc_get_adapter_manufacturer(&port->fcs->bfa->ioc,
+		hba_attr->manufacturer);
+	bfa_ioc_get_adapter_serial_num(&port->fcs->bfa->ioc,
+		hba_attr->serial_num);
+	bfa_ioc_get_adapter_model(&port->fcs->bfa->ioc, hba_attr->model);
+	bfa_ioc_get_adapter_model(&port->fcs->bfa->ioc, hba_attr->model_desc);
+	bfa_ioc_get_pci_chip_rev(&port->fcs->bfa->ioc, hba_attr->hw_version);
+	bfa_ioc_get_adapter_optrom_ver(&port->fcs->bfa->ioc,
+		hba_attr->option_rom_ver);
+	bfa_ioc_get_adapter_fw_ver(&port->fcs->bfa->ioc, hba_attr->fw_version);
 
 	strncpy(hba_attr->driver_version, (char *)driver_info->version,
 		sizeof(hba_attr->driver_version));
 
-	strncpy(hba_attr->option_rom_ver, adapter_attr.optrom_ver,
-		sizeof(hba_attr->option_rom_ver));
-
-	strncpy(hba_attr->fw_version, adapter_attr.fw_ver,
-		sizeof(hba_attr->fw_version));
-
 	strncpy(hba_attr->os_name, driver_info->host_os_name,
 		sizeof(hba_attr->os_name));
 
@@ -1145,7 +1149,7 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_port_fdmi_s *fdmi,
 
 }
 
-void
+static void
 bfa_fcs_fdmi_get_portattr(struct bfa_fcs_port_fdmi_s *fdmi,
 			struct bfa_fcs_fdmi_port_attr_s *port_attr)
 {
@@ -1158,7 +1162,7 @@ bfa_fcs_fdmi_get_portattr(struct bfa_fcs_port_fdmi_s *fdmi,
 	/*
 	 * get pport attributes from hal
 	 */
-	bfa_pport_get_attr(port->fcs->bfa, &pport_attr);
+	bfa_fcport_get_attr(port->fcs->bfa, &pport_attr);
 
 	/*
 	 * get FC4 type Bitmask
@@ -1201,7 +1205,10 @@ bfa_fcs_port_fdmi_init(struct bfa_fcs_port_ms_s *ms)
 	struct bfa_fcs_port_fdmi_s *fdmi = &ms->fdmi;
 
 	fdmi->ms = ms;
-	bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
+	if (ms->port->fcs->fdmi_enabled)
+		bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
+	else
+		bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_disabled);
 }
 
 void
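
FDMI can now be compiled in but switched off per instance: fcs->fdmi_enabled is sampled once in bfa_fcs_port_fdmi_init(), and the new disabled state ignores every later event. A sketch of the driver-side plumbing this implies; the module-parameter name and helper are hypothetical:

    static int fdmi_enable = 1;
    module_param(fdmi_enable, int, S_IRUGO);
    MODULE_PARM_DESC(fdmi_enable, "Enable FDMI registration (default: on)");

    /* Invented helper: copy the knob into the FCS instance before
     * any port's FDMI state machine is initialized. */
    static void example_fcs_config(struct bfa_fcs_s *fcs)
    {
            fcs->fdmi_enabled = fdmi_enable ? BFA_TRUE : BFA_FALSE;
    }
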
diff --git a/drivers/scsi/bfa/include/aen/bfa_aen.h b/drivers/scsi/bfa/include/aen/bfa_aen.h
index da8cac093d3d..6abbab005db6 100644
--- a/drivers/scsi/bfa/include/aen/bfa_aen.h
+++ b/drivers/scsi/bfa/include/aen/bfa_aen.h
@@ -18,21 +18,24 @@
18#define __BFA_AEN_H__ 18#define __BFA_AEN_H__
19 19
20#include "defs/bfa_defs_aen.h" 20#include "defs/bfa_defs_aen.h"
21#include "defs/bfa_defs_status.h"
22#include "cs/bfa_debug.h"
21 23
22#define BFA_AEN_MAX_ENTRY 512 24#define BFA_AEN_MAX_ENTRY 512
23 25
24extern s32 bfa_aen_max_cfg_entry; 26extern int bfa_aen_max_cfg_entry;
25struct bfa_aen_s { 27struct bfa_aen_s {
26 void *bfad; 28 void *bfad;
27 s32 max_entry; 29 int max_entry;
28 s32 write_index; 30 int write_index;
29 s32 read_index; 31 int read_index;
30 u32 bfad_num; 32 int bfad_num;
31 u32 seq_num; 33 int seq_num;
32 void (*aen_cb_notify)(void *bfad); 34 void (*aen_cb_notify)(void *bfad);
33 void (*gettimeofday)(struct bfa_timeval_s *tv); 35 void (*gettimeofday)(struct bfa_timeval_s *tv);
34 struct bfa_trc_mod_s *trcmod; 36 struct bfa_trc_mod_s *trcmod;
35 struct bfa_aen_entry_s list[BFA_AEN_MAX_ENTRY]; /* Must be the last */ 37 int app_ri[BFA_AEN_MAX_APP]; /* For multiclient support */
38 struct bfa_aen_entry_s list[BFA_AEN_MAX_ENTRY]; /* Must be the last */
36}; 39};
37 40
38 41
@@ -45,48 +48,49 @@ bfa_aen_set_max_cfg_entry(int max_entry)
45 bfa_aen_max_cfg_entry = max_entry; 48 bfa_aen_max_cfg_entry = max_entry;
46} 49}
47 50
48static inline s32 51static inline int
49bfa_aen_get_max_cfg_entry(void) 52bfa_aen_get_max_cfg_entry(void)
50{ 53{
51 return bfa_aen_max_cfg_entry; 54 return bfa_aen_max_cfg_entry;
52} 55}
53 56
54static inline s32 57static inline int
55bfa_aen_get_meminfo(void) 58bfa_aen_get_meminfo(void)
56{ 59{
57 return (sizeof(struct bfa_aen_entry_s) * bfa_aen_get_max_cfg_entry()); 60 return sizeof(struct bfa_aen_entry_s) * bfa_aen_get_max_cfg_entry();
58} 61}
59 62
60static inline s32 63static inline int
61bfa_aen_get_wi(struct bfa_aen_s *aen) 64bfa_aen_get_wi(struct bfa_aen_s *aen)
62{ 65{
63 return aen->write_index; 66 return aen->write_index;
64} 67}
65 68
66static inline s32 69static inline int
67bfa_aen_get_ri(struct bfa_aen_s *aen) 70bfa_aen_get_ri(struct bfa_aen_s *aen)
68{ 71{
69 return aen->read_index; 72 return aen->read_index;
70} 73}
71 74
72static inline s32 75static inline int
73bfa_aen_fetch_count(struct bfa_aen_s *aen, s32 read_index) 76bfa_aen_fetch_count(struct bfa_aen_s *aen, enum bfa_aen_app app_id)
74{ 77{
75 return ((aen->write_index + aen->max_entry) - read_index) 78 bfa_assert((app_id < BFA_AEN_MAX_APP) && (app_id >= bfa_aen_app_bcu));
79 return ((aen->write_index + aen->max_entry) - aen->app_ri[app_id])
76 % aen->max_entry; 80 % aen->max_entry;
77} 81}
78 82
79s32 bfa_aen_init(struct bfa_aen_s *aen, struct bfa_trc_mod_s *trcmod, 83int bfa_aen_init(struct bfa_aen_s *aen, struct bfa_trc_mod_s *trcmod,
80 void *bfad, u32 inst_id, void (*aen_cb_notify)(void *), 84 void *bfad, int bfad_num, void (*aen_cb_notify)(void *),
81 void (*gettimeofday)(struct bfa_timeval_s *)); 85 void (*gettimeofday)(struct bfa_timeval_s *));
82 86
83s32 bfa_aen_post(struct bfa_aen_s *aen, enum bfa_aen_category aen_category, 87void bfa_aen_post(struct bfa_aen_s *aen, enum bfa_aen_category aen_category,
84 int aen_type, union bfa_aen_data_u *aen_data); 88 int aen_type, union bfa_aen_data_u *aen_data);
85 89
86s32 bfa_aen_fetch(struct bfa_aen_s *aen, struct bfa_aen_entry_s *aen_entry, 90bfa_status_t bfa_aen_fetch(struct bfa_aen_s *aen,
87 s32 entry_space, s32 rii, s32 *ri_arr, 91 struct bfa_aen_entry_s *aen_entry,
88 s32 ri_arr_cnt); 92 int entry_req, enum bfa_aen_app app_id, int *entry_ret);
89 93
90s32 bfa_aen_get_inst(struct bfa_aen_s *aen); 94int bfa_aen_get_inst(struct bfa_aen_s *aen);
91 95
92#endif /* __BFA_AEN_H__ */ 96#endif /* __BFA_AEN_H__ */
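
[Editor's note] The fetch-count arithmetic above is ordinary ring-buffer math: with one global write index and a per-application read index (the new app_ri[] array), the entries still pending for an app are ((write + max) - read) % max; the "+ max" keeps the result non-negative after the writer wraps past the reader. A minimal standalone sketch of the same arithmetic, with illustrative names only, not driver code:

    #include <stdio.h>

    #define RING_MAX 8                     /* stand-in for bfa_aen_max_cfg_entry */

    /* Pending entries for one consumer; same formula as bfa_aen_fetch_count(). */
    static int ring_pending(int write_index, int read_index)
    {
            return ((write_index + RING_MAX) - read_index) % RING_MAX;
    }

    int main(void)
    {
            int wi = 10 % RING_MAX;        /* writer has wrapped: 10 posts -> index 2 */
            int ri = 7 % RING_MAX;         /* this app has consumed 7 -> index 7 */

            printf("pending = %d\n", ring_pending(wi, ri));   /* prints 3 */
            return 0;
    }

Keeping a separate read index per application is what lets several consumers drain the same event list independently without copying it.
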
diff --git a/drivers/scsi/bfa/include/bfa.h b/drivers/scsi/bfa/include/bfa.h
index 64c1412c5703..1f5966cfbd16 100644
--- a/drivers/scsi/bfa/include/bfa.h
+++ b/drivers/scsi/bfa/include/bfa.h
@@ -76,11 +76,11 @@ struct bfa_meminfo_s {
76 struct bfa_mem_elem_s meminfo[BFA_MEM_TYPE_MAX]; 76 struct bfa_mem_elem_s meminfo[BFA_MEM_TYPE_MAX];
77}; 77};
78#define bfa_meminfo_kva(_m) \ 78#define bfa_meminfo_kva(_m) \
79 (_m)->meminfo[BFA_MEM_TYPE_KVA - 1].kva_curp 79 ((_m)->meminfo[BFA_MEM_TYPE_KVA - 1].kva_curp)
80#define bfa_meminfo_dma_virt(_m) \ 80#define bfa_meminfo_dma_virt(_m) \
81 (_m)->meminfo[BFA_MEM_TYPE_DMA - 1].kva_curp 81 ((_m)->meminfo[BFA_MEM_TYPE_DMA - 1].kva_curp)
82#define bfa_meminfo_dma_phys(_m) \ 82#define bfa_meminfo_dma_phys(_m) \
83 (_m)->meminfo[BFA_MEM_TYPE_DMA - 1].dma_curp 83 ((_m)->meminfo[BFA_MEM_TYPE_DMA - 1].dma_curp)
84 84
85/** 85/**
86 * Generic Scatter Gather Element used by driver 86 * Generic Scatter Gather Element used by driver
@@ -100,12 +100,32 @@ struct bfa_sge_s {
100/* 100/*
101 * bfa stats interfaces 101 * bfa stats interfaces
102 */ 102 */
103#define bfa_stats(_mod, _stats) (_mod)->stats._stats ++ 103#define bfa_stats(_mod, _stats) ((_mod)->stats._stats++)
104 104
105#define bfa_ioc_get_stats(__bfa, __ioc_stats) \ 105#define bfa_ioc_get_stats(__bfa, __ioc_stats) \
106 bfa_ioc_fetch_stats(&(__bfa)->ioc, __ioc_stats) 106 bfa_ioc_fetch_stats(&(__bfa)->ioc, __ioc_stats)
107#define bfa_ioc_clear_stats(__bfa) \ 107#define bfa_ioc_clear_stats(__bfa) \
108 bfa_ioc_clr_stats(&(__bfa)->ioc) 108 bfa_ioc_clr_stats(&(__bfa)->ioc)
109#define bfa_get_nports(__bfa) \
110 bfa_ioc_get_nports(&(__bfa)->ioc)
111#define bfa_get_adapter_manufacturer(__bfa, __manufacturer) \
112 bfa_ioc_get_adapter_manufacturer(&(__bfa)->ioc, __manufacturer)
113#define bfa_get_adapter_model(__bfa, __model) \
114 bfa_ioc_get_adapter_model(&(__bfa)->ioc, __model)
115#define bfa_get_adapter_serial_num(__bfa, __serial_num) \
116 bfa_ioc_get_adapter_serial_num(&(__bfa)->ioc, __serial_num)
117#define bfa_get_adapter_fw_ver(__bfa, __fw_ver) \
118 bfa_ioc_get_adapter_fw_ver(&(__bfa)->ioc, __fw_ver)
119#define bfa_get_adapter_optrom_ver(__bfa, __optrom_ver) \
120 bfa_ioc_get_adapter_optrom_ver(&(__bfa)->ioc, __optrom_ver)
121#define bfa_get_pci_chip_rev(__bfa, __chip_rev) \
122 bfa_ioc_get_pci_chip_rev(&(__bfa)->ioc, __chip_rev)
123#define bfa_get_ioc_state(__bfa) \
124 bfa_ioc_get_state(&(__bfa)->ioc)
125#define bfa_get_type(__bfa) \
126 bfa_ioc_get_type(&(__bfa)->ioc)
127#define bfa_get_mac(__bfa) \
128 bfa_ioc_get_mac(&(__bfa)->ioc)
109 129
110/* 130/*
111 * bfa API functions 131 * bfa API functions
@@ -136,7 +156,7 @@ void bfa_isr_enable(struct bfa_s *bfa);
136void bfa_isr_disable(struct bfa_s *bfa); 156void bfa_isr_disable(struct bfa_s *bfa);
137void bfa_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap, 157void bfa_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
138 u32 *num_vecs, u32 *max_vec_bit); 158 u32 *num_vecs, u32 *max_vec_bit);
139#define bfa_msix(__bfa, __vec) (__bfa)->msix.handler[__vec](__bfa, __vec) 159#define bfa_msix(__bfa, __vec) ((__bfa)->msix.handler[__vec](__bfa, __vec))
140 160
141void bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q); 161void bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q);
142void bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q); 162void bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q);
@@ -161,6 +181,7 @@ bfa_status_t bfa_iocfc_israttr_set(struct bfa_s *bfa,
161void bfa_iocfc_enable(struct bfa_s *bfa); 181void bfa_iocfc_enable(struct bfa_s *bfa);
162void bfa_iocfc_disable(struct bfa_s *bfa); 182void bfa_iocfc_disable(struct bfa_s *bfa);
163void bfa_ioc_auto_recover(bfa_boolean_t auto_recover); 183void bfa_ioc_auto_recover(bfa_boolean_t auto_recover);
184void bfa_chip_reset(struct bfa_s *bfa);
164void bfa_cb_ioc_disable(void *bfad); 185void bfa_cb_ioc_disable(void *bfad);
165void bfa_timer_tick(struct bfa_s *bfa); 186void bfa_timer_tick(struct bfa_s *bfa);
166#define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout) \ 187#define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout) \
@@ -171,6 +192,7 @@ void bfa_timer_tick(struct bfa_s *bfa);
171 */ 192 */
172bfa_status_t bfa_debug_fwtrc(struct bfa_s *bfa, void *trcdata, int *trclen); 193bfa_status_t bfa_debug_fwtrc(struct bfa_s *bfa, void *trcdata, int *trclen);
173bfa_status_t bfa_debug_fwsave(struct bfa_s *bfa, void *trcdata, int *trclen); 194bfa_status_t bfa_debug_fwsave(struct bfa_s *bfa, void *trcdata, int *trclen);
195void bfa_debug_fwsave_clear(struct bfa_s *bfa);
174 196
175#include "bfa_priv.h" 197#include "bfa_priv.h"
176 198
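
[Editor's note] The new bfa_get_* macros are thin forwarding wrappers so callers can query adapter attributes from a bfa instance without reaching into bfa->ioc directly; each takes a caller-owned output buffer. A minimal sketch of the calling pattern (the buffer sizes here are illustrative; the driver sizes these with its own BFA_ADAPTER_*_LEN constants):

    static void show_adapter(struct bfa_s *bfa)
    {
            char model[64];                /* illustrative size, see note above */
            char fw_ver[64];

            /* expands to bfa_ioc_get_adapter_model(&bfa->ioc, model) */
            bfa_get_adapter_model(bfa, model);
            bfa_get_adapter_fw_ver(bfa, fw_ver);

            printk(KERN_INFO "adapter %s, fw %s\n", model, fw_ver);
    }
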
diff --git a/drivers/scsi/bfa/include/bfa_svc.h b/drivers/scsi/bfa/include/bfa_svc.h
index 0c80b74f72ef..1349b99a3c6d 100644
--- a/drivers/scsi/bfa/include/bfa_svc.h
+++ b/drivers/scsi/bfa/include/bfa_svc.h
@@ -26,6 +26,7 @@ struct bfa_fcxp_s;
26#include <defs/bfa_defs_pport.h> 26#include <defs/bfa_defs_pport.h>
27#include <defs/bfa_defs_rport.h> 27#include <defs/bfa_defs_rport.h>
28#include <defs/bfa_defs_qos.h> 28#include <defs/bfa_defs_qos.h>
29#include <defs/bfa_defs_fcport.h>
29#include <cs/bfa_sm.h> 30#include <cs/bfa_sm.h>
30#include <bfa.h> 31#include <bfa.h>
31 32
@@ -34,10 +35,10 @@ struct bfa_fcxp_s;
34 */ 35 */
35struct bfa_rport_info_s { 36struct bfa_rport_info_s {
36 u16 max_frmsz; /* max rcv pdu size */ 37 u16 max_frmsz; /* max rcv pdu size */
37 u32 pid : 24, /* remote port ID */ 38 u32 pid:24, /* remote port ID */
38 lp_tag : 8; 39 lp_tag:8; /* tag */
39 u32 local_pid : 24, /* local port ID */ 40 u32 local_pid:24, /* local port ID */
40 cisc : 8; /* CIRO supported */ 41 cisc:8; /* CIRO supported */
41 u8 fc_class; /* supported FC classes. enum fc_cos */ 42 u8 fc_class; /* supported FC classes. enum fc_cos */
42 u8 vf_en; /* virtual fabric enable */ 43 u8 vf_en; /* virtual fabric enable */
43 u16 vf_id; /* virtual fabric ID */ 44 u16 vf_id; /* virtual fabric ID */
@@ -54,7 +55,7 @@ struct bfa_rport_s {
54 void *rport_drv; /* fcs/driver rport object */ 55 void *rport_drv; /* fcs/driver rport object */
55 u16 fw_handle; /* firmware rport handle */ 56 u16 fw_handle; /* firmware rport handle */
56 u16 rport_tag; /* BFA rport tag */ 57 u16 rport_tag; /* BFA rport tag */
57 struct bfa_rport_info_s rport_info; /* rport info from *fcs/driver */ 58 struct bfa_rport_info_s rport_info; /* rport info from fcs/driver */
58 struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */ 59 struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
59 struct bfa_cb_qe_s hcb_qe; /* BFA callback qelem */ 60 struct bfa_cb_qe_s hcb_qe; /* BFA callback qelem */
60 struct bfa_rport_hal_stats_s stats; /* BFA rport statistics */ 61 struct bfa_rport_hal_stats_s stats; /* BFA rport statistics */
@@ -101,7 +102,7 @@ struct bfa_uf_buf_s {
101struct bfa_uf_s { 102struct bfa_uf_s {
102 struct list_head qe; /* queue element */ 103 struct list_head qe; /* queue element */
103 struct bfa_s *bfa; /* bfa instance */ 104 struct bfa_s *bfa; /* bfa instance */
104 u16 uf_tag; /* identifying tag f/w messages */ 105 u16 uf_tag; /* identifying tag fw msgs */
105 u16 vf_id; 106 u16 vf_id;
106 u16 src_rport_handle; 107 u16 src_rport_handle;
107 u16 rsvd; 108 u16 rsvd;
@@ -127,7 +128,7 @@ struct bfa_lps_s {
127 u8 reqq; /* lport request queue */ 128 u8 reqq; /* lport request queue */
128 u8 alpa; /* ALPA for loop topologies */ 129 u8 alpa; /* ALPA for loop topologies */
129 u32 lp_pid; /* lport port ID */ 130 u32 lp_pid; /* lport port ID */
130 bfa_boolean_t fdisc; /* send FDISC instead of FLOGI*/ 131 bfa_boolean_t fdisc; /* send FDISC instead of FLOGI */
131 bfa_boolean_t auth_en; /* enable authentication */ 132 bfa_boolean_t auth_en; /* enable authentication */
132 bfa_boolean_t auth_req; /* authentication required */ 133 bfa_boolean_t auth_req; /* authentication required */
133 bfa_boolean_t npiv_en; /* NPIV is allowed by peer */ 134 bfa_boolean_t npiv_en; /* NPIV is allowed by peer */
@@ -151,60 +152,69 @@ struct bfa_lps_s {
151 bfa_eproto_status_t ext_status; 152 bfa_eproto_status_t ext_status;
152}; 153};
153 154
155#define BFA_FCPORT(_bfa) (&((_bfa)->modules.port))
156
154/* 157/*
155 * bfa pport API functions 158 * bfa pport API functions
156 */ 159 */
157bfa_status_t bfa_pport_enable(struct bfa_s *bfa); 160bfa_status_t bfa_fcport_enable(struct bfa_s *bfa);
158bfa_status_t bfa_pport_disable(struct bfa_s *bfa); 161bfa_status_t bfa_fcport_disable(struct bfa_s *bfa);
159bfa_status_t bfa_pport_cfg_speed(struct bfa_s *bfa, 162bfa_status_t bfa_fcport_cfg_speed(struct bfa_s *bfa,
160 enum bfa_pport_speed speed); 163 enum bfa_pport_speed speed);
161enum bfa_pport_speed bfa_pport_get_speed(struct bfa_s *bfa); 164enum bfa_pport_speed bfa_fcport_get_speed(struct bfa_s *bfa);
162bfa_status_t bfa_pport_cfg_topology(struct bfa_s *bfa, 165bfa_status_t bfa_fcport_cfg_topology(struct bfa_s *bfa,
163 enum bfa_pport_topology topo); 166 enum bfa_pport_topology topo);
164enum bfa_pport_topology bfa_pport_get_topology(struct bfa_s *bfa); 167enum bfa_pport_topology bfa_fcport_get_topology(struct bfa_s *bfa);
165bfa_status_t bfa_pport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa); 168bfa_status_t bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa);
166bfa_boolean_t bfa_pport_get_hardalpa(struct bfa_s *bfa, u8 *alpa); 169bfa_boolean_t bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa);
167u8 bfa_pport_get_myalpa(struct bfa_s *bfa); 170u8 bfa_fcport_get_myalpa(struct bfa_s *bfa);
168bfa_status_t bfa_pport_clr_hardalpa(struct bfa_s *bfa); 171bfa_status_t bfa_fcport_clr_hardalpa(struct bfa_s *bfa);
169bfa_status_t bfa_pport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxsize); 172bfa_status_t bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxsize);
170u16 bfa_pport_get_maxfrsize(struct bfa_s *bfa); 173u16 bfa_fcport_get_maxfrsize(struct bfa_s *bfa);
171u32 bfa_pport_mypid(struct bfa_s *bfa); 174u32 bfa_fcport_mypid(struct bfa_s *bfa);
172u8 bfa_pport_get_rx_bbcredit(struct bfa_s *bfa); 175u8 bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa);
173bfa_status_t bfa_pport_trunk_enable(struct bfa_s *bfa, u8 bitmap); 176bfa_status_t bfa_fcport_trunk_enable(struct bfa_s *bfa, u8 bitmap);
174bfa_status_t bfa_pport_trunk_disable(struct bfa_s *bfa); 177bfa_status_t bfa_fcport_trunk_disable(struct bfa_s *bfa);
175bfa_boolean_t bfa_pport_trunk_query(struct bfa_s *bfa, u32 *bitmap); 178bfa_boolean_t bfa_fcport_trunk_query(struct bfa_s *bfa, u32 *bitmap);
176void bfa_pport_get_attr(struct bfa_s *bfa, struct bfa_pport_attr_s *attr); 179void bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_pport_attr_s *attr);
177wwn_t bfa_pport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node); 180wwn_t bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node);
178bfa_status_t bfa_pport_get_stats(struct bfa_s *bfa, 181void bfa_fcport_event_register(struct bfa_s *bfa,
179 union bfa_pport_stats_u *stats,
180 bfa_cb_pport_t cbfn, void *cbarg);
181bfa_status_t bfa_pport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn,
182 void *cbarg);
183void bfa_pport_event_register(struct bfa_s *bfa,
184 void (*event_cbfn) (void *cbarg, 182 void (*event_cbfn) (void *cbarg,
185 bfa_pport_event_t event), void *event_cbarg); 183 bfa_pport_event_t event), void *event_cbarg);
186bfa_boolean_t bfa_pport_is_disabled(struct bfa_s *bfa); 184bfa_boolean_t bfa_fcport_is_disabled(struct bfa_s *bfa);
187void bfa_pport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off); 185void bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off);
188void bfa_pport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off); 186void bfa_fcport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off);
189bfa_status_t bfa_pport_cfg_ratelim_speed(struct bfa_s *bfa, 187bfa_status_t bfa_fcport_cfg_ratelim_speed(struct bfa_s *bfa,
190 enum bfa_pport_speed speed); 188 enum bfa_pport_speed speed);
191enum bfa_pport_speed bfa_pport_get_ratelim_speed(struct bfa_s *bfa); 189enum bfa_pport_speed bfa_fcport_get_ratelim_speed(struct bfa_s *bfa);
192 190
193void bfa_pport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit); 191void bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit);
194void bfa_pport_busy(struct bfa_s *bfa, bfa_boolean_t status); 192void bfa_fcport_busy(struct bfa_s *bfa, bfa_boolean_t status);
195void bfa_pport_beacon(struct bfa_s *bfa, bfa_boolean_t beacon, 193void bfa_fcport_beacon(struct bfa_s *bfa, bfa_boolean_t beacon,
196 bfa_boolean_t link_e2e_beacon); 194 bfa_boolean_t link_e2e_beacon);
197void bfa_cb_pport_event(void *cbarg, bfa_pport_event_t event); 195void bfa_cb_pport_event(void *cbarg, bfa_pport_event_t event);
198void bfa_pport_qos_get_attr(struct bfa_s *bfa, struct bfa_qos_attr_s *qos_attr); 196void bfa_fcport_qos_get_attr(struct bfa_s *bfa,
199void bfa_pport_qos_get_vc_attr(struct bfa_s *bfa, 197 struct bfa_qos_attr_s *qos_attr);
198void bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa,
200 struct bfa_qos_vc_attr_s *qos_vc_attr); 199 struct bfa_qos_vc_attr_s *qos_vc_attr);
201bfa_status_t bfa_pport_get_qos_stats(struct bfa_s *bfa, 200bfa_status_t bfa_fcport_get_qos_stats(struct bfa_s *bfa,
202 union bfa_pport_stats_u *stats, 201 union bfa_fcport_stats_u *stats,
203 bfa_cb_pport_t cbfn, void *cbarg); 202 bfa_cb_pport_t cbfn, void *cbarg);
204bfa_status_t bfa_pport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, 203bfa_status_t bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn,
205 void *cbarg); 204 void *cbarg);
206bfa_boolean_t bfa_pport_is_ratelim(struct bfa_s *bfa); 205bfa_status_t bfa_fcport_get_fcoe_stats(struct bfa_s *bfa,
207bfa_boolean_t bfa_pport_is_linkup(struct bfa_s *bfa); 206 union bfa_fcport_stats_u *stats,
207 bfa_cb_pport_t cbfn, void *cbarg);
208bfa_status_t bfa_fcport_clear_fcoe_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn,
209 void *cbarg);
210
211bfa_boolean_t bfa_fcport_is_ratelim(struct bfa_s *bfa);
212bfa_boolean_t bfa_fcport_is_linkup(struct bfa_s *bfa);
213bfa_status_t bfa_fcport_get_stats(struct bfa_s *bfa,
214 union bfa_fcport_stats_u *stats,
215 bfa_cb_pport_t cbfn, void *cbarg);
216bfa_status_t bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn,
217 void *cbarg);
208 218
209/* 219/*
210 * bfa rport API functions 220 * bfa rport API functions
@@ -293,6 +303,7 @@ void bfa_uf_free(struct bfa_uf_s *uf);
293 * bfa lport service api 303 * bfa lport service api
294 */ 304 */
295 305
306u32 bfa_lps_get_max_vport(struct bfa_s *bfa);
296struct bfa_lps_s *bfa_lps_alloc(struct bfa_s *bfa); 307struct bfa_lps_s *bfa_lps_alloc(struct bfa_s *bfa);
297void bfa_lps_delete(struct bfa_lps_s *lps); 308void bfa_lps_delete(struct bfa_lps_s *lps);
298void bfa_lps_discard(struct bfa_lps_s *lps); 309void bfa_lps_discard(struct bfa_lps_s *lps);
@@ -315,10 +326,12 @@ wwn_t bfa_lps_get_peer_pwwn(struct bfa_lps_s *lps);
315wwn_t bfa_lps_get_peer_nwwn(struct bfa_lps_s *lps); 326wwn_t bfa_lps_get_peer_nwwn(struct bfa_lps_s *lps);
316u8 bfa_lps_get_lsrjt_rsn(struct bfa_lps_s *lps); 327u8 bfa_lps_get_lsrjt_rsn(struct bfa_lps_s *lps);
317u8 bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps); 328u8 bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps);
329mac_t bfa_lps_get_lp_mac(struct bfa_lps_s *lps);
318void bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status); 330void bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status);
319void bfa_cb_lps_flogo_comp(void *bfad, void *uarg); 331void bfa_cb_lps_flogo_comp(void *bfad, void *uarg);
320void bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status); 332void bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status);
321void bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg); 333void bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg);
334void bfa_cb_lps_cvl_event(void *bfad, void *uarg);
322 335
323#endif /* __BFA_SVC_H__ */ 336#endif /* __BFA_SVC_H__ */
324 337
diff --git a/drivers/scsi/bfa/include/bfa_timer.h b/drivers/scsi/bfa/include/bfa_timer.h
index e407103fa565..f71087448222 100644
--- a/drivers/scsi/bfa/include/bfa_timer.h
+++ b/drivers/scsi/bfa/include/bfa_timer.h
@@ -41,7 +41,7 @@ struct bfa_timer_mod_s {
41 struct list_head timer_q; 41 struct list_head timer_q;
42}; 42};
43 43
44#define BFA_TIMER_FREQ 500 /**< specified in millisecs */ 44#define BFA_TIMER_FREQ 200 /**< specified in millisecs */
45 45
46void bfa_timer_beat(struct bfa_timer_mod_s *mod); 46void bfa_timer_beat(struct bfa_timer_mod_s *mod);
47void bfa_timer_init(struct bfa_timer_mod_s *mod); 47void bfa_timer_init(struct bfa_timer_mod_s *mod);
diff --git a/drivers/scsi/bfa/include/bfi/bfi.h b/drivers/scsi/bfa/include/bfi/bfi.h
index 6cadfe0d4ba1..a550e80cabd2 100644
--- a/drivers/scsi/bfa/include/bfi/bfi.h
+++ b/drivers/scsi/bfa/include/bfi/bfi.h
@@ -93,13 +93,13 @@ union bfi_addr_u {
93 */ 93 */
94struct bfi_sge_s { 94struct bfi_sge_s {
95#ifdef __BIGENDIAN 95#ifdef __BIGENDIAN
96 u32 flags : 2, 96 u32 flags:2,
97 rsvd : 2, 97 rsvd:2,
98 sg_len : 28; 98 sg_len:28;
99#else 99#else
100 u32 sg_len : 28, 100 u32 sg_len:28,
101 rsvd : 2, 101 rsvd:2,
102 flags : 2; 102 flags:2;
103#endif 103#endif
104 union bfi_addr_u sga; 104 union bfi_addr_u sga;
105}; 105};
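
[Editor's note] The mirrored #ifdef __BIGENDIAN field order in bfi_sge_s is the standard idiom for keeping a packed bitfield's on-wire layout identical regardless of host endianness, since C allocates bitfields within a storage unit in an implementation-defined, effectively endian-dependent order. The pattern reduced to its essentials (a generic sketch, not driver code; __BIGENDIAN is bfa's own macro, the generic kernel equivalent is __BIG_ENDIAN_BITFIELD):

    struct wire_word {
    #ifdef __BIGENDIAN
            u32     flags:2,               /* lands in the most significant bits */
                    rsvd:2,
                    sg_len:28;
    #else
            u32     sg_len:28,             /* little endian: low-order fields first */
                    rsvd:2,
                    flags:2;
    #endif
    };
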
@@ -143,8 +143,8 @@ enum bfi_mclass {
143 BFI_MC_IOC = 1, /* IO Controller (IOC) */ 143 BFI_MC_IOC = 1, /* IO Controller (IOC) */
144 BFI_MC_DIAG = 2, /* Diagnostic Msgs */ 144 BFI_MC_DIAG = 2, /* Diagnostic Msgs */
145 BFI_MC_FLASH = 3, /* Flash message class */ 145 BFI_MC_FLASH = 3, /* Flash message class */
146 BFI_MC_CEE = 4, 146 BFI_MC_CEE = 4, /* CEE */
147 BFI_MC_FC_PORT = 5, /* FC port */ 147 BFI_MC_FCPORT = 5, /* FC port */
148 BFI_MC_IOCFC = 6, /* FC - IO Controller (IOC) */ 148 BFI_MC_IOCFC = 6, /* FC - IO Controller (IOC) */
149 BFI_MC_LL = 7, /* Link Layer */ 149 BFI_MC_LL = 7, /* Link Layer */
150 BFI_MC_UF = 8, /* Unsolicited frame receive */ 150 BFI_MC_UF = 8, /* Unsolicited frame receive */
diff --git a/drivers/scsi/bfa/include/bfi/bfi_cbreg.h b/drivers/scsi/bfa/include/bfi/bfi_cbreg.h
index b3bb52b565b1..a51ee61ddb19 100644
--- a/drivers/scsi/bfa/include/bfi/bfi_cbreg.h
+++ b/drivers/scsi/bfa/include/bfi/bfi_cbreg.h
@@ -177,7 +177,21 @@
177#define __PSS_LMEM_INIT_EN 0x00000100 177#define __PSS_LMEM_INIT_EN 0x00000100
178#define __PSS_LPU1_RESET 0x00000002 178#define __PSS_LPU1_RESET 0x00000002
179#define __PSS_LPU0_RESET 0x00000001 179#define __PSS_LPU0_RESET 0x00000001
180 180#define PSS_ERR_STATUS_REG 0x00018810
181#define __PSS_LMEM1_CORR_ERR 0x00000800
182#define __PSS_LMEM0_CORR_ERR 0x00000400
183#define __PSS_LMEM1_UNCORR_ERR 0x00000200
184#define __PSS_LMEM0_UNCORR_ERR 0x00000100
185#define __PSS_BAL_PERR 0x00000080
186#define __PSS_DIP_IF_ERR 0x00000040
187#define __PSS_IOH_IF_ERR 0x00000020
188#define __PSS_TDS_IF_ERR 0x00000010
189#define __PSS_RDS_IF_ERR 0x00000008
190#define __PSS_SGM_IF_ERR 0x00000004
191#define __PSS_LPU1_RAM_ERR 0x00000002
192#define __PSS_LPU0_RAM_ERR 0x00000001
193#define ERR_SET_REG 0x00018818
194#define __PSS_ERR_STATUS_SET 0x00000fff
181 195
182/* 196/*
183 * These definitions are either in error/missing in spec. Its auto-generated 197 * These definitions are either in error/missing in spec. Its auto-generated
diff --git a/drivers/scsi/bfa/include/bfi/bfi_ctreg.h b/drivers/scsi/bfa/include/bfi/bfi_ctreg.h
index d3caa58c0a0a..57a8497105af 100644
--- a/drivers/scsi/bfa/include/bfi/bfi_ctreg.h
+++ b/drivers/scsi/bfa/include/bfi/bfi_ctreg.h
@@ -430,6 +430,31 @@ enum {
430#define __PSS_LMEM_INIT_EN 0x00000100 430#define __PSS_LMEM_INIT_EN 0x00000100
431#define __PSS_LPU1_RESET 0x00000002 431#define __PSS_LPU1_RESET 0x00000002
432#define __PSS_LPU0_RESET 0x00000001 432#define __PSS_LPU0_RESET 0x00000001
433#define PSS_ERR_STATUS_REG 0x00018810
434#define __PSS_LPU1_TCM_READ_ERR 0x00200000
435#define __PSS_LPU0_TCM_READ_ERR 0x00100000
436#define __PSS_LMEM5_CORR_ERR 0x00080000
437#define __PSS_LMEM4_CORR_ERR 0x00040000
438#define __PSS_LMEM3_CORR_ERR 0x00020000
439#define __PSS_LMEM2_CORR_ERR 0x00010000
440#define __PSS_LMEM1_CORR_ERR 0x00008000
441#define __PSS_LMEM0_CORR_ERR 0x00004000
442#define __PSS_LMEM5_UNCORR_ERR 0x00002000
443#define __PSS_LMEM4_UNCORR_ERR 0x00001000
444#define __PSS_LMEM3_UNCORR_ERR 0x00000800
445#define __PSS_LMEM2_UNCORR_ERR 0x00000400
446#define __PSS_LMEM1_UNCORR_ERR 0x00000200
447#define __PSS_LMEM0_UNCORR_ERR 0x00000100
448#define __PSS_BAL_PERR 0x00000080
449#define __PSS_DIP_IF_ERR 0x00000040
450#define __PSS_IOH_IF_ERR 0x00000020
451#define __PSS_TDS_IF_ERR 0x00000010
452#define __PSS_RDS_IF_ERR 0x00000008
453#define __PSS_SGM_IF_ERR 0x00000004
454#define __PSS_LPU1_RAM_ERR 0x00000002
455#define __PSS_LPU0_RAM_ERR 0x00000001
456#define ERR_SET_REG 0x00018818
457#define __PSS_ERR_STATUS_SET 0x003fffff
433#define HQM_QSET0_RXQ_DRBL_P0 0x00038000 458#define HQM_QSET0_RXQ_DRBL_P0 0x00038000
434#define __RXQ0_ADD_VECTORS_P 0x80000000 459#define __RXQ0_ADD_VECTORS_P 0x80000000
435#define __RXQ0_STOP_P 0x40000000 460#define __RXQ0_STOP_P 0x40000000
@@ -589,6 +614,7 @@ enum {
589#define __HFN_INT_MBOX_LPU1 0x00200000U 614#define __HFN_INT_MBOX_LPU1 0x00200000U
590#define __HFN_INT_MBOX1_LPU0 0x00400000U 615#define __HFN_INT_MBOX1_LPU0 0x00400000U
591#define __HFN_INT_MBOX1_LPU1 0x00800000U 616#define __HFN_INT_MBOX1_LPU1 0x00800000U
617#define __HFN_INT_LL_HALT 0x01000000U
592#define __HFN_INT_CPE_MASK 0x000000ffU 618#define __HFN_INT_CPE_MASK 0x000000ffU
593#define __HFN_INT_RME_MASK 0x0000ff00U 619#define __HFN_INT_RME_MASK 0x0000ff00U
594 620
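
[Editor's note] The new PSS_ERR_STATUS_REG bit definitions give the host enough detail to classify local-memory and interface errors instead of treating every halt alike. A sketch of how such a register would typically be decoded, with assumptions flagged: register access is simplified to a bare readl() (the driver goes through its own bfa_reg_read-style wrappers) and the handler names are hypothetical:

    static void pss_check_sketch(void __iomem *rb)
    {
            u32 pss_err = readl(rb + PSS_ERR_STATUS_REG);

            if (pss_err & (__PSS_LMEM0_UNCORR_ERR | __PSS_LMEM1_UNCORR_ERR))
                    handle_fatal_pss_error(pss_err);  /* hypothetical: fatal, reset IOC */
            else if (pss_err & (__PSS_LMEM0_CORR_ERR | __PSS_LMEM1_CORR_ERR))
                    corr_err_count++;                 /* corrected by hw; just count */
    }
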
diff --git a/drivers/scsi/bfa/include/bfi/bfi_ioc.h b/drivers/scsi/bfa/include/bfi/bfi_ioc.h
index 026e9c06ae97..a0158aac0024 100644
--- a/drivers/scsi/bfa/include/bfi/bfi_ioc.h
+++ b/drivers/scsi/bfa/include/bfi/bfi_ioc.h
@@ -123,7 +123,7 @@ enum bfi_ioc_state {
123 BFI_IOC_DISABLING = 5, /* IOC is being disabled */ 123 BFI_IOC_DISABLING = 5, /* IOC is being disabled */
124 BFI_IOC_DISABLED = 6, /* IOC is disabled */ 124 BFI_IOC_DISABLED = 6, /* IOC is disabled */
125 BFI_IOC_CFG_DISABLED = 7, /* IOC is being disabled;transient */ 125 BFI_IOC_CFG_DISABLED = 7, /* IOC is being disabled;transient */
126 BFI_IOC_HBFAIL = 8, /* IOC heart-beat failure */ 126 BFI_IOC_FAIL = 8, /* IOC heart-beat failure */
127 BFI_IOC_MEMTEST = 9, /* IOC is doing memtest */ 127 BFI_IOC_MEMTEST = 9, /* IOC is doing memtest */
128}; 128};
129 129
@@ -142,7 +142,7 @@ enum {
142 BFI_ADAPTER_UNSUPP = 0x400000, /* unknown adapter type */ 142 BFI_ADAPTER_UNSUPP = 0x400000, /* unknown adapter type */
143}; 143};
144 144
145#define BFI_ADAPTER_GETP(__prop,__adap_prop) \ 145#define BFI_ADAPTER_GETP(__prop, __adap_prop) \
146 (((__adap_prop) & BFI_ADAPTER_ ## __prop ## _MK) >> \ 146 (((__adap_prop) & BFI_ADAPTER_ ## __prop ## _MK) >> \
147 BFI_ADAPTER_ ## __prop ## _SH) 147 BFI_ADAPTER_ ## __prop ## _SH)
148#define BFI_ADAPTER_SETP(__prop, __val) \ 148#define BFI_ADAPTER_SETP(__prop, __val) \
diff --git a/drivers/scsi/bfa/include/bfi/bfi_lps.h b/drivers/scsi/bfa/include/bfi/bfi_lps.h
index 414b0e30f6ef..7ed31bbb8696 100644
--- a/drivers/scsi/bfa/include/bfi/bfi_lps.h
+++ b/drivers/scsi/bfa/include/bfi/bfi_lps.h
@@ -30,6 +30,7 @@ enum bfi_lps_h2i_msgs {
30enum bfi_lps_i2h_msgs { 30enum bfi_lps_i2h_msgs {
31 BFI_LPS_H2I_LOGIN_RSP = BFA_I2HM(1), 31 BFI_LPS_H2I_LOGIN_RSP = BFA_I2HM(1),
32 BFI_LPS_H2I_LOGOUT_RSP = BFA_I2HM(2), 32 BFI_LPS_H2I_LOGOUT_RSP = BFA_I2HM(2),
33 BFI_LPS_H2I_CVL_EVENT = BFA_I2HM(3),
33}; 34};
34 35
35struct bfi_lps_login_req_s { 36struct bfi_lps_login_req_s {
@@ -55,8 +56,8 @@ struct bfi_lps_login_rsp_s {
55 u16 bb_credit; 56 u16 bb_credit;
56 u8 f_port; 57 u8 f_port;
57 u8 npiv_en; 58 u8 npiv_en;
58 u32 lp_pid : 24; 59 u32 lp_pid:24;
59 u32 auth_req : 8; 60 u32 auth_req:8;
60 mac_t lp_mac; 61 mac_t lp_mac;
61 mac_t fcf_mac; 62 mac_t fcf_mac;
62 u8 ext_status; 63 u8 ext_status;
@@ -77,6 +78,12 @@ struct bfi_lps_logout_rsp_s {
77 u8 rsvd[2]; 78 u8 rsvd[2];
78}; 79};
79 80
81struct bfi_lps_cvl_event_s {
82 struct bfi_mhdr_s mh; /* common msg header */
83 u8 lp_tag;
84 u8 rsvd[3];
85};
86
80union bfi_lps_h2i_msg_u { 87union bfi_lps_h2i_msg_u {
81 struct bfi_mhdr_s *msg; 88 struct bfi_mhdr_s *msg;
82 struct bfi_lps_login_req_s *login_req; 89 struct bfi_lps_login_req_s *login_req;
@@ -87,6 +94,7 @@ union bfi_lps_i2h_msg_u {
87 struct bfi_msg_s *msg; 94 struct bfi_msg_s *msg;
88 struct bfi_lps_login_rsp_s *login_rsp; 95 struct bfi_lps_login_rsp_s *login_rsp;
89 struct bfi_lps_logout_rsp_s *logout_rsp; 96 struct bfi_lps_logout_rsp_s *logout_rsp;
97 struct bfi_lps_cvl_event_s *cvl_event;
90}; 98};
91 99
92#pragma pack() 100#pragma pack()
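
[Editor's note] With the new CVL message, the LPS receive path now demultiplexes three firmware-to-host message types through the union above. A reduced sketch of that dispatch; the handler names are hypothetical (the real demux lives in the LPS module's ISR), and it assumes the common bfi_mhdr_s header's msg_id field from bfi.h:

    static void lps_isr_sketch(struct bfi_msg_s *m)
    {
            union bfi_lps_i2h_msg_u msg;

            msg.msg = m;                   /* union members alias one pointer */
            switch (m->mhdr.msg_id) {
            case BFI_LPS_H2I_LOGIN_RSP:    /* the i2h enum values really are named H2I */
                    lps_login_rsp(msg.login_rsp);          /* hypothetical */
                    break;
            case BFI_LPS_H2I_LOGOUT_RSP:
                    lps_logout_rsp(msg.logout_rsp);        /* hypothetical */
                    break;
            case BFI_LPS_H2I_CVL_EVENT:
                    /* fabric cleared the virtual link for this logical port */
                    lps_cvl_event(msg.cvl_event->lp_tag);  /* hypothetical */
                    break;
            }
    }
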
diff --git a/drivers/scsi/bfa/include/bfi/bfi_pport.h b/drivers/scsi/bfa/include/bfi/bfi_pport.h
index c96d246851af..50dcf45c7470 100644
--- a/drivers/scsi/bfa/include/bfi/bfi_pport.h
+++ b/drivers/scsi/bfa/include/bfi/bfi_pport.h
@@ -22,163 +22,97 @@
22 22
23#pragma pack(1) 23#pragma pack(1)
24 24
25enum bfi_pport_h2i { 25enum bfi_fcport_h2i {
26 BFI_PPORT_H2I_ENABLE_REQ = (1), 26 BFI_FCPORT_H2I_ENABLE_REQ = (1),
27 BFI_PPORT_H2I_DISABLE_REQ = (2), 27 BFI_FCPORT_H2I_DISABLE_REQ = (2),
28 BFI_PPORT_H2I_GET_STATS_REQ = (3), 28 BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ = (3),
29 BFI_PPORT_H2I_CLEAR_STATS_REQ = (4), 29 BFI_FCPORT_H2I_STATS_GET_REQ = (4),
30 BFI_PPORT_H2I_SET_SVC_PARAMS_REQ = (5), 30 BFI_FCPORT_H2I_STATS_CLEAR_REQ = (5),
31 BFI_PPORT_H2I_ENABLE_RX_VF_TAG_REQ = (6),
32 BFI_PPORT_H2I_ENABLE_TX_VF_TAG_REQ = (7),
33 BFI_PPORT_H2I_GET_QOS_STATS_REQ = (8),
34 BFI_PPORT_H2I_CLEAR_QOS_STATS_REQ = (9),
35}; 31};
36 32
37enum bfi_pport_i2h { 33enum bfi_fcport_i2h {
38 BFI_PPORT_I2H_ENABLE_RSP = BFA_I2HM(1), 34 BFI_FCPORT_I2H_ENABLE_RSP = BFA_I2HM(1),
39 BFI_PPORT_I2H_DISABLE_RSP = BFA_I2HM(2), 35 BFI_FCPORT_I2H_DISABLE_RSP = BFA_I2HM(2),
40 BFI_PPORT_I2H_GET_STATS_RSP = BFA_I2HM(3), 36 BFI_FCPORT_I2H_SET_SVC_PARAMS_RSP = BFA_I2HM(3),
41 BFI_PPORT_I2H_CLEAR_STATS_RSP = BFA_I2HM(4), 37 BFI_FCPORT_I2H_STATS_GET_RSP = BFA_I2HM(4),
42 BFI_PPORT_I2H_SET_SVC_PARAMS_RSP = BFA_I2HM(5), 38 BFI_FCPORT_I2H_STATS_CLEAR_RSP = BFA_I2HM(5),
43 BFI_PPORT_I2H_ENABLE_RX_VF_TAG_RSP = BFA_I2HM(6), 39 BFI_FCPORT_I2H_EVENT = BFA_I2HM(6),
44 BFI_PPORT_I2H_ENABLE_TX_VF_TAG_RSP = BFA_I2HM(7),
45 BFI_PPORT_I2H_EVENT = BFA_I2HM(8),
46 BFI_PPORT_I2H_GET_QOS_STATS_RSP = BFA_I2HM(9),
47 BFI_PPORT_I2H_CLEAR_QOS_STATS_RSP = BFA_I2HM(10),
48}; 40};
49 41
50/** 42/**
51 * Generic REQ type 43 * Generic REQ type
52 */ 44 */
53struct bfi_pport_generic_req_s { 45struct bfi_fcport_req_s {
54 struct bfi_mhdr_s mh; /* msg header */ 46 struct bfi_mhdr_s mh; /* msg header */
55 u32 msgtag; /* msgtag for reply */ 47 u32 msgtag; /* msgtag for reply */
56}; 48};
57 49
58/** 50/**
59 * Generic RSP type 51 * Generic RSP type
60 */ 52 */
61struct bfi_pport_generic_rsp_s { 53struct bfi_fcport_rsp_s {
62 struct bfi_mhdr_s mh; /* common msg header */ 54 struct bfi_mhdr_s mh; /* common msg header */
63 u8 status; /* port enable status */ 55 u8 status; /* port enable status */
64 u8 rsvd[3]; 56 u8 rsvd[3];
65 u32 msgtag; /* msgtag for reply */ 57 u32 msgtag; /* msgtag for reply */
66}; 58};
67 59
68/** 60/**
69 * BFI_PPORT_H2I_ENABLE_REQ 61 * BFI_FCPORT_H2I_ENABLE_REQ
70 */ 62 */
71struct bfi_pport_enable_req_s { 63struct bfi_fcport_enable_req_s {
72 struct bfi_mhdr_s mh; /* msg header */ 64 struct bfi_mhdr_s mh; /* msg header */
73 u32 rsvd1; 65 u32 rsvd1;
74 wwn_t nwwn; /* node wwn of physical port */ 66 wwn_t nwwn; /* node wwn of physical port */
75 wwn_t pwwn; /* port wwn of physical port */ 67 wwn_t pwwn; /* port wwn of physical port */
76 struct bfa_pport_cfg_s port_cfg; /* port configuration */ 68 struct bfa_pport_cfg_s port_cfg; /* port configuration */
77 union bfi_addr_u stats_dma_addr; /* DMA address for stats */ 69 union bfi_addr_u stats_dma_addr; /* DMA address for stats */
78 u32 msgtag; /* msgtag for reply */ 70 u32 msgtag; /* msgtag for reply */
79 u32 rsvd2; 71 u32 rsvd2;
80}; 72};
81 73
82/** 74/**
83 * BFI_PPORT_I2H_ENABLE_RSP 75 * BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ
84 */ 76 */
85#define bfi_pport_enable_rsp_t struct bfi_pport_generic_rsp_s 77struct bfi_fcport_set_svc_params_req_s {
86
87/**
88 * BFI_PPORT_H2I_DISABLE_REQ
89 */
90#define bfi_pport_disable_req_t struct bfi_pport_generic_req_s
91
92/**
93 * BFI_PPORT_I2H_DISABLE_RSP
94 */
95#define bfi_pport_disable_rsp_t struct bfi_pport_generic_rsp_s
96
97/**
98 * BFI_PPORT_H2I_GET_STATS_REQ
99 */
100#define bfi_pport_get_stats_req_t struct bfi_pport_generic_req_s
101
102/**
103 * BFI_PPORT_I2H_GET_STATS_RSP
104 */
105#define bfi_pport_get_stats_rsp_t struct bfi_pport_generic_rsp_s
106
107/**
108 * BFI_PPORT_H2I_CLEAR_STATS_REQ
109 */
110#define bfi_pport_clear_stats_req_t struct bfi_pport_generic_req_s
111
112/**
113 * BFI_PPORT_I2H_CLEAR_STATS_RSP
114 */
115#define bfi_pport_clear_stats_rsp_t struct bfi_pport_generic_rsp_s
116
117/**
118 * BFI_PPORT_H2I_GET_QOS_STATS_REQ
119 */
120#define bfi_pport_get_qos_stats_req_t struct bfi_pport_generic_req_s
121
122/**
123 * BFI_PPORT_H2I_GET_QOS_STATS_RSP
124 */
125#define bfi_pport_get_qos_stats_rsp_t struct bfi_pport_generic_rsp_s
126
127/**
128 * BFI_PPORT_H2I_CLEAR_QOS_STATS_REQ
129 */
130#define bfi_pport_clear_qos_stats_req_t struct bfi_pport_generic_req_s
131
132/**
133 * BFI_PPORT_H2I_CLEAR_QOS_STATS_RSP
134 */
135#define bfi_pport_clear_qos_stats_rsp_t struct bfi_pport_generic_rsp_s
136
137/**
138 * BFI_PPORT_H2I_SET_SVC_PARAMS_REQ
139 */
140struct bfi_pport_set_svc_params_req_s {
141 struct bfi_mhdr_s mh; /* msg header */ 78 struct bfi_mhdr_s mh; /* msg header */
142 u16 tx_bbcredit; /* Tx credits */ 79 u16 tx_bbcredit; /* Tx credits */
143 u16 rsvd; 80 u16 rsvd;
144}; 81};
145 82
146/** 83/**
147 * BFI_PPORT_I2H_SET_SVC_PARAMS_RSP 84 * BFI_FCPORT_I2H_EVENT
148 */
149
150/**
151 * BFI_PPORT_I2H_EVENT
152 */ 85 */
153struct bfi_pport_event_s { 86struct bfi_fcport_event_s {
154 struct bfi_mhdr_s mh; /* common msg header */ 87 struct bfi_mhdr_s mh; /* common msg header */
155 struct bfa_pport_link_s link_state; 88 struct bfa_pport_link_s link_state;
156}; 89};
157 90
158union bfi_pport_h2i_msg_u { 91/**
92 * fcport H2I message
93 */
94union bfi_fcport_h2i_msg_u {
159 struct bfi_mhdr_s *mhdr; 95 struct bfi_mhdr_s *mhdr;
160 struct bfi_pport_enable_req_s *penable; 96 struct bfi_fcport_enable_req_s *penable;
161 struct bfi_pport_generic_req_s *pdisable; 97 struct bfi_fcport_req_s *pdisable;
162 struct bfi_pport_generic_req_s *pgetstats; 98 struct bfi_fcport_set_svc_params_req_s *psetsvcparams;
163 struct bfi_pport_generic_req_s *pclearstats; 99 struct bfi_fcport_req_s *pstatsget;
164 struct bfi_pport_set_svc_params_req_s *psetsvcparams; 100 struct bfi_fcport_req_s *pstatsclear;
165 struct bfi_pport_get_qos_stats_req_s *pgetqosstats;
166 struct bfi_pport_generic_req_s *pclearqosstats;
167}; 101};
168 102
169union bfi_pport_i2h_msg_u { 103/**
104 * fcport I2H message
105 */
106union bfi_fcport_i2h_msg_u {
170 struct bfi_msg_s *msg; 107 struct bfi_msg_s *msg;
171 struct bfi_pport_generic_rsp_s *enable_rsp; 108 struct bfi_fcport_rsp_s *penable_rsp;
172 struct bfi_pport_disable_rsp_s *disable_rsp; 109 struct bfi_fcport_rsp_s *pdisable_rsp;
173 struct bfi_pport_generic_rsp_s *getstats_rsp; 110 struct bfi_fcport_rsp_s *psetsvcparams_rsp;
174 struct bfi_pport_clear_stats_rsp_s *clearstats_rsp; 111 struct bfi_fcport_rsp_s *pstatsget_rsp;
175 struct bfi_pport_set_svc_params_rsp_s *setsvcparasm_rsp; 112 struct bfi_fcport_rsp_s *pstatsclear_rsp;
176 struct bfi_pport_get_qos_stats_rsp_s *getqosstats_rsp; 113 struct bfi_fcport_event_s *event;
177 struct bfi_pport_clear_qos_stats_rsp_s *clearqosstats_rsp;
178 struct bfi_pport_event_s *event;
179}; 114};
180 115
181#pragma pack() 116#pragma pack()
182 117
183#endif /* __BFI_PPORT_H__ */ 118#endif /* __BFI_PPORT_H__ */
184
diff --git a/drivers/scsi/bfa/include/bfi/bfi_rport.h b/drivers/scsi/bfa/include/bfi/bfi_rport.h
index 3520f55f09d7..e1cd83b56ec6 100644
--- a/drivers/scsi/bfa/include/bfi/bfi_rport.h
+++ b/drivers/scsi/bfa/include/bfi/bfi_rport.h
@@ -38,10 +38,10 @@ struct bfi_rport_create_req_s {
38 struct bfi_mhdr_s mh; /* common msg header */ 38 struct bfi_mhdr_s mh; /* common msg header */
39 u16 bfa_handle; /* host rport handle */ 39 u16 bfa_handle; /* host rport handle */
40 u16 max_frmsz; /* max rcv pdu size */ 40 u16 max_frmsz; /* max rcv pdu size */
41 u32 pid : 24, /* remote port ID */ 41 u32 pid:24, /* remote port ID */
42 lp_tag : 8; /* local port tag */ 42 lp_tag:8; /* local port tag */
43 u32 local_pid : 24, /* local port ID */ 43 u32 local_pid:24, /* local port ID */
44 cisc : 8; 44 cisc:8;
45 u8 fc_class; /* supported FC classes */ 45 u8 fc_class; /* supported FC classes */
46 u8 vf_en; /* virtual fabric enable */ 46 u8 vf_en; /* virtual fabric enable */
47 u16 vf_id; /* virtual fabric ID */ 47 u16 vf_id; /* virtual fabric ID */
diff --git a/drivers/scsi/bfa/include/cna/bfa_cna_trcmod.h b/drivers/scsi/bfa/include/cna/bfa_cna_trcmod.h
index 43ba7064e81a..a75a1f3be315 100644
--- a/drivers/scsi/bfa/include/cna/bfa_cna_trcmod.h
+++ b/drivers/scsi/bfa/include/cna/bfa_cna_trcmod.h
@@ -31,6 +31,10 @@
31enum { 31enum {
32 BFA_TRC_CNA_CEE = 1, 32 BFA_TRC_CNA_CEE = 1,
33 BFA_TRC_CNA_PORT = 2, 33 BFA_TRC_CNA_PORT = 2,
34 BFA_TRC_CNA_IOC = 3,
35 BFA_TRC_CNA_DIAG = 4,
36 BFA_TRC_CNA_IOC_CB = 5,
37 BFA_TRC_CNA_IOC_CT = 6,
34}; 38};
35 39
36#endif /* __BFA_CNA_TRCMOD_H__ */ 40#endif /* __BFA_CNA_TRCMOD_H__ */
diff --git a/drivers/scsi/bfa/include/cs/bfa_checksum.h b/drivers/scsi/bfa/include/cs/bfa_checksum.h
index af8c1d533ba8..650f8d0aaff9 100644
--- a/drivers/scsi/bfa/include/cs/bfa_checksum.h
+++ b/drivers/scsi/bfa/include/cs/bfa_checksum.h
@@ -31,7 +31,7 @@ bfa_checksum_u32(u32 *buf, int sz)
31 for (i = 0; i < m; i++) 31 for (i = 0; i < m; i++)
32 sum ^= buf[i]; 32 sum ^= buf[i];
33 33
34 return (sum); 34 return sum;
35} 35}
36 36
37static inline u16 37static inline u16
@@ -43,7 +43,7 @@ bfa_checksum_u16(u16 *buf, int sz)
43 for (i = 0; i < m; i++) 43 for (i = 0; i < m; i++)
44 sum ^= buf[i]; 44 sum ^= buf[i];
45 45
46 return (sum); 46 return sum;
47} 47}
48 48
49static inline u8 49static inline u8
@@ -55,6 +55,6 @@ bfa_checksum_u8(u8 *buf, int sz)
55 for (i = 0; i < sz; i++) 55 for (i = 0; i < sz; i++)
56 sum ^= buf[i]; 56 sum ^= buf[i];
57 57
58 return (sum); 58 return sum;
59} 59}
60#endif 60#endif
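
[Editor's note] These helpers are plain XOR folds over the buffer, so integrity checking is just recompute-and-compare; no polynomial is involved. A short usage sketch (assuming, as in the driver, that sz is the buffer length in bytes; fill_buffer() is hypothetical):

    u32 buf[16];
    u32 expected;

    /* producer side */
    fill_buffer(buf);                              /* hypothetical */
    expected = bfa_checksum_u32(buf, sizeof(buf));

    /* consumer side: any single-bit corruption flips the XOR fold */
    if (bfa_checksum_u32(buf, sizeof(buf)) != expected)
            pr_err("bfa: checksum mismatch\n");
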
diff --git a/drivers/scsi/bfa/include/cs/bfa_log.h b/drivers/scsi/bfa/include/cs/bfa_log.h
index 761cbe22130a..bc334e0a93fa 100644
--- a/drivers/scsi/bfa/include/cs/bfa_log.h
+++ b/drivers/scsi/bfa/include/cs/bfa_log.h
@@ -157,7 +157,7 @@ typedef void (*bfa_log_cb_t)(struct bfa_log_mod_s *log_mod, u32 msg_id,
157 157
158 158
159struct bfa_log_mod_s { 159struct bfa_log_mod_s {
160 char instance_info[16]; /* instance info */ 160 char instance_info[BFA_STRING_32]; /* instance info */
161 int log_level[BFA_LOG_MODULE_ID_MAX + 1]; 161 int log_level[BFA_LOG_MODULE_ID_MAX + 1];
162 /* log level for modules */ 162 /* log level for modules */
163 bfa_log_cb_t cbfn; /* callback function */ 163 bfa_log_cb_t cbfn; /* callback function */
diff --git a/drivers/scsi/bfa/include/cs/bfa_plog.h b/drivers/scsi/bfa/include/cs/bfa_plog.h
index 670f86e5fc6e..f5bef63b5877 100644
--- a/drivers/scsi/bfa/include/cs/bfa_plog.h
+++ b/drivers/scsi/bfa/include/cs/bfa_plog.h
@@ -80,7 +80,8 @@ enum bfa_plog_mid {
80 BFA_PL_MID_HAL_FCXP = 4, 80 BFA_PL_MID_HAL_FCXP = 4,
81 BFA_PL_MID_HAL_UF = 5, 81 BFA_PL_MID_HAL_UF = 5,
82 BFA_PL_MID_FCS = 6, 82 BFA_PL_MID_FCS = 6,
83 BFA_PL_MID_MAX = 7 83 BFA_PL_MID_LPS = 7,
84 BFA_PL_MID_MAX = 8
84}; 85};
85 86
86#define BFA_PL_MID_STRLEN 8 87#define BFA_PL_MID_STRLEN 8
@@ -118,7 +119,11 @@ enum bfa_plog_eid {
118 BFA_PL_EID_RSCN = 17, 119 BFA_PL_EID_RSCN = 17,
119 BFA_PL_EID_DEBUG = 18, 120 BFA_PL_EID_DEBUG = 18,
120 BFA_PL_EID_MISC = 19, 121 BFA_PL_EID_MISC = 19,
121 BFA_PL_EID_MAX = 20 122 BFA_PL_EID_FIP_FCF_DISC = 20,
123 BFA_PL_EID_FIP_FCF_CVL = 21,
124 BFA_PL_EID_LOGIN = 22,
125 BFA_PL_EID_LOGO = 23,
126 BFA_PL_EID_MAX = 24
122}; 127};
123 128
124#define BFA_PL_ENAME_STRLEN 8 129#define BFA_PL_ENAME_STRLEN 8
diff --git a/drivers/scsi/bfa/include/cs/bfa_sm.h b/drivers/scsi/bfa/include/cs/bfa_sm.h
index 9877066680a6..11fba9082f05 100644
--- a/drivers/scsi/bfa/include/cs/bfa_sm.h
+++ b/drivers/scsi/bfa/include/cs/bfa_sm.h
@@ -23,9 +23,17 @@
23#define __BFA_SM_H__ 23#define __BFA_SM_H__
24 24
25typedef void (*bfa_sm_t)(void *sm, int event); 25typedef void (*bfa_sm_t)(void *sm, int event);
26/**
27 * oc - object class eg. bfa_ioc
28 * st - state, eg. reset
29 * otype - object type, eg. struct bfa_ioc_s
30 * etype - object type, eg. enum ioc_event
31 */
32#define bfa_sm_state_decl(oc, st, otype, etype) \
33 static void oc ## _sm_ ## st(otype * fsm, etype event)
26 34
27#define bfa_sm_set_state(_sm, _state) (_sm)->sm = (bfa_sm_t)(_state) 35#define bfa_sm_set_state(_sm, _state) ((_sm)->sm = (bfa_sm_t)(_state))
28#define bfa_sm_send_event(_sm, _event) (_sm)->sm((_sm), (_event)) 36#define bfa_sm_send_event(_sm, _event) ((_sm)->sm((_sm), (_event)))
29#define bfa_sm_get_state(_sm) ((_sm)->sm) 37#define bfa_sm_get_state(_sm) ((_sm)->sm)
30#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state)) 38#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state))
31 39
@@ -62,7 +70,7 @@ typedef void (*bfa_fsm_t)(void *fsm, int event);
62} while (0) 70} while (0)
63 71
64#define bfa_fsm_send_event(_fsm, _event) \ 72#define bfa_fsm_send_event(_fsm, _event) \
65 (_fsm)->fsm((_fsm), (_event)) 73 ((_fsm)->fsm((_fsm), (_event)))
66#define bfa_fsm_cmp_state(_fsm, _state) \ 74#define bfa_fsm_cmp_state(_fsm, _state) \
67 ((_fsm)->fsm == (bfa_fsm_t)(_state)) 75 ((_fsm)->fsm == (bfa_fsm_t)(_state))
68 76
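
[Editor's note] The new bfa_sm_state_decl() macro only stamps out the forward declaration static void <oc>_sm_<st>(<otype> *fsm, <etype> event); a state is then just a function pointer stored in the object, and bfa_sm_send_event() calls through it. A self-contained sketch of the pattern with hypothetical names:

    struct xyz_s;
    enum xyz_event { XYZ_E_START = 1, XYZ_E_STOP = 2 };

    bfa_sm_state_decl(xyz, stopped, struct xyz_s, enum xyz_event);
    bfa_sm_state_decl(xyz, running, struct xyz_s, enum xyz_event);

    struct xyz_s {
            bfa_sm_t sm;                   /* current state == current handler */
    };

    static void
    xyz_sm_stopped(struct xyz_s *fsm, enum xyz_event event)
    {
            if (event == XYZ_E_START)
                    bfa_sm_set_state(fsm, xyz_sm_running);
    }

    static void
    xyz_sm_running(struct xyz_s *fsm, enum xyz_event event)
    {
            if (event == XYZ_E_STOP)
                    bfa_sm_set_state(fsm, xyz_sm_stopped);
    }

    /*
     * Typical driving code:
     *         bfa_sm_set_state(&obj, xyz_sm_stopped);
     *         bfa_sm_send_event(&obj, XYZ_E_START);    (now in running)
     */
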
diff --git a/drivers/scsi/bfa/include/cs/bfa_trc.h b/drivers/scsi/bfa/include/cs/bfa_trc.h
index 3e743928c74c..310771c888e7 100644
--- a/drivers/scsi/bfa/include/cs/bfa_trc.h
+++ b/drivers/scsi/bfa/include/cs/bfa_trc.h
@@ -24,7 +24,7 @@
24#endif 24#endif
25 25
26#ifndef BFA_TRC_TS 26#ifndef BFA_TRC_TS
27#define BFA_TRC_TS(_trcm) ((_trcm)->ticks ++) 27#define BFA_TRC_TS(_trcm) ((_trcm)->ticks++)
28#endif 28#endif
29 29
30struct bfa_trc_s { 30struct bfa_trc_s {
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_aen.h b/drivers/scsi/bfa/include/defs/bfa_defs_aen.h
index 4c81a613db3d..35244698fcdc 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_aen.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_aen.h
@@ -30,6 +30,16 @@
30#include <defs/bfa_defs_audit.h> 30#include <defs/bfa_defs_audit.h>
31#include <defs/bfa_defs_ethport.h> 31#include <defs/bfa_defs_ethport.h>
32 32
33#define BFA_AEN_MAX_APP 5
34
35enum bfa_aen_app {
36 bfa_aen_app_bcu = 0, /* No thread for bcu */
37 bfa_aen_app_hcm = 1,
38 bfa_aen_app_cim = 2,
39 bfa_aen_app_snia = 3,
40 bfa_aen_app_test = 4, /* To be removed after unit test */
41};
42
33enum bfa_aen_category { 43enum bfa_aen_category {
34 BFA_AEN_CAT_ADAPTER = 1, 44 BFA_AEN_CAT_ADAPTER = 1,
35 BFA_AEN_CAT_PORT = 2, 45 BFA_AEN_CAT_PORT = 2,
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_auth.h b/drivers/scsi/bfa/include/defs/bfa_defs_auth.h
index dd19c83aba58..45df32820911 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_auth.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_auth.h
@@ -23,6 +23,7 @@
23#define PRIVATE_KEY 19009 23#define PRIVATE_KEY 19009
24#define KEY_LEN 32399 24#define KEY_LEN 32399
25#define BFA_AUTH_SECRET_STRING_LEN 256 25#define BFA_AUTH_SECRET_STRING_LEN 256
26#define BFA_AUTH_FAIL_NO_PASSWORD 0xFE
26#define BFA_AUTH_FAIL_TIMEOUT 0xFF 27#define BFA_AUTH_FAIL_TIMEOUT 0xFF
27 28
28/** 29/**
@@ -41,6 +42,27 @@ enum bfa_auth_status {
41 BFA_AUTH_STATUS_UNKNOWN = 9, /* authentication status unknown */ 42 BFA_AUTH_STATUS_UNKNOWN = 9, /* authentication status unknown */
42}; 43};
43 44
45enum bfa_auth_rej_code {
46 BFA_AUTH_RJT_CODE_AUTH_FAILURE = 1, /* auth failure */
47 BFA_AUTH_RJT_CODE_LOGICAL_ERR = 2, /* logical error */
48};
49
50/**
51 * Authentication reject codes
52 */
53enum bfa_auth_rej_code_exp {
54 BFA_AUTH_MECH_NOT_USABLE = 1, /* auth. mechanism not usable */
55 BFA_AUTH_DH_GROUP_NOT_USABLE = 2, /* DH Group not usable */
56 BFA_AUTH_HASH_FUNC_NOT_USABLE = 3, /* hash Function not usable */
57 BFA_AUTH_AUTH_XACT_STARTED = 4, /* auth xact started */
58 BFA_AUTH_AUTH_FAILED = 5, /* auth failed */
59 BFA_AUTH_INCORRECT_PLD = 6, /* incorrect payload */
60 BFA_AUTH_INCORRECT_PROTO_MSG = 7, /* incorrect proto msg */
61 BFA_AUTH_RESTART_AUTH_PROTO = 8, /* restart auth protocol */
62 BFA_AUTH_AUTH_CONCAT_NOT_SUPP = 9, /* auth concat not supported */
63 BFA_AUTH_PROTO_VER_NOT_SUPP = 10,/* proto version not supported */
64};
65
44struct auth_proto_stats_s { 66struct auth_proto_stats_s {
45 u32 auth_rjts; 67 u32 auth_rjts;
46 u32 auth_negs; 68 u32 auth_negs;
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_cee.h b/drivers/scsi/bfa/include/defs/bfa_defs_cee.h
index 520a22f52dd1..b0ac9ac15c5d 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_cee.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_cee.h
@@ -28,10 +28,6 @@
28 28
29#define BFA_CEE_LLDP_MAX_STRING_LEN (128) 29#define BFA_CEE_LLDP_MAX_STRING_LEN (128)
30 30
31
32/* FIXME: this is coming from the protocol spec. Can the host & apps share the
33 protocol .h files ?
34 */
35#define BFA_CEE_LLDP_SYS_CAP_OTHER 0x0001 31#define BFA_CEE_LLDP_SYS_CAP_OTHER 0x0001
36#define BFA_CEE_LLDP_SYS_CAP_REPEATER 0x0002 32#define BFA_CEE_LLDP_SYS_CAP_REPEATER 0x0002
37#define BFA_CEE_LLDP_SYS_CAP_MAC_BRIDGE 0x0004 33#define BFA_CEE_LLDP_SYS_CAP_MAC_BRIDGE 0x0004
@@ -94,9 +90,10 @@ struct bfa_cee_dcbx_cfg_s {
94/* CEE status */ 90/* CEE status */
95/* Making this to tri-state for the benefit of port list command */ 91/* Making this to tri-state for the benefit of port list command */
96enum bfa_cee_status_e { 92enum bfa_cee_status_e {
97 CEE_PHY_DOWN = 0, 93 CEE_UP = 0,
98 CEE_PHY_UP = 1, 94 CEE_PHY_UP = 1,
99 CEE_UP = 2, 95 CEE_LOOPBACK = 2,
96 CEE_PHY_DOWN = 3,
100}; 97};
101 98
102/* CEE Query */ 99/* CEE Query */
@@ -107,7 +104,8 @@ struct bfa_cee_attr_s {
107 struct bfa_cee_dcbx_cfg_s dcbx_remote; 104 struct bfa_cee_dcbx_cfg_s dcbx_remote;
108 mac_t src_mac; 105 mac_t src_mac;
109 u8 link_speed; 106 u8 link_speed;
110 u8 filler[3]; 107 u8 nw_priority;
108 u8 filler[2];
111}; 109};
112 110
113 111
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_driver.h b/drivers/scsi/bfa/include/defs/bfa_defs_driver.h
index 57049805762b..50382dd2ab41 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_driver.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_driver.h
@@ -21,6 +21,7 @@
21/** 21/**
22 * Driver statistics 22 * Driver statistics
23 */ 23 */
24struct bfa_driver_stats_s {
24 u16 tm_io_abort; 25 u16 tm_io_abort;
25 u16 tm_io_abort_comp; 26 u16 tm_io_abort_comp;
26 u16 tm_lun_reset; 27 u16 tm_lun_reset;
@@ -34,7 +35,7 @@
34 u64 output_req; 35 u64 output_req;
35 u64 input_words; 36 u64 input_words;
36 u64 output_words; 37 u64 output_words;
37} bfa_driver_stats_t; 38};
38 39
39 40
40#endif /* __BFA_DEFS_DRIVER_H__ */ 41#endif /* __BFA_DEFS_DRIVER_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_ethport.h b/drivers/scsi/bfa/include/defs/bfa_defs_ethport.h
index 79f9b3e146f7..b4fa0923aa89 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_ethport.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_ethport.h
@@ -19,6 +19,7 @@
19#define __BFA_DEFS_ETHPORT_H__ 19#define __BFA_DEFS_ETHPORT_H__
20 20
21#include <defs/bfa_defs_status.h> 21#include <defs/bfa_defs_status.h>
22#include <defs/bfa_defs_port.h>
22#include <protocol/types.h> 23#include <protocol/types.h>
23#include <cna/pstats/phyport_defs.h> 24#include <cna/pstats/phyport_defs.h>
24#include <cna/pstats/ethport_defs.h> 25#include <cna/pstats/ethport_defs.h>
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_fcport.h b/drivers/scsi/bfa/include/defs/bfa_defs_fcport.h
new file mode 100644
index 000000000000..a07ef4a3cd78
--- /dev/null
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_fcport.h
@@ -0,0 +1,94 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * bfa_defs_fcport.h
7 *
8 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License (GPL) Version 2 as
12 * published by the Free Software Foundation
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 */
19#ifndef __BFA_DEFS_FCPORT_H__
20#define __BFA_DEFS_FCPORT_H__
21
22#include <defs/bfa_defs_types.h>
23#include <protocol/types.h>
24
25#pragma pack(1)
26
27/**
28 * FCoE statistics
29 */
30struct bfa_fcoe_stats_s {
31 u64 secs_reset; /* Seconds since stats reset */
32 u64 cee_linkups; /* CEE link up */
33 u64 cee_linkdns; /* CEE link down */
34 u64 fip_linkups; /* FIP link up */
35 u64 fip_linkdns; /* FIP link down */
36 u64 fip_fails; /* FIP failures */
37 u64 mac_invalids; /* Invalid mac assignments */
38 u64 vlan_req; /* Vlan requests */
39 u64 vlan_notify; /* Vlan notifications */
40 u64 vlan_err; /* Vlan notification errors */
41 u64 vlan_timeouts; /* Vlan request timeouts */
42 u64 vlan_invalids; /* Vlan invalids */
43 u64 disc_req; /* Discovery requests */
44 u64 disc_rsp; /* Discovery responses */
45 u64 disc_err; /* Discovery error frames */
46 u64 disc_unsol; /* Discovery unsolicited */
47 u64 disc_timeouts; /* Discovery timeouts */
48 u64 disc_fcf_unavail; /* Discovery FCF not avail */
49 u64 linksvc_unsupp; /* FIP link service req unsupp. */
50 u64 linksvc_err; /* FIP link service req errors */
51 u64 logo_req; /* FIP logo */
52 u64 clrvlink_req; /* Clear virtual link requests */
53 u64 op_unsupp; /* FIP operation unsupp. */
54 u64 untagged; /* FIP untagged frames */
55 u64 txf_ucast; /* Tx FCoE unicast frames */
56 u64 txf_ucast_vlan; /* Tx FCoE unicast vlan frames */
57 u64 txf_ucast_octets; /* Tx FCoE unicast octets */
58 u64 txf_mcast; /* Tx FCoE multicast frames */
59 u64 txf_mcast_vlan; /* Tx FCoE multicast vlan frames */

60 u64 txf_mcast_octets; /* Tx FCoE multicast octets */
61 u64 txf_bcast; /* Tx FCoE broadcast frames */
62 u64 txf_bcast_vlan; /* Tx FCoE broadcast vlan frames */
63 u64 txf_bcast_octets; /* Tx FCoE broadcast octets */
64 u64 txf_timeout; /* Tx timeouts */
65 u64 txf_parity_errors; /* Transmit parity err */
66 u64 txf_fid_parity_errors; /* Transmit FID parity err */
67 u64 tx_pause; /* Tx pause frames */
68 u64 tx_zero_pause; /* Tx zero pause frames */
69 u64 tx_first_pause; /* Tx first pause frames */
70 u64 rx_pause; /* Rx pause frames */
71 u64 rx_zero_pause; /* Rx zero pause frames */
72 u64 rx_first_pause; /* Rx first pause frames */
73 u64 rxf_ucast_octets; /* Rx unicast octets */
74 u64 rxf_ucast; /* Rx unicast frames */
75 u64 rxf_ucast_vlan; /* Rx unicast vlan frames */
76 u64 rxf_mcast_octets; /* Rx multicast octets */
77 u64 rxf_mcast; /* Rx multicast frames */
78 u64 rxf_mcast_vlan; /* Rx multicast vlan frames */
79 u64 rxf_bcast_octets; /* Rx broadcast octets */
80 u64 rxf_bcast; /* Rx broadcast frames */
81 u64 rxf_bcast_vlan; /* Rx broadcast vlan frames */
82};
83
84/**
85 * QoS or FCoE stats (fcport stats excluding physical FC port stats)
86 */
87union bfa_fcport_stats_u {
88 struct bfa_qos_stats_s fcqos;
89 struct bfa_fcoe_stats_s fcoe;
90};
91
92#pragma pack()
93
94#endif /* __BFA_DEFS_FCPORT_H__ */
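
[Editor's note] The union lets one stats query path and one DMA buffer serve both personalities: an FC port reads stats.fcqos while an FCoE port reads stats.fcoe (the accessor prototypes are in the bfa_svc.h hunk earlier in this diff). A small consumer-side sketch, assuming a completed bfa_fcport_get_fcoe_stats() call has filled the buffer:

    union bfa_fcport_stats_u stats;

    /* after bfa_fcport_get_fcoe_stats() completes into &stats */
    pr_info("FIP link up/down: %llu/%llu, FCF unavailable: %llu\n",
            (unsigned long long)stats.fcoe.fip_linkups,
            (unsigned long long)stats.fcoe.fip_linkdns,
            (unsigned long long)stats.fcoe.disc_fcf_unavail);
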
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_im_common.h b/drivers/scsi/bfa/include/defs/bfa_defs_im_common.h
deleted file mode 100644
index 9ccf53bef65a..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_im_common.h
+++ /dev/null
@@ -1,32 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_IM_COMMON_H__
19#define __BFA_DEFS_IM_COMMON_H__
20
21#define BFA_ADAPTER_NAME_LEN 256
22#define BFA_ADAPTER_GUID_LEN 256
23#define RESERVED_VLAN_NAME L"PORT VLAN"
24#define PASSTHRU_VLAN_NAME L"PASSTHRU VLAN"
25
26 u64 tx_pkt_cnt;
27 u64 rx_pkt_cnt;
28 u32 duration;
29 u8 status;
30} bfa_im_stats_t, *pbfa_im_stats_t;
31
32#endif /* __BFA_DEFS_IM_COMMON_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_im_team.h b/drivers/scsi/bfa/include/defs/bfa_defs_im_team.h
deleted file mode 100644
index a486a7eb81d6..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_im_team.h
+++ /dev/null
@@ -1,72 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_IM_TEAM_H__
19#define __BFA_DEFS_IM_TEAM_H__
20
21#include <protocol/types.h>
22
23#define BFA_TEAM_MAX_PORTS 8
24#define BFA_TEAM_NAME_LEN 256
25#define BFA_MAX_NUM_TEAMS 16
26#define BFA_TEAM_INVALID_DELAY -1
27
28 BFA_LACP_RATE_SLOW = 1,
29 BFA_LACP_RATE_FAST
30} bfa_im_lacp_rate_t;
31
32 BFA_TEAM_MODE_FAIL_OVER = 1,
33 BFA_TEAM_MODE_FAIL_BACK,
34 BFA_TEAM_MODE_LACP,
35 BFA_TEAM_MODE_NONE
36} bfa_im_team_mode_t;
37
38 BFA_XMIT_POLICY_L2 = 1,
39 BFA_XMIT_POLICY_L3_L4
40} bfa_im_xmit_policy_t;
41
42 bfa_im_team_mode_t team_mode;
43 bfa_im_lacp_rate_t lacp_rate;
44 bfa_im_xmit_policy_t xmit_policy;
45 int delay;
46 wchar_t primary[BFA_ADAPTER_NAME_LEN];
47 wchar_t preferred_primary[BFA_ADAPTER_NAME_LEN];
48 mac_t mac;
49 u16 num_ports;
50 u16 num_vlans;
51 u16 vlan_list[BFA_MAX_VLANS_PER_PORT];
52 wchar_t team_guid_list[BFA_TEAM_MAX_PORTS][BFA_ADAPTER_GUID_LEN];
53 wchar_t ioc_name_list[BFA_TEAM_MAX_PORTS][BFA_ADAPTER_NAME_LEN];
54} bfa_im_team_attr_t;
55
56 wchar_t team_name[BFA_TEAM_NAME_LEN];
57 bfa_im_xmit_policy_t xmit_policy;
58 int delay;
59 wchar_t primary[BFA_ADAPTER_NAME_LEN];
60 wchar_t preferred_primary[BFA_ADAPTER_NAME_LEN];
61} bfa_im_team_edit_t, *pbfa_im_team_edit_t;
62
63 wchar_t team_name[BFA_TEAM_NAME_LEN];
64 bfa_im_team_mode_t team_mode;
65 mac_t mac;
66} bfa_im_team_info_t;
67
68 bfa_im_team_info_t team_info[BFA_MAX_NUM_TEAMS];
69 u16 num_teams;
70} bfa_im_team_list_t, *pbfa_im_team_list_t;
71
72#endif /* __BFA_DEFS_IM_TEAM_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_ioc.h b/drivers/scsi/bfa/include/defs/bfa_defs_ioc.h
index b1d532da3a9d..8d8e6a966537 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_ioc.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_ioc.h
@@ -126,6 +126,7 @@ struct bfa_ioc_attr_s {
126 struct bfa_ioc_driver_attr_s driver_attr; /* driver attr */ 126 struct bfa_ioc_driver_attr_s driver_attr; /* driver attr */
127 struct bfa_ioc_pci_attr_s pci_attr; 127 struct bfa_ioc_pci_attr_s pci_attr;
128 u8 port_id; /* port number */ 128 u8 port_id; /* port number */
129 u8 rsvd[7]; /*!< 64bit align */
129}; 130};
130 131
131/** 132/**
@@ -143,8 +144,8 @@ enum bfa_ioc_aen_event {
143 * BFA IOC level event data, now just a place holder 144 * BFA IOC level event data, now just a place holder
144 */ 145 */
145struct bfa_ioc_aen_data_s { 146struct bfa_ioc_aen_data_s {
146 enum bfa_ioc_type_e ioc_type;
147 wwn_t pwwn; 147 wwn_t pwwn;
148 s16 ioc_type;
148 mac_t mac; 149 mac_t mac;
149}; 150};
150 151
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h b/drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h
index d76bcbd9820f..c290fb13d2d1 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h
@@ -26,6 +26,8 @@
26 26
27#define BFA_IOCFC_INTR_DELAY 1125 27#define BFA_IOCFC_INTR_DELAY 1125
28#define BFA_IOCFC_INTR_LATENCY 225 28#define BFA_IOCFC_INTR_LATENCY 225
29#define BFA_IOCFCOE_INTR_DELAY 25
30#define BFA_IOCFCOE_INTR_LATENCY 5
29 31
30/** 32/**
31 * Interrupt coalescing configuration. 33 * Interrupt coalescing configuration.
@@ -50,7 +52,7 @@ struct bfa_iocfc_fwcfg_s {
50 u16 num_fcxp_reqs; /* unassisted FC exchanges */ 52 u16 num_fcxp_reqs; /* unassisted FC exchanges */
51 u16 num_uf_bufs; /* unsolicited recv buffers */ 53 u16 num_uf_bufs; /* unsolicited recv buffers */
52 u8 num_cqs; 54 u8 num_cqs;
53 u8 rsvd; 55 u8 rsvd[5];
54}; 56};
55 57
56struct bfa_iocfc_drvcfg_s { 58struct bfa_iocfc_drvcfg_s {
@@ -224,18 +226,24 @@ struct bfa_fw_port_physm_stats_s {
224 226
225 227
226struct bfa_fw_fip_stats_s { 228struct bfa_fw_fip_stats_s {
229 u32 vlan_req; /* vlan discovery requests */
230 u32 vlan_notify; /* vlan notifications */
231 u32 vlan_err; /* vlan response error */
232 u32 vlan_timeouts; /* vlan discovery timeouts */
233 u32 vlan_invalids; /* invalid vlan in discovery advert. */
227 u32 disc_req; /* Discovery solicit requests */ 234 u32 disc_req; /* Discovery solicit requests */
228 u32 disc_rsp; /* Discovery solicit response */ 235 u32 disc_rsp; /* Discovery solicit response */
229 u32 disc_err; /* Discovery advt. parse errors */ 236 u32 disc_err; /* Discovery advt. parse errors */
230 u32 disc_unsol; /* Discovery unsolicited */ 237 u32 disc_unsol; /* Discovery unsolicited */
231 u32 disc_timeouts; /* Discovery timeouts */ 238 u32 disc_timeouts; /* Discovery timeouts */
239 u32 disc_fcf_unavail; /* Discovery FCF Not Avail. */
232 u32 linksvc_unsupp; /* Unsupported link service req */ 240 u32 linksvc_unsupp; /* Unsupported link service req */
233 u32 linksvc_err; /* Parse error in link service req */ 241 u32 linksvc_err; /* Parse error in link service req */
234 u32 logo_req; /* Number of FIP logos received */ 242 u32 logo_req; /* Number of FIP logos received */
235 u32 clrvlink_req; /* Clear virtual link req */ 243 u32 clrvlink_req; /* Clear virtual link req */
236 u32 op_unsupp; /* Unsupported FIP operation */ 244 u32 op_unsupp; /* Unsupported FIP operation */
237 u32 untagged; /* Untagged frames (ignored) */ 245 u32 untagged; /* Untagged frames (ignored) */
238 u32 rsvd; 246 u32 invalid_version; /*!< Invalid FIP version */
239}; 247};
240 248
241 249
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_lport.h b/drivers/scsi/bfa/include/defs/bfa_defs_lport.h
index 7359f82aacfc..0952a139c47c 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_lport.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_lport.h
@@ -59,8 +59,8 @@ enum bfa_lport_aen_event {
59 */ 59 */
60struct bfa_lport_aen_data_s { 60struct bfa_lport_aen_data_s {
61 u16 vf_id; /* vf_id of this logical port */ 61 u16 vf_id; /* vf_id of this logical port */
62 u16 rsvd; 62 s16 roles; /* Logical port mode,IM/TM/IP etc */
63 enum bfa_port_role roles; /* Logical port mode,IM/TM/IP etc */ 63 u32 rsvd;
64 wwn_t ppwwn; /* WWN of its physical port */ 64 wwn_t ppwwn; /* WWN of its physical port */
65 wwn_t lpwwn; /* WWN of this logical port */ 65 wwn_t lpwwn; /* WWN of this logical port */
66}; 66};
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_mfg.h b/drivers/scsi/bfa/include/defs/bfa_defs_mfg.h
index 13fd4ab6aae2..c5bd9c36ad4d 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_mfg.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_mfg.h
@@ -22,7 +22,47 @@
22/** 22/**
23 * Manufacturing block version 23 * Manufacturing block version
24 */ 24 */
25#define BFA_MFG_VERSION 1 25#define BFA_MFG_VERSION 2
26
27/**
28 * Manufacturing block encrypted version
29 */
30#define BFA_MFG_ENC_VER 2
31
32/**
33 * Manufacturing block version 1 length
34 */
35#define BFA_MFG_VER1_LEN 128
36
37/**
38 * Manufacturing block header length
39 */
40#define BFA_MFG_HDR_LEN 4
41
42/**
43 * Checksum size
44 */
45#define BFA_MFG_CHKSUM_SIZE 16
46
47/**
48 * Manufacturing block encrypted version
49 */
50#define BFA_MFG_ENC_VER 2
51
52/**
53 * Manufacturing block version 1 length
54 */
55#define BFA_MFG_VER1_LEN 128
56
57/**
58 * Manufacturing block header length
59 */
60#define BFA_MFG_HDR_LEN 4
61
62/**
63 * Checksum size
64 */
65#define BFA_MFG_CHKSUM_SIZE 16
26 66
27/** 67/**
28 * Manufacturing block format 68 * Manufacturing block format
@@ -30,29 +70,74 @@
30#define BFA_MFG_SERIALNUM_SIZE 11 70#define BFA_MFG_SERIALNUM_SIZE 11
31#define BFA_MFG_PARTNUM_SIZE 14 71#define BFA_MFG_PARTNUM_SIZE 14
32#define BFA_MFG_SUPPLIER_ID_SIZE 10 72#define BFA_MFG_SUPPLIER_ID_SIZE 10
33#define BFA_MFG_SUPPLIER_PARTNUM_SIZE 20 73#define BFA_MFG_SUPPLIER_PARTNUM_SIZE 20
34#define BFA_MFG_SUPPLIER_SERIALNUM_SIZE 20 74#define BFA_MFG_SUPPLIER_SERIALNUM_SIZE 20
35#define BFA_MFG_SUPPLIER_REVISION_SIZE 4 75#define BFA_MFG_SUPPLIER_REVISION_SIZE 4
36#define STRSZ(_n) (((_n) + 4) & ~3) 76#define STRSZ(_n) (((_n) + 4) & ~3)
37 77
38/** 78/**
79 * Manufacturing card type
80 */
81enum {
82 BFA_MFG_TYPE_CB_MAX = 825, /* Crossbow card type max */
83 BFA_MFG_TYPE_FC8P2 = 825, /* 8G 2port FC card */
84 BFA_MFG_TYPE_FC8P1 = 815, /* 8G 1port FC card */
85 BFA_MFG_TYPE_FC4P2 = 425, /* 4G 2port FC card */
86 BFA_MFG_TYPE_FC4P1 = 415, /* 4G 1port FC card */
87 BFA_MFG_TYPE_CNA10P2 = 1020, /* 10G 2port CNA card */
88 BFA_MFG_TYPE_CNA10P1 = 1010, /* 10G 1port CNA card */
89};
90
91#pragma pack(1)
92
93/**
94 * Card type to port number conversion
95 */
96#define bfa_mfg_type2port_num(card_type) (((card_type) / 10) % 10)
97
98
99/**
100 * All numerical fields are in big-endian format.
101 */
102struct bfa_mfg_block_s {
103};
104
105/**
39 * VPD data length 106 * VPD data length
40 */ 107 */
41#define BFA_MFG_VPD_LEN 256 108#define BFA_MFG_VPD_LEN 512
109
110#define BFA_MFG_VPD_PCI_HDR_OFF 137
111#define BFA_MFG_VPD_PCI_VER_MASK 0x07 /* version mask 3 bits */
112#define BFA_MFG_VPD_PCI_VDR_MASK 0xf8 /* vendor mask 5 bits */
113
114/**
115 * VPD vendor tag
116 */
117enum {
118 BFA_MFG_VPD_UNKNOWN = 0, /* vendor unknown */
119 BFA_MFG_VPD_IBM = 1, /* vendor IBM */
120 BFA_MFG_VPD_HP = 2, /* vendor HP */
121 BFA_MFG_VPD_DELL = 3, /* vendor DELL */
122 BFA_MFG_VPD_PCI_IBM = 0x08, /* PCI VPD IBM */
123 BFA_MFG_VPD_PCI_HP = 0x10, /* PCI VPD HP */
124 BFA_MFG_VPD_PCI_DELL = 0x20, /* PCI VPD DELL */
125 BFA_MFG_VPD_PCI_BRCD = 0xf8, /* PCI VPD Brocade */
126};
42 127
43/** 128/**
44 * All numerical fields are in big-endian format. 129 * All numerical fields are in big-endian format.
45 */ 130 */
46struct bfa_mfg_vpd_s { 131struct bfa_mfg_vpd_s {
47 u8 version; /* vpd data version */ 132 u8 version; /* vpd data version */
48 u8 vpd_sig[3]; /* characters 'V', 'P', 'D' */ 133 u8 vpd_sig[3]; /* characters 'V', 'P', 'D' */
49 u8 chksum; /* u8 checksum */ 134 u8 chksum; /* u8 checksum */
50 u8 vendor; /* vendor */ 135 u8 vendor; /* vendor */
51 u8 len; /* vpd data length excluding header */ 136 u8 len; /* vpd data length excluding header */
52 u8 rsv; 137 u8 rsv;
53 u8 data[BFA_MFG_VPD_LEN]; /* vpd data */ 138 u8 data[BFA_MFG_VPD_LEN]; /* vpd data */
54}; 139};
55 140
56#pragma pack(1) 141#pragma pack()
57 142
58#endif /* __BFA_DEFS_MFG_H__ */ 143#endif /* __BFA_DEFS_MFG_H__ */
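The bfa_mfg_type2port_num() macro added above derives a card's port count from the tens digit of its card type, which is why the constants are encoded the way they are (825 and 1020 are 2-port cards, 815 and 415 are 1-port). A standalone sketch of the arithmetic:

    #include <stdio.h>

    /* Mirrors the macro introduced above: the tens digit is the port count. */
    #define bfa_mfg_type2port_num(card_type) (((card_type) / 10) % 10)

    int main(void)
    {
    	printf("FC8P2   (825): %d ports\n", bfa_mfg_type2port_num(825));
    	printf("FC4P1   (415): %d ports\n", bfa_mfg_type2port_num(415));
    	printf("CNA10P2 (1020): %d ports\n", bfa_mfg_type2port_num(1020));
    	return 0;
    }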
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_port.h b/drivers/scsi/bfa/include/defs/bfa_defs_port.h
index de0696c81bc4..501bc9739d9d 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_port.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_port.h
@@ -185,6 +185,8 @@ struct bfa_port_attr_s {
185 wwn_t fabric_name; /* attached switch's nwwn */ 185 wwn_t fabric_name; /* attached switch's nwwn */
186 u8 fabric_ip_addr[BFA_FCS_FABRIC_IPADDR_SZ]; /* attached 186 u8 fabric_ip_addr[BFA_FCS_FABRIC_IPADDR_SZ]; /* attached
187 * fabric's ip addr */ 187 * fabric's ip addr */
188 struct mac_s fpma_mac; /* Lport's FPMA Mac address */
189 u16 authfail; /* auth failed state */
188}; 190};
189 191
190/** 192/**
@@ -232,14 +234,15 @@ enum bfa_port_aen_sfp_pom {
232}; 234};
233 235
234struct bfa_port_aen_data_s { 236struct bfa_port_aen_data_s {
235 enum bfa_ioc_type_e ioc_type; 237 wwn_t pwwn; /* WWN of the physical port */
236 wwn_t pwwn; /* WWN of the physical port */ 238 wwn_t fwwn; /* WWN of the fabric port */
237 wwn_t fwwn; /* WWN of the fabric port */ 239 s32 phy_port_num; /*! For SFP related events */
238 mac_t mac; /* MAC addres of the ethernet port, 240 s16 ioc_type;
239 * applicable to CNA port only */ 241 s16 level; /* Only transitions will
240 int phy_port_num; /*! For SFP related events */ 242 * be informed */
241 enum bfa_port_aen_sfp_pom level; /* Only transitions will 243 struct mac_s mac; /* MAC address of the ethernet port,
242 * be informed */ 244 * applicable to CNA port only */
245 s16 rsvd;
243}; 246};
244 247
245#endif /* __BFA_DEFS_PORT_H__ */ 248#endif /* __BFA_DEFS_PORT_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_pport.h b/drivers/scsi/bfa/include/defs/bfa_defs_pport.h
index a000bc4e2d4a..26e5cc78095d 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_pport.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_pport.h
@@ -61,7 +61,7 @@ enum bfa_pport_speed {
61 * Port operational type (in sync with SNIA port type). 61 * Port operational type (in sync with SNIA port type).
62 */ 62 */
63enum bfa_pport_type { 63enum bfa_pport_type {
64 BFA_PPORT_TYPE_UNKNOWN = 1, /* port type is unkown */ 64 BFA_PPORT_TYPE_UNKNOWN = 1, /* port type is unknown */
65 BFA_PPORT_TYPE_TRUNKED = 2, /* Trunked mode */ 65 BFA_PPORT_TYPE_TRUNKED = 2, /* Trunked mode */
66 BFA_PPORT_TYPE_NPORT = 5, /* P2P with switched fabric */ 66 BFA_PPORT_TYPE_NPORT = 5, /* P2P with switched fabric */
67 BFA_PPORT_TYPE_NLPORT = 6, /* public loop */ 67 BFA_PPORT_TYPE_NLPORT = 6, /* public loop */
@@ -232,7 +232,7 @@ struct bfa_pport_attr_s {
232 u32 pid; /* port ID */ 232 u32 pid; /* port ID */
233 enum bfa_pport_type port_type; /* current topology */ 233 enum bfa_pport_type port_type; /* current topology */
234 u32 loopback; /* external loopback */ 234 u32 loopback; /* external loopback */
235 u32 rsvd1; 235 u32 authfail; /* auth fail state */
236 u32 rsvd2; /* padding for 64 bit */ 236 u32 rsvd2; /* padding for 64 bit */
237}; 237};
238 238
@@ -240,73 +240,79 @@ struct bfa_pport_attr_s {
240 * FC Port statistics. 240 * FC Port statistics.
241 */ 241 */
242struct bfa_pport_fc_stats_s { 242struct bfa_pport_fc_stats_s {
243 u64 secs_reset; /* seconds since stats is reset */ 243 u64 secs_reset; /* Seconds since stats is reset */
244 u64 tx_frames; /* transmitted frames */ 244 u64 tx_frames; /* Tx frames */
245 u64 tx_words; /* transmitted words */ 245 u64 tx_words; /* Tx words */
246 u64 rx_frames; /* received frames */ 246 u64 tx_lip; /* TX LIP */
247 u64 rx_words; /* received words */ 247 u64 tx_nos; /* Tx NOS */
248 u64 lip_count; /* LIPs seen */ 248 u64 tx_ols; /* Tx OLS */
249 u64 nos_count; /* NOS count */ 249 u64 tx_lr; /* Tx LR */
250 u64 error_frames; /* errored frames (sent?) */ 250 u64 tx_lrr; /* Tx LRR */
251 u64 dropped_frames; /* dropped frames */ 251 u64 rx_frames; /* Rx frames */
252 u64 link_failures; /* link failure count */ 252 u64 rx_words; /* Rx words */
253 u64 loss_of_syncs; /* loss of sync count */ 253 u64 lip_count; /* Rx LIP */
254 u64 loss_of_signals;/* loss of signal count */ 254 u64 nos_count; /* Rx NOS */
255 u64 primseq_errs; /* primitive sequence protocol */ 255 u64 ols_count; /* Rx OLS */
256 u64 bad_os_count; /* invalid ordered set */ 256 u64 lr_count; /* Rx LR */
257 u64 err_enc_out; /* Encoding error outside frame */ 257 u64 lrr_count; /* Rx LRR */
258 u64 invalid_crcs; /* frames received with invalid CRC*/ 258 u64 invalid_crcs; /* Rx CRC err frames */
259 u64 undersized_frm; /* undersized frames */ 259 u64 invalid_crc_gd_eof; /* Rx CRC err good EOF frames */
260 u64 oversized_frm; /* oversized frames */ 260 u64 undersized_frm; /* Rx undersized frames */
261 u64 bad_eof_frm; /* frames with bad EOF */ 261 u64 oversized_frm; /* Rx oversized frames */
262 struct bfa_qos_stats_s qos_stats; /* QoS statistics */ 262 u64 bad_eof_frm; /* Rx frames with bad EOF */
263 u64 error_frames; /* Errored frames */
264 u64 dropped_frames; /* Dropped frames */
265 u64 link_failures; /* Link Failure (LF) count */
266 u64 loss_of_syncs; /* Loss of sync count */
267 u64 loss_of_signals;/* Loss of signal count */
268 u64 primseq_errs; /* Primitive sequence protocol err. */
269 u64 bad_os_count; /* Invalid ordered sets */
270 u64 err_enc_out; /* Encoding err nonframe_8b10b */
271 u64 err_enc; /* Encoding err frame_8b10b */
263}; 272};
264 273
265/** 274/**
266 * Eth Port statistics. 275 * Eth Port statistics.
267 */ 276 */
268struct bfa_pport_eth_stats_s { 277struct bfa_pport_eth_stats_s {
269 u64 secs_reset; /* seconds since stats is reset */ 278 u64 secs_reset; /* Seconds since stats is reset */
270 u64 frame_64; /* both rx and tx counter */ 279 u64 frame_64; /* Frames 64 bytes */
271 u64 frame_65_127; /* both rx and tx counter */ 280 u64 frame_65_127; /* Frames 65-127 bytes */
272 u64 frame_128_255; /* both rx and tx counter */ 281 u64 frame_128_255; /* Frames 128-255 bytes */
273 u64 frame_256_511; /* both rx and tx counter */ 282 u64 frame_256_511; /* Frames 256-511 bytes */
274 u64 frame_512_1023; /* both rx and tx counter */ 283 u64 frame_512_1023; /* Frames 512-1023 bytes */
275 u64 frame_1024_1518; /* both rx and tx counter */ 284 u64 frame_1024_1518; /* Frames 1024-1518 bytes */
276 u64 frame_1519_1522; /* both rx and tx counter */ 285 u64 frame_1519_1522; /* Frames 1519-1522 bytes */
277 286 u64 tx_bytes; /* Tx bytes */
278 u64 tx_bytes; 287 u64 tx_packets; /* Tx packets */
279 u64 tx_packets; 288 u64 tx_mcast_packets; /* Tx multicast packets */
280 u64 tx_mcast_packets; 289 u64 tx_bcast_packets; /* Tx broadcast packets */
281 u64 tx_bcast_packets; 290 u64 tx_control_frame; /* Tx control frame */
282 u64 tx_control_frame; 291 u64 tx_drop; /* Tx drops */
283 u64 tx_drop; 292 u64 tx_jabber; /* Tx jabber */
284 u64 tx_jabber; 293 u64 tx_fcs_error; /* Tx FCS error */
285 u64 tx_fcs_error; 294 u64 tx_fragments; /* Tx fragments */
286 u64 tx_fragments; 295 u64 rx_bytes; /* Rx bytes */
287 296 u64 rx_packets; /* Rx packets */
288 u64 rx_bytes; 297 u64 rx_mcast_packets; /* Rx multicast packets */
289 u64 rx_packets; 298 u64 rx_bcast_packets; /* Rx broadcast packets */
290 u64 rx_mcast_packets; 299 u64 rx_control_frames; /* Rx control frames */
291 u64 rx_bcast_packets; 300 u64 rx_unknown_opcode; /* Rx unknown opcode */
292 u64 rx_control_frames; 301 u64 rx_drop; /* Rx drops */
293 u64 rx_unknown_opcode; 302 u64 rx_jabber; /* Rx jabber */
294 u64 rx_drop; 303 u64 rx_fcs_error; /* Rx FCS errors */
295 u64 rx_jabber; 304 u64 rx_alignment_error; /* Rx alignment errors */
296 u64 rx_fcs_error; 305 u64 rx_frame_length_error; /* Rx frame len errors */
297 u64 rx_alignment_error; 306 u64 rx_code_error; /* Rx code errors */
298 u64 rx_frame_length_error; 307 u64 rx_fragments; /* Rx fragments */
299 u64 rx_code_error; 308 u64 rx_pause; /* Rx pause */
300 u64 rx_fragments; 309 u64 rx_zero_pause; /* Rx zero pause */
301 310 u64 tx_pause; /* Tx pause */
302 u64 rx_pause; /* BPC */ 311 u64 tx_zero_pause; /* Tx zero pause */
303 u64 rx_zero_pause; /* BPC Pause cancellation */ 312 u64 rx_fcoe_pause; /* Rx fcoe pause */
304 u64 tx_pause; /* BPC */ 313 u64 rx_fcoe_zero_pause; /* Rx FCoE zero pause */
305 u64 tx_zero_pause; /* BPC Pause cancellation */ 314 u64 tx_fcoe_pause; /* Tx FCoE pause */
306 u64 rx_fcoe_pause; /* BPC */ 315 u64 tx_fcoe_zero_pause; /* Tx FCoE zero pause */
307 u64 rx_fcoe_zero_pause; /* BPC Pause cancellation */
308 u64 tx_fcoe_pause; /* BPC */
309 u64 tx_fcoe_zero_pause; /* BPC Pause cancellation */
310}; 316};
311 317
312/** 318/**
@@ -333,8 +339,7 @@ struct bfa_pport_fcpmap_s {
333}; 339};
334 340
335/** 341/**
 336 * Port RNID info. 342 * Port RNID info */
337 */
338struct bfa_pport_rnid_s { 343struct bfa_pport_rnid_s {
339 wwn_t wwn; 344 wwn_t wwn;
340 u32 unittype; 345 u32 unittype;
@@ -347,6 +352,23 @@ struct bfa_pport_rnid_s {
347 u16 topologydiscoveryflags; 352 u16 topologydiscoveryflags;
348}; 353};
349 354
355struct bfa_fcport_fcf_s {
356 wwn_t name; /* FCF name */
357 wwn_t fabric_name; /* Fabric Name */
358 u8 fipenabled; /* FIP enabled or not */
359 u8 fipfailed; /* FIP failed or not */
360 u8 resv[2];
361 u8 pri; /* FCF priority */
362 u8 version; /* FIP version used */
363 u8 available; /* Available for login */
364 u8 fka_disabled; /* FKA is disabled */
365 u8 maxsz_verified; /* FCoE max size verified */
366 u8 fc_map[3]; /* FC map */
367 u16 vlan; /* FCoE vlan tag/priority */
368 u32 fka_adv_per; /* FIP ka advert. period */
369 struct mac_s mac; /* FCF mac */
370};
371
350/** 372/**
351 * Link state information 373 * Link state information
352 */ 374 */
@@ -378,6 +400,7 @@ struct bfa_pport_link_s {
378 struct fc_alpabm_s alpabm; /* alpa bitmap */ 400 struct fc_alpabm_s alpabm; /* alpa bitmap */
379 } loop_info; 401 } loop_info;
380 } tl; 402 } tl;
403 struct bfa_fcport_fcf_s fcf; /*!< FCF information (for FCoE) */
381}; 404};
382 405
383#endif /* __BFA_DEFS_PPORT_H__ */ 406#endif /* __BFA_DEFS_PPORT_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_status.h b/drivers/scsi/bfa/include/defs/bfa_defs_status.h
index cdceaeb9f4b8..4374494bd566 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_status.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_status.h
@@ -180,8 +180,8 @@ enum bfa_status {
180 BFA_STATUS_IM_ADAPT_ALREADY_IN_TEAM = 114, /* Given adapter is part 180 BFA_STATUS_IM_ADAPT_ALREADY_IN_TEAM = 114, /* Given adapter is part
181 * of another team */ 181 * of another team */
182 BFA_STATUS_IM_ADAPT_HAS_VLANS = 115, /* Adapter has VLANs configured. 182 BFA_STATUS_IM_ADAPT_HAS_VLANS = 115, /* Adapter has VLANs configured.
183 * Delete all VLANs before 183 * Delete all VLANs to become
184 * creating team */ 184 * part of the team */
185 BFA_STATUS_IM_PVID_MISMATCH = 116, /* Mismatching PVIDs configured 185 BFA_STATUS_IM_PVID_MISMATCH = 116, /* Mismatching PVIDs configured
186 * for adapters */ 186 * for adapters */
187 BFA_STATUS_IM_LINK_SPEED_MISMATCH = 117, /* Mismatching link speeds 187 BFA_STATUS_IM_LINK_SPEED_MISMATCH = 117, /* Mismatching link speeds
@@ -213,7 +213,7 @@ enum bfa_status {
213 * loaded */ 213 * loaded */
214 BFA_STATUS_CARD_TYPE_MISMATCH = 131, /* Card type mismatch */ 214 BFA_STATUS_CARD_TYPE_MISMATCH = 131, /* Card type mismatch */
215 BFA_STATUS_BAD_ASICBLK = 132, /* Bad ASIC block */ 215 BFA_STATUS_BAD_ASICBLK = 132, /* Bad ASIC block */
216 BFA_STATUS_NO_DRIVER = 133, /* Storage/Ethernet driver not loaded */ 216 BFA_STATUS_NO_DRIVER = 133, /* Brocade adapter/driver not installed or loaded */
217 BFA_STATUS_INVALID_MAC = 134, /* Invalid mac address */ 217 BFA_STATUS_INVALID_MAC = 134, /* Invalid mac address */
218 BFA_STATUS_IM_NO_VLAN = 135, /* No VLANs configured on the adapter */ 218 BFA_STATUS_IM_NO_VLAN = 135, /* No VLANs configured on the adapter */
219 BFA_STATUS_IM_ETH_LB_FAILED = 136, /* Ethernet loopback test failed */ 219 BFA_STATUS_IM_ETH_LB_FAILED = 136, /* Ethernet loopback test failed */
@@ -228,8 +228,7 @@ enum bfa_status {
 228 BFA_STATUS_IM_GET_INETCFG_FAILED = 142, /* Acquiring Network Subsystem 228 BFA_STATUS_IM_GET_INETCFG_FAILED = 142, /* Acquiring Network Subsystem
229 * handle Failed. Please try 229 * handle Failed. Please try
230 * after some time */ 230 * after some time */
231 BFA_STATUS_IM_NOT_BOUND = 143, /* Brocade 10G Ethernet Service is not 231 BFA_STATUS_IM_NOT_BOUND = 143, /* IM driver is not active */
232 * Enabled on this port */
233 BFA_STATUS_INSUFFICIENT_PERMS = 144, /* User doesn't have sufficient 232 BFA_STATUS_INSUFFICIENT_PERMS = 144, /* User doesn't have sufficient
234 * permissions to execute the BCU 233 * permissions to execute the BCU
235 * application */ 234 * application */
@@ -242,6 +241,14 @@ enum bfa_status {
242 * failed */ 241 * failed */
243 BFA_STATUS_IM_UNBIND_FAILED = 149, /* ! < IM Driver unbind operation 242 BFA_STATUS_IM_UNBIND_FAILED = 149, /* ! < IM Driver unbind operation
244 * failed */ 243 * failed */
244 BFA_STATUS_IM_PORT_IN_TEAM = 150, /* Port is already part of the
245 * team */
 246 BFA_STATUS_IM_VLAN_NOT_FOUND = 151, /* VLAN ID doesn't exist */
 247 BFA_STATUS_IM_TEAM_NOT_FOUND = 152, /* Teaming configuration doesn't
 248 * exist */
249 BFA_STATUS_IM_TEAM_CFG_NOT_ALLOWED = 153, /* Given settings are not
250 * allowed for the current
251 * Teaming mode */
245 BFA_STATUS_MAX_VAL /* Unknown error code */ 252 BFA_STATUS_MAX_VAL /* Unknown error code */
246}; 253};
247#define bfa_status_t enum bfa_status 254#define bfa_status_t enum bfa_status
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_tsensor.h b/drivers/scsi/bfa/include/defs/bfa_defs_tsensor.h
index 31881d218515..ade763dbc8ce 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_tsensor.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_tsensor.h
@@ -25,7 +25,7 @@
25 * Temperature sensor status values 25 * Temperature sensor status values
26 */ 26 */
27enum bfa_tsensor_status { 27enum bfa_tsensor_status {
28 BFA_TSENSOR_STATUS_UNKNOWN = 1, /* unkown status */ 28 BFA_TSENSOR_STATUS_UNKNOWN = 1, /* unknown status */
29 BFA_TSENSOR_STATUS_FAULTY = 2, /* sensor is faulty */ 29 BFA_TSENSOR_STATUS_FAULTY = 2, /* sensor is faulty */
 30 BFA_TSENSOR_STATUS_BELOW_MIN = 3, /* temperature below minimum */ 30 BFA_TSENSOR_STATUS_BELOW_MIN = 3, /* temperature below minimum */
31 BFA_TSENSOR_STATUS_NOMINAL = 4, /* normal temperature */ 31 BFA_TSENSOR_STATUS_NOMINAL = 4, /* normal temperature */
diff --git a/drivers/scsi/bfa/include/fcb/bfa_fcb_fcpim.h b/drivers/scsi/bfa/include/fcb/bfa_fcb_fcpim.h
index a6c70aee0aa3..52585d3dd891 100644
--- a/drivers/scsi/bfa/include/fcb/bfa_fcb_fcpim.h
+++ b/drivers/scsi/bfa/include/fcb/bfa_fcb_fcpim.h
@@ -70,7 +70,6 @@ void bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv);
70 */ 70 */
71void bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv); 71void bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv);
72 72
73void bfa_fcb_itnim_tov_begin(struct bfad_itnim_s *itnim_drv);
74void bfa_fcb_itnim_tov(struct bfad_itnim_s *itnim_drv); 73void bfa_fcb_itnim_tov(struct bfad_itnim_s *itnim_drv);
75 74
76#endif /* __BFAD_FCB_FCPIM_H__ */ 75#endif /* __BFAD_FCB_FCPIM_H__ */
diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs.h b/drivers/scsi/bfa/include/fcs/bfa_fcs.h
index 627669c65546..f2fd35fdee28 100644
--- a/drivers/scsi/bfa/include/fcs/bfa_fcs.h
+++ b/drivers/scsi/bfa/include/fcs/bfa_fcs.h
@@ -49,6 +49,7 @@ struct bfa_fcs_s {
49 struct bfa_trc_mod_s *trcmod; /* tracing module */ 49 struct bfa_trc_mod_s *trcmod; /* tracing module */
50 struct bfa_aen_s *aen; /* aen component */ 50 struct bfa_aen_s *aen; /* aen component */
51 bfa_boolean_t vf_enabled; /* VF mode is enabled */ 51 bfa_boolean_t vf_enabled; /* VF mode is enabled */
52 bfa_boolean_t fdmi_enabled; /*!< FDMI is enabled */
52 bfa_boolean_t min_cfg; /* min cfg enabled/disabled */ 53 bfa_boolean_t min_cfg; /* min cfg enabled/disabled */
53 u16 port_vfid; /* port default VF ID */ 54 u16 port_vfid; /* port default VF ID */
54 struct bfa_fcs_driver_info_s driver_info; 55 struct bfa_fcs_driver_info_s driver_info;
@@ -60,10 +61,12 @@ struct bfa_fcs_s {
60/* 61/*
61 * bfa fcs API functions 62 * bfa fcs API functions
62 */ 63 */
63void bfa_fcs_init(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad, 64void bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
64 bfa_boolean_t min_cfg); 65 bfa_boolean_t min_cfg);
66void bfa_fcs_init(struct bfa_fcs_s *fcs);
65void bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs, 67void bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
66 struct bfa_fcs_driver_info_s *driver_info); 68 struct bfa_fcs_driver_info_s *driver_info);
69void bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable);
67void bfa_fcs_exit(struct bfa_fcs_s *fcs); 70void bfa_fcs_exit(struct bfa_fcs_s *fcs);
68void bfa_fcs_trc_init(struct bfa_fcs_s *fcs, struct bfa_trc_mod_s *trcmod); 71void bfa_fcs_trc_init(struct bfa_fcs_s *fcs, struct bfa_trc_mod_s *trcmod);
69void bfa_fcs_log_init(struct bfa_fcs_s *fcs, struct bfa_log_mod_s *logmod); 72void bfa_fcs_log_init(struct bfa_fcs_s *fcs, struct bfa_log_mod_s *logmod);
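The single-step bfa_fcs_init() is split into bfa_fcs_attach() plus bfa_fcs_init() so that tunables such as the new fdmi_enabled flag can be applied between binding and start-up. An order-of-calls sketch; only the four bfa_fcs_* calls come from the header above, the bfad locals and flags are assumptions for illustration:

    bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, min_cfg); /* bind, no start */
    bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info);
    bfa_fcs_set_fdmi_param(&bfad->bfa_fcs, fdmi_enable);       /* pre-start tunable */
    bfa_fcs_init(&bfad->bfa_fcs);                              /* begin operation */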
diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs_fabric.h b/drivers/scsi/bfa/include/fcs/bfa_fcs_fabric.h
index 4ffd2242d3de..08b79d5e46f3 100644
--- a/drivers/scsi/bfa/include/fcs/bfa_fcs_fabric.h
+++ b/drivers/scsi/bfa/include/fcs/bfa_fcs_fabric.h
@@ -75,7 +75,7 @@ struct bfa_fcs_fabric_s {
75 */ 75 */
76}; 76};
77 77
78#define bfa_fcs_fabric_npiv_capable(__f) (__f)->is_npiv 78#define bfa_fcs_fabric_npiv_capable(__f) ((__f)->is_npiv)
79#define bfa_fcs_fabric_is_switched(__f) \ 79#define bfa_fcs_fabric_is_switched(__f) \
80 ((__f)->fab_type == BFA_FCS_FABRIC_SWITCHED) 80 ((__f)->fab_type == BFA_FCS_FABRIC_SWITCHED)
81 81
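Wrapping the macro body in outer parentheses is defensive: the expansion then behaves as a single expression at every call site. A hypothetical example (not the driver's fields) of the failure mode this prevents:

    #include <stdio.h>

    struct cfg { int maxfrsize; };
    #define MAXFR_BAD(c)  (c)->maxfrsize - 4   /* unparenthesized body */
    #define MAXFR_GOOD(c) ((c)->maxfrsize - 4)

    int main(void)
    {
    	struct cfg c = { .maxfrsize = 2112 };
    	/* The bad form expands to (&c)->maxfrsize - 4 * 2 and prints 2104;
    	 * the parenthesized form prints the intended 4216. */
    	printf("%d vs %d\n", MAXFR_BAD(&c) * 2, MAXFR_GOOD(&c) * 2);
    	return 0;
    }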
diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h b/drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h
index b85cba884b96..ceaefd3060f4 100644
--- a/drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h
+++ b/drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h
@@ -34,14 +34,6 @@ struct bfa_fcs_s;
34struct bfa_fcs_fabric_s; 34struct bfa_fcs_fabric_s;
35 35
36/* 36/*
37* @todo : need to move to a global config file.
38 * Maximum Vports supported per physical port or vf.
39 */
40#define BFA_FCS_MAX_VPORTS_SUPP_CB 255
41#define BFA_FCS_MAX_VPORTS_SUPP_CT 191
42
43/*
44* @todo : need to move to a global config file.
45 * Maximum Rports supported per port (physical/logical). 37 * Maximum Rports supported per port (physical/logical).
46 */ 38 */
47#define BFA_FCS_MAX_RPORTS_SUPP 256 /* @todo : tentative value */ 39#define BFA_FCS_MAX_RPORTS_SUPP 256 /* @todo : tentative value */
@@ -125,12 +117,12 @@ union bfa_fcs_port_topo_u {
125struct bfa_fcs_port_s { 117struct bfa_fcs_port_s {
126 struct list_head qe; /* used by port/vport */ 118 struct list_head qe; /* used by port/vport */
127 bfa_sm_t sm; /* state machine */ 119 bfa_sm_t sm; /* state machine */
128 struct bfa_fcs_fabric_s *fabric; /* parent fabric */ 120 struct bfa_fcs_fabric_s *fabric;/* parent fabric */
129 struct bfa_port_cfg_s port_cfg; /* port configuration */ 121 struct bfa_port_cfg_s port_cfg;/* port configuration */
130 struct bfa_timer_s link_timer; /* timer for link offline */ 122 struct bfa_timer_s link_timer; /* timer for link offline */
131 u32 pid : 24; /* FC address */ 123 u32 pid:24; /* FC address */
132 u8 lp_tag; /* lport tag */ 124 u8 lp_tag; /* lport tag */
133 u16 num_rports; /* Num of r-ports */ 125 u16 num_rports; /* Num of r-ports */
134 struct list_head rport_q; /* queue of discovered r-ports */ 126 struct list_head rport_q; /* queue of discovered r-ports */
135 struct bfa_fcs_s *fcs; /* FCS instance */ 127 struct bfa_fcs_s *fcs; /* FCS instance */
136 union bfa_fcs_port_topo_u port_topo; /* fabric/loop/n2n details */ 128 union bfa_fcs_port_topo_u port_topo; /* fabric/loop/n2n details */
@@ -188,13 +180,14 @@ bfa_fcs_port_get_drvport(struct bfa_fcs_port_s *port)
188} 180}
189 181
190 182
191#define bfa_fcs_port_get_opertype(_lport) (_lport)->fabric->oper_type 183#define bfa_fcs_port_get_opertype(_lport) ((_lport)->fabric->oper_type)
192 184
193 185
194#define bfa_fcs_port_get_fabric_name(_lport) (_lport)->fabric->fabric_name 186#define bfa_fcs_port_get_fabric_name(_lport) ((_lport)->fabric->fabric_name)
195 187
196 188
197#define bfa_fcs_port_get_fabric_ipaddr(_lport) (_lport)->fabric->fabric_ip_addr 189#define bfa_fcs_port_get_fabric_ipaddr(_lport) \
190 ((_lport)->fabric->fabric_ip_addr)
198 191
199/** 192/**
200 * bfa fcs port public functions 193 * bfa fcs port public functions
diff --git a/drivers/scsi/bfa/include/log/bfa_log_hal.h b/drivers/scsi/bfa/include/log/bfa_log_hal.h
index 0412aea2ec30..5f8f5e30b9e8 100644
--- a/drivers/scsi/bfa/include/log/bfa_log_hal.h
+++ b/drivers/scsi/bfa/include/log/bfa_log_hal.h
@@ -27,4 +27,10 @@
27 (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 3) 27 (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 3)
28#define BFA_LOG_HAL_SM_ASSERT \ 28#define BFA_LOG_HAL_SM_ASSERT \
29 (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 4) 29 (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 4)
30#define BFA_LOG_HAL_DRIVER_ERROR \
31 (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 5)
32#define BFA_LOG_HAL_DRIVER_CONFIG_ERROR \
33 (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 6)
34#define BFA_LOG_HAL_MBOX_ERROR \
35 (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 7)
30#endif 36#endif
diff --git a/drivers/scsi/bfa/include/log/bfa_log_linux.h b/drivers/scsi/bfa/include/log/bfa_log_linux.h
index 317c0547ee16..bd451db4c30a 100644
--- a/drivers/scsi/bfa/include/log/bfa_log_linux.h
+++ b/drivers/scsi/bfa/include/log/bfa_log_linux.h
@@ -41,4 +41,20 @@
41 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 10) 41 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 10)
42#define BFA_LOG_LINUX_SCSI_ABORT_COMP \ 42#define BFA_LOG_LINUX_SCSI_ABORT_COMP \
43 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 11) 43 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 11)
44#define BFA_LOG_LINUX_DRIVER_CONFIG_ERROR \
45 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 12)
46#define BFA_LOG_LINUX_BNA_STATE_MACHINE \
47 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 13)
48#define BFA_LOG_LINUX_IOC_ERROR \
49 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 14)
50#define BFA_LOG_LINUX_RESOURCE_ALLOC_ERROR \
51 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 15)
52#define BFA_LOG_LINUX_RING_BUFFER_ERROR \
53 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 16)
54#define BFA_LOG_LINUX_DRIVER_ERROR \
55 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 17)
56#define BFA_LOG_LINUX_DRIVER_DIAG \
57 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 18)
58#define BFA_LOG_LINUX_DRIVER_AEN \
59 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 19)
44#endif 60#endif
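Each new message ID above packs the module ID into the high bits and a per-module message number into the low bits, so log consumers can demultiplex by module. A sketch of the scheme; the offset and module-ID values are assumptions for illustration (the real ones live in the bfa_log headers):

    #include <stdio.h>

    #define BFA_LOG_MODID_OFFSET 16 /* assumed for illustration */
    #define BFA_LOG_LINUX_ID      5 /* assumed for illustration */
    #define MSG_ID(mod, num) \
    	((((unsigned int)(mod)) << BFA_LOG_MODID_OFFSET) | (num))

    int main(void)
    {
    	unsigned int id = MSG_ID(BFA_LOG_LINUX_ID, 17);
    	printf("module %u, message %u\n",
    	       id >> BFA_LOG_MODID_OFFSET, id & 0xffffu);
    	return 0;
    }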
diff --git a/drivers/scsi/bfa/include/protocol/ct.h b/drivers/scsi/bfa/include/protocol/ct.h
index c59d6630b070..b82540a230c4 100644
--- a/drivers/scsi/bfa/include/protocol/ct.h
+++ b/drivers/scsi/bfa/include/protocol/ct.h
@@ -82,7 +82,7 @@ enum {
82}; 82};
83 83
84/* 84/*
85 * defintions for CT reason code 85 * definitions for CT reason code
86 */ 86 */
87enum { 87enum {
88 CT_RSN_INV_CMD = 0x01, 88 CT_RSN_INV_CMD = 0x01,
@@ -129,7 +129,7 @@ enum {
129}; 129};
130 130
131/* 131/*
132 * defintions for the explanation code for all servers 132 * definitions for the explanation code for all servers
133 */ 133 */
134enum { 134enum {
135 CT_EXP_AUTH_EXCEPTION = 0xF1, 135 CT_EXP_AUTH_EXCEPTION = 0xF1,
@@ -193,11 +193,11 @@ struct fcgs_rftid_req_s {
193#define FC_GS_FCP_FC4_FEATURE_TARGET 0x01 193#define FC_GS_FCP_FC4_FEATURE_TARGET 0x01
194 194
195struct fcgs_rffid_req_s{ 195struct fcgs_rffid_req_s{
196 u32 rsvd :8; 196 u32 rsvd:8;
197 u32 dap :24; /* port identifier */ 197 u32 dap:24; /* port identifier */
198 u32 rsvd1 :16; 198 u32 rsvd1:16;
199 u32 fc4ftr_bits :8; /* fc4 feature bits */ 199 u32 fc4ftr_bits:8; /* fc4 feature bits */
200 u32 fc4_type :8; /* corresponding FC4 Type */ 200 u32 fc4_type:8; /* corresponding FC4 Type */
201}; 201};
202 202
203/** 203/**
diff --git a/drivers/scsi/bfa/include/protocol/fc.h b/drivers/scsi/bfa/include/protocol/fc.h
index 3e39ba58cfb5..8d1038035a76 100644
--- a/drivers/scsi/bfa/include/protocol/fc.h
+++ b/drivers/scsi/bfa/include/protocol/fc.h
@@ -50,6 +50,11 @@ struct fchs_s {
50 50
51 u32 ro; /* relative offset */ 51 u32 ro; /* relative offset */
52}; 52};
53
54#define FC_SOF_LEN 4
55#define FC_EOF_LEN 4
56#define FC_CRC_LEN 4
57
53/* 58/*
54 * Fibre Channel BB_E Header Structure 59 * Fibre Channel BB_E Header Structure
55 */ 60 */
@@ -486,14 +491,14 @@ struct fc_rsi_s {
486 * see FC-PH-X table 113 & 115 for explanation also FCP table 8 491 * see FC-PH-X table 113 & 115 for explanation also FCP table 8
487 */ 492 */
488struct fc_prli_params_s{ 493struct fc_prli_params_s{
489 u32 reserved: 16; 494 u32 reserved:16;
490#ifdef __BIGENDIAN 495#ifdef __BIGENDIAN
491 u32 reserved1: 5; 496 u32 reserved1:5;
492 u32 rec_support : 1; 497 u32 rec_support:1;
493 u32 task_retry_id : 1; 498 u32 task_retry_id:1;
494 u32 retry : 1; 499 u32 retry:1;
495 500
496 u32 confirm : 1; 501 u32 confirm:1;
497 u32 doverlay:1; 502 u32 doverlay:1;
498 u32 initiator:1; 503 u32 initiator:1;
499 u32 target:1; 504 u32 target:1;
@@ -502,10 +507,10 @@ struct fc_prli_params_s{
502 u32 rxrdisab:1; 507 u32 rxrdisab:1;
503 u32 wxrdisab:1; 508 u32 wxrdisab:1;
504#else 509#else
505 u32 retry : 1; 510 u32 retry:1;
506 u32 task_retry_id : 1; 511 u32 task_retry_id:1;
507 u32 rec_support : 1; 512 u32 rec_support:1;
508 u32 reserved1: 5; 513 u32 reserved1:5;
509 514
510 u32 wxrdisab:1; 515 u32 wxrdisab:1;
511 u32 rxrdisab:1; 516 u32 rxrdisab:1;
@@ -514,7 +519,7 @@ struct fc_prli_params_s{
514 u32 target:1; 519 u32 target:1;
515 u32 initiator:1; 520 u32 initiator:1;
516 u32 doverlay:1; 521 u32 doverlay:1;
517 u32 confirm : 1; 522 u32 confirm:1;
518#endif 523#endif
519}; 524};
520 525
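FC_SOF_LEN, FC_EOF_LEN and FC_CRC_LEN account for the on-wire bytes that bracket an FC payload beyond the basic header; an FCoE path can use them, for example, when verifying the maximum frame size an FCF advertises. A back-of-the-envelope sketch (the 24-byte basic header length is an assumption here, not taken from this diff):

    #include <stdio.h>

    #define FC_SOF_LEN 4
    #define FC_EOF_LEN 4
    #define FC_CRC_LEN 4
    #define FCHS_LEN  24 /* assumed basic FC header size */

    int main(void)
    {
    	unsigned int payload = 2048;
    	printf("on-wire bytes for a %u-byte payload: %u\n", payload,
    	       FC_SOF_LEN + FCHS_LEN + payload + FC_CRC_LEN + FC_EOF_LEN);
    	return 0;
    }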
diff --git a/drivers/scsi/bfa/include/protocol/pcifw.h b/drivers/scsi/bfa/include/protocol/pcifw.h
deleted file mode 100644
index 6830dc3ee58a..000000000000
--- a/drivers/scsi/bfa/include/protocol/pcifw.h
+++ /dev/null
@@ -1,75 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * pcifw.h PCI FW related headers
20 */
21
22#ifndef __PCIFW_H__
23#define __PCIFW_H__
24
25#pragma pack(1)
26
27struct pnp_hdr_s{
28 u32 signature; /* "$PnP" */
29 u8 rev; /* Struct revision */
30 u8 len; /* Header structure len in multiples
31 * of 16 bytes */
32 u16 off; /* Offset to next header 00 if none */
33 u8 rsvd; /* Reserved byte */
34 u8 cksum; /* 8-bit checksum for this header */
35 u32 pnp_dev_id; /* PnP Device Id */
36 u16 mfstr; /* Pointer to manufacturer string */
37 u16 prstr; /* Pointer to product string */
38 u8 devtype[3]; /* Device Type Code */
39 u8 devind; /* Device Indicator */
40 u16 bcventr; /* Bootstrap entry vector */
41 u16 rsvd2; /* Reserved */
42 u16 sriv; /* Static resource information vector */
43};
44
45struct pci_3_0_ds_s{
46 u32 sig; /* Signature "PCIR" */
47 u16 vendid; /* Vendor ID */
48 u16 devid; /* Device ID */
49 u16 devlistoff; /* Device List Offset */
50 u16 len; /* PCI Data Structure Length */
51 u8 rev; /* PCI Data Structure Revision */
52 u8 clcode[3]; /* Class Code */
53 u16 imglen; /* Code image length in multiples of
54 * 512 bytes */
55 u16 coderev; /* Revision level of code/data */
56 u8 codetype; /* Code type 0x00 - BIOS */
57 u8 indr; /* Last image indicator */
58 u16 mrtimglen; /* Max Run Time Image Length */
59 u16 cuoff; /* Config Utility Code Header Offset */
60 u16 dmtfclp; /* DMTF CLP entry point offset */
61};
62
63struct pci_optrom_hdr_s{
64 u16 sig; /* Signature 0x55AA */
65 u8 len; /* Option ROM length in units of 512 bytes */
66 u8 inivec[3]; /* Initialization vector */
67 u8 rsvd[16]; /* Reserved field */
68 u16 verptr; /* Pointer to version string - private */
69 u16 pcids; /* Pointer to PCI data structure */
70 u16 pnphdr; /* Pointer to PnP expansion header */
71};
72
73#pragma pack()
74
75#endif
diff --git a/drivers/scsi/bfa/loop.c b/drivers/scsi/bfa/loop.c
index a418dedebe9e..f6342efb6a90 100644
--- a/drivers/scsi/bfa/loop.c
+++ b/drivers/scsi/bfa/loop.c
@@ -58,49 +58,16 @@ static const u8 port_loop_alpa_map[] = {
58/* 58/*
59 * Local Functions 59 * Local Functions
60 */ 60 */
61bfa_status_t bfa_fcs_port_loop_send_plogi(struct bfa_fcs_port_s *port, 61static bfa_status_t bfa_fcs_port_loop_send_plogi(struct bfa_fcs_port_s *port,
62 u8 alpa); 62 u8 alpa);
63 63
64void bfa_fcs_port_loop_plogi_response(void *fcsarg, 64static void bfa_fcs_port_loop_plogi_response(void *fcsarg,
65 struct bfa_fcxp_s *fcxp, 65 struct bfa_fcxp_s *fcxp,
66 void *cbarg, 66 void *cbarg,
67 bfa_status_t req_status, 67 bfa_status_t req_status,
68 u32 rsp_len, 68 u32 rsp_len,
69 u32 resid_len, 69 u32 resid_len,
70 struct fchs_s *rsp_fchs); 70 struct fchs_s *rsp_fchs);
71
72bfa_status_t bfa_fcs_port_loop_send_adisc(struct bfa_fcs_port_s *port,
73 u8 alpa);
74
75void bfa_fcs_port_loop_adisc_response(void *fcsarg,
76 struct bfa_fcxp_s *fcxp,
77 void *cbarg,
78 bfa_status_t req_status,
79 u32 rsp_len,
80 u32 resid_len,
81 struct fchs_s *rsp_fchs);
82
83bfa_status_t bfa_fcs_port_loop_send_plogi_acc(struct bfa_fcs_port_s *port,
84 u8 alpa);
85
86void bfa_fcs_port_loop_plogi_acc_response(void *fcsarg,
87 struct bfa_fcxp_s *fcxp,
88 void *cbarg,
89 bfa_status_t req_status,
90 u32 rsp_len,
91 u32 resid_len,
92 struct fchs_s *rsp_fchs);
93
94bfa_status_t bfa_fcs_port_loop_send_adisc_acc(struct bfa_fcs_port_s *port,
95 u8 alpa);
96
97void bfa_fcs_port_loop_adisc_acc_response(void *fcsarg,
98 struct bfa_fcxp_s *fcxp,
99 void *cbarg,
100 bfa_status_t req_status,
101 u32 rsp_len,
102 u32 resid_len,
103 struct fchs_s *rsp_fchs);
104/** 71/**
 105 * Called by port to initialize in private LOOP topology. 72 * Called by port to initialize in private LOOP topology.
106 */ 73 */
@@ -179,7 +146,7 @@ bfa_fcs_port_loop_lip(struct bfa_fcs_port_s *port)
179/** 146/**
180 * Local Functions. 147 * Local Functions.
181 */ 148 */
182bfa_status_t 149static bfa_status_t
183bfa_fcs_port_loop_send_plogi(struct bfa_fcs_port_s *port, u8 alpa) 150bfa_fcs_port_loop_send_plogi(struct bfa_fcs_port_s *port, u8 alpa)
184{ 151{
185 struct fchs_s fchs; 152 struct fchs_s fchs;
@@ -195,7 +162,7 @@ bfa_fcs_port_loop_send_plogi(struct bfa_fcs_port_s *port, u8 alpa)
195 len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), alpa, 162 len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), alpa,
196 bfa_fcs_port_get_fcid(port), 0, 163 bfa_fcs_port_get_fcid(port), 0,
197 port->port_cfg.pwwn, port->port_cfg.nwwn, 164 port->port_cfg.pwwn, port->port_cfg.nwwn,
198 bfa_pport_get_maxfrsize(port->fcs->bfa)); 165 bfa_fcport_get_maxfrsize(port->fcs->bfa));
199 166
200 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 167 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
201 FC_CLASS_3, len, &fchs, 168 FC_CLASS_3, len, &fchs,
@@ -208,7 +175,7 @@ bfa_fcs_port_loop_send_plogi(struct bfa_fcs_port_s *port, u8 alpa)
208/** 175/**
209 * Called by fcxp to notify the Plogi response 176 * Called by fcxp to notify the Plogi response
210 */ 177 */
211void 178static void
212bfa_fcs_port_loop_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, 179bfa_fcs_port_loop_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
213 void *cbarg, bfa_status_t req_status, 180 void *cbarg, bfa_status_t req_status,
214 u32 rsp_len, u32 resid_len, 181 u32 rsp_len, u32 resid_len,
@@ -244,179 +211,3 @@ bfa_fcs_port_loop_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
244 bfa_assert(0); 211 bfa_assert(0);
245 } 212 }
246} 213}
247
248bfa_status_t
249bfa_fcs_port_loop_send_plogi_acc(struct bfa_fcs_port_s *port, u8 alpa)
250{
251 struct fchs_s fchs;
252 struct bfa_fcxp_s *fcxp;
253 int len;
254
255 bfa_trc(port->fcs, alpa);
256
257 fcxp = bfa_fcxp_alloc(NULL, port->fcs->bfa, 0, 0, NULL, NULL, NULL,
258 NULL);
259 bfa_assert(fcxp);
260
261 len = fc_plogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), alpa,
262 bfa_fcs_port_get_fcid(port), 0,
263 port->port_cfg.pwwn, port->port_cfg.nwwn,
264 bfa_pport_get_maxfrsize(port->fcs->bfa));
265
266 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
267 FC_CLASS_3, len, &fchs,
268 bfa_fcs_port_loop_plogi_acc_response,
269 (void *)port, FC_MAX_PDUSZ, 0); /* No response
270 * expected
271 */
272
273 return BFA_STATUS_OK;
274}
275
276/*
277 * Plogi Acc Response
 278 * We do not do any processing here.
279 */
280void
281bfa_fcs_port_loop_plogi_acc_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
282 void *cbarg, bfa_status_t req_status,
283 u32 rsp_len, u32 resid_len,
284 struct fchs_s *rsp_fchs)
285{
286
287 struct bfa_fcs_port_s *port = (struct bfa_fcs_port_s *) cbarg;
288
289 bfa_trc(port->fcs, port->pid);
290
291 /*
292 * Sanity Checks
293 */
294 if (req_status != BFA_STATUS_OK) {
295 bfa_trc(port->fcs, req_status);
296 return;
297 }
298}
299
300bfa_status_t
301bfa_fcs_port_loop_send_adisc(struct bfa_fcs_port_s *port, u8 alpa)
302{
303 struct fchs_s fchs;
304 struct bfa_fcxp_s *fcxp;
305 int len;
306
307 bfa_trc(port->fcs, alpa);
308
309 fcxp = bfa_fcxp_alloc(NULL, port->fcs->bfa, 0, 0, NULL, NULL, NULL,
310 NULL);
311 bfa_assert(fcxp);
312
313 len = fc_adisc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), alpa,
314 bfa_fcs_port_get_fcid(port), 0,
315 port->port_cfg.pwwn, port->port_cfg.nwwn);
316
317 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
318 FC_CLASS_3, len, &fchs,
319 bfa_fcs_port_loop_adisc_response, (void *)port,
320 FC_MAX_PDUSZ, FC_RA_TOV);
321
322 return BFA_STATUS_OK;
323}
324
325/**
326 * Called by fcxp to notify the ADISC response
327 */
328void
329bfa_fcs_port_loop_adisc_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
330 void *cbarg, bfa_status_t req_status,
331 u32 rsp_len, u32 resid_len,
332 struct fchs_s *rsp_fchs)
333{
334 struct bfa_fcs_port_s *port = (struct bfa_fcs_port_s *) cbarg;
335 struct bfa_fcs_rport_s *rport;
336 struct fc_adisc_s *adisc_resp;
337 struct fc_els_cmd_s *els_cmd;
338 u32 pid = rsp_fchs->s_id;
339
340 bfa_trc(port->fcs, req_status);
341
342 /*
343 * Sanity Checks
344 */
345 if (req_status != BFA_STATUS_OK) {
346 /*
347 * TBD : we may need to retry certain requests
348 */
349 bfa_fcxp_free(fcxp);
350 return;
351 }
352
353 els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp);
354 adisc_resp = (struct fc_adisc_s *) els_cmd;
355
356 if (els_cmd->els_code == FC_ELS_ACC) {
357 } else {
358 bfa_trc(port->fcs, adisc_resp->els_cmd.els_code);
359
360 /*
361 * TBD: we may need to check for reject codes and retry
362 */
363 rport = bfa_fcs_port_get_rport_by_pid(port, pid);
364 if (rport) {
365 list_del(&rport->qe);
366 bfa_fcs_rport_delete(rport);
367 }
368
369 }
370 return;
371}
372
373bfa_status_t
374bfa_fcs_port_loop_send_adisc_acc(struct bfa_fcs_port_s *port, u8 alpa)
375{
376 struct fchs_s fchs;
377 struct bfa_fcxp_s *fcxp;
378 int len;
379
380 bfa_trc(port->fcs, alpa);
381
382 fcxp = bfa_fcxp_alloc(NULL, port->fcs->bfa, 0, 0, NULL, NULL, NULL,
383 NULL);
384 bfa_assert(fcxp);
385
386 len = fc_adisc_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), alpa,
387 bfa_fcs_port_get_fcid(port), 0,
388 port->port_cfg.pwwn, port->port_cfg.nwwn);
389
390 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
391 FC_CLASS_3, len, &fchs,
392 bfa_fcs_port_loop_adisc_acc_response,
 393 (void *)port, FC_MAX_PDUSZ, 0); /* no response
394 * expected
395 */
396
397 return BFA_STATUS_OK;
398}
399
400/*
401 * Adisc Acc Response
 402 * We do not do any processing here.
403 */
404void
405bfa_fcs_port_loop_adisc_acc_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
406 void *cbarg, bfa_status_t req_status,
407 u32 rsp_len, u32 resid_len,
408 struct fchs_s *rsp_fchs)
409{
410
411 struct bfa_fcs_port_s *port = (struct bfa_fcs_port_s *) cbarg;
412
413 bfa_trc(port->fcs, port->pid);
414
415 /*
416 * Sanity Checks
417 */
418 if (req_status != BFA_STATUS_OK) {
419 bfa_trc(port->fcs, req_status);
420 return;
421 }
422}
diff --git a/drivers/scsi/bfa/lport_api.c b/drivers/scsi/bfa/lport_api.c
index 8f51a83f1834..d3907d184e2b 100644
--- a/drivers/scsi/bfa/lport_api.c
+++ b/drivers/scsi/bfa/lport_api.c
@@ -43,7 +43,7 @@ bfa_fcs_cfg_base_port(struct bfa_fcs_s *fcs, struct bfa_port_cfg_s *port_cfg)
43struct bfa_fcs_port_s * 43struct bfa_fcs_port_s *
44bfa_fcs_get_base_port(struct bfa_fcs_s *fcs) 44bfa_fcs_get_base_port(struct bfa_fcs_s *fcs)
45{ 45{
46 return (&fcs->fabric.bport); 46 return &fcs->fabric.bport;
47} 47}
48 48
49wwn_t 49wwn_t
@@ -88,11 +88,10 @@ bfa_fcs_port_get_rport(struct bfa_fcs_port_s *port, wwn_t wwn, int index,
88 } 88 }
89 89
90 bfa_trc(fcs, i); 90 bfa_trc(fcs, i);
91 if (rport) { 91 if (rport)
92 return rport->pwwn; 92 return rport->pwwn;
93 } else { 93 else
94 return (wwn_t) 0; 94 return (wwn_t) 0;
95 }
96} 95}
97 96
98void 97void
@@ -157,7 +156,7 @@ bfa_fcs_port_get_rport_max_speed(struct bfa_fcs_port_s *port)
157 /* 156 /*
158 * Get Physical port's current speed 157 * Get Physical port's current speed
159 */ 158 */
160 bfa_pport_get_attr(port->fcs->bfa, &pport_attr); 159 bfa_fcport_get_attr(port->fcs->bfa, &pport_attr);
161 pport_speed = pport_attr.speed; 160 pport_speed = pport_attr.speed;
162 bfa_trc(fcs, pport_speed); 161 bfa_trc(fcs, pport_speed);
163 162
@@ -198,17 +197,17 @@ bfa_fcs_lookup_port(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t lpwwn)
198 vf = bfa_fcs_vf_lookup(fcs, vf_id); 197 vf = bfa_fcs_vf_lookup(fcs, vf_id);
199 if (vf == NULL) { 198 if (vf == NULL) {
200 bfa_trc(fcs, vf_id); 199 bfa_trc(fcs, vf_id);
201 return (NULL); 200 return NULL;
202 } 201 }
203 202
204 if (!lpwwn || (vf->bport.port_cfg.pwwn == lpwwn)) 203 if (!lpwwn || (vf->bport.port_cfg.pwwn == lpwwn))
205 return (&vf->bport); 204 return &vf->bport;
206 205
207 vport = bfa_fcs_fabric_vport_lookup(vf, lpwwn); 206 vport = bfa_fcs_fabric_vport_lookup(vf, lpwwn);
208 if (vport) 207 if (vport)
209 return (&vport->lport); 208 return &vport->lport;
210 209
211 return (NULL); 210 return NULL;
212} 211}
213 212
214/* 213/*
@@ -236,7 +235,8 @@ bfa_fcs_port_get_info(struct bfa_fcs_port_s *port,
236 port_info->port_wwn = bfa_fcs_port_get_pwwn(port); 235 port_info->port_wwn = bfa_fcs_port_get_pwwn(port);
237 port_info->node_wwn = bfa_fcs_port_get_nwwn(port); 236 port_info->node_wwn = bfa_fcs_port_get_nwwn(port);
238 237
239 port_info->max_vports_supp = bfa_fcs_vport_get_max(port->fcs); 238 port_info->max_vports_supp =
239 bfa_lps_get_max_vport(port->fcs->bfa);
240 port_info->num_vports_inuse = 240 port_info->num_vports_inuse =
241 bfa_fcs_fabric_vport_count(port->fabric); 241 bfa_fcs_fabric_vport_count(port->fabric);
242 port_info->max_rports_supp = BFA_FCS_MAX_RPORTS_SUPP; 242 port_info->max_rports_supp = BFA_FCS_MAX_RPORTS_SUPP;
diff --git a/drivers/scsi/bfa/ms.c b/drivers/scsi/bfa/ms.c
index c96b3ca007ae..5e8c8dee6c97 100644
--- a/drivers/scsi/bfa/ms.c
+++ b/drivers/scsi/bfa/ms.c
@@ -118,7 +118,7 @@ bfa_fcs_port_ms_sm_offline(struct bfa_fcs_port_ms_s *ms,
118 break; 118 break;
119 119
120 default: 120 default:
121 bfa_assert(0); 121 bfa_sm_fault(ms->port->fcs, event);
122 } 122 }
123} 123}
124 124
@@ -141,7 +141,7 @@ bfa_fcs_port_ms_sm_plogi_sending(struct bfa_fcs_port_ms_s *ms,
141 break; 141 break;
142 142
143 default: 143 default:
144 bfa_assert(0); 144 bfa_sm_fault(ms->port->fcs, event);
145 } 145 }
146} 146}
147 147
@@ -190,7 +190,7 @@ bfa_fcs_port_ms_sm_plogi(struct bfa_fcs_port_ms_s *ms, enum port_ms_event event)
190 break; 190 break;
191 191
192 default: 192 default:
193 bfa_assert(0); 193 bfa_sm_fault(ms->port->fcs, event);
194 } 194 }
195} 195}
196 196
@@ -216,7 +216,7 @@ bfa_fcs_port_ms_sm_plogi_retry(struct bfa_fcs_port_ms_s *ms,
216 break; 216 break;
217 217
218 default: 218 default:
219 bfa_assert(0); 219 bfa_sm_fault(ms->port->fcs, event);
220 } 220 }
221} 221}
222 222
@@ -230,10 +230,6 @@ bfa_fcs_port_ms_sm_online(struct bfa_fcs_port_ms_s *ms,
230 switch (event) { 230 switch (event) {
231 case MSSM_EVENT_PORT_OFFLINE: 231 case MSSM_EVENT_PORT_OFFLINE:
232 bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline); 232 bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline);
233 /*
234 * now invoke MS related sub-modules
235 */
236 bfa_fcs_port_fdmi_offline(ms);
237 break; 233 break;
238 234
239 case MSSM_EVENT_PORT_FABRIC_RSCN: 235 case MSSM_EVENT_PORT_FABRIC_RSCN:
@@ -243,7 +239,7 @@ bfa_fcs_port_ms_sm_online(struct bfa_fcs_port_ms_s *ms,
243 break; 239 break;
244 240
245 default: 241 default:
246 bfa_assert(0); 242 bfa_sm_fault(ms->port->fcs, event);
247 } 243 }
248} 244}
249 245
@@ -266,7 +262,7 @@ bfa_fcs_port_ms_sm_gmal_sending(struct bfa_fcs_port_ms_s *ms,
266 break; 262 break;
267 263
268 default: 264 default:
269 bfa_assert(0); 265 bfa_sm_fault(ms->port->fcs, event);
270 } 266 }
271} 267}
272 268
@@ -304,7 +300,7 @@ bfa_fcs_port_ms_sm_gmal(struct bfa_fcs_port_ms_s *ms, enum port_ms_event event)
304 break; 300 break;
305 301
306 default: 302 default:
307 bfa_assert(0); 303 bfa_sm_fault(ms->port->fcs, event);
308 } 304 }
309} 305}
310 306
@@ -330,7 +326,7 @@ bfa_fcs_port_ms_sm_gmal_retry(struct bfa_fcs_port_ms_s *ms,
330 break; 326 break;
331 327
332 default: 328 default:
333 bfa_assert(0); 329 bfa_sm_fault(ms->port->fcs, event);
334 } 330 }
335} 331}
336 332
@@ -466,7 +462,7 @@ bfa_fcs_port_ms_sm_gfn_sending(struct bfa_fcs_port_ms_s *ms,
466 break; 462 break;
467 463
468 default: 464 default:
469 bfa_assert(0); 465 bfa_sm_fault(ms->port->fcs, event);
470 } 466 }
471} 467}
472 468
@@ -502,7 +498,7 @@ bfa_fcs_port_ms_sm_gfn(struct bfa_fcs_port_ms_s *ms, enum port_ms_event event)
502 break; 498 break;
503 499
504 default: 500 default:
505 bfa_assert(0); 501 bfa_sm_fault(ms->port->fcs, event);
506 } 502 }
507} 503}
508 504
@@ -528,7 +524,7 @@ bfa_fcs_port_ms_sm_gfn_retry(struct bfa_fcs_port_ms_s *ms,
528 break; 524 break;
529 525
530 default: 526 default:
531 bfa_assert(0); 527 bfa_sm_fault(ms->port->fcs, event);
532 } 528 }
533} 529}
534 530
@@ -637,7 +633,7 @@ bfa_fcs_port_ms_send_plogi(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
637 bfa_os_hton3b(FC_MGMT_SERVER), 633 bfa_os_hton3b(FC_MGMT_SERVER),
638 bfa_fcs_port_get_fcid(port), 0, 634 bfa_fcs_port_get_fcid(port), 0,
639 port->port_cfg.pwwn, port->port_cfg.nwwn, 635 port->port_cfg.pwwn, port->port_cfg.nwwn,
640 bfa_pport_get_maxfrsize(port->fcs->bfa)); 636 bfa_fcport_get_maxfrsize(port->fcs->bfa));
641 637
642 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 638 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
643 FC_CLASS_3, len, &fchs, bfa_fcs_port_ms_plogi_response, 639 FC_CLASS_3, len, &fchs, bfa_fcs_port_ms_plogi_response,
@@ -735,6 +731,7 @@ bfa_fcs_port_ms_offline(struct bfa_fcs_port_s *port)
735 731
736 ms->port = port; 732 ms->port = port;
737 bfa_sm_send_event(ms, MSSM_EVENT_PORT_OFFLINE); 733 bfa_sm_send_event(ms, MSSM_EVENT_PORT_OFFLINE);
734 bfa_fcs_port_fdmi_offline(ms);
738} 735}
739 736
740void 737void
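Every default arm in ms.c (and in ns.c and rport.c below) moves from bfa_assert(0) to bfa_sm_fault(), so an unexpected event is traced instead of halting the driver. A standalone sketch of the pattern, with illustrative names rather than the driver's macros:

    #include <stdio.h>

    /* Stand-in for bfa_sm_fault(): record the stray event, keep running. */
    static void sm_fault(const char *sm, int event)
    {
    	fprintf(stderr, "%s: discarding unexpected event %d\n", sm, event);
    }

    static void ms_sm_offline(int event)
    {
    	switch (event) {
    	case 1: /* PORT_ONLINE: state transition elided */
    		break;
    	default:
    		sm_fault("ms_offline", event); /* was a hard assert */
    	}
    }

    int main(void)
    {
    	ms_sm_offline(42); /* a stray event no longer stops everything */
    	return 0;
    }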
diff --git a/drivers/scsi/bfa/ns.c b/drivers/scsi/bfa/ns.c
index 59fea99d67a4..d20dd7e15742 100644
--- a/drivers/scsi/bfa/ns.c
+++ b/drivers/scsi/bfa/ns.c
@@ -164,7 +164,7 @@ bfa_fcs_port_ns_sm_offline(struct bfa_fcs_port_ns_s *ns,
164 break; 164 break;
165 165
166 default: 166 default:
167 bfa_assert(0); 167 bfa_sm_fault(ns->port->fcs, event);
168 } 168 }
169} 169}
170 170
@@ -187,7 +187,7 @@ bfa_fcs_port_ns_sm_plogi_sending(struct bfa_fcs_port_ns_s *ns,
187 break; 187 break;
188 188
189 default: 189 default:
190 bfa_assert(0); 190 bfa_sm_fault(ns->port->fcs, event);
191 } 191 }
192} 192}
193 193
@@ -221,7 +221,7 @@ bfa_fcs_port_ns_sm_plogi(struct bfa_fcs_port_ns_s *ns,
221 break; 221 break;
222 222
223 default: 223 default:
224 bfa_assert(0); 224 bfa_sm_fault(ns->port->fcs, event);
225 } 225 }
226} 226}
227 227
@@ -247,7 +247,7 @@ bfa_fcs_port_ns_sm_plogi_retry(struct bfa_fcs_port_ns_s *ns,
247 break; 247 break;
248 248
249 default: 249 default:
250 bfa_assert(0); 250 bfa_sm_fault(ns->port->fcs, event);
251 } 251 }
252} 252}
253 253
@@ -270,7 +270,7 @@ bfa_fcs_port_ns_sm_sending_rspn_id(struct bfa_fcs_port_ns_s *ns,
270 break; 270 break;
271 271
272 default: 272 default:
273 bfa_assert(0); 273 bfa_sm_fault(ns->port->fcs, event);
274 } 274 }
275} 275}
276 276
@@ -304,7 +304,7 @@ bfa_fcs_port_ns_sm_rspn_id(struct bfa_fcs_port_ns_s *ns,
304 break; 304 break;
305 305
306 default: 306 default:
307 bfa_assert(0); 307 bfa_sm_fault(ns->port->fcs, event);
308 } 308 }
309} 309}
310 310
@@ -330,7 +330,7 @@ bfa_fcs_port_ns_sm_rspn_id_retry(struct bfa_fcs_port_ns_s *ns,
330 break; 330 break;
331 331
332 default: 332 default:
333 bfa_assert(0); 333 bfa_sm_fault(ns->port->fcs, event);
334 } 334 }
335} 335}
336 336
@@ -353,7 +353,7 @@ bfa_fcs_port_ns_sm_sending_rft_id(struct bfa_fcs_port_ns_s *ns,
353 break; 353 break;
354 354
355 default: 355 default:
356 bfa_assert(0); 356 bfa_sm_fault(ns->port->fcs, event);
357 } 357 }
358} 358}
359 359
@@ -390,7 +390,7 @@ bfa_fcs_port_ns_sm_rft_id(struct bfa_fcs_port_ns_s *ns,
390 break; 390 break;
391 391
392 default: 392 default:
393 bfa_assert(0); 393 bfa_sm_fault(ns->port->fcs, event);
394 } 394 }
395} 395}
396 396
@@ -413,7 +413,7 @@ bfa_fcs_port_ns_sm_rft_id_retry(struct bfa_fcs_port_ns_s *ns,
413 break; 413 break;
414 414
415 default: 415 default:
416 bfa_assert(0); 416 bfa_sm_fault(ns->port->fcs, event);
417 } 417 }
418} 418}
419 419
@@ -436,7 +436,7 @@ bfa_fcs_port_ns_sm_sending_rff_id(struct bfa_fcs_port_ns_s *ns,
436 break; 436 break;
437 437
438 default: 438 default:
439 bfa_assert(0); 439 bfa_sm_fault(ns->port->fcs, event);
440 } 440 }
441} 441}
442 442
@@ -494,7 +494,7 @@ bfa_fcs_port_ns_sm_rff_id(struct bfa_fcs_port_ns_s *ns,
494 break; 494 break;
495 495
496 default: 496 default:
497 bfa_assert(0); 497 bfa_sm_fault(ns->port->fcs, event);
498 } 498 }
499} 499}
500 500
@@ -517,7 +517,7 @@ bfa_fcs_port_ns_sm_rff_id_retry(struct bfa_fcs_port_ns_s *ns,
517 break; 517 break;
518 518
519 default: 519 default:
520 bfa_assert(0); 520 bfa_sm_fault(ns->port->fcs, event);
521 } 521 }
522} 522}
523static void 523static void
@@ -539,7 +539,7 @@ bfa_fcs_port_ns_sm_sending_gid_ft(struct bfa_fcs_port_ns_s *ns,
539 break; 539 break;
540 540
541 default: 541 default:
542 bfa_assert(0); 542 bfa_sm_fault(ns->port->fcs, event);
543 } 543 }
544} 544}
545 545
@@ -575,7 +575,7 @@ bfa_fcs_port_ns_sm_gid_ft(struct bfa_fcs_port_ns_s *ns,
575 break; 575 break;
576 576
577 default: 577 default:
578 bfa_assert(0); 578 bfa_sm_fault(ns->port->fcs, event);
579 } 579 }
580} 580}
581 581
@@ -598,7 +598,7 @@ bfa_fcs_port_ns_sm_gid_ft_retry(struct bfa_fcs_port_ns_s *ns,
598 break; 598 break;
599 599
600 default: 600 default:
601 bfa_assert(0); 601 bfa_sm_fault(ns->port->fcs, event);
602 } 602 }
603} 603}
604 604
@@ -626,7 +626,7 @@ bfa_fcs_port_ns_sm_online(struct bfa_fcs_port_ns_s *ns,
626 break; 626 break;
627 627
628 default: 628 default:
629 bfa_assert(0); 629 bfa_sm_fault(ns->port->fcs, event);
630 } 630 }
631} 631}
632 632
@@ -660,7 +660,7 @@ bfa_fcs_port_ns_send_plogi(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
660 bfa_os_hton3b(FC_NAME_SERVER), 660 bfa_os_hton3b(FC_NAME_SERVER),
661 bfa_fcs_port_get_fcid(port), 0, 661 bfa_fcs_port_get_fcid(port), 0,
662 port->port_cfg.pwwn, port->port_cfg.nwwn, 662 port->port_cfg.pwwn, port->port_cfg.nwwn,
663 bfa_pport_get_maxfrsize(port->fcs->bfa)); 663 bfa_fcport_get_maxfrsize(port->fcs->bfa));
664 664
665 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 665 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
666 FC_CLASS_3, len, &fchs, bfa_fcs_port_ns_plogi_response, 666 FC_CLASS_3, len, &fchs, bfa_fcs_port_ns_plogi_response,
@@ -932,11 +932,10 @@ bfa_fcs_port_ns_send_rff_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
932 } 932 }
933 ns->fcxp = fcxp; 933 ns->fcxp = fcxp;
934 934
935 if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port)) { 935 if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port))
936 fc4_ftrs = FC_GS_FCP_FC4_FEATURE_INITIATOR; 936 fc4_ftrs = FC_GS_FCP_FC4_FEATURE_INITIATOR;
937 } else if (BFA_FCS_VPORT_IS_TARGET_MODE(ns->port)) { 937 else if (BFA_FCS_VPORT_IS_TARGET_MODE(ns->port))
938 fc4_ftrs = FC_GS_FCP_FC4_FEATURE_TARGET; 938 fc4_ftrs = FC_GS_FCP_FC4_FEATURE_TARGET;
939 }
940 939
941 len = fc_rffid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), 940 len = fc_rffid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
942 bfa_fcs_port_get_fcid(port), 0, FC_TYPE_FCP, 941 bfa_fcs_port_get_fcid(port), 0, FC_TYPE_FCP,
diff --git a/drivers/scsi/bfa/plog.c b/drivers/scsi/bfa/plog.c
index 86af818d17bb..fcb8864d3276 100644
--- a/drivers/scsi/bfa/plog.c
+++ b/drivers/scsi/bfa/plog.c
@@ -180,5 +180,5 @@ bfa_plog_disable(struct bfa_plog_s *plog)
180bfa_boolean_t 180bfa_boolean_t
181bfa_plog_get_setting(struct bfa_plog_s *plog) 181bfa_plog_get_setting(struct bfa_plog_s *plog)
182{ 182{
183 return((bfa_boolean_t)plog->plog_enabled); 183 return (bfa_boolean_t)plog->plog_enabled;
184} 184}
diff --git a/drivers/scsi/bfa/rport.c b/drivers/scsi/bfa/rport.c
index 9cf58bb138dc..7b096f2e3836 100644
--- a/drivers/scsi/bfa/rport.c
+++ b/drivers/scsi/bfa/rport.c
@@ -19,6 +19,7 @@
19 * rport.c Remote port implementation. 19 * rport.c Remote port implementation.
20 */ 20 */
21 21
22#include <linux/slab.h>
22#include <bfa.h> 23#include <bfa.h>
23#include <bfa_svc.h> 24#include <bfa_svc.h>
24#include "fcbuild.h" 25#include "fcbuild.h"
@@ -224,7 +225,7 @@ bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport, enum rport_event event)
224 break; 225 break;
225 226
226 default: 227 default:
227 bfa_assert(0); 228 bfa_sm_fault(rport->fcs, event);
228 } 229 }
229} 230}
230 231
@@ -276,7 +277,7 @@ bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport,
276 break; 277 break;
277 278
278 default: 279 default:
279 bfa_assert(0); 280 bfa_sm_fault(rport->fcs, event);
280 } 281 }
281} 282}
282 283
@@ -332,7 +333,7 @@ bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
332 break; 333 break;
333 334
334 default: 335 default:
335 bfa_assert(0); 336 bfa_sm_fault(rport->fcs, event);
336 } 337 }
337} 338}
338 339
@@ -406,7 +407,7 @@ bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport,
406 break; 407 break;
407 408
408 default: 409 default:
409 bfa_assert(0); 410 bfa_sm_fault(rport->fcs, event);
410 } 411 }
411} 412}
412 413
@@ -481,7 +482,7 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event)
481 break; 482 break;
482 483
483 default: 484 default:
484 bfa_assert(0); 485 bfa_sm_fault(rport->fcs, event);
485 } 486 }
486} 487}
487 488
@@ -534,7 +535,7 @@ bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
534 break; 535 break;
535 536
536 default: 537 default:
537 bfa_assert(0); 538 bfa_sm_fault(rport->fcs, event);
538 } 539 }
539} 540}
540 541
@@ -589,7 +590,7 @@ bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event)
589 break; 590 break;
590 591
591 default: 592 default:
592 bfa_assert(0); 593 bfa_sm_fault(rport->fcs, event);
593 } 594 }
594} 595}
595 596
@@ -646,7 +647,7 @@ bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport,
646 break; 647 break;
647 648
648 default: 649 default:
649 bfa_assert(0); 650 bfa_sm_fault(rport->fcs, event);
650 } 651 }
651} 652}
652 653
@@ -704,7 +705,7 @@ bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event)
704 break; 705 break;
705 706
706 default: 707 default:
707 bfa_assert(0); 708 bfa_sm_fault(rport->fcs, event);
708 } 709 }
709} 710}
710 711
@@ -754,7 +755,7 @@ bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport,
754 break; 755 break;
755 756
756 default: 757 default:
757 bfa_assert(0); 758 bfa_sm_fault(rport->fcs, event);
758 } 759 }
759} 760}
760 761
@@ -816,7 +817,7 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event)
816 break; 817 break;
817 818
818 default: 819 default:
819 bfa_assert(0); 820 bfa_sm_fault(rport->fcs, event);
820 } 821 }
821} 822}
822 823
@@ -846,7 +847,7 @@ bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport,
846 break; 847 break;
847 848
848 default: 849 default:
849 bfa_assert(0); 850 bfa_sm_fault(rport->fcs, event);
850 } 851 }
851} 852}
852 853
@@ -869,7 +870,7 @@ bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport,
869 break; 870 break;
870 871
871 default: 872 default:
872 bfa_assert(0); 873 bfa_sm_fault(rport->fcs, event);
873 } 874 }
874} 875}
875 876
@@ -905,7 +906,7 @@ bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport,
905 break; 906 break;
906 907
907 default: 908 default:
908 bfa_assert(0); 909 bfa_sm_fault(rport->fcs, event);
909 } 910 }
910} 911}
911 912
@@ -925,10 +926,17 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
925 case RPSM_EVENT_HCB_OFFLINE: 926 case RPSM_EVENT_HCB_OFFLINE:
926 case RPSM_EVENT_ADDRESS_CHANGE: 927 case RPSM_EVENT_ADDRESS_CHANGE:
927 if (bfa_fcs_port_is_online(rport->port)) { 928 if (bfa_fcs_port_is_online(rport->port)) {
928 bfa_sm_set_state(rport, 929 if (bfa_fcs_fabric_is_switched(rport->port->fabric)) {
929 bfa_fcs_rport_sm_nsdisc_sending); 930 bfa_sm_set_state(rport,
930 rport->ns_retries = 0; 931 bfa_fcs_rport_sm_nsdisc_sending);
931 bfa_fcs_rport_send_gidpn(rport, NULL); 932 rport->ns_retries = 0;
933 bfa_fcs_rport_send_gidpn(rport, NULL);
934 } else {
935 bfa_sm_set_state(rport,
936 bfa_fcs_rport_sm_plogi_sending);
937 rport->plogi_retries = 0;
938 bfa_fcs_rport_send_plogi(rport, NULL);
939 }
932 } else { 940 } else {
933 rport->pid = 0; 941 rport->pid = 0;
934 bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); 942 bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
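The two-column rendering makes the expanded HCB-offline branch above hard to follow, so here is a condensed sketch of the new logic: on a switched fabric the remote port is rediscovered through the name server (GID_PN), while on a direct-attach or loop topology the driver retries PLOGI at the known PID. Helper names are taken from the hunk itself; locking and trace calls are elided.

	/* Sketch of the re-entry path after RPSM_EVENT_HCB_OFFLINE. */
	if (bfa_fcs_port_is_online(rport->port)) {
		if (bfa_fcs_fabric_is_switched(rport->port->fabric)) {
			bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
			rport->ns_retries = 0;		/* re-query the name server */
			bfa_fcs_rport_send_gidpn(rport, NULL);
		} else {
			bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending);
			rport->plogi_retries = 0;	/* no switch: PLOGI directly */
			bfa_fcs_rport_send_plogi(rport, NULL);
		}
	} else {
		rport->pid = 0;			/* local port down: go offline */
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
	}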
@@ -951,7 +959,7 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
951 break; 959 break;
952 960
953 default: 961 default:
954 bfa_assert(0); 962 bfa_sm_fault(rport->fcs, event);
955 } 963 }
956} 964}
957 965
@@ -1011,7 +1019,7 @@ bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport,
1011 break; 1019 break;
1012 1020
1013 default: 1021 default:
1014 bfa_assert(0); 1022 bfa_sm_fault(rport->fcs, event);
1015 } 1023 }
1016} 1024}
1017 1025
@@ -1038,7 +1046,7 @@ bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport,
1038 break; 1046 break;
1039 1047
1040 default: 1048 default:
1041 bfa_assert(0); 1049 bfa_sm_fault(rport->fcs, event);
1042 } 1050 }
1043} 1051}
1044 1052
@@ -1073,7 +1081,7 @@ bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport,
1073 break; 1081 break;
1074 1082
1075 default: 1083 default:
1076 bfa_assert(0); 1084 bfa_sm_fault(rport->fcs, event);
1077 } 1085 }
1078} 1086}
1079 1087
@@ -1132,7 +1140,7 @@ bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event)
1132 break; 1140 break;
1133 1141
1134 default: 1142 default:
1135 bfa_assert(0); 1143 bfa_sm_fault(rport->fcs, event);
1136 } 1144 }
1137} 1145}
1138 1146
@@ -1188,7 +1196,7 @@ bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport,
1188 break; 1196 break;
1189 1197
1190 default: 1198 default:
1191 bfa_assert(0); 1199 bfa_sm_fault(rport->fcs, event);
1192 } 1200 }
1193} 1201}
1194 1202
@@ -1249,7 +1257,7 @@ bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport,
1249 break; 1257 break;
1250 1258
1251 default: 1259 default:
1252 bfa_assert(0); 1260 bfa_sm_fault(rport->fcs, event);
1253 } 1261 }
1254} 1262}
1255 1263
@@ -1334,7 +1342,7 @@ bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
1334 break; 1342 break;
1335 1343
1336 default: 1344 default:
1337 bfa_assert(0); 1345 bfa_sm_fault(rport->fcs, event);
1338 } 1346 }
1339} 1347}
1340 1348
@@ -1366,7 +1374,7 @@ bfa_fcs_rport_send_plogi(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1366 len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid, 1374 len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid,
1367 bfa_fcs_port_get_fcid(port), 0, 1375 bfa_fcs_port_get_fcid(port), 0,
1368 port->port_cfg.pwwn, port->port_cfg.nwwn, 1376 port->port_cfg.pwwn, port->port_cfg.nwwn,
1369 bfa_pport_get_maxfrsize(port->fcs->bfa)); 1377 bfa_fcport_get_maxfrsize(port->fcs->bfa));
1370 1378
1371 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 1379 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
1372 FC_CLASS_3, len, &fchs, bfa_fcs_rport_plogi_response, 1380 FC_CLASS_3, len, &fchs, bfa_fcs_rport_plogi_response,
@@ -1478,7 +1486,7 @@ bfa_fcs_rport_send_plogiacc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1478 len = fc_plogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid, 1486 len = fc_plogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid,
1479 bfa_fcs_port_get_fcid(port), rport->reply_oxid, 1487 bfa_fcs_port_get_fcid(port), rport->reply_oxid,
1480 port->port_cfg.pwwn, port->port_cfg.nwwn, 1488 port->port_cfg.pwwn, port->port_cfg.nwwn,
1481 bfa_pport_get_maxfrsize(port->fcs->bfa)); 1489 bfa_fcport_get_maxfrsize(port->fcs->bfa));
1482 1490
1483 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 1491 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
1484 FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); 1492 FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
@@ -1813,7 +1821,7 @@ bfa_fcs_rport_process_rpsc(struct bfa_fcs_rport_s *rport,
1813 /* 1821 /*
1814 * get current speed from pport attributes from BFA 1822 * get current speed from pport attributes from BFA

1815 */ 1823 */
1816 bfa_pport_get_attr(port->fcs->bfa, &pport_attr); 1824 bfa_fcport_get_attr(port->fcs->bfa, &pport_attr);
1817 1825
1818 speeds.port_op_speed = fc_bfa_speed_to_rpsc_operspeed(pport_attr.speed); 1826 speeds.port_op_speed = fc_bfa_speed_to_rpsc_operspeed(pport_attr.speed);
1819 1827
@@ -2032,13 +2040,10 @@ bfa_fcs_rport_aen_post(struct bfa_fcs_rport_s *rport,
2032 2040
2033 switch (event) { 2041 switch (event) {
2034 case BFA_RPORT_AEN_ONLINE: 2042 case BFA_RPORT_AEN_ONLINE:
2035 bfa_log(logmod, BFA_AEN_RPORT_ONLINE, rpwwn_ptr, lpwwn_ptr);
2036 break;
2037 case BFA_RPORT_AEN_OFFLINE: 2043 case BFA_RPORT_AEN_OFFLINE:
2038 bfa_log(logmod, BFA_AEN_RPORT_OFFLINE, rpwwn_ptr, lpwwn_ptr);
2039 break;
2040 case BFA_RPORT_AEN_DISCONNECT: 2044 case BFA_RPORT_AEN_DISCONNECT:
2041 bfa_log(logmod, BFA_AEN_RPORT_DISCONNECT, rpwwn_ptr, lpwwn_ptr); 2045 bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_RPORT, event),
2046 rpwwn_ptr, lpwwn_ptr);
2042 break; 2047 break;
2043 case BFA_RPORT_AEN_QOS_PRIO: 2048 case BFA_RPORT_AEN_QOS_PRIO:
2044 aen_data.rport.priv.qos = data->priv.qos; 2049 aen_data.rport.priv.qos = data->priv.qos;
@@ -2164,7 +2169,7 @@ bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi)
2164 bfa_trc(port->fcs, port->fabric->bb_credit); 2169 bfa_trc(port->fcs, port->fabric->bb_credit);
2165 2170
2166 port->fabric->bb_credit = bfa_os_ntohs(plogi->csp.bbcred); 2171 port->fabric->bb_credit = bfa_os_ntohs(plogi->csp.bbcred);
2167 bfa_pport_set_tx_bbcredit(port->fcs->bfa, 2172 bfa_fcport_set_tx_bbcredit(port->fcs->bfa,
2168 port->fabric->bb_credit); 2173 port->fabric->bb_credit);
2169 } 2174 }
2170 2175
@@ -2575,23 +2580,6 @@ bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
2575} 2580}
2576 2581
2577/** 2582/**
2578 * Module initialization
2579 */
2580void
2581bfa_fcs_rport_modinit(struct bfa_fcs_s *fcs)
2582{
2583}
2584
2585/**
2586 * Module cleanup
2587 */
2588void
2589bfa_fcs_rport_modexit(struct bfa_fcs_s *fcs)
2590{
2591 bfa_fcs_modexit_comp(fcs);
2592}
2593
2594/**
2595 * Return state of rport. 2583 * Return state of rport.
2596 */ 2584 */
2597int 2585int
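The bulk of this file's changes are mechanical: every default case that used to hit bfa_assert(0) now calls bfa_sm_fault(rport->fcs, event), recording the unexpected state-machine event against the owning FCS instance instead of asserting. A minimal sketch of the difference, with hypothetical macro bodies (the real definitions live in the bfa headers and are not shown in this diff):

	/* Illustrative macro bodies only -- not the driver's definitions. */
	#define bfa_assert(cond) \
		do { if (!(cond)) BUG(); } while (0)	/* old: halt on bad event */
	#define bfa_sm_fault(fcs, event) \
		bfa_trc((fcs), (event))			/* new: trace it, keep running */

A fault in one remote port's state machine thus degrades to a trace entry rather than bringing down the whole initiator.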
diff --git a/drivers/scsi/bfa/rport_api.c b/drivers/scsi/bfa/rport_api.c
index 3dae1774181e..a441f41d2a64 100644
--- a/drivers/scsi/bfa/rport_api.c
+++ b/drivers/scsi/bfa/rport_api.c
@@ -102,7 +102,7 @@ bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
102 rport_attr->qos_attr = qos_attr; 102 rport_attr->qos_attr = qos_attr;
103 103
104 rport_attr->trl_enforced = BFA_FALSE; 104 rport_attr->trl_enforced = BFA_FALSE;
105 if (bfa_pport_is_ratelim(port->fcs->bfa)) { 105 if (bfa_fcport_is_ratelim(port->fcs->bfa)) {
106 if ((rport->rpf.rpsc_speed == BFA_PPORT_SPEED_UNKNOWN) || 106 if ((rport->rpf.rpsc_speed == BFA_PPORT_SPEED_UNKNOWN) ||
107 (rport->rpf.rpsc_speed < 107 (rport->rpf.rpsc_speed <
108 bfa_fcs_port_get_rport_max_speed(port))) 108 bfa_fcs_port_get_rport_max_speed(port)))
diff --git a/drivers/scsi/bfa/rport_ftrs.c b/drivers/scsi/bfa/rport_ftrs.c
index 8a1f59d596c1..ae7bba67ae2a 100644
--- a/drivers/scsi/bfa/rport_ftrs.c
+++ b/drivers/scsi/bfa/rport_ftrs.c
@@ -79,7 +79,7 @@ bfa_fcs_rpf_sm_uninit(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
79 bfa_trc(rport->fcs, event); 79 bfa_trc(rport->fcs, event);
80 80
81 switch (event) { 81 switch (event) {
82 case RPFSM_EVENT_RPORT_ONLINE : 82 case RPFSM_EVENT_RPORT_ONLINE:
83 if (!BFA_FCS_PID_IS_WKA(rport->pid)) { 83 if (!BFA_FCS_PID_IS_WKA(rport->pid)) {
84 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending); 84 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending);
85 rpf->rpsc_retries = 0; 85 rpf->rpsc_retries = 0;
@@ -87,11 +87,11 @@ bfa_fcs_rpf_sm_uninit(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
87 break; 87 break;
88 }; 88 };
89 89
90 case RPFSM_EVENT_RPORT_OFFLINE : 90 case RPFSM_EVENT_RPORT_OFFLINE:
91 break; 91 break;
92 92
93 default: 93 default:
94 bfa_assert(0); 94 bfa_sm_fault(rport->fcs, event);
95 } 95 }
96} 96}
97 97
@@ -107,14 +107,14 @@ bfa_fcs_rpf_sm_rpsc_sending(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
107 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc); 107 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc);
108 break; 108 break;
109 109
110 case RPFSM_EVENT_RPORT_OFFLINE : 110 case RPFSM_EVENT_RPORT_OFFLINE:
111 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline); 111 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline);
112 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rpf->fcxp_wqe); 112 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rpf->fcxp_wqe);
113 rpf->rpsc_retries = 0; 113 rpf->rpsc_retries = 0;
114 break; 114 break;
115 115
116 default: 116 default:
117 bfa_assert(0); 117 bfa_sm_fault(rport->fcs, event);
118 } 118 }
119} 119}
120 120
@@ -130,11 +130,10 @@ bfa_fcs_rpf_sm_rpsc(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
130 case RPFSM_EVENT_RPSC_COMP: 130 case RPFSM_EVENT_RPSC_COMP:
131 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_online); 131 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_online);
132 /* Update speed info in f/w via BFA */ 132 /* Update speed info in f/w via BFA */
133 if (rpf->rpsc_speed != BFA_PPORT_SPEED_UNKNOWN) { 133 if (rpf->rpsc_speed != BFA_PPORT_SPEED_UNKNOWN)
134 bfa_rport_speed(rport->bfa_rport, rpf->rpsc_speed); 134 bfa_rport_speed(rport->bfa_rport, rpf->rpsc_speed);
135 } else if (rpf->assigned_speed != BFA_PPORT_SPEED_UNKNOWN) { 135 else if (rpf->assigned_speed != BFA_PPORT_SPEED_UNKNOWN)
136 bfa_rport_speed(rport->bfa_rport, rpf->assigned_speed); 136 bfa_rport_speed(rport->bfa_rport, rpf->assigned_speed);
137 }
138 break; 137 break;
139 138
140 case RPFSM_EVENT_RPSC_FAIL: 139 case RPFSM_EVENT_RPSC_FAIL:
@@ -154,14 +153,14 @@ bfa_fcs_rpf_sm_rpsc(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
154 } 153 }
155 break; 154 break;
156 155
157 case RPFSM_EVENT_RPORT_OFFLINE : 156 case RPFSM_EVENT_RPORT_OFFLINE:
158 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline); 157 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline);
159 bfa_fcxp_discard(rpf->fcxp); 158 bfa_fcxp_discard(rpf->fcxp);
160 rpf->rpsc_retries = 0; 159 rpf->rpsc_retries = 0;
161 break; 160 break;
162 161
163 default: 162 default:
164 bfa_assert(0); 163 bfa_sm_fault(rport->fcs, event);
165 } 164 }
166} 165}
167 166
@@ -174,20 +173,20 @@ bfa_fcs_rpf_sm_rpsc_retry(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
174 bfa_trc(rport->fcs, event); 173 bfa_trc(rport->fcs, event);
175 174
176 switch (event) { 175 switch (event) {
177 case RPFSM_EVENT_TIMEOUT : 176 case RPFSM_EVENT_TIMEOUT:
178 /* re-send the RPSC */ 177 /* re-send the RPSC */
179 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending); 178 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending);
180 bfa_fcs_rpf_send_rpsc2(rpf, NULL); 179 bfa_fcs_rpf_send_rpsc2(rpf, NULL);
181 break; 180 break;
182 181
183 case RPFSM_EVENT_RPORT_OFFLINE : 182 case RPFSM_EVENT_RPORT_OFFLINE:
184 bfa_timer_stop(&rpf->timer); 183 bfa_timer_stop(&rpf->timer);
185 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline); 184 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline);
186 rpf->rpsc_retries = 0; 185 rpf->rpsc_retries = 0;
187 break; 186 break;
188 187
189 default: 188 default:
190 bfa_assert(0); 189 bfa_sm_fault(rport->fcs, event);
191 } 190 }
192} 191}
193 192
@@ -201,13 +200,13 @@ bfa_fcs_rpf_sm_online(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
201 bfa_trc(rport->fcs, event); 200 bfa_trc(rport->fcs, event);
202 201
203 switch (event) { 202 switch (event) {
204 case RPFSM_EVENT_RPORT_OFFLINE : 203 case RPFSM_EVENT_RPORT_OFFLINE:
205 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline); 204 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline);
206 rpf->rpsc_retries = 0; 205 rpf->rpsc_retries = 0;
207 break; 206 break;
208 207
209 default: 208 default:
210 bfa_assert(0); 209 bfa_sm_fault(rport->fcs, event);
211 } 210 }
212} 211}
213 212
@@ -221,16 +220,16 @@ bfa_fcs_rpf_sm_offline(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
221 bfa_trc(rport->fcs, event); 220 bfa_trc(rport->fcs, event);
222 221
223 switch (event) { 222 switch (event) {
224 case RPFSM_EVENT_RPORT_ONLINE : 223 case RPFSM_EVENT_RPORT_ONLINE:
225 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending); 224 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending);
226 bfa_fcs_rpf_send_rpsc2(rpf, NULL); 225 bfa_fcs_rpf_send_rpsc2(rpf, NULL);
227 break; 226 break;
228 227
229 case RPFSM_EVENT_RPORT_OFFLINE : 228 case RPFSM_EVENT_RPORT_OFFLINE:
230 break; 229 break;
231 230
232 default: 231 default:
233 bfa_assert(0); 232 bfa_sm_fault(rport->fcs, event);
234 } 233 }
235} 234}
236/** 235/**
@@ -366,10 +365,9 @@ bfa_fcs_rpf_rpsc2_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
366 bfa_trc(rport->fcs, ls_rjt->reason_code); 365 bfa_trc(rport->fcs, ls_rjt->reason_code);
367 bfa_trc(rport->fcs, ls_rjt->reason_code_expl); 366 bfa_trc(rport->fcs, ls_rjt->reason_code_expl);
368 rport->stats.rpsc_rejects++; 367 rport->stats.rpsc_rejects++;
369 if (ls_rjt->reason_code == FC_LS_RJT_RSN_CMD_NOT_SUPP) { 368 if (ls_rjt->reason_code == FC_LS_RJT_RSN_CMD_NOT_SUPP)
370 bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_FAIL); 369 bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_FAIL);
371 } else { 370 else
372 bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_ERROR); 371 bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_ERROR);
373 }
374 } 372 }
375} 373}
diff --git a/drivers/scsi/bfa/scn.c b/drivers/scsi/bfa/scn.c
index bd4771ff62c8..8fe09ba88a91 100644
--- a/drivers/scsi/bfa/scn.c
+++ b/drivers/scsi/bfa/scn.c
@@ -90,7 +90,7 @@ bfa_fcs_port_scn_sm_offline(struct bfa_fcs_port_scn_s *scn,
90 break; 90 break;
91 91
92 default: 92 default:
93 bfa_assert(0); 93 bfa_sm_fault(scn->port->fcs, event);
94 } 94 }
95} 95}
96 96
@@ -109,7 +109,7 @@ bfa_fcs_port_scn_sm_sending_scr(struct bfa_fcs_port_scn_s *scn,
109 break; 109 break;
110 110
111 default: 111 default:
112 bfa_assert(0); 112 bfa_sm_fault(scn->port->fcs, event);
113 } 113 }
114} 114}
115 115
@@ -137,7 +137,7 @@ bfa_fcs_port_scn_sm_scr(struct bfa_fcs_port_scn_s *scn,
137 break; 137 break;
138 138
139 default: 139 default:
140 bfa_assert(0); 140 bfa_sm_fault(scn->port->fcs, event);
141 } 141 }
142} 142}
143 143
@@ -157,7 +157,7 @@ bfa_fcs_port_scn_sm_scr_retry(struct bfa_fcs_port_scn_s *scn,
157 break; 157 break;
158 158
159 default: 159 default:
160 bfa_assert(0); 160 bfa_sm_fault(scn->port->fcs, event);
161 } 161 }
162} 162}
163 163
@@ -171,7 +171,7 @@ bfa_fcs_port_scn_sm_online(struct bfa_fcs_port_scn_s *scn,
171 break; 171 break;
172 172
173 default: 173 default:
174 bfa_assert(0); 174 bfa_sm_fault(scn->port->fcs, event);
175 } 175 }
176} 176}
177 177
diff --git a/drivers/scsi/bfa/vfapi.c b/drivers/scsi/bfa/vfapi.c
index 31d81fe2fc48..391a4790bebd 100644
--- a/drivers/scsi/bfa/vfapi.c
+++ b/drivers/scsi/bfa/vfapi.c
@@ -189,7 +189,7 @@ bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id)
189{ 189{
190 bfa_trc(fcs, vf_id); 190 bfa_trc(fcs, vf_id);
191 if (vf_id == FC_VF_ID_NULL) 191 if (vf_id == FC_VF_ID_NULL)
192 return (&fcs->fabric); 192 return &fcs->fabric;
193 193
194 /** 194 /**
195 * @todo vf support 195 * @todo vf support
diff --git a/drivers/scsi/bfa/vport.c b/drivers/scsi/bfa/vport.c
index c10af06c5714..27cd619a227a 100644
--- a/drivers/scsi/bfa/vport.c
+++ b/drivers/scsi/bfa/vport.c
@@ -31,13 +31,13 @@
31 31
32BFA_TRC_FILE(FCS, VPORT); 32BFA_TRC_FILE(FCS, VPORT);
33 33
34#define __vport_fcs(__vp) (__vp)->lport.fcs 34#define __vport_fcs(__vp) ((__vp)->lport.fcs)
35#define __vport_pwwn(__vp) (__vp)->lport.port_cfg.pwwn 35#define __vport_pwwn(__vp) ((__vp)->lport.port_cfg.pwwn)
36#define __vport_nwwn(__vp) (__vp)->lport.port_cfg.nwwn 36#define __vport_nwwn(__vp) ((__vp)->lport.port_cfg.nwwn)
37#define __vport_bfa(__vp) (__vp)->lport.fcs->bfa 37#define __vport_bfa(__vp) ((__vp)->lport.fcs->bfa)
38#define __vport_fcid(__vp) (__vp)->lport.pid 38#define __vport_fcid(__vp) ((__vp)->lport.pid)
39#define __vport_fabric(__vp) (__vp)->lport.fabric 39#define __vport_fabric(__vp) ((__vp)->lport.fabric)
40#define __vport_vfid(__vp) (__vp)->lport.fabric->vf_id 40#define __vport_vfid(__vp) ((__vp)->lport.fabric->vf_id)
41 41
42#define BFA_FCS_VPORT_MAX_RETRIES 5 42#define BFA_FCS_VPORT_MAX_RETRIES 5
43/* 43/*
@@ -122,7 +122,7 @@ bfa_fcs_vport_sm_uninit(struct bfa_fcs_vport_s *vport,
122 break; 122 break;
123 123
124 default: 124 default:
125 bfa_assert(0); 125 bfa_sm_fault(__vport_fcs(vport), event);
126 } 126 }
127} 127}
128 128
@@ -165,7 +165,7 @@ bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport,
165 break; 165 break;
166 166
167 default: 167 default:
168 bfa_assert(0); 168 bfa_sm_fault(__vport_fcs(vport), event);
169 } 169 }
170} 170}
171 171
@@ -202,7 +202,7 @@ bfa_fcs_vport_sm_offline(struct bfa_fcs_vport_s *vport,
202 break; 202 break;
203 203
204 default: 204 default:
205 bfa_assert(0); 205 bfa_sm_fault(__vport_fcs(vport), event);
206 } 206 }
207} 207}
208 208
@@ -249,7 +249,7 @@ bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport,
249 break; 249 break;
250 250
251 default: 251 default:
252 bfa_assert(0); 252 bfa_sm_fault(__vport_fcs(vport), event);
253 } 253 }
254} 254}
255 255
@@ -283,7 +283,7 @@ bfa_fcs_vport_sm_fdisc_retry(struct bfa_fcs_vport_s *vport,
283 break; 283 break;
284 284
285 default: 285 default:
286 bfa_assert(0); 286 bfa_sm_fault(__vport_fcs(vport), event);
287 } 287 }
288} 288}
289 289
@@ -310,7 +310,7 @@ bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport,
310 break; 310 break;
311 311
312 default: 312 default:
313 bfa_assert(0); 313 bfa_sm_fault(__vport_fcs(vport), event);
314 } 314 }
315} 315}
316 316
@@ -339,7 +339,7 @@ bfa_fcs_vport_sm_deleting(struct bfa_fcs_vport_s *vport,
339 break; 339 break;
340 340
341 default: 341 default:
342 bfa_assert(0); 342 bfa_sm_fault(__vport_fcs(vport), event);
343 } 343 }
344} 344}
345 345
@@ -387,7 +387,7 @@ bfa_fcs_vport_sm_cleanup(struct bfa_fcs_vport_s *vport,
387 break; 387 break;
388 388
389 default: 389 default:
390 bfa_assert(0); 390 bfa_sm_fault(__vport_fcs(vport), event);
391 } 391 }
392} 392}
393 393
@@ -419,7 +419,7 @@ bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport,
419 break; 419 break;
420 420
421 default: 421 default:
422 bfa_assert(0); 422 bfa_sm_fault(__vport_fcs(vport), event);
423 } 423 }
424} 424}
425 425
@@ -447,22 +447,8 @@ bfa_fcs_vport_aen_post(bfa_fcs_lport_t *port, enum bfa_lport_aen_event event)
447 447
448 bfa_assert(role <= BFA_PORT_ROLE_FCP_MAX); 448 bfa_assert(role <= BFA_PORT_ROLE_FCP_MAX);
449 449
450 switch (event) { 450 bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, event), lpwwn_ptr,
451 case BFA_LPORT_AEN_NPIV_DUP_WWN: 451 role_str[role/2]);
452 bfa_log(logmod, BFA_AEN_LPORT_NPIV_DUP_WWN, lpwwn_ptr,
453 role_str[role / 2]);
454 break;
455 case BFA_LPORT_AEN_NPIV_FABRIC_MAX:
456 bfa_log(logmod, BFA_AEN_LPORT_NPIV_FABRIC_MAX, lpwwn_ptr,
457 role_str[role / 2]);
458 break;
459 case BFA_LPORT_AEN_NPIV_UNKNOWN:
460 bfa_log(logmod, BFA_AEN_LPORT_NPIV_UNKNOWN, lpwwn_ptr,
461 role_str[role / 2]);
462 break;
463 default:
464 break;
465 }
466 452
467 aen_data.lport.vf_id = port->fabric->vf_id; 453 aen_data.lport.vf_id = port->fabric->vf_id;
468 aen_data.lport.roles = role; 454 aen_data.lport.roles = role;
@@ -478,7 +464,7 @@ static void
478bfa_fcs_vport_do_fdisc(struct bfa_fcs_vport_s *vport) 464bfa_fcs_vport_do_fdisc(struct bfa_fcs_vport_s *vport)
479{ 465{
480 bfa_lps_fdisc(vport->lps, vport, 466 bfa_lps_fdisc(vport->lps, vport,
481 bfa_pport_get_maxfrsize(__vport_bfa(vport)), 467 bfa_fcport_get_maxfrsize(__vport_bfa(vport)),
482 __vport_pwwn(vport), __vport_nwwn(vport)); 468 __vport_pwwn(vport), __vport_nwwn(vport));
483 vport->vport_stats.fdisc_sent++; 469 vport->vport_stats.fdisc_sent++;
484} 470}
@@ -617,38 +603,6 @@ bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport)
617} 603}
618 604
619/** 605/**
620 * Module initialization
621 */
622void
623bfa_fcs_vport_modinit(struct bfa_fcs_s *fcs)
624{
625}
626
627/**
628 * Module cleanup
629 */
630void
631bfa_fcs_vport_modexit(struct bfa_fcs_s *fcs)
632{
633 bfa_fcs_modexit_comp(fcs);
634}
635
636u32
637bfa_fcs_vport_get_max(struct bfa_fcs_s *fcs)
638{
639 struct bfa_ioc_attr_s ioc_attr;
640
641 bfa_get_attr(fcs->bfa, &ioc_attr);
642
643 if (ioc_attr.pci_attr.device_id == BFA_PCI_DEVICE_ID_CT)
644 return (BFA_FCS_MAX_VPORTS_SUPP_CT);
645 else
646 return (BFA_FCS_MAX_VPORTS_SUPP_CB);
647}
648
649
650
651/**
652 * fcs_vport_api Virtual port API 606 * fcs_vport_api Virtual port API
653 */ 607 */
654 608
@@ -675,7 +629,7 @@ bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs,
675 struct bfad_vport_s *vport_drv) 629 struct bfad_vport_s *vport_drv)
676{ 630{
677 if (vport_cfg->pwwn == 0) 631 if (vport_cfg->pwwn == 0)
678 return (BFA_STATUS_INVALID_WWN); 632 return BFA_STATUS_INVALID_WWN;
679 633
680 if (bfa_fcs_port_get_pwwn(&fcs->fabric.bport) == vport_cfg->pwwn) 634 if (bfa_fcs_port_get_pwwn(&fcs->fabric.bport) == vport_cfg->pwwn)
681 return BFA_STATUS_VPORT_WWN_BP; 635 return BFA_STATUS_VPORT_WWN_BP;
@@ -684,7 +638,7 @@ bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs,
684 return BFA_STATUS_VPORT_EXISTS; 638 return BFA_STATUS_VPORT_EXISTS;
685 639
686 if (bfa_fcs_fabric_vport_count(&fcs->fabric) == 640 if (bfa_fcs_fabric_vport_count(&fcs->fabric) ==
687 bfa_fcs_vport_get_max(fcs)) 641 bfa_lps_get_max_vport(fcs->bfa))
688 return BFA_STATUS_VPORT_MAX; 642 return BFA_STATUS_VPORT_MAX;
689 643
690 vport->lps = bfa_lps_alloc(fcs->bfa); 644 vport->lps = bfa_lps_alloc(fcs->bfa);
@@ -694,7 +648,8 @@ bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs,
694 vport->vport_drv = vport_drv; 648 vport->vport_drv = vport_drv;
695 bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit); 649 bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit);
696 650
697 bfa_fcs_lport_init(&vport->lport, fcs, vf_id, vport_cfg, vport); 651 bfa_fcs_lport_attach(&vport->lport, fcs, vf_id, vport);
652 bfa_fcs_lport_init(&vport->lport, vport_cfg);
698 653
699 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_CREATE); 654 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_CREATE);
700 655
@@ -888,4 +843,15 @@ bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg)
888 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK); 843 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK);
889} 844}
890 845
846/**
847 * Received clear virtual link
848 */
849void
850bfa_cb_lps_cvl_event(void *bfad, void *uarg)
851{
852 struct bfa_fcs_vport_s *vport = uarg;
891 853
854 /* Send an Offline followed by an ONLINE */
855 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_OFFLINE);
856 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE);
857}
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index 5edde1a8c04d..6b624e767d3b 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -232,7 +232,6 @@ struct bnx2i_conn {
232 struct iscsi_cls_conn *cls_conn; 232 struct iscsi_cls_conn *cls_conn;
233 struct bnx2i_hba *hba; 233 struct bnx2i_hba *hba;
234 struct completion cmd_cleanup_cmpl; 234 struct completion cmd_cleanup_cmpl;
235 int is_bound;
236 235
237 u32 iscsi_conn_cid; 236 u32 iscsi_conn_cid;
238#define BNX2I_CID_RESERVED 0x5AFF 237#define BNX2I_CID_RESERVED 0x5AFF
@@ -363,6 +362,7 @@ struct bnx2i_hba {
363 u32 num_ccell; 362 u32 num_ccell;
364 363
365 int ofld_conns_active; 364 int ofld_conns_active;
365 wait_queue_head_t eh_wait;
366 366
367 int max_active_conns; 367 int max_active_conns;
368 struct iscsi_cid_queue cid_que; 368 struct iscsi_cid_queue cid_que;
@@ -382,6 +382,7 @@ struct bnx2i_hba {
382 spinlock_t lock; /* protects hba structure access */ 382 spinlock_t lock; /* protects hba structure access */
383 struct mutex net_dev_lock;/* sync net device access */ 383 struct mutex net_dev_lock;/* sync net device access */
384 384
385 int hba_shutdown_tmo;
385 /* 386 /*
386 * PCI related info. 387 * PCI related info.
387 */ 388 */
@@ -685,6 +686,7 @@ extern unsigned int error_mask1, error_mask2;
685extern u64 iscsi_error_mask; 686extern u64 iscsi_error_mask;
686extern unsigned int en_tcp_dack; 687extern unsigned int en_tcp_dack;
687extern unsigned int event_coal_div; 688extern unsigned int event_coal_div;
689extern unsigned int event_coal_min;
688 690
689extern struct scsi_transport_template *bnx2i_scsi_xport_template; 691extern struct scsi_transport_template *bnx2i_scsi_xport_template;
690extern struct iscsi_transport bnx2i_iscsi_transport; 692extern struct iscsi_transport bnx2i_iscsi_transport;
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 5c8d7630c13e..18352ff82101 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -11,6 +11,7 @@
11 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) 11 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
12 */ 12 */
13 13
14#include <linux/gfp.h>
14#include <scsi/scsi_tcq.h> 15#include <scsi/scsi_tcq.h>
15#include <scsi/libiscsi.h> 16#include <scsi/libiscsi.h>
16#include "bnx2i.h" 17#include "bnx2i.h"
@@ -133,20 +134,38 @@ void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
133{ 134{
134 struct bnx2i_5771x_cq_db *cq_db; 135 struct bnx2i_5771x_cq_db *cq_db;
135 u16 cq_index; 136 u16 cq_index;
137 u16 next_index;
138 u32 num_active_cmds;
136 139
140
141 /* Coalesce CQ entries only on 10G devices */
137 if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) 142 if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
138 return; 143 return;
139 144
145 /* Do not update CQ DB multiple times before firmware writes
 146 * '0xFFFF' to CQDB->SQN field. Deviating from this may cause spurious
 147 * interrupts and other unwanted results.
148 */
149 cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt;
150 if (cq_db->sqn[0] && cq_db->sqn[0] != 0xFFFF)
151 return;
152
140 if (action == CNIC_ARM_CQE) { 153 if (action == CNIC_ARM_CQE) {
141 cq_index = ep->qp.cqe_exp_seq_sn + 154 num_active_cmds = ep->num_active_cmds;
142 ep->num_active_cmds / event_coal_div; 155 if (num_active_cmds <= event_coal_min)
143 cq_index %= (ep->qp.cqe_size * 2 + 1); 156 next_index = 1;
144 if (!cq_index) { 157 else
158 next_index = event_coal_min +
159 (num_active_cmds - event_coal_min) / event_coal_div;
160 if (!next_index)
161 next_index = 1;
162 cq_index = ep->qp.cqe_exp_seq_sn + next_index - 1;
163 if (cq_index > ep->qp.cqe_size * 2)
164 cq_index -= ep->qp.cqe_size * 2;
165 if (!cq_index)
145 cq_index = 1; 166 cq_index = 1;
146 cq_db = (struct bnx2i_5771x_cq_db *) 167
147 ep->qp.cq_pgtbl_virt; 168 cq_db->sqn[0] = cq_index;
148 cq_db->sqn[0] = cq_index;
149 }
150 } 169 }
151} 170}
152 171
@@ -366,6 +385,7 @@ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
366 struct bnx2i_cmd *bnx2i_cmd; 385 struct bnx2i_cmd *bnx2i_cmd;
367 struct bnx2i_tmf_request *tmfabort_wqe; 386 struct bnx2i_tmf_request *tmfabort_wqe;
368 u32 dword; 387 u32 dword;
388 u32 scsi_lun[2];
369 389
370 bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data; 390 bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data;
371 tmfabort_hdr = (struct iscsi_tm *)mtask->hdr; 391 tmfabort_hdr = (struct iscsi_tm *)mtask->hdr;
@@ -376,27 +396,35 @@ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
376 tmfabort_wqe->op_attr = 0; 396 tmfabort_wqe->op_attr = 0;
377 tmfabort_wqe->op_attr = 397 tmfabort_wqe->op_attr =
378 ISCSI_TMF_REQUEST_ALWAYS_ONE | ISCSI_TM_FUNC_ABORT_TASK; 398 ISCSI_TMF_REQUEST_ALWAYS_ONE | ISCSI_TM_FUNC_ABORT_TASK;
379 tmfabort_wqe->lun[0] = be32_to_cpu(tmfabort_hdr->lun[0]);
380 tmfabort_wqe->lun[1] = be32_to_cpu(tmfabort_hdr->lun[1]);
381 399
382 tmfabort_wqe->itt = (mtask->itt | (ISCSI_TASK_TYPE_MPATH << 14)); 400 tmfabort_wqe->itt = (mtask->itt | (ISCSI_TASK_TYPE_MPATH << 14));
383 tmfabort_wqe->reserved2 = 0; 401 tmfabort_wqe->reserved2 = 0;
384 tmfabort_wqe->cmd_sn = be32_to_cpu(tmfabort_hdr->cmdsn); 402 tmfabort_wqe->cmd_sn = be32_to_cpu(tmfabort_hdr->cmdsn);
385 403
386 ctask = iscsi_itt_to_task(conn, tmfabort_hdr->rtt); 404 ctask = iscsi_itt_to_task(conn, tmfabort_hdr->rtt);
387 if (!ctask || ctask->sc) 405 if (!ctask || !ctask->sc)
388 /* 406 /*
389 * the iscsi layer must have completed the cmd while this 407 * the iscsi layer must have completed the cmd while this
390 * was starting up. 408 * was starting up.
409 *
410 * Note: In the case of a SCSI cmd timeout, the task's sc
 411 * is still active; hence ctask->sc != 0.
 412 * In this case, the task must be aborted.
391 */ 413 */
392 return 0; 414 return 0;
415
393 ref_sc = ctask->sc; 416 ref_sc = ctask->sc;
394 417
418 /* Retrieve LUN directly from the ref_sc */
419 int_to_scsilun(ref_sc->device->lun, (struct scsi_lun *) scsi_lun);
420 tmfabort_wqe->lun[0] = be32_to_cpu(scsi_lun[0]);
421 tmfabort_wqe->lun[1] = be32_to_cpu(scsi_lun[1]);
422
395 if (ref_sc->sc_data_direction == DMA_TO_DEVICE) 423 if (ref_sc->sc_data_direction == DMA_TO_DEVICE)
396 dword = (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT); 424 dword = (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT);
397 else 425 else
398 dword = (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT); 426 dword = (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT);
399 tmfabort_wqe->ref_itt = (dword | tmfabort_hdr->rtt); 427 tmfabort_wqe->ref_itt = (dword | (tmfabort_hdr->rtt & ISCSI_ITT_MASK));
400 tmfabort_wqe->ref_cmd_sn = be32_to_cpu(tmfabort_hdr->refcmdsn); 428 tmfabort_wqe->ref_cmd_sn = be32_to_cpu(tmfabort_hdr->refcmdsn);
401 429
402 tmfabort_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma; 430 tmfabort_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma;
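Two details of the TMF fix above deserve a note: the LUN is now rebuilt from the referenced command's scsi_device rather than trusted from the TMF PDU header, and the reference ITT is masked with ISCSI_ITT_MASK before the task-type bits are OR'd in. A minimal usage sketch of the LUN encoding, assuming a valid struct scsi_cmnd *ref_sc as in the hunk:

	u32 scsi_lun[2];

	/* Pack the numeric LUN into the 8-byte SAM wire format. */
	int_to_scsilun(ref_sc->device->lun, (struct scsi_lun *)scsi_lun);

	/* The firmware consumes it as two byte-swapped 32-bit words. */
	tmfabort_wqe->lun[0] = be32_to_cpu(scsi_lun[0]);
	tmfabort_wqe->lun[1] = be32_to_cpu(scsi_lun[1]);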
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 0c4210d48ee8..5d9296c599f6 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -17,8 +17,8 @@ static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
17static u32 adapter_count; 17static u32 adapter_count;
18 18
19#define DRV_MODULE_NAME "bnx2i" 19#define DRV_MODULE_NAME "bnx2i"
20#define DRV_MODULE_VERSION "2.0.1e" 20#define DRV_MODULE_VERSION "2.1.0"
21#define DRV_MODULE_RELDATE "June 22, 2009" 21#define DRV_MODULE_RELDATE "Dec 06, 2009"
22 22
23static char version[] __devinitdata = 23static char version[] __devinitdata =
24 "Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \ 24 "Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
@@ -32,6 +32,10 @@ MODULE_VERSION(DRV_MODULE_VERSION);
32 32
33static DEFINE_MUTEX(bnx2i_dev_lock); 33static DEFINE_MUTEX(bnx2i_dev_lock);
34 34
35unsigned int event_coal_min = 24;
36module_param(event_coal_min, int, 0664);
37MODULE_PARM_DESC(event_coal_min, "Event Coalescing Minimum Commands");
38
35unsigned int event_coal_div = 1; 39unsigned int event_coal_div = 1;
36module_param(event_coal_div, int, 0664); 40module_param(event_coal_div, int, 0664);
37MODULE_PARM_DESC(event_coal_div, "Event Coalescing Divide Factor"); 41MODULE_PARM_DESC(event_coal_div, "Event Coalescing Divide Factor");
@@ -83,8 +87,12 @@ void bnx2i_identify_device(struct bnx2i_hba *hba)
83 set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type); 87 set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type);
84 hba->mail_queue_access = BNX2I_MQ_BIN_MODE; 88 hba->mail_queue_access = BNX2I_MQ_BIN_MODE;
85 } else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 || 89 } else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 ||
86 hba->pci_did == PCI_DEVICE_ID_NX2_57711) 90 hba->pci_did == PCI_DEVICE_ID_NX2_57711 ||
91 hba->pci_did == PCI_DEVICE_ID_NX2_57711E)
87 set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type); 92 set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type);
93 else
94 printk(KERN_ALERT "bnx2i: unknown device, 0x%x\n",
95 hba->pci_did);
88} 96}
89 97
90 98
@@ -169,11 +177,22 @@ void bnx2i_stop(void *handle)
169 struct bnx2i_hba *hba = handle; 177 struct bnx2i_hba *hba = handle;
170 178
171 /* check if cleanup happened in GOING_DOWN context */ 179 /* check if cleanup happened in GOING_DOWN context */
172 clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
173 if (!test_and_clear_bit(ADAPTER_STATE_GOING_DOWN, 180 if (!test_and_clear_bit(ADAPTER_STATE_GOING_DOWN,
174 &hba->adapter_state)) 181 &hba->adapter_state))
175 iscsi_host_for_each_session(hba->shost, 182 iscsi_host_for_each_session(hba->shost,
176 bnx2i_drop_session); 183 bnx2i_drop_session);
184
 185 /* Wait for all endpoints to be torn down; the chip will be reset once
 186 * control returns to the network driver, so all connection resources
 187 * must be cleaned up and released before this routine returns.
188 */
189 wait_event_interruptible_timeout(hba->eh_wait,
190 (hba->ofld_conns_active == 0),
191 hba->hba_shutdown_tmo);
192 /* This flag should be cleared last so that ep_disconnect() gracefully
193 * cleans up connection context
194 */
195 clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
177} 196}
178 197
179/** 198/**
@@ -363,7 +382,7 @@ static int __init bnx2i_mod_init(void)
363 382
364 printk(KERN_INFO "%s", version); 383 printk(KERN_INFO "%s", version);
365 384
366 if (!is_power_of_2(sq_size)) 385 if (sq_size && !is_power_of_2(sq_size))
367 sq_size = roundup_pow_of_two(sq_size); 386 sq_size = roundup_pow_of_two(sq_size);
368 387
369 mutex_init(&bnx2i_dev_lock); 388 mutex_init(&bnx2i_dev_lock);
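The added sq_size guard above keeps a zero module parameter, which the driver treats as "use the default queue size", from being rounded; roundup_pow_of_two() is only well-defined for nonzero input. Sketch of the behaviour:

	if (sq_size && !is_power_of_2(sq_size))
		sq_size = roundup_pow_of_two(sq_size);	/* e.g. 100 -> 128, 0 stays 0 */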
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index cafb888c2376..fa68ab34b998 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -12,6 +12,7 @@
12 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) 12 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
13 */ 13 */
14 14
15#include <linux/slab.h>
15#include <scsi/scsi_tcq.h> 16#include <scsi/scsi_tcq.h>
16#include <scsi/libiscsi.h> 17#include <scsi/libiscsi.h>
17#include "bnx2i.h" 18#include "bnx2i.h"
@@ -485,7 +486,6 @@ static int bnx2i_setup_cmd_pool(struct bnx2i_hba *hba,
485 struct iscsi_task *task = session->cmds[i]; 486 struct iscsi_task *task = session->cmds[i];
486 struct bnx2i_cmd *cmd = task->dd_data; 487 struct bnx2i_cmd *cmd = task->dd_data;
487 488
488 /* Anil */
489 task->hdr = &cmd->hdr; 489 task->hdr = &cmd->hdr;
490 task->hdr_max = sizeof(struct iscsi_hdr); 490 task->hdr_max = sizeof(struct iscsi_hdr);
491 491
@@ -765,7 +765,6 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
765 hba->pci_svid = hba->pcidev->subsystem_vendor; 765 hba->pci_svid = hba->pcidev->subsystem_vendor;
766 hba->pci_func = PCI_FUNC(hba->pcidev->devfn); 766 hba->pci_func = PCI_FUNC(hba->pcidev->devfn);
767 hba->pci_devno = PCI_SLOT(hba->pcidev->devfn); 767 hba->pci_devno = PCI_SLOT(hba->pcidev->devfn);
768 bnx2i_identify_device(hba);
769 768
770 bnx2i_identify_device(hba); 769 bnx2i_identify_device(hba);
771 bnx2i_setup_host_queue_size(hba, shost); 770 bnx2i_setup_host_queue_size(hba, shost);
@@ -821,6 +820,11 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
821 820
822 spin_lock_init(&hba->lock); 821 spin_lock_init(&hba->lock);
823 mutex_init(&hba->net_dev_lock); 822 mutex_init(&hba->net_dev_lock);
823 init_waitqueue_head(&hba->eh_wait);
824 if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
825 hba->hba_shutdown_tmo = 240 * HZ;
826 else /* 5706/5708/5709 */
827 hba->hba_shutdown_tmo = 30 * HZ;
824 828
825 if (iscsi_host_add(shost, &hba->pcidev->dev)) 829 if (iscsi_host_add(shost, &hba->pcidev->dev))
826 goto free_dump_mem; 830 goto free_dump_mem;
@@ -1161,9 +1165,6 @@ static int bnx2i_task_xmit(struct iscsi_task *task)
1161 struct bnx2i_cmd *cmd = task->dd_data; 1165 struct bnx2i_cmd *cmd = task->dd_data;
1162 struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr; 1166 struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr;
1163 1167
1164 if (!bnx2i_conn->is_bound)
1165 return -ENOTCONN;
1166
1167 /* 1168 /*
1168 * If there is no scsi_cmnd this must be a mgmt task 1169 * If there is no scsi_cmnd this must be a mgmt task
1169 */ 1170 */
@@ -1371,7 +1372,6 @@ static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
1371 bnx2i_conn->ep = bnx2i_ep; 1372 bnx2i_conn->ep = bnx2i_ep;
1372 bnx2i_conn->iscsi_conn_cid = bnx2i_ep->ep_iscsi_cid; 1373 bnx2i_conn->iscsi_conn_cid = bnx2i_ep->ep_iscsi_cid;
1373 bnx2i_conn->fw_cid = bnx2i_ep->ep_cid; 1374 bnx2i_conn->fw_cid = bnx2i_ep->ep_cid;
1374 bnx2i_conn->is_bound = 1;
1375 1375
1376 ret_code = bnx2i_bind_conn_to_iscsi_cid(hba, bnx2i_conn, 1376 ret_code = bnx2i_bind_conn_to_iscsi_cid(hba, bnx2i_conn,
1377 bnx2i_ep->ep_iscsi_cid); 1377 bnx2i_ep->ep_iscsi_cid);
@@ -1432,8 +1432,8 @@ static int bnx2i_conn_get_param(struct iscsi_cls_conn *cls_conn,
1432 break; 1432 break;
1433 case ISCSI_PARAM_CONN_ADDRESS: 1433 case ISCSI_PARAM_CONN_ADDRESS:
1434 if (bnx2i_conn->ep) 1434 if (bnx2i_conn->ep)
1435 len = sprintf(buf, NIPQUAD_FMT "\n", 1435 len = sprintf(buf, "%pI4\n",
1436 NIPQUAD(bnx2i_conn->ep->cm_sk->dst_ip)); 1436 &bnx2i_conn->ep->cm_sk->dst_ip);
1437 break; 1437 break;
1438 default: 1438 default:
1439 return iscsi_conn_get_param(cls_conn, param, buf); 1439 return iscsi_conn_get_param(cls_conn, param, buf);
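This hunk is one of several in the series that retire the NIPQUAD()/NIPQUAD_FMT pair in favour of the %pI4 printk extension, which formats a big-endian IPv4 address given a pointer to it. A minimal sketch with a made-up address:

	__be32 addr = htonl(0xc0a80001);	/* 192.168.0.1, example value only */

	/* Old: sprintf(buf, NIPQUAD_FMT "\n", NIPQUAD(addr)); */
	/* New: pass a pointer to the address, not its bytes.  */
	len = sprintf(buf, "%pI4\n", &addr);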
@@ -1663,8 +1663,8 @@ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
1663 */ 1663 */
1664 hba = bnx2i_check_route(dst_addr); 1664 hba = bnx2i_check_route(dst_addr);
1665 1665
1666 if (!hba) { 1666 if (!hba || test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state)) {
1667 rc = -ENOMEM; 1667 rc = -EINVAL;
1668 goto check_busy; 1668 goto check_busy;
1669 } 1669 }
1670 1670
@@ -1809,7 +1809,7 @@ static int bnx2i_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
1809 (bnx2i_ep->state == 1809 (bnx2i_ep->state ==
1810 EP_STATE_CONNECT_COMPL)), 1810 EP_STATE_CONNECT_COMPL)),
1811 msecs_to_jiffies(timeout_ms)); 1811 msecs_to_jiffies(timeout_ms));
1812 if (!rc || (bnx2i_ep->state == EP_STATE_OFLD_FAILED)) 1812 if (bnx2i_ep->state == EP_STATE_OFLD_FAILED)
1813 rc = -1; 1813 rc = -1;
1814 1814
1815 if (rc > 0) 1815 if (rc > 0)
@@ -1883,7 +1883,7 @@ static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep)
1883 1883
1884 bnx2i_ep = ep->dd_data; 1884 bnx2i_ep = ep->dd_data;
1885 1885
1886 /* driver should not attempt connection cleanup untill TCP_CONNECT 1886 /* driver should not attempt connection cleanup until TCP_CONNECT
1887 * completes either successfully or fails. Timeout is 9-secs, so 1887 * completes either successfully or fails. Timeout is 9-secs, so
1888 * wait for it to complete 1888 * wait for it to complete
1889 */ 1889 */
@@ -1896,9 +1896,7 @@ static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep)
1896 conn = bnx2i_conn->cls_conn->dd_data; 1896 conn = bnx2i_conn->cls_conn->dd_data;
1897 session = conn->session; 1897 session = conn->session;
1898 1898
1899 spin_lock_bh(&session->lock); 1899 iscsi_suspend_queue(conn);
1900 bnx2i_conn->is_bound = 0;
1901 spin_unlock_bh(&session->lock);
1902 } 1900 }
1903 1901
1904 hba = bnx2i_ep->hba; 1902 hba = bnx2i_ep->hba;
@@ -1964,6 +1962,8 @@ return_bnx2i_ep:
1964 1962
1965 if (!hba->ofld_conns_active) 1963 if (!hba->ofld_conns_active)
1966 bnx2i_unreg_dev_all(); 1964 bnx2i_unreg_dev_all();
1965
1966 wake_up_interruptible(&hba->eh_wait);
1967} 1967}
1968 1968
1969 1969
@@ -1997,7 +1997,8 @@ static struct scsi_host_template bnx2i_host_template = {
1997 .queuecommand = iscsi_queuecommand, 1997 .queuecommand = iscsi_queuecommand,
1998 .eh_abort_handler = iscsi_eh_abort, 1998 .eh_abort_handler = iscsi_eh_abort,
1999 .eh_device_reset_handler = iscsi_eh_device_reset, 1999 .eh_device_reset_handler = iscsi_eh_device_reset,
2000 .eh_target_reset_handler = iscsi_eh_target_reset, 2000 .eh_target_reset_handler = iscsi_eh_recover_target,
2001 .change_queue_depth = iscsi_change_queue_depth,
2001 .can_queue = 1024, 2002 .can_queue = 1024,
2002 .max_sectors = 127, 2003 .max_sectors = 127,
2003 .cmd_per_lun = 32, 2004 .cmd_per_lun = 32,
@@ -2034,7 +2035,7 @@ struct iscsi_transport bnx2i_iscsi_transport = {
2034 ISCSI_USERNAME | ISCSI_PASSWORD | 2035 ISCSI_USERNAME | ISCSI_PASSWORD |
2035 ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN | 2036 ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
2036 ISCSI_FAST_ABORT | ISCSI_ABORT_TMO | 2037 ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
2037 ISCSI_LU_RESET_TMO | 2038 ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO |
2038 ISCSI_PING_TMO | ISCSI_RECV_TMO | 2039 ISCSI_PING_TMO | ISCSI_RECV_TMO |
2039 ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME, 2040 ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
2040 .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_NETDEV_NAME, 2041 .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_NETDEV_NAME,
diff --git a/drivers/scsi/bvme6000_scsi.c b/drivers/scsi/bvme6000_scsi.c
index 5799cb5cba6b..d40ea2f5be10 100644
--- a/drivers/scsi/bvme6000_scsi.c
+++ b/drivers/scsi/bvme6000_scsi.c
@@ -12,6 +12,7 @@
12#include <linux/platform_device.h> 12#include <linux/platform_device.h>
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/interrupt.h> 14#include <linux/interrupt.h>
15#include <linux/slab.h>
15#include <asm/bvme6000hw.h> 16#include <asm/bvme6000hw.h>
16#include <scsi/scsi_host.h> 17#include <scsi/scsi_host.h>
17#include <scsi/scsi_device.h> 18#include <scsi/scsi_device.h>
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index fe11c1d4b31d..4799d4391203 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -23,6 +23,7 @@
23#include <linux/mutex.h> 23#include <linux/mutex.h>
24#include <linux/idr.h> 24#include <linux/idr.h>
25#include <linux/smp_lock.h> 25#include <linux/smp_lock.h>
26#include <linux/slab.h>
26 27
27#include <scsi/scsi.h> 28#include <scsi/scsi.h>
28#include <scsi/scsi_cmnd.h> 29#include <scsi/scsi_cmnd.h>
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index 63abb06c4edb..cd05e049d5f6 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -141,6 +141,7 @@ static const struct value_name_pair serv_out12_arr[] = {
141static const struct value_name_pair serv_in16_arr[] = { 141static const struct value_name_pair serv_in16_arr[] = {
142 {0x10, "Read capacity(16)"}, 142 {0x10, "Read capacity(16)"},
143 {0x11, "Read long(16)"}, 143 {0x11, "Read long(16)"},
144 {0x12, "Get LBA status"},
144}; 145};
145#define SERV_IN16_SZ ARRAY_SIZE(serv_in16_arr) 146#define SERV_IN16_SZ ARRAY_SIZE(serv_in16_arr)
146 147
@@ -218,18 +219,15 @@ static void print_opcode_name(unsigned char * cdbp, int cdb_len)
218 break; 219 break;
219 } 220 }
220 sa = (cdbp[8] << 8) + cdbp[9]; 221 sa = (cdbp[8] << 8) + cdbp[9];
221 name = get_sa_name(maint_in_arr, MAINT_IN_SZ, sa); 222 name = get_sa_name(variable_length_arr, VARIABLE_LENGTH_SZ, sa);
222 if (name) { 223 if (name)
223 printk("%s", name); 224 printk("%s", name);
224 if ((cdb_len > 0) && (len != cdb_len)) 225 else
225 printk(", in_cdb_len=%d, ext_len=%d",
226 len, cdb_len);
227 } else {
228 printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa); 226 printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa);
229 if ((cdb_len > 0) && (len != cdb_len)) 227
230 printk(", in_cdb_len=%d, ext_len=%d", 228 if ((cdb_len > 0) && (len != cdb_len))
231 len, cdb_len); 229 printk(", in_cdb_len=%d, ext_len=%d", len, cdb_len);
232 } 230
233 break; 231 break;
234 case MAINTENANCE_IN: 232 case MAINTENANCE_IN:
235 sa = cdbp[1] & 0x1f; 233 sa = cdbp[1] & 0x1f;
@@ -348,6 +346,9 @@ void scsi_print_command(struct scsi_cmnd *cmd)
348{ 346{
349 int k; 347 int k;
350 348
349 if (cmd->cmnd == NULL)
350 return;
351
351 scmd_printk(KERN_INFO, cmd, "CDB: "); 352 scmd_printk(KERN_INFO, cmd, "CDB: ");
352 print_opcode_name(cmd->cmnd, cmd->cmd_len); 353 print_opcode_name(cmd->cmnd, cmd->cmd_len);
353 354
diff --git a/drivers/scsi/cxgb3i/cxgb3i_ddp.c b/drivers/scsi/cxgb3i/cxgb3i_ddp.c
index 344fd53b9954..b58d9134ac1b 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_ddp.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_ddp.c
@@ -10,6 +10,7 @@
10 * Written by: Karen Xie (kxie@chelsio.com) 10 * Written by: Karen Xie (kxie@chelsio.com)
11 */ 11 */
12 12
13#include <linux/slab.h>
13#include <linux/skbuff.h> 14#include <linux/skbuff.h>
14#include <linux/scatterlist.h> 15#include <linux/scatterlist.h>
15 16
diff --git a/drivers/scsi/cxgb3i/cxgb3i_ddp.h b/drivers/scsi/cxgb3i/cxgb3i_ddp.h
index 87dd56b422bf..6761b329124d 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_ddp.h
+++ b/drivers/scsi/cxgb3i/cxgb3i_ddp.h
@@ -13,6 +13,7 @@
13#ifndef __CXGB3I_ULP2_DDP_H__ 13#ifndef __CXGB3I_ULP2_DDP_H__
14#define __CXGB3I_ULP2_DDP_H__ 14#define __CXGB3I_ULP2_DDP_H__
15 15
16#include <linux/slab.h>
16#include <linux/vmalloc.h> 17#include <linux/vmalloc.h>
17 18
18/** 19/**
diff --git a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
index 2631bddd255e..7b686abaae64 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
@@ -12,6 +12,7 @@
12 */ 12 */
13 13
14#include <linux/inet.h> 14#include <linux/inet.h>
15#include <linux/slab.h>
15#include <linux/crypto.h> 16#include <linux/crypto.h>
16#include <linux/if_vlan.h> 17#include <linux/if_vlan.h>
17#include <net/dst.h> 18#include <net/dst.h>
@@ -591,8 +592,7 @@ static int cxgb3i_conn_bind(struct iscsi_cls_session *cls_session,
591 cxgb3i_conn_max_recv_dlength(conn); 592 cxgb3i_conn_max_recv_dlength(conn);
592 593
593 spin_lock_bh(&conn->session->lock); 594 spin_lock_bh(&conn->session->lock);
594 sprintf(conn->portal_address, NIPQUAD_FMT, 595 sprintf(conn->portal_address, "%pI4", &c3cn->daddr.sin_addr.s_addr);
595 NIPQUAD(c3cn->daddr.sin_addr.s_addr));
596 conn->portal_port = ntohs(c3cn->daddr.sin_port); 596 conn->portal_port = ntohs(c3cn->daddr.sin_port);
597 spin_unlock_bh(&conn->session->lock); 597 spin_unlock_bh(&conn->session->lock);
598 598
@@ -709,6 +709,12 @@ static int cxgb3i_host_set_param(struct Scsi_Host *shost,
709{ 709{
710 struct cxgb3i_hba *hba = iscsi_host_priv(shost); 710 struct cxgb3i_hba *hba = iscsi_host_priv(shost);
711 711
712 if (!hba->ndev) {
713 shost_printk(KERN_ERR, shost, "Could not set host param. "
714 "Netdev for host not set.\n");
715 return -ENODEV;
716 }
717
712 cxgb3i_api_debug("param %d, buf %s.\n", param, buf); 718 cxgb3i_api_debug("param %d, buf %s.\n", param, buf);
713 719
714 switch (param) { 720 switch (param) {
@@ -739,6 +745,12 @@ static int cxgb3i_host_get_param(struct Scsi_Host *shost,
739 struct cxgb3i_hba *hba = iscsi_host_priv(shost); 745 struct cxgb3i_hba *hba = iscsi_host_priv(shost);
740 int len = 0; 746 int len = 0;
741 747
748 if (!hba->ndev) {
749 shost_printk(KERN_ERR, shost, "Could not set host param. "
750 "Netdev for host not set.\n");
751 return -ENODEV;
752 }
753
742 cxgb3i_api_debug("hba %s, param %d.\n", hba->ndev->name, param); 754 cxgb3i_api_debug("hba %s, param %d.\n", hba->ndev->name, param);
743 755
744 switch (param) { 756 switch (param) {
@@ -753,7 +765,7 @@ static int cxgb3i_host_get_param(struct Scsi_Host *shost,
753 __be32 addr; 765 __be32 addr;
754 766
755 addr = cxgb3i_get_private_ipv4addr(hba->ndev); 767 addr = cxgb3i_get_private_ipv4addr(hba->ndev);
756 len = sprintf(buf, NIPQUAD_FMT, NIPQUAD(addr)); 768 len = sprintf(buf, "%pI4", &addr);
757 break; 769 break;
758 } 770 }
759 default: 771 default:
@@ -904,7 +916,7 @@ static struct scsi_host_template cxgb3i_host_template = {
904 .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN, 916 .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
905 .eh_abort_handler = iscsi_eh_abort, 917 .eh_abort_handler = iscsi_eh_abort,
906 .eh_device_reset_handler = iscsi_eh_device_reset, 918 .eh_device_reset_handler = iscsi_eh_device_reset,
907 .eh_target_reset_handler = iscsi_eh_target_reset, 919 .eh_target_reset_handler = iscsi_eh_recover_target,
908 .target_alloc = iscsi_target_alloc, 920 .target_alloc = iscsi_target_alloc,
909 .use_clustering = DISABLE_CLUSTERING, 921 .use_clustering = DISABLE_CLUSTERING,
910 .this_id = -1, 922 .this_id = -1,
@@ -937,7 +949,7 @@ static struct iscsi_transport cxgb3i_iscsi_transport = {
937 ISCSI_USERNAME | ISCSI_PASSWORD | 949 ISCSI_USERNAME | ISCSI_PASSWORD |
938 ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN | 950 ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
939 ISCSI_FAST_ABORT | ISCSI_ABORT_TMO | 951 ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
940 ISCSI_LU_RESET_TMO | 952 ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO |
941 ISCSI_PING_TMO | ISCSI_RECV_TMO | 953 ISCSI_PING_TMO | ISCSI_RECV_TMO |
942 ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME, 954 ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
943 .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS | 955 .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.c b/drivers/scsi/cxgb3i/cxgb3i_offload.c
index c1d5be4adf9c..a175be9c496f 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_offload.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_offload.c
@@ -13,6 +13,7 @@
13 */ 13 */
14 14
15#include <linux/if_vlan.h> 15#include <linux/if_vlan.h>
16#include <linux/slab.h>
16#include <linux/version.h> 17#include <linux/version.h>
17 18
18#include "cxgb3_defs.h" 19#include "cxgb3_defs.h"
@@ -291,7 +292,7 @@ static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
291 c3cn_hold(c3cn); 292 c3cn_hold(c3cn);
292 spin_lock_bh(&c3cn->lock); 293 spin_lock_bh(&c3cn->lock);
293 if (c3cn->state == C3CN_STATE_CONNECTING) 294 if (c3cn->state == C3CN_STATE_CONNECTING)
294 fail_act_open(c3cn, EHOSTUNREACH); 295 fail_act_open(c3cn, -EHOSTUNREACH);
295 spin_unlock_bh(&c3cn->lock); 296 spin_unlock_bh(&c3cn->lock);
296 c3cn_put(c3cn); 297 c3cn_put(c3cn);
297 __kfree_skb(skb); 298 __kfree_skb(skb);
@@ -792,18 +793,18 @@ static int act_open_rpl_status_to_errno(int status)
792{ 793{
793 switch (status) { 794 switch (status) {
794 case CPL_ERR_CONN_RESET: 795 case CPL_ERR_CONN_RESET:
795 return ECONNREFUSED; 796 return -ECONNREFUSED;
796 case CPL_ERR_ARP_MISS: 797 case CPL_ERR_ARP_MISS:
797 return EHOSTUNREACH; 798 return -EHOSTUNREACH;
798 case CPL_ERR_CONN_TIMEDOUT: 799 case CPL_ERR_CONN_TIMEDOUT:
799 return ETIMEDOUT; 800 return -ETIMEDOUT;
800 case CPL_ERR_TCAM_FULL: 801 case CPL_ERR_TCAM_FULL:
801 return ENOMEM; 802 return -ENOMEM;
802 case CPL_ERR_CONN_EXIST: 803 case CPL_ERR_CONN_EXIST:
803 cxgb3i_log_error("ACTIVE_OPEN_RPL: 4-tuple in use\n"); 804 cxgb3i_log_error("ACTIVE_OPEN_RPL: 4-tuple in use\n");
804 return EADDRINUSE; 805 return -EADDRINUSE;
805 default: 806 default:
806 return EIO; 807 return -EIO;
807 } 808 }
808} 809}
809 810
@@ -817,7 +818,7 @@ static void act_open_retry_timer(unsigned long data)
817 spin_lock_bh(&c3cn->lock); 818 spin_lock_bh(&c3cn->lock);
818 skb = alloc_skb(sizeof(struct cpl_act_open_req), GFP_ATOMIC); 819 skb = alloc_skb(sizeof(struct cpl_act_open_req), GFP_ATOMIC);
819 if (!skb) 820 if (!skb)
820 fail_act_open(c3cn, ENOMEM); 821 fail_act_open(c3cn, -ENOMEM);
821 else { 822 else {
822 skb->sk = (struct sock *)c3cn; 823 skb->sk = (struct sock *)c3cn;
823 set_arp_failure_handler(skb, act_open_req_arp_failure); 824 set_arp_failure_handler(skb, act_open_req_arp_failure);
@@ -966,14 +967,14 @@ static int abort_status_to_errno(struct s3_conn *c3cn, int abort_reason,
966 case CPL_ERR_BAD_SYN: /* fall through */ 967 case CPL_ERR_BAD_SYN: /* fall through */
967 case CPL_ERR_CONN_RESET: 968 case CPL_ERR_CONN_RESET:
968 return c3cn->state > C3CN_STATE_ESTABLISHED ? 969 return c3cn->state > C3CN_STATE_ESTABLISHED ?
969 EPIPE : ECONNRESET; 970 -EPIPE : -ECONNRESET;
970 case CPL_ERR_XMIT_TIMEDOUT: 971 case CPL_ERR_XMIT_TIMEDOUT:
971 case CPL_ERR_PERSIST_TIMEDOUT: 972 case CPL_ERR_PERSIST_TIMEDOUT:
972 case CPL_ERR_FINWAIT2_TIMEDOUT: 973 case CPL_ERR_FINWAIT2_TIMEDOUT:
973 case CPL_ERR_KEEPALIVE_TIMEDOUT: 974 case CPL_ERR_KEEPALIVE_TIMEDOUT:
974 return ETIMEDOUT; 975 return -ETIMEDOUT;
975 default: 976 default:
976 return EIO; 977 return -EIO;
977 } 978 }
978} 979}
979 980
@@ -1440,6 +1441,10 @@ void cxgb3i_c3cn_release(struct s3_conn *c3cn)
1440static int is_cxgb3_dev(struct net_device *dev) 1441static int is_cxgb3_dev(struct net_device *dev)
1441{ 1442{
1442 struct cxgb3i_sdev_data *cdata; 1443 struct cxgb3i_sdev_data *cdata;
1444 struct net_device *ndev = dev;
1445
1446 if (dev->priv_flags & IFF_802_1Q_VLAN)
1447 ndev = vlan_dev_real_dev(dev);
1443 1448
1444 write_lock(&cdata_rwlock); 1449 write_lock(&cdata_rwlock);
1445 list_for_each_entry(cdata, &cdata_list, list) { 1450 list_for_each_entry(cdata, &cdata_list, list) {
@@ -1447,7 +1452,7 @@ static int is_cxgb3_dev(struct net_device *dev)
1447 int i; 1452 int i;
1448 1453
1449 for (i = 0; i < ports->nports; i++) 1454 for (i = 0; i < ports->nports; i++)
1450 if (dev == ports->lldevs[i]) { 1455 if (ndev == ports->lldevs[i]) {
1451 write_unlock(&cdata_rwlock); 1456 write_unlock(&cdata_rwlock);
1452 return 1; 1457 return 1;
1453 } 1458 }
@@ -1563,9 +1568,29 @@ free_tid:
1563 s3_free_atid(cdev, c3cn->tid); 1568 s3_free_atid(cdev, c3cn->tid);
1564 c3cn->tid = 0; 1569 c3cn->tid = 0;
1565out_err: 1570out_err:
1566 return -1; 1571 return -EINVAL;
1567} 1572}
1568 1573
1574/**
1575 * cxgb3i_find_dev - find the interface associated with the given address
1576 * @ipaddr: ip address
1577 */
1578static struct net_device *
1579cxgb3i_find_dev(struct net_device *dev, __be32 ipaddr)
1580{
1581 struct flowi fl;
1582 int err;
1583 struct rtable *rt;
1584
1585 memset(&fl, 0, sizeof(fl));
1586 fl.nl_u.ip4_u.daddr = ipaddr;
1587
1588 err = ip_route_output_key(dev ? dev_net(dev) : &init_net, &rt, &fl);
1589 if (!err)
1590 return (&rt->u.dst)->dev;
1591
1592 return NULL;
1593}
1569 1594
1570/** 1595/**
1571 * cxgb3i_c3cn_connect - initiates an iscsi tcp connection to a given address 1596 * cxgb3i_c3cn_connect - initiates an iscsi tcp connection to a given address
@@ -1581,6 +1606,7 @@ int cxgb3i_c3cn_connect(struct net_device *dev, struct s3_conn *c3cn,
1581 struct cxgb3i_sdev_data *cdata; 1606 struct cxgb3i_sdev_data *cdata;
1582 struct t3cdev *cdev; 1607 struct t3cdev *cdev;
1583 __be32 sipv4; 1608 __be32 sipv4;
1609 struct net_device *dstdev;
1584 int err; 1610 int err;
1585 1611
1586 c3cn_conn_debug("c3cn 0x%p, dev 0x%p.\n", c3cn, dev); 1612 c3cn_conn_debug("c3cn 0x%p, dev 0x%p.\n", c3cn, dev);
@@ -1591,6 +1617,13 @@ int cxgb3i_c3cn_connect(struct net_device *dev, struct s3_conn *c3cn,
1591 c3cn->daddr.sin_port = usin->sin_port; 1617 c3cn->daddr.sin_port = usin->sin_port;
1592 c3cn->daddr.sin_addr.s_addr = usin->sin_addr.s_addr; 1618 c3cn->daddr.sin_addr.s_addr = usin->sin_addr.s_addr;
1593 1619
1620 dstdev = cxgb3i_find_dev(dev, usin->sin_addr.s_addr);
1621 if (!dstdev || !is_cxgb3_dev(dstdev))
1622 return -ENETUNREACH;
1623
1624 if (dstdev->priv_flags & IFF_802_1Q_VLAN)
1625 dev = dstdev;
1626
1594 rt = find_route(dev, c3cn->saddr.sin_addr.s_addr, 1627 rt = find_route(dev, c3cn->saddr.sin_addr.s_addr,
1595 c3cn->daddr.sin_addr.s_addr, 1628 c3cn->daddr.sin_addr.s_addr,
1596 c3cn->saddr.sin_port, 1629 c3cn->saddr.sin_port,
@@ -1643,10 +1676,11 @@ int cxgb3i_c3cn_connect(struct net_device *dev, struct s3_conn *c3cn,
1643 } else 1676 } else
1644 c3cn->saddr.sin_addr.s_addr = sipv4; 1677 c3cn->saddr.sin_addr.s_addr = sipv4;
1645 1678
1646 c3cn_conn_debug("c3cn 0x%p, %u.%u.%u.%u,%u-%u.%u.%u.%u,%u SYN_SENT.\n", 1679 c3cn_conn_debug("c3cn 0x%p, %pI4,%u-%pI4,%u SYN_SENT.\n",
1647 c3cn, NIPQUAD(c3cn->saddr.sin_addr.s_addr), 1680 c3cn,
1681 &c3cn->saddr.sin_addr.s_addr,
1648 ntohs(c3cn->saddr.sin_port), 1682 ntohs(c3cn->saddr.sin_port),
1649 NIPQUAD(c3cn->daddr.sin_addr.s_addr), 1683 &c3cn->daddr.sin_addr.s_addr,
1650 ntohs(c3cn->daddr.sin_port)); 1684 ntohs(c3cn->daddr.sin_port));
1651 1685
1652 c3cn_set_state(c3cn, C3CN_STATE_CONNECTING); 1686 c3cn_set_state(c3cn, C3CN_STATE_CONNECTING);
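
The final hunk replaces the long-deprecated NIPQUAD() macro with the %pI4 printk extension, which takes a pointer to a big-endian 32-bit address and renders dotted-quad notation. A minimal sketch:

    __be32 addr = in_aton("192.168.0.1");

    /* %pI4 dereferences the pointer and prints "192.168.0.1" */
    printk(KERN_INFO "peer %pI4\n", &addr);
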
diff --git a/drivers/scsi/cxgb3i/cxgb3i_pdu.c b/drivers/scsi/cxgb3i/cxgb3i_pdu.c
index 709105071177..dc5e3e77a351 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_pdu.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_pdu.c
@@ -12,6 +12,7 @@
12 * Written by: Karen Xie (kxie@chelsio.com) 12 * Written by: Karen Xie (kxie@chelsio.com)
13 */ 13 */
14 14
15#include <linux/slab.h>
15#include <linux/skbuff.h> 16#include <linux/skbuff.h>
16#include <linux/crypto.h> 17#include <linux/crypto.h>
17#include <scsi/scsi_cmnd.h> 18#include <scsi/scsi_cmnd.h>
@@ -388,8 +389,8 @@ int cxgb3i_conn_xmit_pdu(struct iscsi_task *task)
388 if (err > 0) { 389 if (err > 0) {
389 int pdulen = err; 390 int pdulen = err;
390 391
391 cxgb3i_tx_debug("task 0x%p, skb 0x%p, len %u/%u, rv %d.\n", 392 cxgb3i_tx_debug("task 0x%p, skb 0x%p, len %u/%u, rv %d.\n",
392 task, skb, skb->len, skb->data_len, err); 393 task, skb, skb->len, skb->data_len, err);
393 394
394 if (task->conn->hdrdgst_en) 395 if (task->conn->hdrdgst_en)
395 pdulen += ISCSI_DIGEST_SIZE; 396 pdulen += ISCSI_DIGEST_SIZE;
@@ -461,10 +462,8 @@ void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn)
461 skb = skb_peek(&c3cn->receive_queue); 462 skb = skb_peek(&c3cn->receive_queue);
462 } 463 }
463 read_unlock(&c3cn->callback_lock); 464 read_unlock(&c3cn->callback_lock);
464 if (c3cn) { 465 c3cn->copied_seq += read;
465 c3cn->copied_seq += read; 466 cxgb3i_c3cn_rx_credits(c3cn, read);
466 cxgb3i_c3cn_rx_credits(c3cn, read);
467 }
468 conn->rxdata_octets += read; 467 conn->rxdata_octets += read;
469 468
470 if (err) { 469 if (err) {
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 075e2397273c..bd977be7544e 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -57,6 +57,7 @@
57#include <linux/pci.h> 57#include <linux/pci.h>
58#include <linux/list.h> 58#include <linux/list.h>
59#include <linux/vmalloc.h> 59#include <linux/vmalloc.h>
60#include <linux/slab.h>
60#include <asm/io.h> 61#include <asm/io.h>
61 62
62#include <scsi/scsi.h> 63#include <scsi/scsi.h>
@@ -1509,7 +1510,7 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
1509 * Try anyway? 1510 * Try anyway?
1510 * 1511 *
1511 * We could, BUT: Sometimes the TRM_S1040 misses to produce a Selection 1512 * We could, BUT: Sometimes the TRM_S1040 misses to produce a Selection
1512 * Timeout, a Disconnect or a Reselction IRQ, so we would be screwed! 1513 * Timeout, a Disconnect or a Reselection IRQ, so we would be screwed!
1513 * (This is likely to be a bug in the hardware. Obviously, most people 1514 * (This is likely to be a bug in the hardware. Obviously, most people
1514 * only have one initiator per SCSI bus.) 1515 * only have one initiator per SCSI bus.)
1515 * Instead let this fail and have the timer make sure the command is 1516 * Instead let this fail and have the timer make sure the command is
diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c
index 3ee1cbc89479..6fae3d285ae7 100644
--- a/drivers/scsi/device_handler/scsi_dh.c
+++ b/drivers/scsi/device_handler/scsi_dh.c
@@ -21,6 +21,7 @@
21 * Mike Anderson <andmike@linux.vnet.ibm.com> 21 * Mike Anderson <andmike@linux.vnet.ibm.com>
22 */ 22 */
23 23
24#include <linux/slab.h>
24#include <scsi/scsi_dh.h> 25#include <scsi/scsi_dh.h>
25#include "../scsi_priv.h" 26#include "../scsi_priv.h"
26 27
@@ -226,7 +227,7 @@ store_dh_state(struct device *dev, struct device_attribute *attr,
226 * Activate a device handler 227 * Activate a device handler
227 */ 228 */
228 if (scsi_dh->activate) 229 if (scsi_dh->activate)
229 err = scsi_dh->activate(sdev); 230 err = scsi_dh->activate(sdev, NULL, NULL);
230 else 231 else
231 err = 0; 232 err = 0;
232 } 233 }
@@ -304,18 +305,15 @@ static int scsi_dh_notifier(struct notifier_block *nb,
304 sdev = to_scsi_device(dev); 305 sdev = to_scsi_device(dev);
305 306
306 if (action == BUS_NOTIFY_ADD_DEVICE) { 307 if (action == BUS_NOTIFY_ADD_DEVICE) {
308 err = device_create_file(dev, &scsi_dh_state_attr);
309 /* don't care about err */
307 devinfo = device_handler_match(NULL, sdev); 310 devinfo = device_handler_match(NULL, sdev);
308 if (!devinfo) 311 if (devinfo)
309 goto out; 312 err = scsi_dh_handler_attach(sdev, devinfo);
310
311 err = scsi_dh_handler_attach(sdev, devinfo);
312 if (!err)
313 err = device_create_file(dev, &scsi_dh_state_attr);
314 } else if (action == BUS_NOTIFY_DEL_DEVICE) { 313 } else if (action == BUS_NOTIFY_DEL_DEVICE) {
315 device_remove_file(dev, &scsi_dh_state_attr); 314 device_remove_file(dev, &scsi_dh_state_attr);
316 scsi_dh_handler_detach(sdev, NULL); 315 scsi_dh_handler_detach(sdev, NULL);
317 } 316 }
318out:
319 return err; 317 return err;
320} 318}
321 319
@@ -423,10 +421,17 @@ EXPORT_SYMBOL_GPL(scsi_unregister_device_handler);
423/* 421/*
424 * scsi_dh_activate - activate the path associated with the scsi_device 422 * scsi_dh_activate - activate the path associated with the scsi_device
425 * corresponding to the given request queue. 423 * corresponding to the given request queue.
426 * @q - Request queue that is associated with the scsi_device to be 424 * Returns immediately without waiting for activation to be completed.
427 * activated. 425 * @q - Request queue that is associated with the scsi_device to be
426 * activated.
427 * @fn - Function to be called upon completion of the activation.
428 * Function fn is called with data (below) and the error code.
429 * Function fn may be called from the same calling context. So,
430 * do not hold the lock in the caller which may be needed in fn.
431 * @data - data passed to the function fn upon completion.
432 *
428 */ 433 */
429int scsi_dh_activate(struct request_queue *q) 434int scsi_dh_activate(struct request_queue *q, activate_complete fn, void *data)
430{ 435{
431 int err = 0; 436 int err = 0;
432 unsigned long flags; 437 unsigned long flags;
@@ -445,7 +450,7 @@ int scsi_dh_activate(struct request_queue *q)
445 return err; 450 return err;
446 451
447 if (scsi_dh->activate) 452 if (scsi_dh->activate)
448 err = scsi_dh->activate(sdev); 453 err = scsi_dh->activate(sdev, fn, data);
449 put_device(&sdev->sdev_gendev); 454 put_device(&sdev->sdev_gendev);
450 return err; 455 return err;
451} 456}
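
With this change scsi_dh_activate() is asynchronous: it returns promptly and reports the real outcome through the activate_complete callback. A hedged caller sketch (the callback and context struct are hypothetical); note the kernel-doc warning above that fn may run in the caller's own context, so no lock needed by fn may be held across the call:

    struct path_ctx {
            struct request_queue *q;
            /* ... caller state ... */
    };

    static void my_activate_done(void *data, int err)
    {
            struct path_ctx *ctx = data;

            if (err != SCSI_DH_OK)
                    printk(KERN_WARNING "path activation failed: %d\n", err);
            /* resume I/O on ctx->q, fail over, etc. */
    }

    static void activate_my_path(struct path_ctx *ctx)
    {
            /* kick off activation; fn may even run before this returns */
            scsi_dh_activate(ctx->q, my_activate_done, ctx);
    }
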
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index b5cdefaf2608..1a970a76b1b9 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -19,6 +19,7 @@
19 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 19 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 * 20 *
21 */ 21 */
22#include <linux/slab.h>
22#include <scsi/scsi.h> 23#include <scsi/scsi.h>
23#include <scsi/scsi_eh.h> 24#include <scsi/scsi_eh.h>
24#include <scsi/scsi_dh.h> 25#include <scsi/scsi_dh.h>
@@ -60,11 +61,17 @@ struct alua_dh_data {
60 int bufflen; 61 int bufflen;
61 unsigned char sense[SCSI_SENSE_BUFFERSIZE]; 62 unsigned char sense[SCSI_SENSE_BUFFERSIZE];
62 int senselen; 63 int senselen;
64 struct scsi_device *sdev;
65 activate_complete callback_fn;
66 void *callback_data;
63}; 67};
64 68
65#define ALUA_POLICY_SWITCH_CURRENT 0 69#define ALUA_POLICY_SWITCH_CURRENT 0
66#define ALUA_POLICY_SWITCH_ALL 1 70#define ALUA_POLICY_SWITCH_ALL 1
67 71
72static char print_alua_state(int);
73static int alua_check_sense(struct scsi_device *, struct scsi_sense_hdr *);
74
68static inline struct alua_dh_data *get_alua_data(struct scsi_device *sdev) 75static inline struct alua_dh_data *get_alua_data(struct scsi_device *sdev)
69{ 76{
70 struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data; 77 struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data;
@@ -231,18 +238,71 @@ done:
231} 238}
232 239
233/* 240/*
241 * alua_stpg - Evaluate SET TARGET GROUP STATES
242 * @sdev: the device to be evaluated
243 * @state: the new target group state
244 *
245 * Send a SET TARGET GROUP STATES command to the device.
246 * We only have to test here if we should resubmit the command;
247 * any other error is assumed as a failure.
248 */
249static void stpg_endio(struct request *req, int error)
250{
251 struct alua_dh_data *h = req->end_io_data;
252 struct scsi_sense_hdr sense_hdr;
253 unsigned err = SCSI_DH_IO;
254
255 if (error || host_byte(req->errors) != DID_OK ||
256 msg_byte(req->errors) != COMMAND_COMPLETE)
257 goto done;
258
259 if (err == SCSI_DH_IO && h->senselen > 0) {
260 err = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE,
261 &sense_hdr);
262 if (!err) {
263 err = SCSI_DH_IO;
264 goto done;
265 }
266 err = alua_check_sense(h->sdev, &sense_hdr);
267 if (err == ADD_TO_MLQUEUE) {
268 err = SCSI_DH_RETRY;
269 goto done;
270 }
271 sdev_printk(KERN_INFO, h->sdev,
272 "%s: stpg sense code: %02x/%02x/%02x\n",
273 ALUA_DH_NAME, sense_hdr.sense_key,
274 sense_hdr.asc, sense_hdr.ascq);
275 err = SCSI_DH_IO;
276 }
277 if (err == SCSI_DH_OK) {
278 h->state = TPGS_STATE_OPTIMIZED;
279 sdev_printk(KERN_INFO, h->sdev,
280 "%s: port group %02x switched to state %c\n",
281 ALUA_DH_NAME, h->group_id,
282 print_alua_state(h->state));
283 }
284done:
285 blk_put_request(req);
286 if (h->callback_fn) {
287 h->callback_fn(h->callback_data, err);
288 h->callback_fn = h->callback_data = NULL;
289 }
290 return;
291}
292
293/*
234 * submit_stpg - Issue a SET TARGET GROUP STATES command 294 * submit_stpg - Issue a SET TARGET GROUP STATES command
235 * @sdev: sdev the command should be sent to
236 * 295 *
237 * Currently we're only setting the current target port group state 296 * Currently we're only setting the current target port group state
238 * to 'active/optimized' and let the array firmware figure out 297 * to 'active/optimized' and let the array firmware figure out
239 * the states of the remaining groups. 298 * the states of the remaining groups.
240 */ 299 */
241static unsigned submit_stpg(struct scsi_device *sdev, struct alua_dh_data *h) 300static unsigned submit_stpg(struct alua_dh_data *h)
242{ 301{
243 struct request *rq; 302 struct request *rq;
244 int err = SCSI_DH_RES_TEMP_UNAVAIL; 303 int err = SCSI_DH_RES_TEMP_UNAVAIL;
245 int stpg_len = 8; 304 int stpg_len = 8;
305 struct scsi_device *sdev = h->sdev;
246 306
247 /* Prepare the data buffer */ 307 /* Prepare the data buffer */
248 memset(h->buff, 0, stpg_len); 308 memset(h->buff, 0, stpg_len);
@@ -252,7 +312,7 @@ static unsigned submit_stpg(struct scsi_device *sdev, struct alua_dh_data *h)
252 312
253 rq = get_alua_req(sdev, h->buff, stpg_len, WRITE); 313 rq = get_alua_req(sdev, h->buff, stpg_len, WRITE);
254 if (!rq) 314 if (!rq)
255 goto done; 315 return SCSI_DH_RES_TEMP_UNAVAIL;
256 316
257 /* Prepare the command. */ 317 /* Prepare the command. */
258 rq->cmd[0] = MAINTENANCE_OUT; 318 rq->cmd[0] = MAINTENANCE_OUT;
@@ -266,17 +326,9 @@ static unsigned submit_stpg(struct scsi_device *sdev, struct alua_dh_data *h)
266 rq->sense = h->sense; 326 rq->sense = h->sense;
267 memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE); 327 memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
268 rq->sense_len = h->senselen = 0; 328 rq->sense_len = h->senselen = 0;
329 rq->end_io_data = h;
269 330
270 err = blk_execute_rq(rq->q, NULL, rq, 1); 331 blk_execute_rq_nowait(rq->q, NULL, rq, 1, stpg_endio);
271 if (err == -EIO) {
272 sdev_printk(KERN_INFO, sdev,
273 "%s: stpg failed with %x\n",
274 ALUA_DH_NAME, rq->errors);
275 h->senselen = rq->sense_len;
276 err = SCSI_DH_IO;
277 }
278 blk_put_request(rq);
279done:
280 return err; 332 return err;
281} 333}
282 334
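
submit_stpg() now fires the request with blk_execute_rq_nowait() and finishes in stpg_endio() above, freeing the caller from blocking in blk_execute_rq(). The general shape of the pattern, reduced to a sketch (context struct and completion helper are hypothetical; request setup elided):

    static void my_endio(struct request *req, int error)
    {
            struct my_ctx *ctx = req->end_io_data;

            /* inspect error, req->errors and req->sense here */
            blk_put_request(req);           /* the endio owns the final put */
            ctx_complete(ctx, error);       /* hypothetical notifier */
    }

    static void my_submit(struct request *req, struct my_ctx *ctx)
    {
            req->end_io_data = ctx;
            /* at_head=1; returns immediately, my_endio() runs on completion */
            blk_execute_rq_nowait(req->q, NULL, req, 1, my_endio);
    }
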
@@ -477,50 +529,6 @@ static int alua_check_sense(struct scsi_device *sdev,
477} 529}
478 530
479/* 531/*
480 * alua_stpg - Evaluate SET TARGET GROUP STATES
481 * @sdev: the device to be evaluated
482 * @state: the new target group state
483 *
484 * Send a SET TARGET GROUP STATES command to the device.
485 * We only have to test here if we should resubmit the command;
486 * any other error is assumed as a failure.
487 */
488static int alua_stpg(struct scsi_device *sdev, int state,
489 struct alua_dh_data *h)
490{
491 struct scsi_sense_hdr sense_hdr;
492 unsigned err;
493 int retry = ALUA_FAILOVER_RETRIES;
494
495 retry:
496 err = submit_stpg(sdev, h);
497 if (err == SCSI_DH_IO && h->senselen > 0) {
498 err = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE,
499 &sense_hdr);
500 if (!err)
501 return SCSI_DH_IO;
502 err = alua_check_sense(sdev, &sense_hdr);
503 if (retry > 0 && err == ADD_TO_MLQUEUE) {
504 retry--;
505 goto retry;
506 }
507 sdev_printk(KERN_INFO, sdev,
508 "%s: stpg sense code: %02x/%02x/%02x\n",
509 ALUA_DH_NAME, sense_hdr.sense_key,
510 sense_hdr.asc, sense_hdr.ascq);
511 err = SCSI_DH_IO;
512 }
513 if (err == SCSI_DH_OK) {
514 h->state = state;
515 sdev_printk(KERN_INFO, sdev,
516 "%s: port group %02x switched to state %c\n",
517 ALUA_DH_NAME, h->group_id,
518 print_alua_state(h->state) );
519 }
520 return err;
521}
522
523/*
524 * alua_rtpg - Evaluate REPORT TARGET GROUP STATES 532 * alua_rtpg - Evaluate REPORT TARGET GROUP STATES
525 * @sdev: the device to be evaluated. 533 * @sdev: the device to be evaluated.
526 * 534 *
@@ -652,7 +660,8 @@ out:
652 * based on a certain policy. But until we actually encounter them it 660 * based on a certain policy. But until we actually encounter them it
653 * should be okay. 661 * should be okay.
654 */ 662 */
655static int alua_activate(struct scsi_device *sdev) 663static int alua_activate(struct scsi_device *sdev,
664 activate_complete fn, void *data)
656{ 665{
657 struct alua_dh_data *h = get_alua_data(sdev); 666 struct alua_dh_data *h = get_alua_data(sdev);
658 int err = SCSI_DH_OK; 667 int err = SCSI_DH_OK;
@@ -663,11 +672,19 @@ static int alua_activate(struct scsi_device *sdev)
663 goto out; 672 goto out;
664 } 673 }
665 674
666 if (h->tpgs & TPGS_MODE_EXPLICIT && h->state != TPGS_STATE_OPTIMIZED) 675 if (h->tpgs & TPGS_MODE_EXPLICIT && h->state != TPGS_STATE_OPTIMIZED) {
667 err = alua_stpg(sdev, TPGS_STATE_OPTIMIZED, h); 676 h->callback_fn = fn;
677 h->callback_data = data;
678 err = submit_stpg(h);
679 if (err == SCSI_DH_OK)
680 return 0;
681 h->callback_fn = h->callback_data = NULL;
682 }
668 683
669out: 684out:
670 return err; 685 if (fn)
686 fn(data, err);
687 return 0;
671} 688}
672 689
673/* 690/*
@@ -701,6 +718,8 @@ static const struct scsi_dh_devlist alua_dev_list[] = {
701 {"IBM", "2145" }, 718 {"IBM", "2145" },
702 {"Pillar", "Axiom" }, 719 {"Pillar", "Axiom" },
703 {"Intel", "Multi-Flex"}, 720 {"Intel", "Multi-Flex"},
721 {"NETAPP", "LUN"},
722 {"AIX", "NVDISK"},
704 {NULL, NULL} 723 {NULL, NULL}
705}; 724};
706 725
@@ -745,6 +764,7 @@ static int alua_bus_attach(struct scsi_device *sdev)
745 h->rel_port = -1; 764 h->rel_port = -1;
746 h->buff = h->inq; 765 h->buff = h->inq;
747 h->bufflen = ALUA_INQUIRY_SIZE; 766 h->bufflen = ALUA_INQUIRY_SIZE;
767 h->sdev = sdev;
748 768
749 err = alua_initialize(sdev, h); 769 err = alua_initialize(sdev, h);
750 if (err != SCSI_DH_OK) 770 if (err != SCSI_DH_OK)
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index 0cffe84976fe..e8a0bc3efd49 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -20,6 +20,7 @@
20 * along with this program; see the file COPYING. If not, write to 20 * along with this program; see the file COPYING. If not, write to
21 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 21 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
22 */ 22 */
23#include <linux/slab.h>
23#include <scsi/scsi.h> 24#include <scsi/scsi.h>
24#include <scsi/scsi_eh.h> 25#include <scsi/scsi_eh.h>
25#include <scsi/scsi_dh.h> 26#include <scsi/scsi_dh.h>
@@ -272,7 +273,7 @@ static struct request *get_req(struct scsi_device *sdev, int cmd,
272 int len = 0; 273 int len = 0;
273 274
274 rq = blk_get_request(sdev->request_queue, 275 rq = blk_get_request(sdev->request_queue,
275 (cmd == MODE_SELECT) ? WRITE : READ, GFP_NOIO); 276 (cmd != INQUIRY) ? WRITE : READ, GFP_NOIO);
276 if (!rq) { 277 if (!rq) {
277 sdev_printk(KERN_INFO, sdev, "get_req: blk_get_request failed"); 278 sdev_printk(KERN_INFO, sdev, "get_req: blk_get_request failed");
278 return NULL; 279 return NULL;
@@ -286,14 +287,17 @@ static struct request *get_req(struct scsi_device *sdev, int cmd,
286 len = sizeof(short_trespass); 287 len = sizeof(short_trespass);
287 rq->cmd_flags |= REQ_RW; 288 rq->cmd_flags |= REQ_RW;
288 rq->cmd[1] = 0x10; 289 rq->cmd[1] = 0x10;
290 rq->cmd[4] = len;
289 break; 291 break;
290 case MODE_SELECT_10: 292 case MODE_SELECT_10:
291 len = sizeof(long_trespass); 293 len = sizeof(long_trespass);
292 rq->cmd_flags |= REQ_RW; 294 rq->cmd_flags |= REQ_RW;
293 rq->cmd[1] = 0x10; 295 rq->cmd[1] = 0x10;
296 rq->cmd[8] = len;
294 break; 297 break;
295 case INQUIRY: 298 case INQUIRY:
296 len = CLARIION_BUFFER_SIZE; 299 len = CLARIION_BUFFER_SIZE;
300 rq->cmd[4] = len;
297 memset(buffer, 0, len); 301 memset(buffer, 0, len);
298 break; 302 break;
299 default: 303 default:
@@ -301,7 +305,6 @@ static struct request *get_req(struct scsi_device *sdev, int cmd,
301 break; 305 break;
302 } 306 }
303 307
304 rq->cmd[4] = len;
305 rq->cmd_type = REQ_TYPE_BLOCK_PC; 308 rq->cmd_type = REQ_TYPE_BLOCK_PC;
306 rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | 309 rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
307 REQ_FAILFAST_DRIVER; 310 REQ_FAILFAST_DRIVER;
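
The length-byte moves above fix a real CDB layout bug: the parameter-list length lives in byte 4 of a 6-byte CDB, but MODE SELECT(10) carries it in bytes 7-8, so the old unconditional rq->cmd[4] = len wrote into a reserved byte of the 10-byte command. The two layouts side by side (generic SCSI, not driver-specific; the hunk stores only byte 8 because the trespass payloads are well under 256 bytes):

    /* MODE SELECT (6): opcode 0x15, parameter list length in byte 4 */
    rq->cmd[0] = MODE_SELECT;
    rq->cmd[4] = len;

    /* MODE SELECT (10): opcode 0x55, length in bytes 7-8, big endian */
    rq->cmd[0] = MODE_SELECT_10;
    rq->cmd[7] = (len >> 8) & 0xff;
    rq->cmd[8] = len & 0xff;
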
@@ -528,7 +531,8 @@ retry:
528 return err; 531 return err;
529} 532}
530 533
531static int clariion_activate(struct scsi_device *sdev) 534static int clariion_activate(struct scsi_device *sdev,
535 activate_complete fn, void *data)
532{ 536{
533 struct clariion_dh_data *csdev = get_clariion_data(sdev); 537 struct clariion_dh_data *csdev = get_clariion_data(sdev);
534 int result; 538 int result;
@@ -559,7 +563,9 @@ done:
559 csdev->port, lun_state[csdev->lun_state], 563 csdev->port, lun_state[csdev->lun_state],
560 csdev->default_sp + 'A'); 564 csdev->default_sp + 'A');
561 565
562 return result; 566 if (fn)
567 fn(data, result);
568 return 0;
563} 569}
564/* 570/*
565 * params - parameters in the following format 571 * params - parameters in the following format
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index f7da7530875e..e3916641e627 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -21,6 +21,7 @@
21 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 21 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
22 */ 22 */
23 23
24#include <linux/slab.h>
24#include <scsi/scsi.h> 25#include <scsi/scsi.h>
25#include <scsi/scsi_dbg.h> 26#include <scsi/scsi_dbg.h>
26#include <scsi/scsi_eh.h> 27#include <scsi/scsi_eh.h>
@@ -39,8 +40,14 @@ struct hp_sw_dh_data {
39 unsigned char sense[SCSI_SENSE_BUFFERSIZE]; 40 unsigned char sense[SCSI_SENSE_BUFFERSIZE];
40 int path_state; 41 int path_state;
41 int retries; 42 int retries;
43 int retry_cnt;
44 struct scsi_device *sdev;
45 activate_complete callback_fn;
46 void *callback_data;
42}; 47};
43 48
49static int hp_sw_start_stop(struct hp_sw_dh_data *);
50
44static inline struct hp_sw_dh_data *get_hp_sw_data(struct scsi_device *sdev) 51static inline struct hp_sw_dh_data *get_hp_sw_data(struct scsi_device *sdev)
45{ 52{
46 struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data; 53 struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data;
@@ -191,19 +198,53 @@ static int start_done(struct scsi_device *sdev, unsigned char *sense)
191 return rc; 198 return rc;
192} 199}
193 200
201static void start_stop_endio(struct request *req, int error)
202{
203 struct hp_sw_dh_data *h = req->end_io_data;
204 unsigned err = SCSI_DH_OK;
205
206 if (error || host_byte(req->errors) != DID_OK ||
207 msg_byte(req->errors) != COMMAND_COMPLETE) {
208 sdev_printk(KERN_WARNING, h->sdev,
209 "%s: sending start_stop_unit failed with %x\n",
210 HP_SW_NAME, req->errors);
211 err = SCSI_DH_IO;
212 goto done;
213 }
214
215 if (req->sense_len > 0) {
216 err = start_done(h->sdev, h->sense);
217 if (err == SCSI_DH_RETRY) {
218 err = SCSI_DH_IO;
219 if (--h->retry_cnt) {
220 blk_put_request(req);
221 err = hp_sw_start_stop(h);
222 if (err == SCSI_DH_OK)
223 return;
224 }
225 }
226 }
227done:
228 blk_put_request(req);
229 if (h->callback_fn) {
230 h->callback_fn(h->callback_data, err);
231 h->callback_fn = h->callback_data = NULL;
232 }
233 return;
234
235}
236
194/* 237/*
195 * hp_sw_start_stop - Send START STOP UNIT command 238 * hp_sw_start_stop - Send START STOP UNIT command
196 * @sdev: sdev command should be sent to 239 * @sdev: sdev command should be sent to
197 * 240 *
198 * Sending START STOP UNIT activates the SP. 241 * Sending START STOP UNIT activates the SP.
199 */ 242 */
200static int hp_sw_start_stop(struct scsi_device *sdev, struct hp_sw_dh_data *h) 243static int hp_sw_start_stop(struct hp_sw_dh_data *h)
201{ 244{
202 struct request *req; 245 struct request *req;
203 int ret, retry;
204 246
205retry: 247 req = blk_get_request(h->sdev->request_queue, WRITE, GFP_ATOMIC);
206 req = blk_get_request(sdev->request_queue, WRITE, GFP_NOIO);
207 if (!req) 248 if (!req)
208 return SCSI_DH_RES_TEMP_UNAVAIL; 249 return SCSI_DH_RES_TEMP_UNAVAIL;
209 250
@@ -217,32 +258,10 @@ retry:
217 req->sense = h->sense; 258 req->sense = h->sense;
218 memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE); 259 memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
219 req->sense_len = 0; 260 req->sense_len = 0;
220 retry = h->retries; 261 req->end_io_data = h;
221
222 ret = blk_execute_rq(req->q, NULL, req, 1);
223 if (ret == -EIO) {
224 if (req->sense_len > 0) {
225 ret = start_done(sdev, h->sense);
226 } else {
227 sdev_printk(KERN_WARNING, sdev,
228 "%s: sending start_stop_unit failed with %x\n",
229 HP_SW_NAME, req->errors);
230 ret = SCSI_DH_IO;
231 }
232 } else
233 ret = SCSI_DH_OK;
234 262
235 if (ret == SCSI_DH_RETRY) { 263 blk_execute_rq_nowait(req->q, NULL, req, 1, start_stop_endio);
236 if (--retry) { 264 return SCSI_DH_OK;
237 blk_put_request(req);
238 goto retry;
239 }
240 ret = SCSI_DH_IO;
241 }
242
243 blk_put_request(req);
244
245 return ret;
246} 265}
247 266
248static int hp_sw_prep_fn(struct scsi_device *sdev, struct request *req) 267static int hp_sw_prep_fn(struct scsi_device *sdev, struct request *req)
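
Because START STOP UNIT is now submitted asynchronously, the retry policy moved out of hp_sw_start_stop() and into start_stop_endio(): on a retryable sense the old request is dropped and a fresh one submitted until h->retry_cnt runs out. The recursion condensed into a sketch (want_retry() and finish_activation() are hypothetical stand-ins for the sense check and the callback firing):

    static void my_endio(struct request *req, int error)
    {
            struct hp_sw_dh_data *h = req->end_io_data;
            int resubmit = want_retry(req, error) && --h->retry_cnt > 0;

            blk_put_request(req);           /* this attempt is finished */
            if (resubmit && hp_sw_start_stop(h) == SCSI_DH_OK)
                    return;                 /* a fresh request is in flight */
            finish_activation(h);           /* invoke and clear callback_fn */
    }
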
@@ -268,7 +287,8 @@ static int hp_sw_prep_fn(struct scsi_device *sdev, struct request *req)
268 * activate the passive path (and deactivate the 287 * activate the passive path (and deactivate the
269 * previously active one). 288 * previously active one).
270 */ 289 */
271static int hp_sw_activate(struct scsi_device *sdev) 290static int hp_sw_activate(struct scsi_device *sdev,
291 activate_complete fn, void *data)
272{ 292{
273 int ret = SCSI_DH_OK; 293 int ret = SCSI_DH_OK;
274 struct hp_sw_dh_data *h = get_hp_sw_data(sdev); 294 struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
@@ -276,14 +296,18 @@ static int hp_sw_activate(struct scsi_device *sdev)
276 ret = hp_sw_tur(sdev, h); 296 ret = hp_sw_tur(sdev, h);
277 297
278 if (ret == SCSI_DH_OK && h->path_state == HP_SW_PATH_PASSIVE) { 298 if (ret == SCSI_DH_OK && h->path_state == HP_SW_PATH_PASSIVE) {
279 ret = hp_sw_start_stop(sdev, h); 299 h->retry_cnt = h->retries;
300 h->callback_fn = fn;
301 h->callback_data = data;
302 ret = hp_sw_start_stop(h);
280 if (ret == SCSI_DH_OK) 303 if (ret == SCSI_DH_OK)
281 sdev_printk(KERN_INFO, sdev, 304 return 0;
282 "%s: activated path\n", 305 h->callback_fn = h->callback_data = NULL;
283 HP_SW_NAME);
284 } 306 }
285 307
286 return ret; 308 if (fn)
309 fn(data, ret);
310 return 0;
287} 311}
288 312
289static const struct scsi_dh_devlist hp_sw_dh_data_list[] = { 313static const struct scsi_dh_devlist hp_sw_dh_data_list[] = {
@@ -326,6 +350,7 @@ static int hp_sw_bus_attach(struct scsi_device *sdev)
326 h = (struct hp_sw_dh_data *) scsi_dh_data->buf; 350 h = (struct hp_sw_dh_data *) scsi_dh_data->buf;
327 h->path_state = HP_SW_PATH_UNINITIALIZED; 351 h->path_state = HP_SW_PATH_UNINITIALIZED;
328 h->retries = HP_SW_RETRIES; 352 h->retries = HP_SW_RETRIES;
353 h->sdev = sdev;
329 354
330 ret = hp_sw_tur(sdev, h); 355 ret = hp_sw_tur(sdev, h);
331 if (ret != SCSI_DH_OK || h->path_state == HP_SW_PATH_UNINITIALIZED) 356 if (ret != SCSI_DH_OK || h->path_state == HP_SW_PATH_UNINITIALIZED)
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 268189d31d9c..5b683e429542 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -22,6 +22,8 @@
22#include <scsi/scsi.h> 22#include <scsi/scsi.h>
23#include <scsi/scsi_eh.h> 23#include <scsi/scsi_eh.h>
24#include <scsi/scsi_dh.h> 24#include <scsi/scsi_dh.h>
25#include <linux/workqueue.h>
26#include <linux/slab.h>
25 27
26#define RDAC_NAME "rdac" 28#define RDAC_NAME "rdac"
27#define RDAC_RETRY_COUNT 5 29#define RDAC_RETRY_COUNT 5
@@ -138,7 +140,13 @@ struct rdac_controller {
138 } mode_select; 140 } mode_select;
139 u8 index; 141 u8 index;
140 u8 array_name[ARRAY_LABEL_LEN]; 142 u8 array_name[ARRAY_LABEL_LEN];
143 spinlock_t ms_lock;
144 int ms_queued;
145 struct work_struct ms_work;
146 struct scsi_device *ms_sdev;
147 struct list_head ms_head;
141}; 148};
149
142struct c8_inquiry { 150struct c8_inquiry {
143 u8 peripheral_info; 151 u8 peripheral_info;
144 u8 page_code; /* 0xC8 */ 152 u8 page_code; /* 0xC8 */
@@ -198,8 +206,17 @@ static const char *lun_state[] =
198 "owned (AVT mode)", 206 "owned (AVT mode)",
199}; 207};
200 208
209struct rdac_queue_data {
210 struct list_head entry;
211 struct rdac_dh_data *h;
212 activate_complete callback_fn;
213 void *callback_data;
214};
215
201static LIST_HEAD(ctlr_list); 216static LIST_HEAD(ctlr_list);
202static DEFINE_SPINLOCK(list_lock); 217static DEFINE_SPINLOCK(list_lock);
218static struct workqueue_struct *kmpath_rdacd;
219static void send_mode_select(struct work_struct *work);
203 220
204/* 221/*
205 * module parameter to enable rdac debug logging. 222 * module parameter to enable rdac debug logging.
@@ -281,7 +298,6 @@ static struct request *rdac_failover_get(struct scsi_device *sdev,
281 rdac_pg->subpage_code = 0x1; 298 rdac_pg->subpage_code = 0x1;
282 rdac_pg->page_len[0] = 0x01; 299 rdac_pg->page_len[0] = 0x01;
283 rdac_pg->page_len[1] = 0x28; 300 rdac_pg->page_len[1] = 0x28;
284 rdac_pg->lun_table[h->lun] = 0x81;
285 } else { 301 } else {
286 struct rdac_pg_legacy *rdac_pg; 302 struct rdac_pg_legacy *rdac_pg;
287 303
@@ -291,7 +307,6 @@ static struct request *rdac_failover_get(struct scsi_device *sdev,
291 common = &rdac_pg->common; 307 common = &rdac_pg->common;
292 rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER; 308 rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER;
293 rdac_pg->page_len = 0x68; 309 rdac_pg->page_len = 0x68;
294 rdac_pg->lun_table[h->lun] = 0x81;
295 } 310 }
296 common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS; 311 common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS;
297 common->quiescence_timeout = RDAC_QUIESCENCE_TIME; 312 common->quiescence_timeout = RDAC_QUIESCENCE_TIME;
@@ -325,6 +340,7 @@ static void release_controller(struct kref *kref)
325 struct rdac_controller *ctlr; 340 struct rdac_controller *ctlr;
326 ctlr = container_of(kref, struct rdac_controller, kref); 341 ctlr = container_of(kref, struct rdac_controller, kref);
327 342
343 flush_workqueue(kmpath_rdacd);
328 spin_lock(&list_lock); 344 spin_lock(&list_lock);
329 list_del(&ctlr->node); 345 list_del(&ctlr->node);
330 spin_unlock(&list_lock); 346 spin_unlock(&list_lock);
@@ -363,6 +379,11 @@ static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id,
363 379
364 kref_init(&ctlr->kref); 380 kref_init(&ctlr->kref);
365 ctlr->use_ms10 = -1; 381 ctlr->use_ms10 = -1;
382 ctlr->ms_queued = 0;
383 ctlr->ms_sdev = NULL;
384 spin_lock_init(&ctlr->ms_lock);
385 INIT_WORK(&ctlr->ms_work, send_mode_select);
386 INIT_LIST_HEAD(&ctlr->ms_head);
366 list_add(&ctlr->node, &ctlr_list); 387 list_add(&ctlr->node, &ctlr_list);
367done: 388done:
368 spin_unlock(&list_lock); 389 spin_unlock(&list_lock);
@@ -490,7 +511,7 @@ static int set_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h)
490} 511}
491 512
492static int mode_select_handle_sense(struct scsi_device *sdev, 513static int mode_select_handle_sense(struct scsi_device *sdev,
493 unsigned char *sensebuf) 514 unsigned char *sensebuf)
494{ 515{
495 struct scsi_sense_hdr sense_hdr; 516 struct scsi_sense_hdr sense_hdr;
496 int err = SCSI_DH_IO, ret; 517 int err = SCSI_DH_IO, ret;
@@ -533,11 +554,29 @@ done:
533 return err; 554 return err;
534} 555}
535 556
536static int send_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h) 557static void send_mode_select(struct work_struct *work)
537{ 558{
559 struct rdac_controller *ctlr =
560 container_of(work, struct rdac_controller, ms_work);
538 struct request *rq; 561 struct request *rq;
562 struct scsi_device *sdev = ctlr->ms_sdev;
563 struct rdac_dh_data *h = get_rdac_data(sdev);
539 struct request_queue *q = sdev->request_queue; 564 struct request_queue *q = sdev->request_queue;
540 int err, retry_cnt = RDAC_RETRY_COUNT; 565 int err, retry_cnt = RDAC_RETRY_COUNT;
566 struct rdac_queue_data *tmp, *qdata;
567 LIST_HEAD(list);
568 u8 *lun_table;
569
570 spin_lock(&ctlr->ms_lock);
571 list_splice_init(&ctlr->ms_head, &list);
572 ctlr->ms_queued = 0;
573 ctlr->ms_sdev = NULL;
574 spin_unlock(&ctlr->ms_lock);
575
576 if (ctlr->use_ms10)
577 lun_table = ctlr->mode_select.expanded.lun_table;
578 else
579 lun_table = ctlr->mode_select.legacy.lun_table;
541 580
542retry: 581retry:
543 err = SCSI_DH_RES_TEMP_UNAVAIL; 582 err = SCSI_DH_RES_TEMP_UNAVAIL;
@@ -545,6 +584,10 @@ retry:
545 if (!rq) 584 if (!rq)
546 goto done; 585 goto done;
547 586
587 list_for_each_entry(qdata, &list, entry) {
588 lun_table[qdata->h->lun] = 0x81;
589 }
590
548 RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, " 591 RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
549 "%s MODE_SELECT command", 592 "%s MODE_SELECT command",
550 (char *) h->ctlr->array_name, h->ctlr->index, 593 (char *) h->ctlr->array_name, h->ctlr->index,
@@ -565,10 +608,45 @@ retry:
565 } 608 }
566 609
567done: 610done:
568 return err; 611 list_for_each_entry_safe(qdata, tmp, &list, entry) {
612 list_del(&qdata->entry);
613 if (err == SCSI_DH_OK)
614 qdata->h->state = RDAC_STATE_ACTIVE;
615 if (qdata->callback_fn)
616 qdata->callback_fn(qdata->callback_data, err);
617 kfree(qdata);
618 }
619 return;
620}
621
622static int queue_mode_select(struct scsi_device *sdev,
623 activate_complete fn, void *data)
624{
625 struct rdac_queue_data *qdata;
626 struct rdac_controller *ctlr;
627
628 qdata = kzalloc(sizeof(*qdata), GFP_KERNEL);
629 if (!qdata)
630 return SCSI_DH_RETRY;
631
632 qdata->h = get_rdac_data(sdev);
633 qdata->callback_fn = fn;
634 qdata->callback_data = data;
635
636 ctlr = qdata->h->ctlr;
637 spin_lock(&ctlr->ms_lock);
638 list_add_tail(&qdata->entry, &ctlr->ms_head);
639 if (!ctlr->ms_queued) {
640 ctlr->ms_queued = 1;
641 ctlr->ms_sdev = sdev;
642 queue_work(kmpath_rdacd, &ctlr->ms_work);
643 }
644 spin_unlock(&ctlr->ms_lock);
645 return SCSI_DH_OK;
569} 646}
570 647
571static int rdac_activate(struct scsi_device *sdev) 648static int rdac_activate(struct scsi_device *sdev,
649 activate_complete fn, void *data)
572{ 650{
573 struct rdac_dh_data *h = get_rdac_data(sdev); 651 struct rdac_dh_data *h = get_rdac_data(sdev);
574 int err = SCSI_DH_OK; 652 int err = SCSI_DH_OK;
@@ -577,10 +655,15 @@ static int rdac_activate(struct scsi_device *sdev)
577 if (err != SCSI_DH_OK) 655 if (err != SCSI_DH_OK)
578 goto done; 656 goto done;
579 657
580 if (h->lun_state == RDAC_LUN_UNOWNED) 658 if (h->lun_state == RDAC_LUN_UNOWNED) {
581 err = send_mode_select(sdev, h); 659 err = queue_mode_select(sdev, fn, data);
660 if (err == SCSI_DH_OK)
661 return 0;
662 }
582done: 663done:
583 return err; 664 if (fn)
665 fn(data, err);
666 return 0;
584} 667}
585 668
586static int rdac_prep_fn(struct scsi_device *sdev, struct request *req) 669static int rdac_prep_fn(struct scsi_device *sdev, struct request *req)
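
rdac_activate() now funnels everything through queue_mode_select(): activations for LUNs behind one controller are queued on ctlr->ms_head, and a single kmpath_rdacd work item services the whole batch with one MODE SELECT whose lun_table marks every queued LUN. The two halves of the producer/worker idiom, as used in the hunks above:

    /* producer: queue under the lock, kick the worker only once */
    spin_lock(&ctlr->ms_lock);
    list_add_tail(&qdata->entry, &ctlr->ms_head);
    if (!ctlr->ms_queued) {
            ctlr->ms_queued = 1;
            ctlr->ms_sdev = sdev;
            queue_work(kmpath_rdacd, &ctlr->ms_work);
    }
    spin_unlock(&ctlr->ms_lock);

    /* worker: splice the whole queue out, then service it unlocked */
    LIST_HEAD(list);

    spin_lock(&ctlr->ms_lock);
    list_splice_init(&ctlr->ms_head, &list);
    ctlr->ms_queued = 0;
    ctlr->ms_sdev = NULL;
    spin_unlock(&ctlr->ms_lock);
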
@@ -666,6 +749,8 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
666 {"IBM", "1724"}, 749 {"IBM", "1724"},
667 {"IBM", "1726"}, 750 {"IBM", "1726"},
668 {"IBM", "1742"}, 751 {"IBM", "1742"},
752 {"IBM", "1745"},
753 {"IBM", "1746"},
669 {"IBM", "1814"}, 754 {"IBM", "1814"},
670 {"IBM", "1815"}, 755 {"IBM", "1815"},
671 {"IBM", "1818"}, 756 {"IBM", "1818"},
@@ -790,13 +875,26 @@ static int __init rdac_init(void)
790 int r; 875 int r;
791 876
792 r = scsi_register_device_handler(&rdac_dh); 877 r = scsi_register_device_handler(&rdac_dh);
793 if (r != 0) 878 if (r != 0) {
794 printk(KERN_ERR "Failed to register scsi device handler."); 879 printk(KERN_ERR "Failed to register scsi device handler.");
880 goto done;
881 }
882
883 /*
884 * Create workqueue to handle mode selects for rdac
885 */
886 kmpath_rdacd = create_singlethread_workqueue("kmpath_rdacd");
887 if (!kmpath_rdacd) {
888 scsi_unregister_device_handler(&rdac_dh);
889 printk(KERN_ERR "kmpath_rdacd creation failed.\n");
890 }
891done:
795 return r; 892 return r;
796} 893}
797 894
798static void __exit rdac_exit(void) 895static void __exit rdac_exit(void)
799{ 896{
897 destroy_workqueue(kmpath_rdacd);
800 scsi_unregister_device_handler(&rdac_dh); 898 scsi_unregister_device_handler(&rdac_dh);
801} 899}
802 900
diff --git a/drivers/scsi/dmx3191d.c b/drivers/scsi/dmx3191d.c
index fa738ec8692a..207352cc70cc 100644
--- a/drivers/scsi/dmx3191d.c
+++ b/drivers/scsi/dmx3191d.c
@@ -31,7 +31,7 @@
31#include <scsi/scsi_host.h> 31#include <scsi/scsi_host.h>
32 32
33/* 33/*
34 * Defintions for the generic 5380 driver. 34 * Definitions for the generic 5380 driver.
35 */ 35 */
36#define AUTOSENSE 36#define AUTOSENSE
37 37
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 496764349c41..0435d044c9da 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -188,7 +188,8 @@ MODULE_DEVICE_TABLE(pci,dptids);
188static int adpt_detect(struct scsi_host_template* sht) 188static int adpt_detect(struct scsi_host_template* sht)
189{ 189{
190 struct pci_dev *pDev = NULL; 190 struct pci_dev *pDev = NULL;
191 adpt_hba* pHba; 191 adpt_hba *pHba;
192 adpt_hba *next;
192 193
193 PINFO("Detecting Adaptec I2O RAID controllers...\n"); 194 PINFO("Detecting Adaptec I2O RAID controllers...\n");
194 195
@@ -206,7 +207,8 @@ static int adpt_detect(struct scsi_host_template* sht)
206 } 207 }
207 208
208 /* In INIT state, Activate IOPs */ 209 /* In INIT state, Activate IOPs */
209 for (pHba = hba_chain; pHba; pHba = pHba->next) { 210 for (pHba = hba_chain; pHba; pHba = next) {
211 next = pHba->next;
210 // Activate does get status , init outbound, and get hrt 212 // Activate does get status , init outbound, and get hrt
211 if (adpt_i2o_activate_hba(pHba) < 0) { 213 if (adpt_i2o_activate_hba(pHba) < 0) {
212 adpt_i2o_delete_hba(pHba); 214 adpt_i2o_delete_hba(pHba);
@@ -243,7 +245,8 @@ rebuild_sys_tab:
243 PDEBUG("HBA's in OPERATIONAL state\n"); 245 PDEBUG("HBA's in OPERATIONAL state\n");
244 246
245 printk("dpti: If you have a lot of devices this could take a few minutes.\n"); 247 printk("dpti: If you have a lot of devices this could take a few minutes.\n");
246 for (pHba = hba_chain; pHba; pHba = pHba->next) { 248 for (pHba = hba_chain; pHba; pHba = next) {
249 next = pHba->next;
247 printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name); 250 printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
248 if (adpt_i2o_lct_get(pHba) < 0){ 251 if (adpt_i2o_lct_get(pHba) < 0){
249 adpt_i2o_delete_hba(pHba); 252 adpt_i2o_delete_hba(pHba);
@@ -263,7 +266,8 @@ rebuild_sys_tab:
263 adpt_sysfs_class = NULL; 266 adpt_sysfs_class = NULL;
264 } 267 }
265 268
266 for (pHba = hba_chain; pHba; pHba = pHba->next) { 269 for (pHba = hba_chain; pHba; pHba = next) {
270 next = pHba->next;
267 if (adpt_scsi_host_alloc(pHba, sht) < 0){ 271 if (adpt_scsi_host_alloc(pHba, sht) < 0){
268 adpt_i2o_delete_hba(pHba); 272 adpt_i2o_delete_hba(pHba);
269 continue; 273 continue;
@@ -1229,11 +1233,10 @@ static void adpt_i2o_delete_hba(adpt_hba* pHba)
1229 } 1233 }
1230 } 1234 }
1231 pci_dev_put(pHba->pDev); 1235 pci_dev_put(pHba->pDev);
1232 kfree(pHba);
1233
1234 if (adpt_sysfs_class) 1236 if (adpt_sysfs_class)
1235 device_destroy(adpt_sysfs_class, 1237 device_destroy(adpt_sysfs_class,
1236 MKDEV(DPTI_I2O_MAJOR, pHba->unit)); 1238 MKDEV(DPTI_I2O_MAJOR, pHba->unit));
1239 kfree(pHba);
1237 1240
1238 if(hba_count <= 0){ 1241 if(hba_count <= 0){
1239 unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER); 1242 unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
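
All three adpt_detect() hunks switch the iteration to cache pHba->next before the loop body runs, because adpt_i2o_delete_hba() unlinks and kfree()s the current node; reading pHba->next afterwards would be a use-after-free. The same safe-traversal idea on a generic singly linked chain (names hypothetical):

    struct node {
            struct node *next;
    };

    static void prune(struct node *head)
    {
            struct node *n, *next;

            for (n = head; n; n = next) {
                    next = n->next;         /* sample before the body can free n */
                    if (is_broken(n))
                            destroy(n);     /* may kfree(n); 'next' stays valid */
            }
    }
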
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index c7076ce25e21..d1c31378f6da 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -490,6 +490,7 @@
490#include <linux/ctype.h> 490#include <linux/ctype.h>
491#include <linux/spinlock.h> 491#include <linux/spinlock.h>
492#include <linux/dma-mapping.h> 492#include <linux/dma-mapping.h>
493#include <linux/slab.h>
493#include <asm/byteorder.h> 494#include <asm/byteorder.h>
494#include <asm/dma.h> 495#include <asm/dma.h>
495#include <asm/io.h> 496#include <asm/io.h>
@@ -1509,7 +1510,7 @@ static int option_setup(char *str)
1509 char *cur = str; 1510 char *cur = str;
1510 int i = 1; 1511 int i = 1;
1511 1512
1512 while (cur && isdigit(*cur) && i <= MAX_INT_PARAM) { 1513 while (cur && isdigit(*cur) && i < MAX_INT_PARAM) {
1513 ints[i++] = simple_strtoul(cur, NULL, 0); 1514 ints[i++] = simple_strtoul(cur, NULL, 0);
1514 1515
1515 if ((cur = strchr(cur, ',')) != NULL) 1516 if ((cur = strchr(cur, ',')) != NULL)
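
The eata.c change is an off-by-one fix: with i starting at 1 and the array presumably declared as ints[MAX_INT_PARAM] (get_options()-style, slot 0 reserved for a count), the old test i <= MAX_INT_PARAM let ints[i++] write one element past the end. The corrected bound, sketched with that assumption spelled out:

    int ints[MAX_INT_PARAM];        /* usable indices: 0 .. MAX_INT_PARAM-1 */
    int i = 1;                      /* ints[0] conventionally holds a count */

    while (cur && isdigit(*cur) && i < MAX_INT_PARAM) {     /* was: <= */
            ints[i++] = simple_strtoul(cur, NULL, 0);
            cur = strchr(cur, ',');
            if (cur)
                    cur++;
    }
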
diff --git a/drivers/scsi/eata_pio.c b/drivers/scsi/eata_pio.c
index 152dd15db276..60886c19065e 100644
--- a/drivers/scsi/eata_pio.c
+++ b/drivers/scsi/eata_pio.c
@@ -50,7 +50,6 @@
50#include <linux/kernel.h> 50#include <linux/kernel.h>
51#include <linux/string.h> 51#include <linux/string.h>
52#include <linux/ioport.h> 52#include <linux/ioport.h>
53#include <linux/slab.h>
54#include <linux/in.h> 53#include <linux/in.h>
55#include <linux/pci.h> 54#include <linux/pci.h>
56#include <linux/proc_fs.h> 55#include <linux/proc_fs.h>
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index a680e18b5f3b..e2bc779f86c1 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -1449,9 +1449,6 @@ static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
1449 if (offset > 15) 1449 if (offset > 15)
1450 goto do_reject; 1450 goto do_reject;
1451 1451
1452 if (esp->flags & ESP_FLAG_DISABLE_SYNC)
1453 offset = 0;
1454
1455 if (offset) { 1452 if (offset) {
1456 int one_clock; 1453 int one_clock;
1457 1454
@@ -2405,12 +2402,6 @@ static int esp_slave_configure(struct scsi_device *dev)
2405 struct esp_target_data *tp = &esp->target[dev->id]; 2402 struct esp_target_data *tp = &esp->target[dev->id];
2406 int goal_tags, queue_depth; 2403 int goal_tags, queue_depth;
2407 2404
2408 if (esp->flags & ESP_FLAG_DISABLE_SYNC) {
2409 /* Bypass async domain validation */
2410 dev->ppr = 0;
2411 dev->sdtr = 0;
2412 }
2413
2414 goal_tags = 0; 2405 goal_tags = 0;
2415 2406
2416 if (dev->tagged_supported) { 2407 if (dev->tagged_supported) {
@@ -2660,7 +2651,10 @@ static void esp_set_offset(struct scsi_target *target, int offset)
2660 struct esp *esp = shost_priv(host); 2651 struct esp *esp = shost_priv(host);
2661 struct esp_target_data *tp = &esp->target[target->id]; 2652 struct esp_target_data *tp = &esp->target[target->id];
2662 2653
2663 tp->nego_goal_offset = offset; 2654 if (esp->flags & ESP_FLAG_DISABLE_SYNC)
2655 tp->nego_goal_offset = 0;
2656 else
2657 tp->nego_goal_offset = offset;
2664 tp->flags |= ESP_TGT_CHECK_NEGO; 2658 tp->flags |= ESP_TGT_CHECK_NEGO;
2665} 2659}
2666 2660
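
The esp hunks consolidate the ESP_FLAG_DISABLE_SYNC handling: instead of zeroing the offset in esp_msgin_sdtr() and bypassing domain validation in esp_slave_configure(), the goal offset is clamped once where the spi transport hands it in, so every later negotiation path sees a consistent value. The new gate in compact form:

    /* esp_set_offset(): a single choke point for the sync-disable flag */
    tp->nego_goal_offset = (esp->flags & ESP_FLAG_DISABLE_SYNC) ?
                           0 : offset;
    tp->flags |= ESP_TGT_CHECK_NEGO;
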
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 704b8e034946..f01b9b44e8aa 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -26,6 +26,7 @@
26#include <linux/if_ether.h> 26#include <linux/if_ether.h>
27#include <linux/if_vlan.h> 27#include <linux/if_vlan.h>
28#include <linux/crc32.h> 28#include <linux/crc32.h>
29#include <linux/slab.h>
29#include <linux/cpu.h> 30#include <linux/cpu.h>
30#include <linux/fs.h> 31#include <linux/fs.h>
31#include <linux/sysfs.h> 32#include <linux/sysfs.h>
@@ -66,14 +67,14 @@ LIST_HEAD(fcoe_hostlist);
66DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu); 67DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
67 68
68/* Function Prototypes */ 69/* Function Prototypes */
69static int fcoe_reset(struct Scsi_Host *shost); 70static int fcoe_reset(struct Scsi_Host *);
70static int fcoe_xmit(struct fc_lport *, struct fc_frame *); 71static int fcoe_xmit(struct fc_lport *, struct fc_frame *);
71static int fcoe_rcv(struct sk_buff *, struct net_device *, 72static int fcoe_rcv(struct sk_buff *, struct net_device *,
72 struct packet_type *, struct net_device *); 73 struct packet_type *, struct net_device *);
73static int fcoe_percpu_receive_thread(void *arg); 74static int fcoe_percpu_receive_thread(void *);
74static void fcoe_clean_pending_queue(struct fc_lport *lp); 75static void fcoe_clean_pending_queue(struct fc_lport *);
75static void fcoe_percpu_clean(struct fc_lport *lp); 76static void fcoe_percpu_clean(struct fc_lport *);
76static int fcoe_link_ok(struct fc_lport *lp); 77static int fcoe_link_ok(struct fc_lport *);
77 78
78static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *); 79static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
79static int fcoe_hostlist_add(const struct fc_lport *); 80static int fcoe_hostlist_add(const struct fc_lport *);
@@ -82,15 +83,77 @@ static void fcoe_check_wait_queue(struct fc_lport *, struct sk_buff *);
82static int fcoe_device_notification(struct notifier_block *, ulong, void *); 83static int fcoe_device_notification(struct notifier_block *, ulong, void *);
83static void fcoe_dev_setup(void); 84static void fcoe_dev_setup(void);
84static void fcoe_dev_cleanup(void); 85static void fcoe_dev_cleanup(void);
85static struct fcoe_interface * 86static struct fcoe_interface
86 fcoe_hostlist_lookup_port(const struct net_device *dev); 87*fcoe_hostlist_lookup_port(const struct net_device *);
88
89static int fcoe_fip_recv(struct sk_buff *, struct net_device *,
90 struct packet_type *, struct net_device *);
91
92static void fcoe_fip_send(struct fcoe_ctlr *, struct sk_buff *);
93static void fcoe_update_src_mac(struct fc_lport *, u8 *);
94static u8 *fcoe_get_src_mac(struct fc_lport *);
95static void fcoe_destroy_work(struct work_struct *);
96
97static int fcoe_ddp_setup(struct fc_lport *, u16, struct scatterlist *,
98 unsigned int);
99static int fcoe_ddp_done(struct fc_lport *, u16);
100
101static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *);
102
103static int fcoe_create(const char *, struct kernel_param *);
104static int fcoe_destroy(const char *, struct kernel_param *);
105static int fcoe_enable(const char *, struct kernel_param *);
106static int fcoe_disable(const char *, struct kernel_param *);
107
108static struct fc_seq *fcoe_elsct_send(struct fc_lport *,
109 u32 did, struct fc_frame *,
110 unsigned int op,
111 void (*resp)(struct fc_seq *,
112 struct fc_frame *,
113 void *),
114 void *, u32 timeout);
115static void fcoe_recv_frame(struct sk_buff *skb);
87 116
88/* notification function from net device */ 117static void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *);
118
119module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR);
120__MODULE_PARM_TYPE(create, "string");
121MODULE_PARM_DESC(create, " Creates fcoe instance on a ethernet interface");
122module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR);
123__MODULE_PARM_TYPE(destroy, "string");
124MODULE_PARM_DESC(destroy, " Destroys fcoe instance on a ethernet interface");
125module_param_call(enable, fcoe_enable, NULL, NULL, S_IWUSR);
126__MODULE_PARM_TYPE(enable, "string");
127MODULE_PARM_DESC(enable, " Enables fcoe on a ethernet interface.");
128module_param_call(disable, fcoe_disable, NULL, NULL, S_IWUSR);
129__MODULE_PARM_TYPE(disable, "string");
130MODULE_PARM_DESC(disable, " Disables fcoe on a ethernet interface.");
131
132/* notification function for packets from net device */
89static struct notifier_block fcoe_notifier = { 133static struct notifier_block fcoe_notifier = {
90 .notifier_call = fcoe_device_notification, 134 .notifier_call = fcoe_device_notification,
91}; 135};
92 136
93static struct scsi_transport_template *scsi_transport_fcoe_sw; 137/* notification function for CPU hotplug events */
138static struct notifier_block fcoe_cpu_notifier = {
139 .notifier_call = fcoe_cpu_callback,
140};
141
142static struct scsi_transport_template *fcoe_transport_template;
143static struct scsi_transport_template *fcoe_vport_transport_template;
144
145static int fcoe_vport_destroy(struct fc_vport *);
146static int fcoe_vport_create(struct fc_vport *, bool disabled);
147static int fcoe_vport_disable(struct fc_vport *, bool disable);
148static void fcoe_set_vport_symbolic_name(struct fc_vport *);
149
150static struct libfc_function_template fcoe_libfc_fcn_templ = {
151 .frame_send = fcoe_xmit,
152 .ddp_setup = fcoe_ddp_setup,
153 .ddp_done = fcoe_ddp_done,
154 .elsct_send = fcoe_elsct_send,
155 .get_lesb = fcoe_get_lesb,
156};
94 157
95struct fc_function_template fcoe_transport_function = { 158struct fc_function_template fcoe_transport_function = {
96 .show_host_node_name = 1, 159 .show_host_node_name = 1,
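
The create/destroy/enable/disable hooks above are exposed as write-only module parameters, so userspace drives the driver with writes such as echo eth0 > /sys/module/fcoe/parameters/create. The registration shape, reduced to a sketch (my_create is hypothetical):

    static int my_create(const char *val, struct kernel_param *kp)
    {
            /* 'val' is the string written to the parameter file */
            printk(KERN_INFO "create requested on %s\n", val);
            return 0;       /* a negative errno here is reported to the writer */
    }

    module_param_call(create, my_create, NULL, NULL, S_IWUSR);
    __MODULE_PARM_TYPE(create, "string");
    MODULE_PARM_DESC(create, "Creates an instance on an ethernet interface");
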
@@ -123,6 +186,48 @@ struct fc_function_template fcoe_transport_function = {
123 .issue_fc_host_lip = fcoe_reset, 186 .issue_fc_host_lip = fcoe_reset,
124 187
125 .terminate_rport_io = fc_rport_terminate_io, 188 .terminate_rport_io = fc_rport_terminate_io,
189
190 .vport_create = fcoe_vport_create,
191 .vport_delete = fcoe_vport_destroy,
192 .vport_disable = fcoe_vport_disable,
193 .set_vport_symbolic_name = fcoe_set_vport_symbolic_name,
194
195 .bsg_request = fc_lport_bsg_request,
196};
197
198struct fc_function_template fcoe_vport_transport_function = {
199 .show_host_node_name = 1,
200 .show_host_port_name = 1,
201 .show_host_supported_classes = 1,
202 .show_host_supported_fc4s = 1,
203 .show_host_active_fc4s = 1,
204 .show_host_maxframe_size = 1,
205
206 .show_host_port_id = 1,
207 .show_host_supported_speeds = 1,
208 .get_host_speed = fc_get_host_speed,
209 .show_host_speed = 1,
210 .show_host_port_type = 1,
211 .get_host_port_state = fc_get_host_port_state,
212 .show_host_port_state = 1,
213 .show_host_symbolic_name = 1,
214
215 .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
216 .show_rport_maxframe_size = 1,
217 .show_rport_supported_classes = 1,
218
219 .show_host_fabric_name = 1,
220 .show_starget_node_name = 1,
221 .show_starget_port_name = 1,
222 .show_starget_port_id = 1,
223 .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
224 .show_rport_dev_loss_tmo = 1,
225 .get_fc_host_stats = fc_get_host_stats,
226 .issue_fc_host_lip = fcoe_reset,
227
228 .terminate_rport_io = fc_rport_terminate_io,
229
230 .bsg_request = fc_lport_bsg_request,
126}; 231};
127 232
128static struct scsi_host_template fcoe_shost_template = { 233static struct scsi_host_template fcoe_shost_template = {
@@ -137,20 +242,17 @@ static struct scsi_host_template fcoe_shost_template = {
137 .change_queue_depth = fc_change_queue_depth, 242 .change_queue_depth = fc_change_queue_depth,
138 .change_queue_type = fc_change_queue_type, 243 .change_queue_type = fc_change_queue_type,
139 .this_id = -1, 244 .this_id = -1,
140 .cmd_per_lun = 32, 245 .cmd_per_lun = 3,
141 .can_queue = FCOE_MAX_OUTSTANDING_COMMANDS, 246 .can_queue = FCOE_MAX_OUTSTANDING_COMMANDS,
142 .use_clustering = ENABLE_CLUSTERING, 247 .use_clustering = ENABLE_CLUSTERING,
143 .sg_tablesize = SG_ALL, 248 .sg_tablesize = SG_ALL,
144 .max_sectors = 0xffff, 249 .max_sectors = 0xffff,
145}; 250};
146 251
147static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *dev,
148 struct packet_type *ptype,
149 struct net_device *orig_dev);
150/** 252/**
151 * fcoe_interface_setup() 253 * fcoe_interface_setup() - Setup a FCoE interface
152 * @fcoe: new fcoe_interface 254 * @fcoe: The new FCoE interface
153 * @netdev : ptr to the associated netdevice struct 255 * @netdev: The net device that the fcoe interface is on
154 * 256 *
155 * Returns : 0 for success 257 * Returns : 0 for success
156 * Locking: must be called with the RTNL mutex held 258 * Locking: must be called with the RTNL mutex held
@@ -160,23 +262,36 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
160{ 262{
161 struct fcoe_ctlr *fip = &fcoe->ctlr; 263 struct fcoe_ctlr *fip = &fcoe->ctlr;
162 struct netdev_hw_addr *ha; 264 struct netdev_hw_addr *ha;
265 struct net_device *real_dev;
163 u8 flogi_maddr[ETH_ALEN]; 266 u8 flogi_maddr[ETH_ALEN];
267 const struct net_device_ops *ops;
164 268
165 fcoe->netdev = netdev; 269 fcoe->netdev = netdev;
166 270
271 /* Let LLD initialize for FCoE */
272 ops = netdev->netdev_ops;
273 if (ops->ndo_fcoe_enable) {
274 if (ops->ndo_fcoe_enable(netdev))
275 FCOE_NETDEV_DBG(netdev, "Failed to enable FCoE"
276 " specific feature for LLD.\n");
277 }
278
167 /* Do not support for bonding device */ 279 /* Do not support for bonding device */
168 if ((netdev->priv_flags & IFF_MASTER_ALB) || 280 if ((netdev->priv_flags & IFF_MASTER_ALB) ||
169 (netdev->priv_flags & IFF_SLAVE_INACTIVE) || 281 (netdev->priv_flags & IFF_SLAVE_INACTIVE) ||
170 (netdev->priv_flags & IFF_MASTER_8023AD)) { 282 (netdev->priv_flags & IFF_MASTER_8023AD)) {
283 FCOE_NETDEV_DBG(netdev, "Bonded interfaces not supported\n");
171 return -EOPNOTSUPP; 284 return -EOPNOTSUPP;
172 } 285 }
173 286
174 /* look for SAN MAC address, if multiple SAN MACs exist, only 287 /* look for SAN MAC address, if multiple SAN MACs exist, only
175 * use the first one for SPMA */ 288 * use the first one for SPMA */
289 real_dev = (netdev->priv_flags & IFF_802_1Q_VLAN) ?
290 vlan_dev_real_dev(netdev) : netdev;
176 rcu_read_lock(); 291 rcu_read_lock();
177 for_each_dev_addr(netdev, ha) { 292 for_each_dev_addr(real_dev, ha) {
178 if ((ha->type == NETDEV_HW_ADDR_T_SAN) && 293 if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
179 (is_valid_ether_addr(fip->ctl_src_addr))) { 294 (is_valid_ether_addr(ha->addr))) {
180 memcpy(fip->ctl_src_addr, ha->addr, ETH_ALEN); 295 memcpy(fip->ctl_src_addr, ha->addr, ETH_ALEN);
181 fip->spma = 1; 296 fip->spma = 1;
182 break; 297 break;
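
Besides unwrapping VLANs via vlan_dev_real_dev(), the SAN-MAC scan above also fixes its validity test: the old code checked fip->ctl_src_addr, which is still zero at this point, instead of the candidate ha->addr, so the SPMA branch could never be taken. The corrected loop shape:

    rcu_read_lock();
    for_each_dev_addr(real_dev, ha) {
            if (ha->type == NETDEV_HW_ADDR_T_SAN &&
                is_valid_ether_addr(ha->addr)) {    /* test the candidate */
                    memcpy(fip->ctl_src_addr, ha->addr, ETH_ALEN);
                    fip->spma = 1;          /* use Server-Provided MAC */
                    break;
            }
    }
    rcu_read_unlock();
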
@@ -216,19 +331,16 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
216 return 0; 331 return 0;
217} 332}
218 333
219static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb);
220static void fcoe_update_src_mac(struct fcoe_ctlr *fip, u8 *old, u8 *new);
221static void fcoe_destroy_work(struct work_struct *work);
222
223/** 334/**
224 * fcoe_interface_create() 335 * fcoe_interface_create() - Create a FCoE interface on a net device
225 * @netdev: network interface 336 * @netdev: The net device to create the FCoE interface on
226 * 337 *
227 * Returns: pointer to a struct fcoe_interface or NULL on error 338 * Returns: pointer to a struct fcoe_interface or NULL on error
228 */ 339 */
229static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev) 340static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev)
230{ 341{
231 struct fcoe_interface *fcoe; 342 struct fcoe_interface *fcoe;
343 int err;
232 344
233 fcoe = kzalloc(sizeof(*fcoe), GFP_KERNEL); 345 fcoe = kzalloc(sizeof(*fcoe), GFP_KERNEL);
234 if (!fcoe) { 346 if (!fcoe) {
@@ -245,15 +357,22 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev)
245 fcoe_ctlr_init(&fcoe->ctlr); 357 fcoe_ctlr_init(&fcoe->ctlr);
246 fcoe->ctlr.send = fcoe_fip_send; 358 fcoe->ctlr.send = fcoe_fip_send;
247 fcoe->ctlr.update_mac = fcoe_update_src_mac; 359 fcoe->ctlr.update_mac = fcoe_update_src_mac;
360 fcoe->ctlr.get_src_addr = fcoe_get_src_mac;
248 361
249 fcoe_interface_setup(fcoe, netdev); 362 err = fcoe_interface_setup(fcoe, netdev);
363 if (err) {
364 fcoe_ctlr_destroy(&fcoe->ctlr);
365 kfree(fcoe);
366 dev_put(netdev);
367 return NULL;
368 }
250 369
251 return fcoe; 370 return fcoe;
252} 371}
253 372
254/** 373/**
255 * fcoe_interface_cleanup() - clean up netdev configurations 374 * fcoe_interface_cleanup() - Clean up a FCoE interface
256 * @fcoe: 375 * @fcoe: The FCoE interface to be cleaned up
257 * 376 *
258 * Caller must be holding the RTNL mutex 377 * Caller must be holding the RTNL mutex
259 */ 378 */
@@ -262,6 +381,7 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
262 struct net_device *netdev = fcoe->netdev; 381 struct net_device *netdev = fcoe->netdev;
263 struct fcoe_ctlr *fip = &fcoe->ctlr; 382 struct fcoe_ctlr *fip = &fcoe->ctlr;
264 u8 flogi_maddr[ETH_ALEN]; 383 u8 flogi_maddr[ETH_ALEN];
384 const struct net_device_ops *ops;
265 385
266 /* 386 /*
267 * Don't listen for Ethernet packets anymore. 387 * Don't listen for Ethernet packets anymore.
@@ -276,16 +396,22 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
276 /* Delete secondary MAC addresses */ 396 /* Delete secondary MAC addresses */
277 memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN); 397 memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
278 dev_unicast_delete(netdev, flogi_maddr); 398 dev_unicast_delete(netdev, flogi_maddr);
279 if (!is_zero_ether_addr(fip->data_src_addr))
280 dev_unicast_delete(netdev, fip->data_src_addr);
281 if (fip->spma) 399 if (fip->spma)
282 dev_unicast_delete(netdev, fip->ctl_src_addr); 400 dev_unicast_delete(netdev, fip->ctl_src_addr);
283 dev_mc_delete(netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0); 401 dev_mc_delete(netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0);
402
403 /* Tell the LLD we are done w/ FCoE */
404 ops = netdev->netdev_ops;
405 if (ops->ndo_fcoe_disable) {
406 if (ops->ndo_fcoe_disable(netdev))
407 FCOE_NETDEV_DBG(netdev, "Failed to disable FCoE"
408 " specific feature for LLD.\n");
409 }
284} 410}
285 411
286/** 412/**
287 * fcoe_interface_release() - fcoe_port kref release function 413 * fcoe_interface_release() - fcoe_port kref release function
288 * @kref: embedded reference count in an fcoe_interface struct 414 * @kref: Embedded reference count in an fcoe_interface struct
289 */ 415 */
290static void fcoe_interface_release(struct kref *kref) 416static void fcoe_interface_release(struct kref *kref)
291{ 417{
@@ -301,8 +427,8 @@ static void fcoe_interface_release(struct kref *kref)
301} 427}
302 428
303/** 429/**
304 * fcoe_interface_get() 430 * fcoe_interface_get() - Get a reference to a FCoE interface
305 * @fcoe: 431 * @fcoe: The FCoE interface to be held
306 */ 432 */
307static inline void fcoe_interface_get(struct fcoe_interface *fcoe) 433static inline void fcoe_interface_get(struct fcoe_interface *fcoe)
308{ 434{
@@ -310,8 +436,8 @@ static inline void fcoe_interface_get(struct fcoe_interface *fcoe)
310} 436}
311 437
312/** 438/**
313 * fcoe_interface_put() 439 * fcoe_interface_put() - Put a reference to a FCoE interface
314 * @fcoe: 440 * @fcoe: The FCoE interface to be released
315 */ 441 */
316static inline void fcoe_interface_put(struct fcoe_interface *fcoe) 442static inline void fcoe_interface_put(struct fcoe_interface *fcoe)
317{ 443{
@@ -319,15 +445,16 @@ static inline void fcoe_interface_put(struct fcoe_interface *fcoe)
319} 445}
320 446
321/** 447/**
322 * fcoe_fip_recv - handle a received FIP frame. 448 * fcoe_fip_recv() - Handler for received FIP frames
323 * @skb: the receive skb 449 * @skb: The receive skb
324 * @dev: associated &net_device 450 * @netdev: The associated net device
325 * @ptype: the &packet_type structure which was used to register this handler. 451 * @ptype: The packet_type structure which was used to register this handler
326 * @orig_dev: original receive &net_device, in case @dev is a bond. 452 * @orig_dev: The original net_device the skb was received on.
453 * (in case dev is a bond)
327 * 454 *
328 * Returns: 0 for success 455 * Returns: 0 for success
329 */ 456 */
330static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *dev, 457static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev,
331 struct packet_type *ptype, 458 struct packet_type *ptype,
332 struct net_device *orig_dev) 459 struct net_device *orig_dev)
333{ 460{
@@ -339,9 +466,9 @@ static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *dev,
339} 466}
340 467
341/** 468/**
342 * fcoe_fip_send() - send an Ethernet-encapsulated FIP frame. 469 * fcoe_fip_send() - Send an Ethernet-encapsulated FIP frame
343 * @fip: FCoE controller. 470 * @fip: The FCoE controller
344 * @skb: FIP Packet. 471 * @skb: The FIP packet to be sent
345 */ 472 */
346static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb) 473static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
347{ 474{
@@ -350,88 +477,118 @@ static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
350} 477}
351 478
352/** 479/**
353 * fcoe_update_src_mac() - Update Ethernet MAC filters. 480 * fcoe_update_src_mac() - Update the Ethernet MAC filters
354 * @fip: FCoE controller. 481 * @lport: The local port to update the source MAC on
355 * @old: Unicast MAC address to delete if the MAC is non-zero. 482 * @addr: Unicast MAC address to add
356 * @new: Unicast MAC address to add.
357 * 483 *
358 * Remove any previously-set unicast MAC filter. 484 * Remove any previously-set unicast MAC filter.
359 * Add secondary FCoE MAC address filter for our OUI. 485 * Add secondary FCoE MAC address filter for our OUI.
360 */ 486 */
361static void fcoe_update_src_mac(struct fcoe_ctlr *fip, u8 *old, u8 *new) 487static void fcoe_update_src_mac(struct fc_lport *lport, u8 *addr)
362{ 488{
363 struct fcoe_interface *fcoe; 489 struct fcoe_port *port = lport_priv(lport);
490 struct fcoe_interface *fcoe = port->fcoe;
364 491
365 fcoe = fcoe_from_ctlr(fip);
366 rtnl_lock(); 492 rtnl_lock();
367 if (!is_zero_ether_addr(old)) 493 if (!is_zero_ether_addr(port->data_src_addr))
368 dev_unicast_delete(fcoe->netdev, old); 494 dev_unicast_delete(fcoe->netdev, port->data_src_addr);
369 dev_unicast_add(fcoe->netdev, new); 495 if (!is_zero_ether_addr(addr))
496 dev_unicast_add(fcoe->netdev, addr);
497 memcpy(port->data_src_addr, addr, ETH_ALEN);
370 rtnl_unlock(); 498 rtnl_unlock();
371} 499}
372 500
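fcoe_update_src_mac() above swaps the FPMA unicast filter under the RTNL lock and caches the programmed address in the fcoe_port so that fcoe_xmit() and the cleanup paths can find it later. A userspace sketch of that delete-old/add-new/remember pattern; the hw_filter_add()/hw_filter_del() hooks are invented stand-ins for dev_unicast_add()/dev_unicast_delete():

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* hypothetical LLD hooks standing in for dev_unicast_add()/_delete() */
static void hw_filter_del(const unsigned char *a) { printf("del filter %02x:..:%02x\n", a[0], a[5]); }
static void hw_filter_add(const unsigned char *a) { printf("add filter %02x:..:%02x\n", a[0], a[5]); }

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER; /* plays rtnl_lock */
static unsigned char data_src_addr[ETH_ALEN];                /* last programmed address */

static int is_zero_ether(const unsigned char *a)
{
        static const unsigned char zero[ETH_ALEN];

        return !memcmp(a, zero, ETH_ALEN);
}

static void update_src_mac(const unsigned char *addr)
{
        pthread_mutex_lock(&cfg_lock);
        if (!is_zero_ether(data_src_addr))
                hw_filter_del(data_src_addr);   /* drop the stale filter first */
        if (!is_zero_ether(addr))
                hw_filter_add(addr);
        memcpy(data_src_addr, addr, ETH_ALEN);  /* remember it for later cleanup */
        pthread_mutex_unlock(&cfg_lock);
}

int main(void)
{
        const unsigned char fpma[ETH_ALEN] = { 0x0e, 0xfc, 0x00, 0x01, 0x02, 0x03 };

        update_src_mac(fpma);   /* first FLOGI: nothing to delete */
        update_src_mac(fpma);   /* re-login: old filter removed, new one added */
        return 0;
}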
373/** 501/**
374 * fcoe_lport_config() - sets up the fc_lport 502 * fcoe_get_src_mac() - return the Ethernet source address for an lport
375 * @lp: ptr to the fc_lport 503 * @lport: libfc lport
504 */
505static u8 *fcoe_get_src_mac(struct fc_lport *lport)
506{
507 struct fcoe_port *port = lport_priv(lport);
508
509 return port->data_src_addr;
510}
511
512/**
513 * fcoe_lport_config() - Set up a local port
514 * @lport: The local port to be setup
376 * 515 *
377 * Returns: 0 for success 516 * Returns: 0 for success
378 */ 517 */
379static int fcoe_lport_config(struct fc_lport *lp) 518static int fcoe_lport_config(struct fc_lport *lport)
380{ 519{
381 lp->link_up = 0; 520 lport->link_up = 0;
382 lp->qfull = 0; 521 lport->qfull = 0;
383 lp->max_retry_count = 3; 522 lport->max_retry_count = 3;
384 lp->max_rport_retry_count = 3; 523 lport->max_rport_retry_count = 3;
385 lp->e_d_tov = 2 * 1000; /* FC-FS default */ 524 lport->e_d_tov = 2 * 1000; /* FC-FS default */
386 lp->r_a_tov = 2 * 2 * 1000; 525 lport->r_a_tov = 2 * 2 * 1000;
387 lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | 526 lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
388 FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL); 527 FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
389 528 lport->does_npiv = 1;
390 fc_lport_init_stats(lp); 529
530 fc_lport_init_stats(lport);
391 531
392 /* lport fc_lport related configuration */ 532 /* lport fc_lport related configuration */
393 fc_lport_config(lp); 533 fc_lport_config(lport);
394 534
395 /* offload related configuration */ 535 /* offload related configuration */
396 lp->crc_offload = 0; 536 lport->crc_offload = 0;
397 lp->seq_offload = 0; 537 lport->seq_offload = 0;
398 lp->lro_enabled = 0; 538 lport->lro_enabled = 0;
399 lp->lro_xid = 0; 539 lport->lro_xid = 0;
400 lp->lso_max = 0; 540 lport->lso_max = 0;
401 541
402 return 0; 542 return 0;
403} 543}
404 544
405/** 545/**
406 * fcoe_queue_timer() - fcoe queue timer 546 * fcoe_queue_timer() - The fcoe queue timer
407 * @lp: the fc_lport pointer 547 * @lport: The local port
408 * 548 *
409 * Calls fcoe_check_wait_queue on timeout 549 * Calls fcoe_check_wait_queue on timeout
550 */
551static void fcoe_queue_timer(ulong lport)
552{
553 fcoe_check_wait_queue((struct fc_lport *)lport, NULL);
554}
555
556/**
557 * fcoe_get_wwn() - Get the world wide name from LLD if it supports it
558 * @netdev: the associated net device
559 * @wwn: the output WWN
560 * @type: the type of WWN (WWPN or WWNN)
410 * 561 *
562 * Returns: 0 for success
411 */ 563 */
412static void fcoe_queue_timer(ulong lp) 564static int fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
413{ 565{
414 fcoe_check_wait_queue((struct fc_lport *)lp, NULL); 566 const struct net_device_ops *ops = netdev->netdev_ops;
567
568 if (ops->ndo_fcoe_get_wwn)
569 return ops->ndo_fcoe_get_wwn(netdev, wwn, type);
570 return -EINVAL;
415} 571}
416 572
417/** 573/**
418 * fcoe_netdev_config() - Set up netdev for SW FCoE 574 * fcoe_netdev_config() - Set up net device for SW FCoE
419 * @lp : ptr to the fc_lport 575 * @lport: The local port that is associated with the net device
420 * @netdev : ptr to the associated netdevice struct 576 * @netdev: The associated net device
421 * 577 *
422 * Must be called after fcoe_lport_config() as it will use lport mutex 578 * Must be called after fcoe_lport_config() as it will use the local port mutex
423 * 579 *
424 * Returns : 0 for success 580 * Returns: 0 for success
425 */ 581 */
426static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev) 582static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
427{ 583{
428 u32 mfs; 584 u32 mfs;
429 u64 wwnn, wwpn; 585 u64 wwnn, wwpn;
430 struct fcoe_interface *fcoe; 586 struct fcoe_interface *fcoe;
431 struct fcoe_port *port; 587 struct fcoe_port *port;
588 int vid = 0;
432 589
433 /* Setup lport private data to point to fcoe softc */ 590 /* Setup lport private data to point to fcoe softc */
434 port = lport_priv(lp); 591 port = lport_priv(lport);
435 fcoe = port->fcoe; 592 fcoe = port->fcoe;
436 593
437 /* 594 /*
@@ -439,86 +596,116 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev)
439 * user-configured limit. If the MFS is too low, fcoe_link_ok() 596 * user-configured limit. If the MFS is too low, fcoe_link_ok()
440 * will return 0, so do this first. 597 * will return 0, so do this first.
441 */ 598 */
442 mfs = netdev->mtu - (sizeof(struct fcoe_hdr) + 599 mfs = netdev->mtu;
443 sizeof(struct fcoe_crc_eof)); 600 if (netdev->features & NETIF_F_FCOE_MTU) {
444 if (fc_set_mfs(lp, mfs)) 601 mfs = FCOE_MTU;
602 FCOE_NETDEV_DBG(netdev, "Supports FCOE_MTU of %d bytes\n", mfs);
603 }
604 mfs -= (sizeof(struct fcoe_hdr) + sizeof(struct fcoe_crc_eof));
605 if (fc_set_mfs(lport, mfs))
445 return -EINVAL; 606 return -EINVAL;
446 607
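Worked numbers for the MFS computation above, assuming the usual encapsulation sizes (14-byte FCoE header, 8-byte CRC/EOF trailer) and an FCOE_MTU of 2158: 2158 - 14 - 8 = 2136, which is exactly a 24-byte FC header plus the 2112-byte maximum FC payload. A standalone check of the arithmetic:

#include <assert.h>
#include <stdio.h>

/* on-wire encapsulation sizes (mirroring struct fcoe_hdr / fcoe_crc_eof) */
#define FCOE_HDR_LEN     14     /* version + 12 reserved bytes + SOF */
#define FCOE_TRAILER_LEN 8      /* CRC-32 + EOF + 3 reserved bytes */
#define FCOE_MTU         2158   /* MTU advertised via NETIF_F_FCOE_MTU */

/* max FC frame size (header + payload) that fits in a given MTU */
static unsigned int mfs_from_mtu(unsigned int mtu)
{
        return mtu - (FCOE_HDR_LEN + FCOE_TRAILER_LEN);
}

int main(void)
{
        unsigned int mfs = mfs_from_mtu(FCOE_MTU);

        /* 2136 = 24-byte FC header + 2112-byte maximum FC payload */
        assert(mfs == 2136);
        printf("mfs for FCOE_MTU: %u\n", mfs);
        printf("mfs for a 1500-byte MTU: %u\n", mfs_from_mtu(1500));
        return 0;
}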
447 /* offload features support */ 608 /* offload features support */
448 if (netdev->features & NETIF_F_SG) 609 if (netdev->features & NETIF_F_SG)
449 lp->sg_supp = 1; 610 lport->sg_supp = 1;
450 611
451 if (netdev->features & NETIF_F_FCOE_CRC) { 612 if (netdev->features & NETIF_F_FCOE_CRC) {
452 lp->crc_offload = 1; 613 lport->crc_offload = 1;
453 FCOE_NETDEV_DBG(netdev, "Supports FCCRC offload\n"); 614 FCOE_NETDEV_DBG(netdev, "Supports FCCRC offload\n");
454 } 615 }
455 if (netdev->features & NETIF_F_FSO) { 616 if (netdev->features & NETIF_F_FSO) {
456 lp->seq_offload = 1; 617 lport->seq_offload = 1;
457 lp->lso_max = netdev->gso_max_size; 618 lport->lso_max = netdev->gso_max_size;
458 FCOE_NETDEV_DBG(netdev, "Supports LSO for max len 0x%x\n", 619 FCOE_NETDEV_DBG(netdev, "Supports LSO for max len 0x%x\n",
459 lp->lso_max); 620 lport->lso_max);
460 } 621 }
461 if (netdev->fcoe_ddp_xid) { 622 if (netdev->fcoe_ddp_xid) {
462 lp->lro_enabled = 1; 623 lport->lro_enabled = 1;
463 lp->lro_xid = netdev->fcoe_ddp_xid; 624 lport->lro_xid = netdev->fcoe_ddp_xid;
464 FCOE_NETDEV_DBG(netdev, "Supports LRO for max xid 0x%x\n", 625 FCOE_NETDEV_DBG(netdev, "Supports LRO for max xid 0x%x\n",
465 lp->lro_xid); 626 lport->lro_xid);
466 } 627 }
467 skb_queue_head_init(&port->fcoe_pending_queue); 628 skb_queue_head_init(&port->fcoe_pending_queue);
468 port->fcoe_pending_queue_active = 0; 629 port->fcoe_pending_queue_active = 0;
469 setup_timer(&port->timer, fcoe_queue_timer, (unsigned long)lp); 630 setup_timer(&port->timer, fcoe_queue_timer, (unsigned long)lport);
470 631
471 wwnn = fcoe_wwn_from_mac(netdev->dev_addr, 1, 0); 632 if (!lport->vport) {
472 fc_set_wwnn(lp, wwnn); 633 /*
473 /* XXX - 3rd arg needs to be vlan id */ 634 * Use NAA 1&2 (FC-FS Rev. 2.0, Sec. 15) to generate WWNN/WWPN:
474 wwpn = fcoe_wwn_from_mac(netdev->dev_addr, 2, 0); 635 * For WWNN, we use NAA 1 w/ bit 27-16 of word 0 as 0.
475 fc_set_wwpn(lp, wwpn); 636 * For WWPN, we use NAA 2 w/ bit 27-16 of word 0 from VLAN ID
637 */
638 if (netdev->priv_flags & IFF_802_1Q_VLAN)
639 vid = vlan_dev_vlan_id(netdev);
640
641 if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
642 wwnn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 1, 0);
643 fc_set_wwnn(lport, wwnn);
644 if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN))
645 wwpn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr,
646 2, vid);
647 fc_set_wwpn(lport, wwpn);
648 }
476 649
477 return 0; 650 return 0;
478} 651}
479 652
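When the LLD offers no ndo_fcoe_get_wwn, the code above derives the WWNs from the controller MAC per the NAA comment: the NAA type lands in the top nibble and, for NAA 2, bits 59..48 carry the VLAN ID. A sketch of that derivation, mirroring what fcoe_wwn_from_mac() is expected to do rather than copying it:

#include <inttypes.h>
#include <stdio.h>

/* Build a WWN per the comment above: NAA 1 for WWNN (qualifier 0),
 * NAA 2 for WWPN (qualifier = VLAN ID), low 48 bits = MAC address. */
static uint64_t wwn_from_mac(const unsigned char mac[6],
                             unsigned int naa, unsigned int port)
{
        uint64_t wwn = 0;
        int i;

        for (i = 0; i < 6; i++)
                wwn = (wwn << 8) | mac[i];      /* MAC in the low 48 bits */

        wwn |= (uint64_t)naa << 60;             /* NAA type in bits 63..60 */
        if (naa == 2)
                wwn |= (uint64_t)(port & 0xfff) << 48;  /* bits 59..48 */
        return wwn;
}

int main(void)
{
        const unsigned char mac[6] = { 0x00, 0x1b, 0x21, 0x12, 0x34, 0x56 };

        printf("wwnn: 0x%016" PRIx64 "\n", wwn_from_mac(mac, 1, 0));
        printf("wwpn: 0x%016" PRIx64 "\n", wwn_from_mac(mac, 2, 100));
        return 0;
}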
480/** 653/**
481 * fcoe_shost_config() - Sets up fc_lport->host 654 * fcoe_shost_config() - Set up the SCSI host associated with a local port
482 * @lp : ptr to the fc_lport 655 * @lport: The local port
483 * @shost : ptr to the associated scsi host 656 * @shost: The SCSI host to associate with the local port
484 * @dev : device associated to scsi host 657 * @dev: The device associated with the SCSI host
485 * 658 *
486 * Must be called after fcoe_lport_config() and fcoe_netdev_config() 659 * Must be called after fcoe_lport_config() and fcoe_netdev_config()
487 * 660 *
488 * Returns : 0 for success 661 * Returns: 0 for success
489 */ 662 */
490static int fcoe_shost_config(struct fc_lport *lp, struct Scsi_Host *shost, 663static int fcoe_shost_config(struct fc_lport *lport, struct Scsi_Host *shost,
491 struct device *dev) 664 struct device *dev)
492{ 665{
493 int rc = 0; 666 int rc = 0;
494 667
495 /* lport scsi host config */ 668 /* lport scsi host config */
496 lp->host = shost; 669 lport->host->max_lun = FCOE_MAX_LUN;
497 670 lport->host->max_id = FCOE_MAX_FCP_TARGET;
498 lp->host->max_lun = FCOE_MAX_LUN; 671 lport->host->max_channel = 0;
499 lp->host->max_id = FCOE_MAX_FCP_TARGET; 672 if (lport->vport)
500 lp->host->max_channel = 0; 673 lport->host->transportt = fcoe_vport_transport_template;
501 lp->host->transportt = scsi_transport_fcoe_sw; 674 else
675 lport->host->transportt = fcoe_transport_template;
502 676
503 /* add the new host to the SCSI-ml */ 677 /* add the new host to the SCSI-ml */
504 rc = scsi_add_host(lp->host, dev); 678 rc = scsi_add_host(lport->host, dev);
505 if (rc) { 679 if (rc) {
506 FCOE_NETDEV_DBG(fcoe_netdev(lp), "fcoe_shost_config: " 680 FCOE_NETDEV_DBG(fcoe_netdev(lport), "fcoe_shost_config: "
507 "error on scsi_add_host\n"); 681 "error on scsi_add_host\n");
508 return rc; 682 return rc;
509 } 683 }
510 sprintf(fc_host_symbolic_name(lp->host), "%s v%s over %s", 684
511 FCOE_NAME, FCOE_VERSION, 685 if (!lport->vport)
512 fcoe_netdev(lp)->name); 686 fc_host_max_npiv_vports(lport->host) = USHORT_MAX;
687
688 snprintf(fc_host_symbolic_name(lport->host), FC_SYMBOLIC_NAME_SIZE,
689 "%s v%s over %s", FCOE_NAME, FCOE_VERSION,
690 fcoe_netdev(lport)->name);
513 691
514 return 0; 692 return 0;
515} 693}
516 694
517/* 695/**
518 * fcoe_oem_match() - match for read types IO 696 * fcoe_oem_match() - The match routine for the offloaded exchange manager
519 * @fp: the fc_frame for new IO. 697 * @fp: The I/O frame
698 *
699 * This routine will be associated with an exchange manager (EM). When
700 * the libfc exchange handling code is looking for an EM to use it will
701 * call this routine and pass it the frame that it wishes to send. This
702 * routine will return True if the associated EM is to be used and False
703 * if the exchange code should continue looking for an EM.
704 *
705 * The offload EM that this routine is associated with will handle any
706 * packets that are for SCSI read requests.
520 * 707 *
521 * Returns : true for read types IO, otherwise returns false. 708 * Returns: true for read types I/O, otherwise returns false.
522 */ 709 */
523bool fcoe_oem_match(struct fc_frame *fp) 710bool fcoe_oem_match(struct fc_frame *fp)
524{ 711{
@@ -527,14 +714,14 @@ bool fcoe_oem_match(struct fc_frame *fp)
527} 714}
528 715
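The match routine described above plugs into a first-fit selection loop on the libfc side: each EM on the lport's list is offered the frame, and an EM with no match callback accepts anything, which is why the offload EM must sit ahead of the default one. A hedged sketch of that dispatch, with invented types in place of the libfc ones:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct frame { bool is_scsi_read; };

struct exch_mgr {
        const char *name;
        /* NULL match means "accepts anything" (the default EM) */
        bool (*match)(const struct frame *fp);
        struct exch_mgr *next;
};

static bool oem_match(const struct frame *fp)
{
        return fp->is_scsi_read;        /* offload EM takes read-type I/O */
}

static struct exch_mgr *pick_em(struct exch_mgr *list, const struct frame *fp)
{
        struct exch_mgr *em;

        for (em = list; em; em = em->next)
                if (!em->match || em->match(fp))
                        return em;
        return NULL;
}

int main(void)
{
        struct exch_mgr def = { "default", NULL, NULL };
        struct exch_mgr oem = { "offload", oem_match, &def };
        struct frame rd = { true }, wr = { false };

        printf("read  -> %s EM\n", pick_em(&oem, &rd)->name);
        printf("write -> %s EM\n", pick_em(&oem, &wr)->name);
        return 0;
}

Listing the offload EM first is what makes read-path frames land in the DDP-capable xid range.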
529/** 716/**
530 * fcoe_em_config() - allocates em for this lport 717 * fcoe_em_config() - Allocate and configure an exchange manager
531 * @lp: the fcoe that em is to allocated for 718 * @lport: The local port that the new EM will be associated with
532 * 719 *
533 * Returns : 0 on success 720 * Returns: 0 on success
534 */ 721 */
535static inline int fcoe_em_config(struct fc_lport *lp) 722static inline int fcoe_em_config(struct fc_lport *lport)
536{ 723{
537 struct fcoe_port *port = lport_priv(lp); 724 struct fcoe_port *port = lport_priv(lport);
538 struct fcoe_interface *fcoe = port->fcoe; 725 struct fcoe_interface *fcoe = port->fcoe;
539 struct fcoe_interface *oldfcoe = NULL; 726 struct fcoe_interface *oldfcoe = NULL;
540 struct net_device *old_real_dev, *cur_real_dev; 727 struct net_device *old_real_dev, *cur_real_dev;
@@ -545,8 +732,9 @@ static inline int fcoe_em_config(struct fc_lport *lp)
545 * Check if need to allocate an em instance for 732 * Check if need to allocate an em instance for
546 * offload exchange ids to be shared across all VN_PORTs/lport. 733 * offload exchange ids to be shared across all VN_PORTs/lport.
547 */ 734 */
548 if (!lp->lro_enabled || !lp->lro_xid || (lp->lro_xid >= max_xid)) { 735 if (!lport->lro_enabled || !lport->lro_xid ||
549 lp->lro_xid = 0; 736 (lport->lro_xid >= max_xid)) {
737 lport->lro_xid = 0;
550 goto skip_oem; 738 goto skip_oem;
551 } 739 }
552 740
@@ -572,16 +760,16 @@ static inline int fcoe_em_config(struct fc_lport *lp)
572 } 760 }
573 761
574 if (fcoe->oem) { 762 if (fcoe->oem) {
575 if (!fc_exch_mgr_add(lp, fcoe->oem, fcoe_oem_match)) { 763 if (!fc_exch_mgr_add(lport, fcoe->oem, fcoe_oem_match)) {
576 printk(KERN_ERR "fcoe_em_config: failed to add " 764 printk(KERN_ERR "fcoe_em_config: failed to add "
577 "offload em:%p on interface:%s\n", 765 "offload em:%p on interface:%s\n",
578 fcoe->oem, fcoe->netdev->name); 766 fcoe->oem, fcoe->netdev->name);
579 return -ENOMEM; 767 return -ENOMEM;
580 } 768 }
581 } else { 769 } else {
582 fcoe->oem = fc_exch_mgr_alloc(lp, FC_CLASS_3, 770 fcoe->oem = fc_exch_mgr_alloc(lport, FC_CLASS_3,
583 FCOE_MIN_XID, lp->lro_xid, 771 FCOE_MIN_XID, lport->lro_xid,
584 fcoe_oem_match); 772 fcoe_oem_match);
585 if (!fcoe->oem) { 773 if (!fcoe->oem) {
586 printk(KERN_ERR "fcoe_em_config: failed to allocate " 774 printk(KERN_ERR "fcoe_em_config: failed to allocate "
587 "em for offload exches on interface:%s\n", 775 "em for offload exches on interface:%s\n",
@@ -593,10 +781,10 @@ static inline int fcoe_em_config(struct fc_lport *lp)
593 /* 781 /*
594 * Exclude offload EM xid range from next EM xid range. 782 * Exclude offload EM xid range from next EM xid range.
595 */ 783 */
596 min_xid += lp->lro_xid + 1; 784 min_xid += lport->lro_xid + 1;
597 785
598skip_oem: 786skip_oem:
599 if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, min_xid, max_xid, NULL)) { 787 if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, min_xid, max_xid, NULL)) {
600 printk(KERN_ERR "fcoe_em_config: failed to " 788 printk(KERN_ERR "fcoe_em_config: failed to "
601 "allocate em on interface %s\n", fcoe->netdev->name); 789 "allocate em on interface %s\n", fcoe->netdev->name);
602 return -ENOMEM; 790 return -ENOMEM;
@@ -606,8 +794,8 @@ skip_oem:
606} 794}
607 795
608/** 796/**
609 * fcoe_if_destroy() - FCoE software HBA tear-down function 797 * fcoe_if_destroy() - Tear down a SW FCoE instance
610 * @lport: fc_lport to destroy 798 * @lport: The local port to be destroyed
611 */ 799 */
612static void fcoe_if_destroy(struct fc_lport *lport) 800static void fcoe_if_destroy(struct fc_lport *lport)
613{ 801{
@@ -630,6 +818,11 @@ static void fcoe_if_destroy(struct fc_lport *lport)
630 /* Free existing transmit skbs */ 818 /* Free existing transmit skbs */
631 fcoe_clean_pending_queue(lport); 819 fcoe_clean_pending_queue(lport);
632 820
821 rtnl_lock();
822 if (!is_zero_ether_addr(port->data_src_addr))
823 dev_unicast_delete(netdev, port->data_src_addr);
824 rtnl_unlock();
825
633 /* receives may not be stopped until after this */ 826 /* receives may not be stopped until after this */
634 fcoe_interface_put(fcoe); 827 fcoe_interface_put(fcoe);
635 828
@@ -650,82 +843,89 @@ static void fcoe_if_destroy(struct fc_lport *lport)
650 scsi_host_put(lport->host); 843 scsi_host_put(lport->host);
651} 844}
652 845
653/* 846/**
654 * fcoe_ddp_setup - calls LLD's ddp_setup through net_device 847 * fcoe_ddp_setup() - Call a LLD's ddp_setup through the net device
655 * @lp: the corresponding fc_lport 848 * @lport: The local port to setup DDP for
656 * @xid: the exchange id for this ddp transfer 849 * @xid: The exchange ID for this DDP transfer
657 * @sgl: the scatterlist describing this transfer 850 * @sgl: The scatterlist describing this transfer
658 * @sgc: number of sg items 851 * @sgc: The number of sg items
659 * 852 *
660 * Returns : 0 no ddp 853 * Returns: 0 if the DDP context was not configured
661 */ 854 */
662static int fcoe_ddp_setup(struct fc_lport *lp, u16 xid, 855static int fcoe_ddp_setup(struct fc_lport *lport, u16 xid,
663 struct scatterlist *sgl, unsigned int sgc) 856 struct scatterlist *sgl, unsigned int sgc)
664{ 857{
665 struct net_device *n = fcoe_netdev(lp); 858 struct net_device *netdev = fcoe_netdev(lport);
666 859
667 if (n->netdev_ops && n->netdev_ops->ndo_fcoe_ddp_setup) 860 if (netdev->netdev_ops->ndo_fcoe_ddp_setup)
668 return n->netdev_ops->ndo_fcoe_ddp_setup(n, xid, sgl, sgc); 861 return netdev->netdev_ops->ndo_fcoe_ddp_setup(netdev,
862 xid, sgl,
863 sgc);
669 864
670 return 0; 865 return 0;
671} 866}
672 867
673/* 868/**
674 * fcoe_ddp_done - calls LLD's ddp_done through net_device 869 * fcoe_ddp_done() - Call a LLD's ddp_done through the net device
675 * @lp: the corresponding fc_lport 870 * @lport: The local port to complete DDP on
676 * @xid: the exchange id for this ddp transfer 871 * @xid: The exchange ID for this DDP transfer
677 * 872 *
678 * Returns : the length of data that have been completed by ddp 873 * Returns: the length of data that has been completed by DDP
679 */ 874 */
680static int fcoe_ddp_done(struct fc_lport *lp, u16 xid) 875static int fcoe_ddp_done(struct fc_lport *lport, u16 xid)
681{ 876{
682 struct net_device *n = fcoe_netdev(lp); 877 struct net_device *netdev = fcoe_netdev(lport);
683 878
684 if (n->netdev_ops && n->netdev_ops->ndo_fcoe_ddp_done) 879 if (netdev->netdev_ops->ndo_fcoe_ddp_done)
685 return n->netdev_ops->ndo_fcoe_ddp_done(n, xid); 880 return netdev->netdev_ops->ndo_fcoe_ddp_done(netdev, xid);
686 return 0; 881 return 0;
687} 882}
688 883
689static struct libfc_function_template fcoe_libfc_fcn_templ = {
690 .frame_send = fcoe_xmit,
691 .ddp_setup = fcoe_ddp_setup,
692 .ddp_done = fcoe_ddp_done,
693};
694
695/** 884/**
696 * fcoe_if_create() - this function creates the fcoe port 885 * fcoe_if_create() - Create a FCoE instance on an interface
697 * @fcoe: fcoe_interface structure to create an fc_lport instance on 886 * @fcoe: The FCoE interface to create a local port on
698 * @parent: device pointer to be the parent in sysfs for the SCSI host 887 * @parent: The device pointer to be the parent in sysfs for the SCSI host
888 * @npiv: Indicates if the port is a vport or not
699 * 889 *
700 * Creates fc_lport struct and scsi_host for lport, configures lport. 890 * Creates a fc_lport instance and a Scsi_Host instance and configure them.
701 * 891 *
702 * Returns : The allocated fc_lport or an error pointer 892 * Returns: The allocated fc_lport or an error pointer
703 */ 893 */
704static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe, 894static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
705 struct device *parent) 895 struct device *parent, int npiv)
706{ 896{
707 int rc; 897 struct net_device *netdev = fcoe->netdev;
708 struct fc_lport *lport = NULL; 898 struct fc_lport *lport = NULL;
709 struct fcoe_port *port; 899 struct fcoe_port *port;
710 struct Scsi_Host *shost; 900 struct Scsi_Host *shost;
711 struct net_device *netdev = fcoe->netdev; 901 int rc;
902 /*
903 * parent is only a vport if npiv is 1,
904 * but we'll only use vport in that case so go ahead and set it
905 */
906 struct fc_vport *vport = dev_to_vport(parent);
712 907
713 FCOE_NETDEV_DBG(netdev, "Create Interface\n"); 908 FCOE_NETDEV_DBG(netdev, "Create Interface\n");
714 909
715 shost = libfc_host_alloc(&fcoe_shost_template, 910 if (!npiv) {
716 sizeof(struct fcoe_port)); 911 lport = libfc_host_alloc(&fcoe_shost_template,
717 if (!shost) { 912 sizeof(struct fcoe_port));
913 } else {
914 lport = libfc_vport_create(vport,
915 sizeof(struct fcoe_port));
916 }
917 if (!lport) {
718 FCOE_NETDEV_DBG(netdev, "Could not allocate host structure\n"); 918 FCOE_NETDEV_DBG(netdev, "Could not allocate host structure\n");
719 rc = -ENOMEM; 919 rc = -ENOMEM;
720 goto out; 920 goto out;
721 } 921 }
722 lport = shost_priv(shost); 922 shost = lport->host;
723 port = lport_priv(lport); 923 port = lport_priv(lport);
724 port->lport = lport; 924 port->lport = lport;
725 port->fcoe = fcoe; 925 port->fcoe = fcoe;
726 INIT_WORK(&port->destroy_work, fcoe_destroy_work); 926 INIT_WORK(&port->destroy_work, fcoe_destroy_work);
727 927
728 /* configure fc_lport, e.g., em */ 928 /* configure a fc_lport including the exchange manager */
729 rc = fcoe_lport_config(lport); 929 rc = fcoe_lport_config(lport);
730 if (rc) { 930 if (rc) {
731 FCOE_NETDEV_DBG(netdev, "Could not configure lport for the " 931 FCOE_NETDEV_DBG(netdev, "Could not configure lport for the "
@@ -733,6 +933,13 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
733 goto out_host_put; 933 goto out_host_put;
734 } 934 }
735 935
936 if (npiv) {
937 FCOE_NETDEV_DBG(netdev, "Setting vport names, 0x%llX 0x%llX\n",
938 vport->node_name, vport->port_name);
939 fc_set_wwnn(lport, vport->node_name);
940 fc_set_wwpn(lport, vport->port_name);
941 }
942
736 /* configure lport network properties */ 943 /* configure lport network properties */
737 rc = fcoe_netdev_config(lport, netdev); 944 rc = fcoe_netdev_config(lport, netdev);
738 if (rc) { 945 if (rc) {
@@ -757,21 +964,24 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
757 goto out_lp_destroy; 964 goto out_lp_destroy;
758 } 965 }
759 966
760 /* 967 if (!npiv) {
761 * fcoe_em_alloc() and fcoe_hostlist_add() both 968 /*
762 * need to be atomic with respect to other changes to the hostlist 969 * fcoe_em_alloc() and fcoe_hostlist_add() both
763 * since fcoe_em_alloc() looks for an existing EM 970 * need to be atomic with respect to other changes to the
764 * instance on host list updated by fcoe_hostlist_add(). 971 * hostlist since fcoe_em_alloc() looks for an existing EM
765 * 972 * instance on host list updated by fcoe_hostlist_add().
766 * This is currently handled through the fcoe_config_mutex begin held. 973 *
767 */ 974 * This is currently handled through the fcoe_config_mutex
975 * being held.
976 */
768 977
769 /* lport exch manager allocation */ 978 /* lport exch manager allocation */
770 rc = fcoe_em_config(lport); 979 rc = fcoe_em_config(lport);
771 if (rc) { 980 if (rc) {
772 FCOE_NETDEV_DBG(netdev, "Could not configure the EM for the " 981 FCOE_NETDEV_DBG(netdev, "Could not configure the EM "
773 "interface\n"); 982 "for the interface\n");
774 goto out_lp_destroy; 983 goto out_lp_destroy;
984 }
775 } 985 }
776 986
777 fcoe_interface_get(fcoe); 987 fcoe_interface_get(fcoe);
@@ -786,17 +996,20 @@ out:
786} 996}
787 997
788/** 998/**
789 * fcoe_if_init() - attach to scsi transport 999 * fcoe_if_init() - Initialization routine for fcoe.ko
790 * 1000 *
791 * Returns : 0 on success 1001 * Attaches the SW FCoE transport to the FC transport
1002 *
1003 * Returns: 0 on success
792 */ 1004 */
793static int __init fcoe_if_init(void) 1005static int __init fcoe_if_init(void)
794{ 1006{
795 /* attach to scsi transport */ 1007 /* attach to scsi transport */
796 scsi_transport_fcoe_sw = 1008 fcoe_transport_template = fc_attach_transport(&fcoe_transport_function);
797 fc_attach_transport(&fcoe_transport_function); 1009 fcoe_vport_transport_template =
1010 fc_attach_transport(&fcoe_vport_transport_function);
798 1011
799 if (!scsi_transport_fcoe_sw) { 1012 if (!fcoe_transport_template) {
800 printk(KERN_ERR "fcoe: Failed to attach to the FC transport\n"); 1013 printk(KERN_ERR "fcoe: Failed to attach to the FC transport\n");
801 return -ENODEV; 1014 return -ENODEV;
802 } 1015 }
@@ -805,20 +1018,24 @@ static int __init fcoe_if_init(void)
805} 1018}
806 1019
807/** 1020/**
808 * fcoe_if_exit() - detach from scsi transport 1021 * fcoe_if_exit() - Tear down fcoe.ko
1022 *
1023 * Detaches the SW FCoE transport from the FC transport
809 * 1024 *
810 * Returns : 0 on success 1025 * Returns: 0 on success
811 */ 1026 */
812int __exit fcoe_if_exit(void) 1027int __exit fcoe_if_exit(void)
813{ 1028{
814 fc_release_transport(scsi_transport_fcoe_sw); 1029 fc_release_transport(fcoe_transport_template);
815 scsi_transport_fcoe_sw = NULL; 1030 fc_release_transport(fcoe_vport_transport_template);
1031 fcoe_transport_template = NULL;
1032 fcoe_vport_transport_template = NULL;
816 return 0; 1033 return 0;
817} 1034}
818 1035
819/** 1036/**
820 * fcoe_percpu_thread_create() - Create a receive thread for an online cpu 1037 * fcoe_percpu_thread_create() - Create a receive thread for an online CPU
821 * @cpu: cpu index for the online cpu 1038 * @cpu: The CPU index of the CPU to create a receive thread for
822 */ 1039 */
823static void fcoe_percpu_thread_create(unsigned int cpu) 1040static void fcoe_percpu_thread_create(unsigned int cpu)
824{ 1041{
@@ -841,8 +1058,8 @@ static void fcoe_percpu_thread_create(unsigned int cpu)
841} 1058}
842 1059
843/** 1060/**
844 * fcoe_percpu_thread_destroy() - removes the rx thread for the given cpu 1061 * fcoe_percpu_thread_destroy() - Remove the receive thread of a CPU
845 * @cpu: cpu index the rx thread is to be removed 1062 * @cpu: The CPU index of the CPU whose receive thread is to be destroyed
846 * 1063 *
847 * Destroys a per-CPU Rx thread. Any pending skbs are moved to the 1064 * Destroys a per-CPU Rx thread. Any pending skbs are moved to the
848 * current CPU's Rx thread. If the thread being destroyed is bound to 1065 * current CPU's Rx thread. If the thread being destroyed is bound to
@@ -890,7 +1107,7 @@ static void fcoe_percpu_thread_destroy(unsigned int cpu)
890 } else { 1107 } else {
891 /* 1108 /*
892 * The targeted CPU is not initialized and cannot accept 1109 * The targeted CPU is not initialized and cannot accept
893 * new skbs. Unlock the targeted CPU and drop the skbs 1110 * new skbs. Unlock the targeted CPU and drop the skbs
894 * on the CPU that is going offline. 1111 * on the CPU that is going offline.
895 */ 1112 */
896 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL) 1113 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
@@ -931,12 +1148,12 @@ static void fcoe_percpu_thread_destroy(unsigned int cpu)
931} 1148}
932 1149
933/** 1150/**
934 * fcoe_cpu_callback() - fcoe cpu hotplug event callback 1151 * fcoe_cpu_callback() - Handler for CPU hotplug events
935 * @nfb: callback data block 1152 * @nfb: The callback data block
936 * @action: event triggering the callback 1153 * @action: The event triggering the callback
937 * @hcpu: index for the cpu of this event 1154 * @hcpu: The index of the CPU that the event is for
938 * 1155 *
939 * This creates or destroys per cpu data for fcoe 1156 * This creates or destroys per-CPU data for fcoe
940 * 1157 *
941 * Returns NOTIFY_OK always. 1158 * Returns NOTIFY_OK always.
942 */ 1159 */
@@ -962,25 +1179,22 @@ static int fcoe_cpu_callback(struct notifier_block *nfb,
962 return NOTIFY_OK; 1179 return NOTIFY_OK;
963} 1180}
964 1181
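fcoe_cpu_callback() follows the standard hotplug-notifier shape: act on the transition, never veto it. A compact model of that shape, with invented event codes in place of CPU_ONLINE/CPU_DEAD:

#include <stdio.h>

/* invented event codes standing in for CPU_ONLINE / CPU_DEAD */
enum { CPU_CAME_ONLINE, CPU_WENT_DEAD };

static void thread_create(unsigned int cpu) { printf("create rx thread on cpu %u\n", cpu); }
static void thread_destroy(unsigned int cpu) { printf("destroy rx thread on cpu %u\n", cpu); }

/* shape of the callback: act on the event, always acknowledge it */
static int cpu_callback(unsigned long action, unsigned int cpu)
{
        switch (action) {
        case CPU_CAME_ONLINE:
                thread_create(cpu);
                break;
        case CPU_WENT_DEAD:
                thread_destroy(cpu);    /* pending skbs move to a live CPU */
                break;
        }
        return 0;       /* NOTIFY_OK: never veto a hotplug transition */
}

int main(void)
{
        cpu_callback(CPU_CAME_ONLINE, 2);
        cpu_callback(CPU_WENT_DEAD, 2);
        return 0;
}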
965static struct notifier_block fcoe_cpu_notifier = {
966 .notifier_call = fcoe_cpu_callback,
967};
968
969/** 1182/**
970 * fcoe_rcv() - this is the fcoe receive function called by NET_RX_SOFTIRQ 1183 * fcoe_rcv() - Receive packets from a net device
971 * @skb: the receive skb 1184 * @skb: The received packet
972 * @dev: associated net device 1185 * @netdev: The net device that the packet was received on
973 * @ptype: context 1186 * @ptype: The packet type context
974 * @olddev: last device 1187 * @olddev: The last device net device
975 * 1188 *
976 * this function will receive the packet and build fc frame and pass it up 1189 * This routine is called by NET_RX_SOFTIRQ. It receives a packet, builds an
1190 * FC frame and passes the frame to libfc.
977 * 1191 *
978 * Returns: 0 for success 1192 * Returns: 0 for success
979 */ 1193 */
980int fcoe_rcv(struct sk_buff *skb, struct net_device *dev, 1194int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
981 struct packet_type *ptype, struct net_device *olddev) 1195 struct packet_type *ptype, struct net_device *olddev)
982{ 1196{
983 struct fc_lport *lp; 1197 struct fc_lport *lport;
984 struct fcoe_rcv_info *fr; 1198 struct fcoe_rcv_info *fr;
985 struct fcoe_interface *fcoe; 1199 struct fcoe_interface *fcoe;
986 struct fc_frame_header *fh; 1200 struct fc_frame_header *fh;
@@ -988,15 +1202,15 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
988 unsigned int cpu; 1202 unsigned int cpu;
989 1203
990 fcoe = container_of(ptype, struct fcoe_interface, fcoe_packet_type); 1204 fcoe = container_of(ptype, struct fcoe_interface, fcoe_packet_type);
991 lp = fcoe->ctlr.lp; 1205 lport = fcoe->ctlr.lp;
992 if (unlikely(lp == NULL)) { 1206 if (unlikely(!lport)) {
993 FCOE_NETDEV_DBG(dev, "Cannot find hba structure"); 1207 FCOE_NETDEV_DBG(netdev, "Cannot find hba structure");
994 goto err2; 1208 goto err2;
995 } 1209 }
996 if (!lp->link_up) 1210 if (!lport->link_up)
997 goto err2; 1211 goto err2;
998 1212
999 FCOE_NETDEV_DBG(dev, "skb_info: len:%d data_len:%d head:%p " 1213 FCOE_NETDEV_DBG(netdev, "skb_info: len:%d data_len:%d head:%p "
1000 "data:%p tail:%p end:%p sum:%d dev:%s", 1214 "data:%p tail:%p end:%p sum:%d dev:%s",
1001 skb->len, skb->data_len, skb->head, skb->data, 1215 skb->len, skb->data_len, skb->head, skb->data,
1002 skb_tail_pointer(skb), skb_end_pointer(skb), 1216 skb_tail_pointer(skb), skb_end_pointer(skb),
@@ -1004,7 +1218,7 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
1004 1218
1005 /* check for FCOE packet type */ 1219 /* check for FCOE packet type */
1006 if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) { 1220 if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
1007 FCOE_NETDEV_DBG(dev, "Wrong FC type frame"); 1221 FCOE_NETDEV_DBG(netdev, "Wrong FC type frame");
1008 goto err; 1222 goto err;
1009 } 1223 }
1010 1224
@@ -1013,14 +1227,14 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
1013 * and FC headers are pulled into the linear data area. 1227 * and FC headers are pulled into the linear data area.
1014 */ 1228 */
1015 if (unlikely((skb->len < FCOE_MIN_FRAME) || 1229 if (unlikely((skb->len < FCOE_MIN_FRAME) ||
1016 !pskb_may_pull(skb, FCOE_HEADER_LEN))) 1230 !pskb_may_pull(skb, FCOE_HEADER_LEN)))
1017 goto err; 1231 goto err;
1018 1232
1019 skb_set_transport_header(skb, sizeof(struct fcoe_hdr)); 1233 skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
1020 fh = (struct fc_frame_header *) skb_transport_header(skb); 1234 fh = (struct fc_frame_header *) skb_transport_header(skb);
1021 1235
1022 fr = fcoe_dev_from_skb(skb); 1236 fr = fcoe_dev_from_skb(skb);
1023 fr->fr_dev = lp; 1237 fr->fr_dev = lport;
1024 fr->ptype = ptype; 1238 fr->ptype = ptype;
1025 1239
1026 /* 1240 /*
@@ -1042,12 +1256,12 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
1042 * the first CPU now. For non-SMP systems this 1256 * the first CPU now. For non-SMP systems this
1043 * will check the same CPU twice. 1257 * will check the same CPU twice.
1044 */ 1258 */
1045 FCOE_NETDEV_DBG(dev, "CPU is online, but no receive thread " 1259 FCOE_NETDEV_DBG(netdev, "CPU is online, but no receive thread "
1046 "ready for incoming skb- using first online " 1260 "ready for incoming skb- using first online "
1047 "CPU.\n"); 1261 "CPU.\n");
1048 1262
1049 spin_unlock_bh(&fps->fcoe_rx_list.lock); 1263 spin_unlock_bh(&fps->fcoe_rx_list.lock);
1050 cpu = first_cpu(cpu_online_map); 1264 cpu = cpumask_first(cpu_online_mask);
1051 fps = &per_cpu(fcoe_percpu, cpu); 1265 fps = &per_cpu(fcoe_percpu, cpu);
1052 spin_lock_bh(&fps->fcoe_rx_list.lock); 1266 spin_lock_bh(&fps->fcoe_rx_list.lock);
1053 if (!fps->thread) { 1267 if (!fps->thread) {
@@ -1061,15 +1275,29 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
1061 * this skb. We also have this receive thread locked, 1275 * this skb. We also have this receive thread locked,
1062 * so we're free to queue skbs into its queue. 1276 * so we're free to queue skbs into its queue.
1063 */ 1277 */
1064 __skb_queue_tail(&fps->fcoe_rx_list, skb);
1065 if (fps->fcoe_rx_list.qlen == 1)
1066 wake_up_process(fps->thread);
1067 1278
1068 spin_unlock_bh(&fps->fcoe_rx_list.lock); 1279 /* If this is a SCSI-FCP frame, and this is already executing on the
1280 * correct CPU, and the queue for this CPU is empty, then go ahead
1281 * and process the frame directly in the softirq context.
1282 * This lets us process completions without context switching from the
1283 * NET_RX softirq, to our receive processing thread, and then back to
1284 * BLOCK softirq context.
1285 */
1286 if (fh->fh_type == FC_TYPE_FCP &&
1287 cpu == smp_processor_id() &&
1288 skb_queue_empty(&fps->fcoe_rx_list)) {
1289 spin_unlock_bh(&fps->fcoe_rx_list.lock);
1290 fcoe_recv_frame(skb);
1291 } else {
1292 __skb_queue_tail(&fps->fcoe_rx_list, skb);
1293 if (fps->fcoe_rx_list.qlen == 1)
1294 wake_up_process(fps->thread);
1295 spin_unlock_bh(&fps->fcoe_rx_list.lock);
1296 }
1069 1297
1070 return 0; 1298 return 0;
1071err: 1299err:
1072 fc_lport_get_stats(lp)->ErrorFrames++; 1300 fc_lport_get_stats(lport)->ErrorFrames++;
1073 1301
1074err2: 1302err2:
1075 kfree_skb(skb); 1303 kfree_skb(skb);
@@ -1077,17 +1305,21 @@ err2:
1077} 1305}
1078 1306
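The fast path added above saves two context switches (NET_RX softirq to the Rx thread and back to the BLOCK softirq) when an FCP completion arrives on the CPU that owns its queue; requiring an empty queue is what preserves per-CPU frame ordering. The dispatch decision, modeled in isolation:

#include <stdbool.h>
#include <stdio.h>

struct rx_queue {
        unsigned int qlen;
        unsigned int cpu;       /* CPU whose thread drains this queue */
};

/* Mirror of the fast-path test: FCP frame, already on the target CPU,
 * and nothing queued ahead of us -> process inline in softirq context. */
static bool can_process_inline(bool is_fcp, unsigned int this_cpu,
                               const struct rx_queue *q)
{
        return is_fcp && this_cpu == q->cpu && q->qlen == 0;
}

int main(void)
{
        struct rx_queue q = { .qlen = 0, .cpu = 1 };

        printf("%d\n", can_process_inline(true, 1, &q)); /* 1: inline */
        q.qlen = 3;
        printf("%d\n", can_process_inline(true, 1, &q)); /* 0: must queue */
        return 0;
}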
1079/** 1307/**
1080 * fcoe_start_io() - pass to netdev to start xmit for fcoe 1308 * fcoe_start_io() - Start FCoE I/O
1081 * @skb: the skb to be xmitted 1309 * @skb: The packet to be transmitted
1310 *
1311 * This routine is called from the net device to start transmitting
1312 * FCoE packets.
1082 * 1313 *
1083 * Returns: 0 for success 1314 * Returns: 0 for success
1084 */ 1315 */
1085static inline int fcoe_start_io(struct sk_buff *skb) 1316static inline int fcoe_start_io(struct sk_buff *skb)
1086{ 1317{
1318 struct sk_buff *nskb;
1087 int rc; 1319 int rc;
1088 1320
1089 skb_get(skb); 1321 nskb = skb_clone(skb, GFP_ATOMIC);
1090 rc = dev_queue_xmit(skb); 1322 rc = dev_queue_xmit(nskb);
1091 if (rc != 0) 1323 if (rc != 0)
1092 return rc; 1324 return rc;
1093 kfree_skb(skb); 1325 kfree_skb(skb);
@@ -1095,9 +1327,15 @@ static inline int fcoe_start_io(struct sk_buff *skb)
1095} 1327}
1096 1328
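The switch from skb_get() to skb_clone() matters because dev_queue_xmit() assumes ownership of (and may modify) the skb it is handed; transmitting a private clone leaves the caller's copy pristine so it can be requeued if the transmit fails. A userspace model of that ownership dance, with a stub in place of dev_queue_xmit():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf { char data[32]; };

static struct buf *buf_clone(const struct buf *b)
{
        struct buf *n = malloc(sizeof(*n));

        if (n)
                memcpy(n, b, sizeof(*n));
        return n;
}

/* stub for dev_queue_xmit(): consumes its argument and may mangle it */
static int xmit(struct buf *b, int fail)
{
        if (fail) {
                free(b);        /* the stack frees the buffer even on error */
                return -1;
        }
        memset(b->data, 0, sizeof(b->data));    /* "modified by the driver" */
        free(b);
        return 0;
}

static int start_io(struct buf *b, int fail)
{
        struct buf *n = buf_clone(b);   /* transmit a private copy */

        if (!n || xmit(n, fail))
                return -1;      /* caller still owns the pristine b: requeue */
        free(b);                /* sent successfully: drop the original */
        return 0;
}

int main(void)
{
        struct buf *b = malloc(sizeof(*b));

        strcpy(b->data, "FLOGI");
        if (start_io(b, 1))
                printf("xmit failed, \"%s\" intact for retry\n", b->data);
        start_io(b, 0);
        return 0;
}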
1097/** 1329/**
1098 * fcoe_get_paged_crc_eof() - in case we need to alloc a page for crc_eof 1330 * fcoe_get_paged_crc_eof() - Allocate a page to be used for the trailer CRC
1099 * @skb: the skb to be xmitted 1331 * @skb: The packet to be transmitted
1100 * @tlen: total len 1332 * @tlen: The total length of the trailer
1333 *
1334 * This routine allocates a page for frame trailers. The page is re-used if
1335 * there is enough room left on it for the current trailer. If there isn't
1336 * enough room left, a new page is allocated for the trailer. References
1337 * to the page from this function as well as from the skbs using the page
1338 * fragments ensure that the page is freed at the appropriate time.
1101 * 1339 *
1102 * Returns: 0 for success 1340 * Returns: 0 for success
1103 */ 1341 */
@@ -1136,11 +1374,12 @@ static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen)
1136} 1374}
1137 1375
1138/** 1376/**
1139 * fcoe_fc_crc() - calculates FC CRC in this fcoe skb 1377 * fcoe_fc_crc() - Calculates the CRC for a given frame
1140 * @fp: the fc_frame containing data to be checksummed 1378 * @fp: The frame to be checksummed
1141 * 1379 *
1142 * This uses crc32() to calculate the crc for port frame 1380 * This uses the crc32() routine to calculate the CRC for a frame
1143 * Return : 32 bit crc 1381 *
1382 * Return: The 32 bit CRC value
1144 */ 1383 */
1145u32 fcoe_fc_crc(struct fc_frame *fp) 1384u32 fcoe_fc_crc(struct fc_frame *fp)
1146{ 1385{
@@ -1171,13 +1410,13 @@ u32 fcoe_fc_crc(struct fc_frame *fp)
1171} 1410}
1172 1411
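On receive (further down) a frame passes the CRC test when the little-endian trailer equals ~crc32(~0, data, len); that final complement makes the on-wire value the standard CRC-32, so a userspace check can lean on zlib, whose crc32(0, ...) already applies the inversion. A sketch assuming zlib is available (link with -lz) and a little-endian host:

#include <stdint.h>
#include <stdio.h>
#include <zlib.h>       /* zlib's crc32() already applies the final XOR */

/* Verify a frame against its trailer CRC. The kernel's test is
 * le32_to_cpu(trailer) == ~crc32(~0, data, len); zlib's crc32(0, ...)
 * computes exactly that complemented value. */
static int fc_crc_ok(const unsigned char *data, size_t len, uint32_t trailer_le)
{
        uint32_t want = (uint32_t)crc32(0L, data, (uInt)len);

        return trailer_le == want;      /* little-endian host assumed */
}

int main(void)
{
        const unsigned char frame[] = "fc frame payload";
        uint32_t crc = (uint32_t)crc32(0L, frame, (uInt)sizeof(frame));

        printf("crc ok:  %d\n", fc_crc_ok(frame, sizeof(frame), crc));
        printf("corrupt: %d\n", fc_crc_ok(frame, sizeof(frame), crc ^ 1));
        return 0;
}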
1173/** 1412/**
1174 * fcoe_xmit() - FCoE frame transmit function 1413 * fcoe_xmit() - Transmit a FCoE frame
1175 * @lp: the associated local fcoe 1414 * @lport: The local port that the frame is to be transmitted for
1176 * @fp: the fc_frame to be transmitted 1415 * @fp: The frame to be transmitted
1177 * 1416 *
1178 * Return : 0 for success 1417 * Return: 0 for success
1179 */ 1418 */
1180int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) 1419int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
1181{ 1420{
1182 int wlen; 1421 int wlen;
1183 u32 crc; 1422 u32 crc;
@@ -1189,7 +1428,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
1189 unsigned int hlen; /* header length implies the version */ 1428 unsigned int hlen; /* header length implies the version */
1190 unsigned int tlen; /* trailer length */ 1429 unsigned int tlen; /* trailer length */
1191 unsigned int elen; /* eth header, may include vlan */ 1430 unsigned int elen; /* eth header, may include vlan */
1192 struct fcoe_port *port = lport_priv(lp); 1431 struct fcoe_port *port = lport_priv(lport);
1193 struct fcoe_interface *fcoe = port->fcoe; 1432 struct fcoe_interface *fcoe = port->fcoe;
1194 u8 sof, eof; 1433 u8 sof, eof;
1195 struct fcoe_hdr *hp; 1434 struct fcoe_hdr *hp;
@@ -1200,13 +1439,13 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
1200 skb = fp_skb(fp); 1439 skb = fp_skb(fp);
1201 wlen = skb->len / FCOE_WORD_TO_BYTE; 1440 wlen = skb->len / FCOE_WORD_TO_BYTE;
1202 1441
1203 if (!lp->link_up) { 1442 if (!lport->link_up) {
1204 kfree_skb(skb); 1443 kfree_skb(skb);
1205 return 0; 1444 return 0;
1206 } 1445 }
1207 1446
1208 if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) && 1447 if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
1209 fcoe_ctlr_els_send(&fcoe->ctlr, skb)) 1448 fcoe_ctlr_els_send(&fcoe->ctlr, lport, skb))
1210 return 0; 1449 return 0;
1211 1450
1212 sof = fr_sof(fp); 1451 sof = fr_sof(fp);
@@ -1218,7 +1457,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
1218 wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE; 1457 wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
1219 1458
1220 /* crc offload */ 1459 /* crc offload */
1221 if (likely(lp->crc_offload)) { 1460 if (likely(lport->crc_offload)) {
1222 skb->ip_summed = CHECKSUM_PARTIAL; 1461 skb->ip_summed = CHECKSUM_PARTIAL;
1223 skb->csum_start = skb_headroom(skb); 1462 skb->csum_start = skb_headroom(skb);
1224 skb->csum_offset = skb->len; 1463 skb->csum_offset = skb->len;
@@ -1271,7 +1510,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
1271 if (unlikely(fcoe->ctlr.flogi_oxid != FC_XID_UNKNOWN)) 1510 if (unlikely(fcoe->ctlr.flogi_oxid != FC_XID_UNKNOWN))
1272 memcpy(eh->h_source, fcoe->ctlr.ctl_src_addr, ETH_ALEN); 1511 memcpy(eh->h_source, fcoe->ctlr.ctl_src_addr, ETH_ALEN);
1273 else 1512 else
1274 memcpy(eh->h_source, fcoe->ctlr.data_src_addr, ETH_ALEN); 1513 memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);
1275 1514
1276 hp = (struct fcoe_hdr *)(eh + 1); 1515 hp = (struct fcoe_hdr *)(eh + 1);
1277 memset(hp, 0, sizeof(*hp)); 1516 memset(hp, 0, sizeof(*hp));
@@ -1280,7 +1519,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
1280 hp->fcoe_sof = sof; 1519 hp->fcoe_sof = sof;
1281 1520
1282 /* fcoe lso, mss is in max_payload which is non-zero for FCP data */ 1521 /* fcoe lso, mss is in max_payload which is non-zero for FCP data */
1283 if (lp->seq_offload && fr_max_payload(fp)) { 1522 if (lport->seq_offload && fr_max_payload(fp)) {
1284 skb_shinfo(skb)->gso_type = SKB_GSO_FCOE; 1523 skb_shinfo(skb)->gso_type = SKB_GSO_FCOE;
1285 skb_shinfo(skb)->gso_size = fr_max_payload(fp); 1524 skb_shinfo(skb)->gso_size = fr_max_payload(fp);
1286 } else { 1525 } else {
@@ -1288,23 +1527,23 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
1288 skb_shinfo(skb)->gso_size = 0; 1527 skb_shinfo(skb)->gso_size = 0;
1289 } 1528 }
1290 /* update tx stats: regardless if LLD fails */ 1529 /* update tx stats: regardless if LLD fails */
1291 stats = fc_lport_get_stats(lp); 1530 stats = fc_lport_get_stats(lport);
1292 stats->TxFrames++; 1531 stats->TxFrames++;
1293 stats->TxWords += wlen; 1532 stats->TxWords += wlen;
1294 1533
1295 /* send down to lld */ 1534 /* send down to lld */
1296 fr_dev(fp) = lp; 1535 fr_dev(fp) = lport;
1297 if (port->fcoe_pending_queue.qlen) 1536 if (port->fcoe_pending_queue.qlen)
1298 fcoe_check_wait_queue(lp, skb); 1537 fcoe_check_wait_queue(lport, skb);
1299 else if (fcoe_start_io(skb)) 1538 else if (fcoe_start_io(skb))
1300 fcoe_check_wait_queue(lp, skb); 1539 fcoe_check_wait_queue(lport, skb);
1301 1540
1302 return 0; 1541 return 0;
1303} 1542}
1304 1543
1305/** 1544/**
1306 * fcoe_percpu_flush_done() - Indicate percpu queue flush completion. 1545 * fcoe_percpu_flush_done() - Indicate per-CPU queue flush completion
1307 * @skb: the skb being completed. 1546 * @skb: The completed skb (argument required by destructor)
1308 */ 1547 */
1309static void fcoe_percpu_flush_done(struct sk_buff *skb) 1548static void fcoe_percpu_flush_done(struct sk_buff *skb)
1310{ 1549{
@@ -1312,26 +1551,134 @@ static void fcoe_percpu_flush_done(struct sk_buff *skb)
1312} 1551}
1313 1552
1314/** 1553/**
1315 * fcoe_percpu_receive_thread() - recv thread per cpu 1554 * fcoe_recv_frame() - process a single received frame
1316 * @arg: ptr to the fcoe per cpu struct 1555 * @skb: frame to process
1317 *
1318 * Return: 0 for success
1319 */ 1556 */
1320int fcoe_percpu_receive_thread(void *arg) 1557static void fcoe_recv_frame(struct sk_buff *skb)
1321{ 1558{
1322 struct fcoe_percpu_s *p = arg;
1323 u32 fr_len; 1559 u32 fr_len;
1324 struct fc_lport *lp; 1560 struct fc_lport *lport;
1325 struct fcoe_rcv_info *fr; 1561 struct fcoe_rcv_info *fr;
1326 struct fcoe_dev_stats *stats; 1562 struct fcoe_dev_stats *stats;
1327 struct fc_frame_header *fh; 1563 struct fc_frame_header *fh;
1328 struct sk_buff *skb;
1329 struct fcoe_crc_eof crc_eof; 1564 struct fcoe_crc_eof crc_eof;
1330 struct fc_frame *fp; 1565 struct fc_frame *fp;
1331 u8 *mac = NULL; 1566 u8 *mac = NULL;
1332 struct fcoe_port *port; 1567 struct fcoe_port *port;
1333 struct fcoe_hdr *hp; 1568 struct fcoe_hdr *hp;
1334 1569
1570 fr = fcoe_dev_from_skb(skb);
1571 lport = fr->fr_dev;
1572 if (unlikely(!lport)) {
1573 if (skb->destructor != fcoe_percpu_flush_done)
1574 FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb");
1575 kfree_skb(skb);
1576 return;
1577 }
1578
1579 FCOE_NETDEV_DBG(skb->dev, "skb_info: len:%d data_len:%d "
1580 "head:%p data:%p tail:%p end:%p sum:%d dev:%s",
1581 skb->len, skb->data_len,
1582 skb->head, skb->data, skb_tail_pointer(skb),
1583 skb_end_pointer(skb), skb->csum,
1584 skb->dev ? skb->dev->name : "<NULL>");
1585
1586 /*
1587 * Save source MAC address before discarding header.
1588 */
1589 port = lport_priv(lport);
1590 if (skb_is_nonlinear(skb))
1591 skb_linearize(skb); /* not ideal */
1592 mac = eth_hdr(skb)->h_source;
1593
1594 /*
1595 * Frame length checks and setting up the header pointers
1596 * was done in fcoe_rcv already.
1597 */
1598 hp = (struct fcoe_hdr *) skb_network_header(skb);
1599 fh = (struct fc_frame_header *) skb_transport_header(skb);
1600
1601 stats = fc_lport_get_stats(lport);
1602 if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
1603 if (stats->ErrorFrames < 5)
1604 printk(KERN_WARNING "fcoe: FCoE version "
1605 "mismatch: The frame has "
1606 "version %x, but the "
1607 "initiator supports version "
1608 "%x\n", FC_FCOE_DECAPS_VER(hp),
1609 FC_FCOE_VER);
1610 stats->ErrorFrames++;
1611 kfree_skb(skb);
1612 return;
1613 }
1614
1615 skb_pull(skb, sizeof(struct fcoe_hdr));
1616 fr_len = skb->len - sizeof(struct fcoe_crc_eof);
1617
1618 stats->RxFrames++;
1619 stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
1620
1621 fp = (struct fc_frame *)skb;
1622 fc_frame_init(fp);
1623 fr_dev(fp) = lport;
1624 fr_sof(fp) = hp->fcoe_sof;
1625
1626 /* Copy out the CRC and EOF trailer for access */
1627 if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
1628 kfree_skb(skb);
1629 return;
1630 }
1631 fr_eof(fp) = crc_eof.fcoe_eof;
1632 fr_crc(fp) = crc_eof.fcoe_crc32;
1633 if (pskb_trim(skb, fr_len)) {
1634 kfree_skb(skb);
1635 return;
1636 }
1637
1638 /*
1639 * We only check the CRC if no offload is available and the
1640 * frame is not solicited data, in which case the FCP layer
1641 * checks it during the copy.
1642 */
1643 if (lport->crc_offload &&
1644 skb->ip_summed == CHECKSUM_UNNECESSARY)
1645 fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
1646 else
1647 fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
1648
1649 fh = fc_frame_header_get(fp);
1650 if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
1651 fh->fh_type == FC_TYPE_FCP) {
1652 fc_exch_recv(lport, fp);
1653 return;
1654 }
1655 if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
1656 if (le32_to_cpu(fr_crc(fp)) !=
1657 ~crc32(~0, skb->data, fr_len)) {
1658 if (stats->InvalidCRCCount < 5)
1659 printk(KERN_WARNING "fcoe: dropping "
1660 "frame with CRC error\n");
1661 stats->InvalidCRCCount++;
1662 stats->ErrorFrames++;
1663 fc_frame_free(fp);
1664 return;
1665 }
1666 fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
1667 }
1668 fc_exch_recv(lport, fp);
1669}
1670
1671/**
1672 * fcoe_percpu_receive_thread() - The per-CPU packet receive thread
1673 * @arg: The per-CPU context
1674 *
1675 * Return: 0 for success
1676 */
1677int fcoe_percpu_receive_thread(void *arg)
1678{
1679 struct fcoe_percpu_s *p = arg;
1680 struct sk_buff *skb;
1681
1335 set_user_nice(current, -20); 1682 set_user_nice(current, -20);
1336 1683
1337 while (!kthread_should_stop()) { 1684 while (!kthread_should_stop()) {
@@ -1347,129 +1694,27 @@ int fcoe_percpu_receive_thread(void *arg)
1347 spin_lock_bh(&p->fcoe_rx_list.lock); 1694 spin_lock_bh(&p->fcoe_rx_list.lock);
1348 } 1695 }
1349 spin_unlock_bh(&p->fcoe_rx_list.lock); 1696 spin_unlock_bh(&p->fcoe_rx_list.lock);
1350 fr = fcoe_dev_from_skb(skb); 1697 fcoe_recv_frame(skb);
1351 lp = fr->fr_dev;
1352 if (unlikely(lp == NULL)) {
1353 if (skb->destructor != fcoe_percpu_flush_done)
1354 FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb");
1355 kfree_skb(skb);
1356 continue;
1357 }
1358
1359 FCOE_NETDEV_DBG(skb->dev, "skb_info: len:%d data_len:%d "
1360 "head:%p data:%p tail:%p end:%p sum:%d dev:%s",
1361 skb->len, skb->data_len,
1362 skb->head, skb->data, skb_tail_pointer(skb),
1363 skb_end_pointer(skb), skb->csum,
1364 skb->dev ? skb->dev->name : "<NULL>");
1365
1366 /*
1367 * Save source MAC address before discarding header.
1368 */
1369 port = lport_priv(lp);
1370 if (skb_is_nonlinear(skb))
1371 skb_linearize(skb); /* not ideal */
1372 mac = eth_hdr(skb)->h_source;
1373
1374 /*
1375 * Frame length checks and setting up the header pointers
1376 * was done in fcoe_rcv already.
1377 */
1378 hp = (struct fcoe_hdr *) skb_network_header(skb);
1379 fh = (struct fc_frame_header *) skb_transport_header(skb);
1380
1381 stats = fc_lport_get_stats(lp);
1382 if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
1383 if (stats->ErrorFrames < 5)
1384 printk(KERN_WARNING "fcoe: FCoE version "
1385 "mismatch: The frame has "
1386 "version %x, but the "
1387 "initiator supports version "
1388 "%x\n", FC_FCOE_DECAPS_VER(hp),
1389 FC_FCOE_VER);
1390 stats->ErrorFrames++;
1391 kfree_skb(skb);
1392 continue;
1393 }
1394
1395 skb_pull(skb, sizeof(struct fcoe_hdr));
1396 fr_len = skb->len - sizeof(struct fcoe_crc_eof);
1397
1398 stats->RxFrames++;
1399 stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
1400
1401 fp = (struct fc_frame *)skb;
1402 fc_frame_init(fp);
1403 fr_dev(fp) = lp;
1404 fr_sof(fp) = hp->fcoe_sof;
1405
1406 /* Copy out the CRC and EOF trailer for access */
1407 if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
1408 kfree_skb(skb);
1409 continue;
1410 }
1411 fr_eof(fp) = crc_eof.fcoe_eof;
1412 fr_crc(fp) = crc_eof.fcoe_crc32;
1413 if (pskb_trim(skb, fr_len)) {
1414 kfree_skb(skb);
1415 continue;
1416 }
1417
1418 /*
1419 * We only check CRC if no offload is available and if it is
1420 * it's solicited data, in which case, the FCP layer would
1421 * check it during the copy.
1422 */
1423 if (lp->crc_offload && skb->ip_summed == CHECKSUM_UNNECESSARY)
1424 fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
1425 else
1426 fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
1427
1428 fh = fc_frame_header_get(fp);
1429 if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
1430 fh->fh_type == FC_TYPE_FCP) {
1431 fc_exch_recv(lp, fp);
1432 continue;
1433 }
1434 if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
1435 if (le32_to_cpu(fr_crc(fp)) !=
1436 ~crc32(~0, skb->data, fr_len)) {
1437 if (stats->InvalidCRCCount < 5)
1438 printk(KERN_WARNING "fcoe: dropping "
1439 "frame with CRC error\n");
1440 stats->InvalidCRCCount++;
1441 stats->ErrorFrames++;
1442 fc_frame_free(fp);
1443 continue;
1444 }
1445 fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
1446 }
1447 if (unlikely(port->fcoe->ctlr.flogi_oxid != FC_XID_UNKNOWN) &&
1448 fcoe_ctlr_recv_flogi(&port->fcoe->ctlr, fp, mac)) {
1449 fc_frame_free(fp);
1450 continue;
1451 }
1452 fc_exch_recv(lp, fp);
1453 } 1698 }
1454 return 0; 1699 return 0;
1455} 1700}
1456 1701
1457/** 1702/**
1458 * fcoe_check_wait_queue() - attempt to clear the transmit backlog 1703 * fcoe_check_wait_queue() - Attempt to clear the transmit backlog
1459 * @lp: the fc_lport 1704 * @lport: The local port whose backlog is to be cleared
1460 * 1705 *
1461 * This empties the wait_queue, dequeue the head of the wait_queue queue 1706 * This empties the wait_queue, dequeuing skbs from the head of the
1462 * and calls fcoe_start_io() for each packet, if all skb have been 1707 * wait_queue and calling fcoe_start_io() on each. If a transmit fails,
1463 * transmitted, return qlen or -1 if a error occurs, then restore 1708 * the skb is restored to the head of the wait_queue so that it can
1464 * wait_queue and try again later. 1709 * be retried later.
1465 * 1710 *
1466 * The wait_queue is used when the skb transmit fails. skb will go 1711 * The wait_queue is used when the skb transmit fails. The failed skb
1467 * in the wait_queue which will be emptied by the timer function or 1712 * will go in the wait_queue which will be emptied by the timer function or
1468 * by the next skb transmit. 1713 * by the next skb transmit.
1469 */ 1714 */
1470static void fcoe_check_wait_queue(struct fc_lport *lp, struct sk_buff *skb) 1715static void fcoe_check_wait_queue(struct fc_lport *lport, struct sk_buff *skb)
1471{ 1716{
1472 struct fcoe_port *port = lport_priv(lp); 1717 struct fcoe_port *port = lport_priv(lport);
1473 int rc; 1718 int rc;
1474 1719
1475 spin_lock_bh(&port->fcoe_pending_queue.lock); 1720 spin_lock_bh(&port->fcoe_pending_queue.lock);
@@ -1501,19 +1746,19 @@ static void fcoe_check_wait_queue(struct fc_lport *lp, struct sk_buff *skb)
1501 } 1746 }
1502 1747
1503 if (port->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH) 1748 if (port->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH)
1504 lp->qfull = 0; 1749 lport->qfull = 0;
1505 if (port->fcoe_pending_queue.qlen && !timer_pending(&port->timer)) 1750 if (port->fcoe_pending_queue.qlen && !timer_pending(&port->timer))
1506 mod_timer(&port->timer, jiffies + 2); 1751 mod_timer(&port->timer, jiffies + 2);
1507 port->fcoe_pending_queue_active = 0; 1752 port->fcoe_pending_queue_active = 0;
1508out: 1753out:
1509 if (port->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH) 1754 if (port->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
1510 lp->qfull = 1; 1755 lport->qfull = 1;
1511 spin_unlock_bh(&port->fcoe_pending_queue.lock); 1756 spin_unlock_bh(&port->fcoe_pending_queue.lock);
1512 return; 1757 return;
1513} 1758}
1514 1759
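A toy model of the backlog drain above: a failed transmit puts the item back at the head of the queue, preserving frame order, and the timer retries the whole queue later. All names are invented, and the qfull watermark handling is omitted for brevity:

#include <stdio.h>

#define MAX_Q 256

struct q { int items[MAX_Q]; int head, len; };

static int q_pop(struct q *q) { q->len--; return q->items[q->head++]; }
static void q_push_head(struct q *q, int v) { q->items[--q->head] = v; q->len++; }

static int xmit_ok;     /* stub knob: 0 simulates a busy NIC */
static int try_xmit(int v) { (void)v; return xmit_ok ? 0 : -1; }
static void arm_timer(void) { printf("timer armed for retry\n"); }

/* Drain the backlog; a failed transmit puts the item back at the head
 * (preserving frame order) and re-arms the timer to try again later. */
static void check_wait_queue(struct q *q)
{
        while (q->len) {
                int v = q_pop(q);

                if (try_xmit(v)) {
                        q_push_head(q, v);
                        break;
                }
        }
        if (q->len)
                arm_timer();
}

int main(void)
{
        struct q q = { .head = 8 };
        int i;

        for (i = 0; i < 4; i++)
                q.items[q.head + q.len++] = i;

        check_wait_queue(&q);   /* NIC busy: everything requeued */
        xmit_ok = 1;
        check_wait_queue(&q);   /* drains fully, no timer */
        return 0;
}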
1515/** 1760/**
1516 * fcoe_dev_setup() - setup link change notification interface 1761 * fcoe_dev_setup() - Setup the link change notification interface
1517 */ 1762 */
1518static void fcoe_dev_setup(void) 1763static void fcoe_dev_setup(void)
1519{ 1764{
@@ -1521,7 +1766,7 @@ static void fcoe_dev_setup(void)
1521} 1766}
1522 1767
1523/** 1768/**
1524 * fcoe_dev_cleanup() - cleanup link change notification interface 1769 * fcoe_dev_cleanup() - Cleanup the link change notification interface
1525 */ 1770 */
1526static void fcoe_dev_cleanup(void) 1771static void fcoe_dev_cleanup(void)
1527{ 1772{
@@ -1529,19 +1774,19 @@ static void fcoe_dev_cleanup(void)
1529} 1774}
1530 1775
1531/** 1776/**
1532 * fcoe_device_notification() - netdev event notification callback 1777 * fcoe_device_notification() - Handler for net device events
1533 * @notifier: context of the notification 1778 * @notifier: The context of the notification
1534 * @event: type of event 1779 * @event: The type of event
1535 * @ptr: fixed array for output parsed ifname 1780 * @ptr: The net device that the event was on
1536 * 1781 *
1537 * This function is called by the ethernet driver in case of link change event 1782 * This function is called by the Ethernet driver in case of a link change event.
1538 * 1783 *
1539 * Returns: 0 for success 1784 * Returns: 0 for success
1540 */ 1785 */
1541static int fcoe_device_notification(struct notifier_block *notifier, 1786static int fcoe_device_notification(struct notifier_block *notifier,
1542 ulong event, void *ptr) 1787 ulong event, void *ptr)
1543{ 1788{
1544 struct fc_lport *lp = NULL; 1789 struct fc_lport *lport = NULL;
1545 struct net_device *netdev = ptr; 1790 struct net_device *netdev = ptr;
1546 struct fcoe_interface *fcoe; 1791 struct fcoe_interface *fcoe;
1547 struct fcoe_port *port; 1792 struct fcoe_port *port;
@@ -1552,11 +1797,11 @@ static int fcoe_device_notification(struct notifier_block *notifier,
1552 1797
1553 list_for_each_entry(fcoe, &fcoe_hostlist, list) { 1798 list_for_each_entry(fcoe, &fcoe_hostlist, list) {
1554 if (fcoe->netdev == netdev) { 1799 if (fcoe->netdev == netdev) {
1555 lp = fcoe->ctlr.lp; 1800 lport = fcoe->ctlr.lp;
1556 break; 1801 break;
1557 } 1802 }
1558 } 1803 }
1559 if (lp == NULL) { 1804 if (!lport) {
1560 rc = NOTIFY_DONE; 1805 rc = NOTIFY_DONE;
1561 goto out; 1806 goto out;
1562 } 1807 }
@@ -1570,10 +1815,12 @@ static int fcoe_device_notification(struct notifier_block *notifier,
1570 case NETDEV_CHANGE: 1815 case NETDEV_CHANGE:
1571 break; 1816 break;
1572 case NETDEV_CHANGEMTU: 1817 case NETDEV_CHANGEMTU:
1818 if (netdev->features & NETIF_F_FCOE_MTU)
1819 break;
1573 mfs = netdev->mtu - (sizeof(struct fcoe_hdr) + 1820 mfs = netdev->mtu - (sizeof(struct fcoe_hdr) +
1574 sizeof(struct fcoe_crc_eof)); 1821 sizeof(struct fcoe_crc_eof));
1575 if (mfs >= FC_MIN_MAX_FRAME) 1822 if (mfs >= FC_MIN_MAX_FRAME)
1576 fc_set_mfs(lp, mfs); 1823 fc_set_mfs(lport, mfs);
1577 break; 1824 break;
1578 case NETDEV_REGISTER: 1825 case NETDEV_REGISTER:
1579 break; 1826 break;
@@ -1588,22 +1835,22 @@ static int fcoe_device_notification(struct notifier_block *notifier,
1588 FCOE_NETDEV_DBG(netdev, "Unknown event %ld " 1835 FCOE_NETDEV_DBG(netdev, "Unknown event %ld "
1589 "from netdev netlink\n", event); 1836 "from netdev netlink\n", event);
1590 } 1837 }
1591 if (link_possible && !fcoe_link_ok(lp)) 1838 if (link_possible && !fcoe_link_ok(lport))
1592 fcoe_ctlr_link_up(&fcoe->ctlr); 1839 fcoe_ctlr_link_up(&fcoe->ctlr);
1593 else if (fcoe_ctlr_link_down(&fcoe->ctlr)) { 1840 else if (fcoe_ctlr_link_down(&fcoe->ctlr)) {
1594 stats = fc_lport_get_stats(lp); 1841 stats = fc_lport_get_stats(lport);
1595 stats->LinkFailureCount++; 1842 stats->LinkFailureCount++;
1596 fcoe_clean_pending_queue(lp); 1843 fcoe_clean_pending_queue(lport);
1597 } 1844 }
1598out: 1845out:
1599 return rc; 1846 return rc;
1600} 1847}
1601 1848
1602/** 1849/**
1603 * fcoe_if_to_netdev() - parse a name buffer to get netdev 1850 * fcoe_if_to_netdev() - Parse a name buffer to get a net device
1604 * @buffer: incoming buffer to be copied 1851 * @buffer: The name of the net device
1605 * 1852 *
1606 * Returns: NULL or ptr to net_device 1853 * Returns: NULL or a ptr to net_device
1607 */ 1854 */
1608static struct net_device *fcoe_if_to_netdev(const char *buffer) 1855static struct net_device *fcoe_if_to_netdev(const char *buffer)
1609{ 1856{
@@ -1621,9 +1868,109 @@ static struct net_device *fcoe_if_to_netdev(const char *buffer)
1621} 1868}
1622 1869
1623/** 1870/**
1624 * fcoe_destroy() - handles the destroy from sysfs 1871 * fcoe_disable() - Disables a FCoE interface
1625 * @buffer: expected to be an eth if name 1872 * @buffer: The name of the Ethernet interface to be disabled
1626 * @kp: associated kernel param 1873 * @kp: The associated kernel parameter
1874 *
1875 * Called from sysfs.
1876 *
1877 * Returns: 0 for success
1878 */
1879static int fcoe_disable(const char *buffer, struct kernel_param *kp)
1880{
1881 struct fcoe_interface *fcoe;
1882 struct net_device *netdev;
1883 int rc = 0;
1884
1885 mutex_lock(&fcoe_config_mutex);
1886#ifdef CONFIG_FCOE_MODULE
1887 /*
1888 * Make sure the module has been initialized, and is not about to be
1889 * removed. Module parameter sysfs files are writable before the
1890 * module_init function is called and after module_exit.
1891 */
1892 if (THIS_MODULE->state != MODULE_STATE_LIVE) {
1893 rc = -ENODEV;
1894 goto out_nodev;
1895 }
1896#endif
1897
1898 netdev = fcoe_if_to_netdev(buffer);
1899 if (!netdev) {
1900 rc = -ENODEV;
1901 goto out_nodev;
1902 }
1903
1904 rtnl_lock();
1905 fcoe = fcoe_hostlist_lookup_port(netdev);
1906 rtnl_unlock();
1907
1908 if (fcoe)
1909 fc_fabric_logoff(fcoe->ctlr.lp);
1910 else
1911 rc = -ENODEV;
1912
1913 dev_put(netdev);
1914out_nodev:
1915 mutex_unlock(&fcoe_config_mutex);
1916 return rc;
1917}
1918
1919/**
1920 * fcoe_enable() - Enables a FCoE interface
1921 * @buffer: The name of the Ethernet interface to be enabled
1922 * @kp: The associated kernel parameter
1923 *
1924 * Called from sysfs.
1925 *
1926 * Returns: 0 for success
1927 */
1928static int fcoe_enable(const char *buffer, struct kernel_param *kp)
1929{
1930 struct fcoe_interface *fcoe;
1931 struct net_device *netdev;
1932 int rc = 0;
1933
1934 mutex_lock(&fcoe_config_mutex);
1935#ifdef CONFIG_FCOE_MODULE
1936 /*
1937 * Make sure the module has been initialized, and is not about to be
1938 * removed. Module parameter sysfs files are writable before the
1939 * module_init function is called and after module_exit.
1940 */
1941 if (THIS_MODULE->state != MODULE_STATE_LIVE) {
1942 rc = -ENODEV;
1943 goto out_nodev;
1944 }
1945#endif
1946
1947 netdev = fcoe_if_to_netdev(buffer);
1948 if (!netdev) {
1949 rc = -ENODEV;
1950 goto out_nodev;
1951 }
1952
1953 rtnl_lock();
1954 fcoe = fcoe_hostlist_lookup_port(netdev);
1955 rtnl_unlock();
1956
1957 if (fcoe)
1958 rc = fc_fabric_login(fcoe->ctlr.lp);
1959 else
1960 rc = -ENODEV;
1961
1962 dev_put(netdev);
1963out_nodev:
1964 mutex_unlock(&fcoe_config_mutex);
1965 return rc;
1966}
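
fcoe_enable() and fcoe_disable() open with the same CONFIG_FCOE_MODULE guard
because module-parameter sysfs files are writable before module_init() runs
and after module_exit(). As an illustration only, the repeated check could be
expressed as a hypothetical helper (not part of this patch):

#include <linux/module.h>

/* Hypothetical helper: -ENODEV unless the module is live. */
static int fcoe_check_module_live(void)
{
#ifdef CONFIG_FCOE_MODULE
	if (THIS_MODULE->state != MODULE_STATE_LIVE)
		return -ENODEV;
#endif
	return 0;
}
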
1967
1968/**
1969 * fcoe_destroy() - Destroy a FCoE interface
1970 * @buffer: The name of the Ethernet interface to be destroyed
1971 * @kp: The associated kernel parameter
1972 *
1973 * Called from sysfs.
1627 * 1974 *
1628 * Returns: 0 for success 1975 * Returns: 0 for success
1629 */ 1976 */
@@ -1631,7 +1978,7 @@ static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
1631{ 1978{
1632 struct fcoe_interface *fcoe; 1979 struct fcoe_interface *fcoe;
1633 struct net_device *netdev; 1980 struct net_device *netdev;
1634 int rc; 1981 int rc = 0;
1635 1982
1636 mutex_lock(&fcoe_config_mutex); 1983 mutex_lock(&fcoe_config_mutex);
1637#ifdef CONFIG_FCOE_MODULE 1984#ifdef CONFIG_FCOE_MODULE
@@ -1663,6 +2010,8 @@ static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
1663 fcoe_interface_cleanup(fcoe); 2010 fcoe_interface_cleanup(fcoe);
1664 rtnl_unlock(); 2011 rtnl_unlock();
1665 fcoe_if_destroy(fcoe->ctlr.lp); 2012 fcoe_if_destroy(fcoe->ctlr.lp);
2013 module_put(THIS_MODULE);
2014
1666out_putdev: 2015out_putdev:
1667 dev_put(netdev); 2016 dev_put(netdev);
1668out_nodev: 2017out_nodev:
@@ -1670,6 +2019,10 @@ out_nodev:
1670 return rc; 2019 return rc;
1671} 2020}
1672 2021
2022/**
2023 * fcoe_destroy_work() - Destroy a FCoE port in a deferred work context
2024 * @work: Handle to the FCoE port to be destroyed
2025 */
1673static void fcoe_destroy_work(struct work_struct *work) 2026static void fcoe_destroy_work(struct work_struct *work)
1674{ 2027{
1675 struct fcoe_port *port; 2028 struct fcoe_port *port;
@@ -1681,9 +2034,11 @@ static void fcoe_destroy_work(struct work_struct *work)
1681} 2034}
1682 2035
1683/** 2036/**
1684 * fcoe_create() - Handles the create call from sysfs 2037 * fcoe_create() - Create a FCoE interface
1685 * @buffer: expected to be an eth if name 2038 * @buffer: The name of the Ethernet interface to create on
1686 * @kp: associated kernel param 2039 * @kp: The associated kernel param
2040 *
2041 * Called from sysfs.
1687 * 2042 *
1688 * Returns: 0 for success 2043 * Returns: 0 for success
1689 */ 2044 */
@@ -1707,6 +2062,11 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp)
1707 } 2062 }
1708#endif 2063#endif
1709 2064
2065 if (!try_module_get(THIS_MODULE)) {
2066 rc = -EINVAL;
2067 goto out_nomod;
2068 }
2069
1710 rtnl_lock(); 2070 rtnl_lock();
1711 netdev = fcoe_if_to_netdev(buffer); 2071 netdev = fcoe_if_to_netdev(buffer);
1712 if (!netdev) { 2072 if (!netdev) {
@@ -1726,7 +2086,7 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp)
1726 goto out_putdev; 2086 goto out_putdev;
1727 } 2087 }
1728 2088
1729 lport = fcoe_if_create(fcoe, &netdev->dev); 2089 lport = fcoe_if_create(fcoe, &netdev->dev, 0);
1730 if (IS_ERR(lport)) { 2090 if (IS_ERR(lport)) {
1731 printk(KERN_ERR "fcoe: Failed to create interface (%s)\n", 2091 printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",
1732 netdev->name); 2092 netdev->name);
@@ -1747,31 +2107,31 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp)
1747 if (!fcoe_link_ok(lport)) 2107 if (!fcoe_link_ok(lport))
1748 fcoe_ctlr_link_up(&fcoe->ctlr); 2108 fcoe_ctlr_link_up(&fcoe->ctlr);
1749 2109
1750 rc = 0;
1751out_free:
1752 /* 2110 /*
1753 * Release from init in fcoe_interface_create(), on success lport 2111 * Release from init in fcoe_interface_create(), on success lport
1754 * should be holding a reference taken in fcoe_if_create(). 2112 * should be holding a reference taken in fcoe_if_create().
1755 */ 2113 */
1756 fcoe_interface_put(fcoe); 2114 fcoe_interface_put(fcoe);
2115 dev_put(netdev);
2116 rtnl_unlock();
2117 mutex_unlock(&fcoe_config_mutex);
2118
2119 return 0;
2120out_free:
2121 fcoe_interface_put(fcoe);
1757out_putdev: 2122out_putdev:
1758 dev_put(netdev); 2123 dev_put(netdev);
1759out_nodev: 2124out_nodev:
1760 rtnl_unlock(); 2125 rtnl_unlock();
2126 module_put(THIS_MODULE);
2127out_nomod:
1761 mutex_unlock(&fcoe_config_mutex); 2128 mutex_unlock(&fcoe_config_mutex);
1762 return rc; 2129 return rc;
1763} 2130}
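
The try_module_get()/module_put() calls added to fcoe_create() and
fcoe_destroy() pin fcoe.ko for the lifetime of each created interface, so the
module cannot be unloaded while an interface still exists. A sketch of the
pattern with hypothetical names (error paths trimmed to the essentials):

#include <linux/module.h>
#include <linux/slab.h>

struct my_obj { int dummy; };  /* hypothetical per-interface object */

static struct my_obj *my_obj_create(void)
{
	struct my_obj *obj;

	if (!try_module_get(THIS_MODULE))  /* fails if unload has begun */
		return NULL;
	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		module_put(THIS_MODULE);   /* undo the reference on failure */
	return obj;
}

static void my_obj_destroy(struct my_obj *obj)
{
	kfree(obj);
	module_put(THIS_MODULE);           /* drop the create-time reference */
}
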
1764 2131
1765module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR);
1766__MODULE_PARM_TYPE(create, "string");
1767MODULE_PARM_DESC(create, "Create fcoe fcoe using net device passed in.");
1768module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR);
1769__MODULE_PARM_TYPE(destroy, "string");
1770MODULE_PARM_DESC(destroy, "Destroy fcoe fcoe");
1771
1772/** 2132/**
1773 * fcoe_link_ok() - Check if link is ok for the fc_lport 2133 * fcoe_link_ok() - Check if the link is OK for a local port
1774 * @lp: ptr to the fc_lport 2134 * @lport: The local port to check link on
1775 * 2135 *
1776 * Any permanently-disqualifying conditions have been previously checked. 2136 * Any permanently-disqualifying conditions have been previously checked.
1777 * This also updates the speed setting, which may change with link for 100/1000. 2137 * This also updates the speed setting, which may change with link for 100/1000.
@@ -1783,26 +2143,26 @@ MODULE_PARM_DESC(destroy, "Destroy fcoe fcoe");
1783 * Returns: 0 if link is OK for use by FCoE. 2143 * Returns: 0 if link is OK for use by FCoE.
1784 * 2144 *
1785 */ 2145 */
1786int fcoe_link_ok(struct fc_lport *lp) 2146int fcoe_link_ok(struct fc_lport *lport)
1787{ 2147{
1788 struct fcoe_port *port = lport_priv(lp); 2148 struct fcoe_port *port = lport_priv(lport);
1789 struct net_device *dev = port->fcoe->netdev; 2149 struct net_device *netdev = port->fcoe->netdev;
1790 struct ethtool_cmd ecmd = { ETHTOOL_GSET }; 2150 struct ethtool_cmd ecmd = { ETHTOOL_GSET };
1791 2151
1792 if ((dev->flags & IFF_UP) && netif_carrier_ok(dev) && 2152 if ((netdev->flags & IFF_UP) && netif_carrier_ok(netdev) &&
1793 (!dev_ethtool_get_settings(dev, &ecmd))) { 2153 (!dev_ethtool_get_settings(netdev, &ecmd))) {
1794 lp->link_supported_speeds &= 2154 lport->link_supported_speeds &=
1795 ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT); 2155 ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
1796 if (ecmd.supported & (SUPPORTED_1000baseT_Half | 2156 if (ecmd.supported & (SUPPORTED_1000baseT_Half |
1797 SUPPORTED_1000baseT_Full)) 2157 SUPPORTED_1000baseT_Full))
1798 lp->link_supported_speeds |= FC_PORTSPEED_1GBIT; 2158 lport->link_supported_speeds |= FC_PORTSPEED_1GBIT;
1799 if (ecmd.supported & SUPPORTED_10000baseT_Full) 2159 if (ecmd.supported & SUPPORTED_10000baseT_Full)
1800 lp->link_supported_speeds |= 2160 lport->link_supported_speeds |=
1801 FC_PORTSPEED_10GBIT; 2161 FC_PORTSPEED_10GBIT;
1802 if (ecmd.speed == SPEED_1000) 2162 if (ecmd.speed == SPEED_1000)
1803 lp->link_speed = FC_PORTSPEED_1GBIT; 2163 lport->link_speed = FC_PORTSPEED_1GBIT;
1804 if (ecmd.speed == SPEED_10000) 2164 if (ecmd.speed == SPEED_10000)
1805 lp->link_speed = FC_PORTSPEED_10GBIT; 2165 lport->link_speed = FC_PORTSPEED_10GBIT;
1806 2166
1807 return 0; 2167 return 0;
1808 } 2168 }
@@ -1810,8 +2170,8 @@ int fcoe_link_ok(struct fc_lport *lp)
1810} 2170}
1811 2171
1812/** 2172/**
1813 * fcoe_percpu_clean() - Clear the pending skbs for an lport 2173 * fcoe_percpu_clean() - Clear all pending skbs for a local port
1814 * @lp: the fc_lport 2174 * @lport: The local port whose skbs are to be cleared
1815 * 2175 *
1816 * Must be called with fcoe_create_mutex held to single-thread completion. 2176 * Must be called with fcoe_create_mutex held to single-thread completion.
1817 * 2177 *
@@ -1820,7 +2180,7 @@ int fcoe_link_ok(struct fc_lport *lp)
1820 * there are no packets that will be handled by the lport, but also that any 2180 * there are no packets that will be handled by the lport, but also that any
1821 * threads already handling packets have returned. 2181 * threads already handling packets have returned.
1822 */ 2182 */
1823void fcoe_percpu_clean(struct fc_lport *lp) 2183void fcoe_percpu_clean(struct fc_lport *lport)
1824{ 2184{
1825 struct fcoe_percpu_s *pp; 2185 struct fcoe_percpu_s *pp;
1826 struct fcoe_rcv_info *fr; 2186 struct fcoe_rcv_info *fr;
@@ -1838,7 +2198,7 @@ void fcoe_percpu_clean(struct fc_lport *lp)
1838 skb = next) { 2198 skb = next) {
1839 next = skb->next; 2199 next = skb->next;
1840 fr = fcoe_dev_from_skb(skb); 2200 fr = fcoe_dev_from_skb(skb);
1841 if (fr->fr_dev == lp) { 2201 if (fr->fr_dev == lport) {
1842 __skb_unlink(skb, list); 2202 __skb_unlink(skb, list);
1843 kfree_skb(skb); 2203 kfree_skb(skb);
1844 } 2204 }
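
fcoe_percpu_clean() walks each per-CPU receive list and unlinks only the skbs
owned by the dying local port, leaving other ports' traffic queued. The core
unlink-while-iterating step, as a standalone kernel-style sketch (the caller
is assumed to hold the list lock, as the driver does):

#include <linux/skbuff.h>
#include <scsi/fc_frame.h>

static void purge_skbs_for_lport(struct sk_buff_head *list,
				 struct fc_lport *lport)
{
	struct sk_buff *skb, *next;

	for (skb = list->next; skb != (struct sk_buff *)list; skb = next) {
		next = skb->next;                /* grab before unlinking */
		if (fcoe_dev_from_skb(skb)->fr_dev == lport) {
			__skb_unlink(skb, list);
			kfree_skb(skb);
		}
	}
}
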
@@ -1867,13 +2227,11 @@ void fcoe_percpu_clean(struct fc_lport *lp)
1867 2227
1868/** 2228/**
1869 * fcoe_clean_pending_queue() - Dequeue a skb and free it 2229 * fcoe_clean_pending_queue() - Dequeue a skb and free it
1870 * @lp: the corresponding fc_lport 2230 * @lport: The local port to dequeue a skb on
1871 *
1872 * Returns: none
1873 */ 2231 */
1874void fcoe_clean_pending_queue(struct fc_lport *lp) 2232void fcoe_clean_pending_queue(struct fc_lport *lport)
1875{ 2233{
1876 struct fcoe_port *port = lport_priv(lp); 2234 struct fcoe_port *port = lport_priv(lport);
1877 struct sk_buff *skb; 2235 struct sk_buff *skb;
1878 2236
1879 spin_lock_bh(&port->fcoe_pending_queue.lock); 2237 spin_lock_bh(&port->fcoe_pending_queue.lock);
@@ -1886,10 +2244,10 @@ void fcoe_clean_pending_queue(struct fc_lport *lp)
1886} 2244}
1887 2245
1888/** 2246/**
1889 * fcoe_reset() - Resets the fcoe 2247 * fcoe_reset() - Reset a local port
1890 * @shost: shost the reset is from 2248 * @shost: The SCSI host associated with the local port to be reset
1891 * 2249 *
1892 * Returns: always 0 2250 * Returns: Always 0 (return value required by FC transport template)
1893 */ 2251 */
1894int fcoe_reset(struct Scsi_Host *shost) 2252int fcoe_reset(struct Scsi_Host *shost)
1895{ 2253{
@@ -1899,30 +2257,33 @@ int fcoe_reset(struct Scsi_Host *shost)
1899} 2257}
1900 2258
1901/** 2259/**
1902 * fcoe_hostlist_lookup_port() - find the corresponding lport by a given device 2260 * fcoe_hostlist_lookup_port() - Find the FCoE interface associated with a net device
1903 * @dev: this is currently ptr to net_device 2261 * @netdev: The net device used as a key
1904 * 2262 *
1905 * Returns: NULL or the located fcoe_port 2263 * Locking: Must be called with the RTNL mutex held.
1906 * Locking: must be called with the RNL mutex held 2264 *
2265 * Returns: NULL or the FCoE interface
1907 */ 2266 */
1908static struct fcoe_interface * 2267static struct fcoe_interface *
1909fcoe_hostlist_lookup_port(const struct net_device *dev) 2268fcoe_hostlist_lookup_port(const struct net_device *netdev)
1910{ 2269{
1911 struct fcoe_interface *fcoe; 2270 struct fcoe_interface *fcoe;
1912 2271
1913 list_for_each_entry(fcoe, &fcoe_hostlist, list) { 2272 list_for_each_entry(fcoe, &fcoe_hostlist, list) {
1914 if (fcoe->netdev == dev) 2273 if (fcoe->netdev == netdev)
1915 return fcoe; 2274 return fcoe;
1916 } 2275 }
1917 return NULL; 2276 return NULL;
1918} 2277}
1919 2278
1920/** 2279/**
1921 * fcoe_hostlist_lookup() - Find the corresponding lport by netdev 2280 * fcoe_hostlist_lookup() - Find the local port associated with a
1922 * @netdev: ptr to net_device 2281 * given net device
2282 * @netdev: The netdevice used as a key
1923 * 2283 *
1924 * Returns: 0 for success 2284 * Locking: Must be called with the RTNL mutex held
1925 * Locking: must be called with the RTNL mutex held 2285 *
2286 * Returns: NULL or the local port
1926 */ 2287 */
1927static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev) 2288static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
1928{ 2289{
@@ -1933,11 +2294,13 @@ static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
1933} 2294}
1934 2295
1935/** 2296/**
1936 * fcoe_hostlist_add() - Add a lport to lports list 2297 * fcoe_hostlist_add() - Add the FCoE interface identified by a local
1937 * @lp: ptr to the fc_lport to be added 2298 * port to the hostlist
2299 * @lport: The local port that identifies the FCoE interface to be added
1938 * 2300 *
1939 * Returns: 0 for success
1940 * Locking: must be called with the RTNL mutex held 2301 * Locking: must be called with the RTNL mutex held
2302 *
2303 * Returns: 0 for success
1941 */ 2304 */
1942static int fcoe_hostlist_add(const struct fc_lport *lport) 2305static int fcoe_hostlist_add(const struct fc_lport *lport)
1943{ 2306{
@@ -1954,15 +2317,15 @@ static int fcoe_hostlist_add(const struct fc_lport *lport)
1954} 2317}
1955 2318
1956/** 2319/**
1957 * fcoe_init() - fcoe module loading initialization 2320 * fcoe_init() - Initialize fcoe.ko
1958 * 2321 *
1959 * Returns 0 on success, negative on failure 2322 * Returns: 0 on success, or a negative value on failure
1960 */ 2323 */
1961static int __init fcoe_init(void) 2324static int __init fcoe_init(void)
1962{ 2325{
2326 struct fcoe_percpu_s *p;
1963 unsigned int cpu; 2327 unsigned int cpu;
1964 int rc = 0; 2328 int rc = 0;
1965 struct fcoe_percpu_s *p;
1966 2329
1967 mutex_lock(&fcoe_config_mutex); 2330 mutex_lock(&fcoe_config_mutex);
1968 2331
@@ -1999,15 +2362,15 @@ out_free:
1999module_init(fcoe_init); 2362module_init(fcoe_init);
2000 2363
2001/** 2364/**
2002 * fcoe_exit() - fcoe module unloading cleanup 2365 * fcoe_exit() - Clean up fcoe.ko
2003 * 2366 *
2004 * Returns 0 on success, negative on failure 2367 * Returns: 0 on success or a negative value on failure
2005 */ 2368 */
2006static void __exit fcoe_exit(void) 2369static void __exit fcoe_exit(void)
2007{ 2370{
2008 unsigned int cpu;
2009 struct fcoe_interface *fcoe, *tmp; 2371 struct fcoe_interface *fcoe, *tmp;
2010 struct fcoe_port *port; 2372 struct fcoe_port *port;
2373 unsigned int cpu;
2011 2374
2012 mutex_lock(&fcoe_config_mutex); 2375 mutex_lock(&fcoe_config_mutex);
2013 2376
@@ -2033,9 +2396,238 @@ static void __exit fcoe_exit(void)
2033 /* flush any asynchronous interface destroys, 2396 /* flush any asynchronous interface destroys,
2034 * this should happen after the netdev notifier is unregistered */ 2397 * this should happen after the netdev notifier is unregistered */
2035 flush_scheduled_work(); 2398 flush_scheduled_work();
2399 /* That will flush out all the N_Ports on the hostlist, but now we
2400 * may have NPIV VN_Ports scheduled for destruction */
2401 flush_scheduled_work();
2036 2402
2037 /* detach from scsi transport 2403 /* detach from scsi transport
2038 * must happen after all destroys are done, therefore after the flush */ 2404 * must happen after all destroys are done, therefore after the flush */
2039 fcoe_if_exit(); 2405 fcoe_if_exit();
2040} 2406}
2041module_exit(fcoe_exit); 2407module_exit(fcoe_exit);
2408
2409/**
2410 * fcoe_flogi_resp() - FCoE specific FLOGI and FDISC response handler
2411 * @seq: active sequence in the FLOGI or FDISC exchange
2412 * @fp: response frame, or error encoded in a pointer (timeout)
2413 * @arg: pointer to the fcoe_ctlr structure
2414 *
2415 * This handles MAC address management for FCoE, then passes control on to
2416 * the libfc FLOGI response handler.
2417 */
2418static void fcoe_flogi_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
2419{
2420 struct fcoe_ctlr *fip = arg;
2421 struct fc_exch *exch = fc_seq_exch(seq);
2422 struct fc_lport *lport = exch->lp;
2423 u8 *mac;
2424
2425 if (IS_ERR(fp))
2426 goto done;
2427
2428 mac = fr_cb(fp)->granted_mac;
2429 if (is_zero_ether_addr(mac)) {
2430 /* pre-FIP */
2431 if (fcoe_ctlr_recv_flogi(fip, lport, fp)) {
2432 fc_frame_free(fp);
2433 return;
2434 }
2435 }
2436 fcoe_update_src_mac(lport, mac);
2437done:
2438 fc_lport_flogi_resp(seq, fp, lport);
2439}
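
The granted_mac handling above exists because, with fabric-provided MAC
addresses (FPMA), the VN_Port MAC is the 24-bit FC-MAP prefix concatenated
with the 24-bit FC_ID assigned in the FLOGI LS_ACC. A sketch of that
construction, assuming the default FC-MAP of 0x0EFC00 (the same layout
libfc's fc_fcoe_set_mac() produces):

#include <stdio.h>
#include <stdint.h>

#define FC_MAP_DEFAULT 0x0EFC00u

static void fcoe_fpma_mac(uint8_t mac[6], uint32_t fc_map, uint32_t fc_id)
{
	mac[0] = fc_map >> 16;  /* FC-MAP in the top three bytes */
	mac[1] = fc_map >> 8;
	mac[2] = fc_map;
	mac[3] = fc_id >> 16;   /* assigned FC_ID in the bottom three */
	mac[4] = fc_id >> 8;
	mac[5] = fc_id;
}

int main(void)
{
	uint8_t mac[6];

	fcoe_fpma_mac(mac, FC_MAP_DEFAULT, 0x010203); /* illustrative FC_ID */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}
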
2440
2441/**
2442 * fcoe_logo_resp() - FCoE specific LOGO response handler
2443 * @seq: active sequence in the LOGO exchange
2444 * @fp: response frame, or error encoded in a pointer (timeout)
2445 * @arg: pointer to the fc_lport structure
2446 *
2447 * This handles MAC address management for FCoE, then passes control on to
2448 * the libfc LOGO response handler.
2449 */
2450static void fcoe_logo_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
2451{
2452 struct fc_lport *lport = arg;
2453 static u8 zero_mac[ETH_ALEN] = { 0 };
2454
2455 if (!IS_ERR(fp))
2456 fcoe_update_src_mac(lport, zero_mac);
2457 fc_lport_logo_resp(seq, fp, lport);
2458}
2459
2460/**
2461 * fcoe_elsct_send() - FCoE specific ELS handler
2462 *
2463 * This does special case handling of FIP encapsulated ELS exchanges for FCoE,
2464 * using FCoE specific response handlers and passing the FIP controller as
2465 * the argument (the lport is still available from the exchange).
2466 *
2467 * Most of the work here is just handed off to the libfc routine.
2468 */
2469static struct fc_seq *fcoe_elsct_send(struct fc_lport *lport, u32 did,
2470 struct fc_frame *fp, unsigned int op,
2471 void (*resp)(struct fc_seq *,
2472 struct fc_frame *,
2473 void *),
2474 void *arg, u32 timeout)
2475{
2476 struct fcoe_port *port = lport_priv(lport);
2477 struct fcoe_interface *fcoe = port->fcoe;
2478 struct fcoe_ctlr *fip = &fcoe->ctlr;
2479 struct fc_frame_header *fh = fc_frame_header_get(fp);
2480
2481 switch (op) {
2482 case ELS_FLOGI:
2483 case ELS_FDISC:
2484 return fc_elsct_send(lport, did, fp, op, fcoe_flogi_resp,
2485 fip, timeout);
2486 case ELS_LOGO:
2487 /* only hook onto fabric logouts, not port logouts */
2488 if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI)
2489 break;
2490 return fc_elsct_send(lport, did, fp, op, fcoe_logo_resp,
2491 lport, timeout);
2492 }
2493 return fc_elsct_send(lport, did, fp, op, resp, arg, timeout);
2494}
2495
2496/**
2497 * fcoe_vport_create() - create an fc_host/scsi_host for a vport
2498 * @vport: fc_vport object to create a new fc_host for
2499 * @disabled: start the new fc_host in a disabled state by default?
2500 *
2501 * Returns: 0 for success
2502 */
2503static int fcoe_vport_create(struct fc_vport *vport, bool disabled)
2504{
2505 struct Scsi_Host *shost = vport_to_shost(vport);
2506 struct fc_lport *n_port = shost_priv(shost);
2507 struct fcoe_port *port = lport_priv(n_port);
2508 struct fcoe_interface *fcoe = port->fcoe;
2509 struct net_device *netdev = fcoe->netdev;
2510 struct fc_lport *vn_port;
2511
2512 mutex_lock(&fcoe_config_mutex);
2513 vn_port = fcoe_if_create(fcoe, &vport->dev, 1);
2514 mutex_unlock(&fcoe_config_mutex);
2515
2516 if (IS_ERR(vn_port)) {
2517 printk(KERN_ERR "fcoe: fcoe_vport_create(%s) failed\n",
2518 netdev->name);
2519 return -EIO;
2520 }
2521
2522 if (disabled) {
2523 fc_vport_set_state(vport, FC_VPORT_DISABLED);
2524 } else {
2525 vn_port->boot_time = jiffies;
2526 fc_fabric_login(vn_port);
2527 fc_vport_setlink(vn_port);
2528 }
2529 return 0;
2530}
2531
2532/**
2533 * fcoe_vport_destroy() - destroy the fc_host/scsi_host for a vport
2534 * @vport: fc_vport object that is being destroyed
2535 *
2536 * Returns: 0 for success
2537 */
2538static int fcoe_vport_destroy(struct fc_vport *vport)
2539{
2540 struct Scsi_Host *shost = vport_to_shost(vport);
2541 struct fc_lport *n_port = shost_priv(shost);
2542 struct fc_lport *vn_port = vport->dd_data;
2543 struct fcoe_port *port = lport_priv(vn_port);
2544
2545 mutex_lock(&n_port->lp_mutex);
2546 list_del(&vn_port->list);
2547 mutex_unlock(&n_port->lp_mutex);
2548 schedule_work(&port->destroy_work);
2549 return 0;
2550}
2551
2552/**
2553 * fcoe_vport_disable() - change vport state
2554 * @vport: vport to bring online/offline
2555 * @disable: should the vport be disabled?
2556 */
2557static int fcoe_vport_disable(struct fc_vport *vport, bool disable)
2558{
2559 struct fc_lport *lport = vport->dd_data;
2560
2561 if (disable) {
2562 fc_vport_set_state(vport, FC_VPORT_DISABLED);
2563 fc_fabric_logoff(lport);
2564 } else {
2565 lport->boot_time = jiffies;
2566 fc_fabric_login(lport);
2567 fc_vport_setlink(lport);
2568 }
2569
2570 return 0;
2571}
2572
2573/**
2574 * fcoe_set_vport_symbolic_name() - append vport string to symbolic name
2575 * @vport: fc_vport with a new symbolic name string
2576 *
2577 * After generating a new symbolic name string, a new RSPN_ID request is
2578 * sent to the name server. There is no response handler, so if it fails
2579 * for some reason it will not be retried.
2580 */
2581static void fcoe_set_vport_symbolic_name(struct fc_vport *vport)
2582{
2583 struct fc_lport *lport = vport->dd_data;
2584 struct fc_frame *fp;
2585 size_t len;
2586
2587 snprintf(fc_host_symbolic_name(lport->host), FC_SYMBOLIC_NAME_SIZE,
2588 "%s v%s over %s : %s", FCOE_NAME, FCOE_VERSION,
2589 fcoe_netdev(lport)->name, vport->symbolic_name);
2590
2591 if (lport->state != LPORT_ST_READY)
2592 return;
2593
2594 len = strnlen(fc_host_symbolic_name(lport->host), 255);
2595 fp = fc_frame_alloc(lport,
2596 sizeof(struct fc_ct_hdr) +
2597 sizeof(struct fc_ns_rspn) + len);
2598 if (!fp)
2599 return;
2600 lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RSPN_ID,
2601 NULL, NULL, 3 * lport->r_a_tov);
2602}
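
The snprintf() above yields a symbolic name of the form
"<name> v<version> over <netdev> : <vport name>". A standalone sketch with
illustrative values (the real FCOE_VERSION string varies by release):

#include <stdio.h>

int main(void)
{
	char sym[256];  /* FC_SYMBOLIC_NAME_SIZE is 256 in libfc */

	snprintf(sym, sizeof(sym), "%s v%s over %s : %s",
		 "fcoe", "0.1", "eth0", "vport1");
	printf("%s\n", sym);  /* "fcoe v0.1 over eth0 : vport1" */
	return 0;
}
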
2603
2604/**
2605 * fcoe_get_lesb() - Fill the FCoE Link Error Status Block
2606 * @lport: the local port
2607 * @fc_lesb: the link error status block
2608 */
2609static void fcoe_get_lesb(struct fc_lport *lport,
2610 struct fc_els_lesb *fc_lesb)
2611{
2612 unsigned int cpu;
2613 u32 lfc, vlfc, mdac;
2614 struct fcoe_dev_stats *devst;
2615 struct fcoe_fc_els_lesb *lesb;
2616 struct net_device *netdev = fcoe_netdev(lport);
2617
2618 lfc = 0;
2619 vlfc = 0;
2620 mdac = 0;
2621 lesb = (struct fcoe_fc_els_lesb *)fc_lesb;
2622 memset(lesb, 0, sizeof(*lesb));
2623 for_each_possible_cpu(cpu) {
2624 devst = per_cpu_ptr(lport->dev_stats, cpu);
2625 lfc += devst->LinkFailureCount;
2626 vlfc += devst->VLinkFailureCount;
2627 mdac += devst->MissDiscAdvCount;
2628 }
2629 lesb->lesb_link_fail = htonl(lfc);
2630 lesb->lesb_vlink_fail = htonl(vlfc);
2631 lesb->lesb_miss_fka = htonl(mdac);
2632 lesb->lesb_fcs_error = htonl(dev_get_stats(netdev)->rx_crc_errors);
2633}
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index ce7f60fb1bc0..c69b2c56c2d1 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -32,7 +32,7 @@
32#define FCOE_NAME "fcoe" 32#define FCOE_NAME "fcoe"
33#define FCOE_VENDOR "Open-FCoE.org" 33#define FCOE_VENDOR "Open-FCoE.org"
34 34
35#define FCOE_MAX_LUN 255 35#define FCOE_MAX_LUN 0xFFFF
36#define FCOE_MAX_FCP_TARGET 256 36#define FCOE_MAX_FCP_TARGET 256
37 37
38#define FCOE_MAX_OUTSTANDING_COMMANDS 1024 38#define FCOE_MAX_OUTSTANDING_COMMANDS 1024
@@ -40,11 +40,17 @@
40#define FCOE_MIN_XID 0x0000 /* the min xid supported by fcoe_sw */ 40#define FCOE_MIN_XID 0x0000 /* the min xid supported by fcoe_sw */
41#define FCOE_MAX_XID 0x0FFF /* the max xid supported by fcoe_sw */ 41#define FCOE_MAX_XID 0x0FFF /* the max xid supported by fcoe_sw */
42 42
43/*
44 * Max MTU for FCoE: 14 (FCoE header) + 24 (FC header) + 2112 (max FC payload)
45 * + 4 (FC CRC) + 4 (FCoE trailer) = 2158 bytes
46 */
47#define FCOE_MTU 2158
48
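
The sum in the FCOE_MTU comment can be checked mechanically; a compile-time
sketch of the same arithmetic (C11 _Static_assert):

enum {
	FCOE_HDR_BYTES     = 14,   /* FCoE header */
	FC_HDR_BYTES       = 24,   /* FC header */
	FC_PAYLOAD_BYTES   = 2112, /* max FC payload */
	FC_CRC_BYTES       = 4,    /* FC CRC */
	FCOE_TRAILER_BYTES = 4,    /* FCoE trailer */
};

_Static_assert(FCOE_HDR_BYTES + FC_HDR_BYTES + FC_PAYLOAD_BYTES +
	       FC_CRC_BYTES + FCOE_TRAILER_BYTES == 2158,
	       "FCOE_MTU arithmetic");
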
43unsigned int fcoe_debug_logging; 49unsigned int fcoe_debug_logging;
44module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR); 50module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR);
45MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); 51MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
46 52
47#define FCOE_LOGGING 0x01 /* General logging, not categorized */ 53#define FCOE_LOGGING 0x01 /* General logging, not categorized */
48#define FCOE_NETDEV_LOGGING 0x02 /* Netdevice logging */ 54#define FCOE_NETDEV_LOGGING 0x02 /* Netdevice logging */
49 55
50#define FCOE_CHECK_LOGGING(LEVEL, CMD) \ 56#define FCOE_CHECK_LOGGING(LEVEL, CMD) \
@@ -64,8 +70,13 @@ do { \
64 printk(KERN_INFO "fcoe: %s: " fmt, \ 70 printk(KERN_INFO "fcoe: %s: " fmt, \
65 netdev->name, ##args);) 71 netdev->name, ##args);)
66 72
67/* 73/**
68 * this percpu struct for fcoe 74 * struct fcoe_percpu_s - The per-CPU context for FCoE receive threads
75 * @thread: The thread context
76 * @fcoe_rx_list: The queue of pending packets to process
77 * @page: The memory page for calculating frame trailer CRCs
78 * @crc_eof_offset: The offset into the CRC page pointing to available
79 * memory for a new trailer
69 */ 80 */
70struct fcoe_percpu_s { 81struct fcoe_percpu_s {
71 struct task_struct *thread; 82 struct task_struct *thread;
@@ -74,37 +85,62 @@ struct fcoe_percpu_s {
74 int crc_eof_offset; 85 int crc_eof_offset;
75}; 86};
76 87
77/* 88/**
78 * an FCoE interface, 1:1 with netdev 89 * struct fcoe_interface - A FCoE interface
90 * @list: Handle for a list of FCoE interfaces
91 * @netdev: The associated net device
92 * @fcoe_packet_type: FCoE packet type
93 * @fip_packet_type: FIP packet type
94 * @ctlr: The FCoE controller (for FIP)
95 * @oem: The offload exchange manager for all local port
96 * instances associated with this port
97 * @kref: The kernel reference
98 *
99 * This structure is 1:1 with a net device.
79 */ 100 */
80struct fcoe_interface { 101struct fcoe_interface {
81 struct list_head list; 102 struct list_head list;
82 struct net_device *netdev; 103 struct net_device *netdev;
83 struct packet_type fcoe_packet_type; 104 struct packet_type fcoe_packet_type;
84 struct packet_type fip_packet_type; 105 struct packet_type fip_packet_type;
85 struct fcoe_ctlr ctlr; 106 struct fcoe_ctlr ctlr;
86 struct fc_exch_mgr *oem; /* offload exchange manager */ 107 struct fc_exch_mgr *oem;
87 struct kref kref; 108 struct kref kref;
88}; 109};
89 110
90/* 111/**
91 * the FCoE private structure that's allocated along with the 112 * struct fcoe_port - The FCoE private structure
92 * Scsi_Host and libfc fc_lport structures 113 * @fcoe: The associated fcoe interface
114 * @lport: The associated local port
115 * @fcoe_pending_queue: The pending Rx queue of skbs
116 * @fcoe_pending_queue_active: Indicates if the pending queue is active
117 * @timer: The queue timer
118 * @destroy_work: Handle for work context
119 * (to prevent RTNL deadlocks)
120 * @data_src_addr: Source address for data
121 *
122 * An instance of this structure is to be allocated along with the
123 * Scsi_Host and libfc fc_lport structures.
93 */ 124 */
94struct fcoe_port { 125struct fcoe_port {
95 struct fcoe_interface *fcoe; 126 struct fcoe_interface *fcoe;
96 struct fc_lport *lport; 127 struct fc_lport *lport;
97 struct sk_buff_head fcoe_pending_queue; 128 struct sk_buff_head fcoe_pending_queue;
98 u8 fcoe_pending_queue_active; 129 u8 fcoe_pending_queue_active;
99 struct timer_list timer; /* queue timer */ 130 struct timer_list timer;
100 struct work_struct destroy_work; /* to prevent rtnl deadlocks */ 131 struct work_struct destroy_work;
132 u8 data_src_addr[ETH_ALEN];
101}; 133};
102 134
103#define fcoe_from_ctlr(fip) container_of(fip, struct fcoe_interface, ctlr) 135#define fcoe_from_ctlr(fip) container_of(fip, struct fcoe_interface, ctlr)
104 136
105static inline struct net_device *fcoe_netdev(const struct fc_lport *lp) 137/**
138 * fcoe_netdev() - Return the net device associated with a local port
139 * @lport: The local port to get the net device from
140 */
141static inline struct net_device *fcoe_netdev(const struct fc_lport *lport)
106{ 142{
107 return ((struct fcoe_port *)lport_priv(lp))->fcoe->netdev; 143 return ((struct fcoe_port *)lport_priv(lport))->fcoe->netdev;
108} 144}
109 145
110#endif /* _FCOE_H_ */ 146#endif /* _FCOE_H_ */
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
index 11ae5c94608b..3440da48d169 100644
--- a/drivers/scsi/fcoe/libfcoe.c
+++ b/drivers/scsi/fcoe/libfcoe.c
@@ -31,6 +31,7 @@
31#include <linux/if_vlan.h> 31#include <linux/if_vlan.h>
32#include <linux/errno.h> 32#include <linux/errno.h>
33#include <linux/bitops.h> 33#include <linux/bitops.h>
34#include <linux/slab.h>
34#include <net/rtnetlink.h> 35#include <net/rtnetlink.h>
35 36
36#include <scsi/fc/fc_els.h> 37#include <scsi/fc/fc_els.h>
@@ -59,26 +60,30 @@ unsigned int libfcoe_debug_logging;
59module_param_named(debug_logging, libfcoe_debug_logging, int, S_IRUGO|S_IWUSR); 60module_param_named(debug_logging, libfcoe_debug_logging, int, S_IRUGO|S_IWUSR);
60MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); 61MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
61 62
62#define LIBFCOE_LOGGING 0x01 /* General logging, not categorized */ 63#define LIBFCOE_LOGGING 0x01 /* General logging, not categorized */
63#define LIBFCOE_FIP_LOGGING 0x02 /* FIP logging */ 64#define LIBFCOE_FIP_LOGGING 0x02 /* FIP logging */
64 65
65#define LIBFCOE_CHECK_LOGGING(LEVEL, CMD) \ 66#define LIBFCOE_CHECK_LOGGING(LEVEL, CMD) \
66do { \ 67do { \
67 if (unlikely(libfcoe_debug_logging & LEVEL)) \ 68 if (unlikely(libfcoe_debug_logging & LEVEL)) \
68 do { \ 69 do { \
69 CMD; \ 70 CMD; \
70 } while (0); \ 71 } while (0); \
71} while (0) 72} while (0)
72 73
73#define LIBFCOE_DBG(fmt, args...) \ 74#define LIBFCOE_DBG(fmt, args...) \
74 LIBFCOE_CHECK_LOGGING(LIBFCOE_LOGGING, \ 75 LIBFCOE_CHECK_LOGGING(LIBFCOE_LOGGING, \
75 printk(KERN_INFO "libfcoe: " fmt, ##args);) 76 printk(KERN_INFO "libfcoe: " fmt, ##args);)
76 77
77#define LIBFCOE_FIP_DBG(fmt, args...) \ 78#define LIBFCOE_FIP_DBG(fip, fmt, args...) \
78 LIBFCOE_CHECK_LOGGING(LIBFCOE_FIP_LOGGING, \ 79 LIBFCOE_CHECK_LOGGING(LIBFCOE_FIP_LOGGING, \
79 printk(KERN_INFO "fip: " fmt, ##args);) 80 printk(KERN_INFO "host%d: fip: " fmt, \
81 (fip)->lp->host->host_no, ##args);)
80 82
81/* 83/**
84 * fcoe_ctlr_mtu_valid() - Check if a FCF's MTU is valid
85 * @fcf: The FCF to check
86 *
82 * Return non-zero if FCF fcoe_size has been validated. 87 * Return non-zero if FCF fcoe_size has been validated.
83 */ 88 */
84static inline int fcoe_ctlr_mtu_valid(const struct fcoe_fcf *fcf) 89static inline int fcoe_ctlr_mtu_valid(const struct fcoe_fcf *fcf)
@@ -86,7 +91,10 @@ static inline int fcoe_ctlr_mtu_valid(const struct fcoe_fcf *fcf)
86 return (fcf->flags & FIP_FL_SOL) != 0; 91 return (fcf->flags & FIP_FL_SOL) != 0;
87} 92}
88 93
89/* 94/**
95 * fcoe_ctlr_fcf_usable() - Check if a FCF is usable
96 * @fcf: The FCF to check
97 *
90 * Return non-zero if the FCF is usable. 98 * Return non-zero if the FCF is usable.
91 */ 99 */
92static inline int fcoe_ctlr_fcf_usable(struct fcoe_fcf *fcf) 100static inline int fcoe_ctlr_fcf_usable(struct fcoe_fcf *fcf)
@@ -97,12 +105,13 @@ static inline int fcoe_ctlr_fcf_usable(struct fcoe_fcf *fcf)
97} 105}
98 106
99/** 107/**
100 * fcoe_ctlr_init() - Initialize the FCoE Controller instance. 108 * fcoe_ctlr_init() - Initialize the FCoE Controller instance
101 * @fip: FCoE controller. 109 * @fip: The FCoE controller to initialize
102 */ 110 */
103void fcoe_ctlr_init(struct fcoe_ctlr *fip) 111void fcoe_ctlr_init(struct fcoe_ctlr *fip)
104{ 112{
105 fip->state = FIP_ST_LINK_WAIT; 113 fip->state = FIP_ST_LINK_WAIT;
114 fip->mode = FIP_ST_AUTO;
106 INIT_LIST_HEAD(&fip->fcfs); 115 INIT_LIST_HEAD(&fip->fcfs);
107 spin_lock_init(&fip->lock); 116 spin_lock_init(&fip->lock);
108 fip->flogi_oxid = FC_XID_UNKNOWN; 117 fip->flogi_oxid = FC_XID_UNKNOWN;
@@ -114,8 +123,8 @@ void fcoe_ctlr_init(struct fcoe_ctlr *fip)
114EXPORT_SYMBOL(fcoe_ctlr_init); 123EXPORT_SYMBOL(fcoe_ctlr_init);
115 124
116/** 125/**
117 * fcoe_ctlr_reset_fcfs() - Reset and free all FCFs for a controller. 126 * fcoe_ctlr_reset_fcfs() - Reset and free all FCFs for a controller
118 * @fip: FCoE controller. 127 * @fip: The FCoE controller whose FCFs are to be reset
119 * 128 *
120 * Called with &fcoe_ctlr lock held. 129 * Called with &fcoe_ctlr lock held.
121 */ 130 */
@@ -134,8 +143,8 @@ static void fcoe_ctlr_reset_fcfs(struct fcoe_ctlr *fip)
134} 143}
135 144
136/** 145/**
137 * fcoe_ctlr_destroy() - Disable and tear-down the FCoE controller. 146 * fcoe_ctlr_destroy() - Disable and tear down a FCoE controller
138 * @fip: FCoE controller. 147 * @fip: The FCoE controller to tear down
139 * 148 *
140 * This is called by FCoE drivers before freeing the &fcoe_ctlr. 149 * This is called by FCoE drivers before freeing the &fcoe_ctlr.
141 * 150 *
@@ -148,9 +157,7 @@ static void fcoe_ctlr_reset_fcfs(struct fcoe_ctlr *fip)
148void fcoe_ctlr_destroy(struct fcoe_ctlr *fip) 157void fcoe_ctlr_destroy(struct fcoe_ctlr *fip)
149{ 158{
150 cancel_work_sync(&fip->recv_work); 159 cancel_work_sync(&fip->recv_work);
151 spin_lock_bh(&fip->fip_recv_list.lock); 160 skb_queue_purge(&fip->fip_recv_list);
152 __skb_queue_purge(&fip->fip_recv_list);
153 spin_unlock_bh(&fip->fip_recv_list.lock);
154 161
155 spin_lock_bh(&fip->lock); 162 spin_lock_bh(&fip->lock);
156 fip->state = FIP_ST_DISABLED; 163 fip->state = FIP_ST_DISABLED;
@@ -162,8 +169,8 @@ void fcoe_ctlr_destroy(struct fcoe_ctlr *fip)
162EXPORT_SYMBOL(fcoe_ctlr_destroy); 169EXPORT_SYMBOL(fcoe_ctlr_destroy);
163 170
164/** 171/**
165 * fcoe_ctlr_fcoe_size() - Return the maximum FCoE size required for VN_Port. 172 * fcoe_ctlr_fcoe_size() - Return the maximum FCoE size required for VN_Port
166 * @fip: FCoE controller. 173 * @fip: The FCoE controller to get the maximum FCoE size from
167 * 174 *
168 * Returns the maximum packet size including the FCoE header and trailer, 175 * Returns the maximum packet size including the FCoE header and trailer,
169 * but not including any Ethernet or VLAN headers. 176 * but not including any Ethernet or VLAN headers.
@@ -180,9 +187,9 @@ static inline u32 fcoe_ctlr_fcoe_size(struct fcoe_ctlr *fip)
180} 187}
181 188
182/** 189/**
183 * fcoe_ctlr_solicit() - Send a solicitation. 190 * fcoe_ctlr_solicit() - Send a FIP solicitation
184 * @fip: FCoE controller. 191 * @fip: The FCoE controller to send the solicitation on
185 * @fcf: Destination FCF. If NULL, a multicast solicitation is sent. 192 * @fcf: The destination FCF (if NULL, a multicast solicitation is sent)
186 */ 193 */
187static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf) 194static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf)
188{ 195{
@@ -241,8 +248,8 @@ static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf)
241} 248}
242 249
243/** 250/**
244 * fcoe_ctlr_link_up() - Start FCoE controller. 251 * fcoe_ctlr_link_up() - Start FCoE controller
245 * @fip: FCoE controller. 252 * @fip: The FCoE controller to start
246 * 253 *
247 * Called from the LLD when the network link is ready. 254 * Called from the LLD when the network link is ready.
248 */ 255 */
@@ -255,11 +262,12 @@ void fcoe_ctlr_link_up(struct fcoe_ctlr *fip)
255 spin_unlock_bh(&fip->lock); 262 spin_unlock_bh(&fip->lock);
256 fc_linkup(fip->lp); 263 fc_linkup(fip->lp);
257 } else if (fip->state == FIP_ST_LINK_WAIT) { 264 } else if (fip->state == FIP_ST_LINK_WAIT) {
258 fip->state = FIP_ST_AUTO; 265 fip->state = fip->mode;
259 fip->last_link = 1; 266 fip->last_link = 1;
260 fip->link = 1; 267 fip->link = 1;
261 spin_unlock_bh(&fip->lock); 268 spin_unlock_bh(&fip->lock);
262 LIBFCOE_FIP_DBG("%s", "setting AUTO mode.\n"); 269 if (fip->state == FIP_ST_AUTO)
270 LIBFCOE_FIP_DBG(fip, "%s", "setting AUTO mode.\n");
263 fc_linkup(fip->lp); 271 fc_linkup(fip->lp);
264 fcoe_ctlr_solicit(fip, NULL); 272 fcoe_ctlr_solicit(fip, NULL);
265 } else 273 } else
@@ -268,45 +276,23 @@ void fcoe_ctlr_link_up(struct fcoe_ctlr *fip)
268EXPORT_SYMBOL(fcoe_ctlr_link_up); 276EXPORT_SYMBOL(fcoe_ctlr_link_up);
269 277
270/** 278/**
271 * fcoe_ctlr_reset() - Reset FIP. 279 * fcoe_ctlr_reset() - Reset a FCoE controller
272 * @fip: FCoE controller. 280 * @fip: The FCoE controller to reset
273 * @new_state: FIP state to be entered.
274 *
275 * Returns non-zero if the link was up and now isn't.
276 */ 281 */
277static int fcoe_ctlr_reset(struct fcoe_ctlr *fip, enum fip_state new_state) 282static void fcoe_ctlr_reset(struct fcoe_ctlr *fip)
278{ 283{
279 struct fc_lport *lp = fip->lp;
280 int link_dropped;
281
282 spin_lock_bh(&fip->lock);
283 fcoe_ctlr_reset_fcfs(fip); 284 fcoe_ctlr_reset_fcfs(fip);
284 del_timer(&fip->timer); 285 del_timer(&fip->timer);
285 fip->state = new_state;
286 fip->ctlr_ka_time = 0; 286 fip->ctlr_ka_time = 0;
287 fip->port_ka_time = 0; 287 fip->port_ka_time = 0;
288 fip->sol_time = 0; 288 fip->sol_time = 0;
289 fip->flogi_oxid = FC_XID_UNKNOWN; 289 fip->flogi_oxid = FC_XID_UNKNOWN;
290 fip->map_dest = 0; 290 fip->map_dest = 0;
291 fip->last_link = 0;
292 link_dropped = fip->link;
293 fip->link = 0;
294 spin_unlock_bh(&fip->lock);
295
296 if (link_dropped)
297 fc_linkdown(lp);
298
299 if (new_state == FIP_ST_ENABLED) {
300 fcoe_ctlr_solicit(fip, NULL);
301 fc_linkup(lp);
302 link_dropped = 0;
303 }
304 return link_dropped;
305} 291}
306 292
307/** 293/**
308 * fcoe_ctlr_link_down() - Stop FCoE controller. 294 * fcoe_ctlr_link_down() - Stop a FCoE controller
309 * @fip: FCoE controller. 295 * @fip: The FCoE controller to be stopped
310 * 296 *
311 * Returns non-zero if the link was up and now isn't. 297 * Returns non-zero if the link was up and now isn't.
312 * 298 *
@@ -315,15 +301,29 @@ static int fcoe_ctlr_reset(struct fcoe_ctlr *fip, enum fip_state new_state)
315 */ 301 */
316int fcoe_ctlr_link_down(struct fcoe_ctlr *fip) 302int fcoe_ctlr_link_down(struct fcoe_ctlr *fip)
317{ 303{
318 return fcoe_ctlr_reset(fip, FIP_ST_LINK_WAIT); 304 int link_dropped;
305
306 LIBFCOE_FIP_DBG(fip, "link down.\n");
307 spin_lock_bh(&fip->lock);
308 fcoe_ctlr_reset(fip);
309 link_dropped = fip->link;
310 fip->link = 0;
311 fip->last_link = 0;
312 fip->state = FIP_ST_LINK_WAIT;
313 spin_unlock_bh(&fip->lock);
314
315 if (link_dropped)
316 fc_linkdown(fip->lp);
317 return link_dropped;
319} 318}
320EXPORT_SYMBOL(fcoe_ctlr_link_down); 319EXPORT_SYMBOL(fcoe_ctlr_link_down);
321 320
322/** 321/**
323 * fcoe_ctlr_send_keep_alive() - Send a keep-alive to the selected FCF. 322 * fcoe_ctlr_send_keep_alive() - Send a keep-alive to the selected FCF
324 * @fip: FCoE controller. 323 * @fip: The FCoE controller to send the FKA on
325 * @ports: 0 for controller keep-alive, 1 for port keep-alive. 324 * @lport: libfc fc_lport to send from
326 * @sa: source MAC address. 325 * @ports: 0 for controller keep-alive, 1 for port keep-alive
326 * @sa: The source MAC address
327 * 327 *
328 * A controller keep-alive is sent every fka_period (typically 8 seconds). 328 * A controller keep-alive is sent every fka_period (typically 8 seconds).
329 * The source MAC is the native MAC address. 329 * The source MAC is the native MAC address.
@@ -332,7 +332,9 @@ EXPORT_SYMBOL(fcoe_ctlr_link_down);
332 * The source MAC is the assigned mapped source address. 332 * The source MAC is the assigned mapped source address.
333 * The destination is the FCF's F-port. 333 * The destination is the FCF's F-port.
334 */ 334 */
335static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, int ports, u8 *sa) 335static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip,
336 struct fc_lport *lport,
337 int ports, u8 *sa)
336{ 338{
337 struct sk_buff *skb; 339 struct sk_buff *skb;
338 struct fip_kal { 340 struct fip_kal {
@@ -350,8 +352,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, int ports, u8 *sa)
350 if (!fcf || !fc_host_port_id(lp->host)) 352 if (!fcf || !fc_host_port_id(lp->host))
351 return; 353 return;
352 354
353 len = fcoe_ctlr_fcoe_size(fip) + sizeof(struct ethhdr); 355 len = sizeof(*kal) + ports * sizeof(*vn);
354 BUG_ON(len < sizeof(*kal) + sizeof(*vn));
355 skb = dev_alloc_skb(len); 356 skb = dev_alloc_skb(len);
356 if (!skb) 357 if (!skb)
357 return; 358 return;
@@ -366,7 +367,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, int ports, u8 *sa)
366 kal->fip.fip_op = htons(FIP_OP_CTRL); 367 kal->fip.fip_op = htons(FIP_OP_CTRL);
367 kal->fip.fip_subcode = FIP_SC_KEEP_ALIVE; 368 kal->fip.fip_subcode = FIP_SC_KEEP_ALIVE;
368 kal->fip.fip_dl_len = htons((sizeof(kal->mac) + 369 kal->fip.fip_dl_len = htons((sizeof(kal->mac) +
369 ports * sizeof(*vn)) / FIP_BPW); 370 ports * sizeof(*vn)) / FIP_BPW);
370 kal->fip.fip_flags = htons(FIP_FL_FPMA); 371 kal->fip.fip_flags = htons(FIP_FL_FPMA);
371 if (fip->spma) 372 if (fip->spma)
372 kal->fip.fip_flags |= htons(FIP_FL_SPMA); 373 kal->fip.fip_flags |= htons(FIP_FL_SPMA);
@@ -374,16 +375,14 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, int ports, u8 *sa)
374 kal->mac.fd_desc.fip_dtype = FIP_DT_MAC; 375 kal->mac.fd_desc.fip_dtype = FIP_DT_MAC;
375 kal->mac.fd_desc.fip_dlen = sizeof(kal->mac) / FIP_BPW; 376 kal->mac.fd_desc.fip_dlen = sizeof(kal->mac) / FIP_BPW;
376 memcpy(kal->mac.fd_mac, fip->ctl_src_addr, ETH_ALEN); 377 memcpy(kal->mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);
377
378 if (ports) { 378 if (ports) {
379 vn = (struct fip_vn_desc *)(kal + 1); 379 vn = (struct fip_vn_desc *)(kal + 1);
380 vn->fd_desc.fip_dtype = FIP_DT_VN_ID; 380 vn->fd_desc.fip_dtype = FIP_DT_VN_ID;
381 vn->fd_desc.fip_dlen = sizeof(*vn) / FIP_BPW; 381 vn->fd_desc.fip_dlen = sizeof(*vn) / FIP_BPW;
382 memcpy(vn->fd_mac, fip->data_src_addr, ETH_ALEN); 382 memcpy(vn->fd_mac, fip->get_src_addr(lport), ETH_ALEN);
383 hton24(vn->fd_fc_id, fc_host_port_id(lp->host)); 383 hton24(vn->fd_fc_id, fc_host_port_id(lp->host));
384 put_unaligned_be64(lp->wwpn, &vn->fd_wwpn); 384 put_unaligned_be64(lp->wwpn, &vn->fd_wwpn);
385 } 385 }
386
387 skb_put(skb, len); 386 skb_put(skb, len);
388 skb->protocol = htons(ETH_P_FIP); 387 skb->protocol = htons(ETH_P_FIP);
389 skb_reset_mac_header(skb); 388 skb_reset_mac_header(skb);
@@ -392,10 +391,10 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, int ports, u8 *sa)
392} 391}
393 392
394/** 393/**
395 * fcoe_ctlr_encaps() - Encapsulate an ELS frame for FIP, without sending it. 394 * fcoe_ctlr_encaps() - Encapsulate an ELS frame for FIP, without sending it
396 * @fip: FCoE controller. 395 * @fip: The FCoE controller for the ELS frame
397 * @dtype: FIP descriptor type for the frame. 396 * @dtype: The FIP descriptor type for the frame
398 * @skb: FCoE ELS frame including FC header but no FCoE headers. 397 * @skb: The FCoE ELS frame including FC header but no FCoE headers
399 * 398 *
400 * Returns non-zero error code on failure. 399 * Returns non-zero error code on failure.
401 * 400 *
@@ -405,7 +404,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, int ports, u8 *sa)
405 * Headroom includes the FIP encapsulation description, FIP header, and 404 * Headroom includes the FIP encapsulation description, FIP header, and
406 * Ethernet header. The tailroom is for the FIP MAC descriptor. 405 * Ethernet header. The tailroom is for the FIP MAC descriptor.
407 */ 406 */
408static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, 407static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, struct fc_lport *lport,
409 u8 dtype, struct sk_buff *skb) 408 u8 dtype, struct sk_buff *skb)
410{ 409{
411 struct fip_encaps_head { 410 struct fip_encaps_head {
@@ -449,8 +448,8 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip,
449 memset(mac, 0, sizeof(mac)); 448 memset(mac, 0, sizeof(mac));
450 mac->fd_desc.fip_dtype = FIP_DT_MAC; 449 mac->fd_desc.fip_dtype = FIP_DT_MAC;
451 mac->fd_desc.fip_dlen = sizeof(*mac) / FIP_BPW; 450 mac->fd_desc.fip_dlen = sizeof(*mac) / FIP_BPW;
452 if (dtype != FIP_DT_FLOGI) 451 if (dtype != FIP_DT_FLOGI && dtype != FIP_DT_FDISC)
453 memcpy(mac->fd_mac, fip->data_src_addr, ETH_ALEN); 452 memcpy(mac->fd_mac, fip->get_src_addr(lport), ETH_ALEN);
454 else if (fip->spma) 453 else if (fip->spma)
455 memcpy(mac->fd_mac, fip->ctl_src_addr, ETH_ALEN); 454 memcpy(mac->fd_mac, fip->ctl_src_addr, ETH_ALEN);
456 455
@@ -463,6 +462,7 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip,
463/** 462/**
464 * fcoe_ctlr_els_send() - Send an ELS frame encapsulated by FIP if appropriate. 463 * fcoe_ctlr_els_send() - Send an ELS frame encapsulated by FIP if appropriate.
465 * @fip: FCoE controller. 464 * @fip: FCoE controller.
465 * @lport: libfc fc_lport to send from
466 * @skb: FCoE ELS frame including FC header but no FCoE headers. 466 * @skb: FCoE ELS frame including FC header but no FCoE headers.
467 * 467 *
468 * Returns a non-zero error code if the frame should not be sent. 468 * Returns a non-zero error code if the frame should not be sent.
@@ -471,11 +471,13 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip,
471 * The caller must check that the length is a multiple of 4. 471 * The caller must check that the length is a multiple of 4.
472 * The SKB must have enough headroom (28 bytes) and tailroom (8 bytes). 472 * The SKB must have enough headroom (28 bytes) and tailroom (8 bytes).
473 */ 473 */
474int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct sk_buff *skb) 474int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
475 struct sk_buff *skb)
475{ 476{
476 struct fc_frame_header *fh; 477 struct fc_frame_header *fh;
477 u16 old_xid; 478 u16 old_xid;
478 u8 op; 479 u8 op;
480 u8 mac[ETH_ALEN];
479 481
480 fh = (struct fc_frame_header *)skb->data; 482 fh = (struct fc_frame_header *)skb->data;
481 op = *(u8 *)(fh + 1); 483 op = *(u8 *)(fh + 1);
@@ -498,6 +500,8 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
498 500
499 if (fip->state == FIP_ST_NON_FIP) 501 if (fip->state == FIP_ST_NON_FIP)
500 return 0; 502 return 0;
503 if (!fip->sel_fcf)
504 goto drop;
501 505
502 switch (op) { 506 switch (op) {
503 case ELS_FLOGI: 507 case ELS_FLOGI:
@@ -530,14 +534,15 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
530 * FLOGI. 534 * FLOGI.
531 */ 535 */
532 fip->flogi_oxid = FC_XID_UNKNOWN; 536 fip->flogi_oxid = FC_XID_UNKNOWN;
533 fc_fcoe_set_mac(fip->data_src_addr, fh->fh_s_id); 537 fc_fcoe_set_mac(mac, fh->fh_d_id);
538 fip->update_mac(lport, mac);
534 return 0; 539 return 0;
535 default: 540 default:
536 if (fip->state != FIP_ST_ENABLED) 541 if (fip->state != FIP_ST_ENABLED)
537 goto drop; 542 goto drop;
538 return 0; 543 return 0;
539 } 544 }
540 if (fcoe_ctlr_encaps(fip, op, skb)) 545 if (fcoe_ctlr_encaps(fip, lport, op, skb))
541 goto drop; 546 goto drop;
542 fip->send(fip, skb); 547 fip->send(fip, skb);
543 return -EINPROGRESS; 548 return -EINPROGRESS;
@@ -547,9 +552,9 @@ drop:
547} 552}
548EXPORT_SYMBOL(fcoe_ctlr_els_send); 553EXPORT_SYMBOL(fcoe_ctlr_els_send);
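
The fcoe_ctlr_els_send() comment above requires 28 bytes of headroom and 8
bytes of tailroom; those totals decompose into the pieces fcoe_ctlr_encaps()
prepends and appends. A sketch of the accounting, assuming the usual on-wire
sizes (14-byte Ethernet header, 10-byte FIP header, 4-byte encapsulation
descriptor, 8-byte MAC descriptor):

enum {
	ETH_HDR_BYTES    = 14,  /* Ethernet header */
	FIP_HDR_BYTES    = 10,  /* struct fip_header */
	FIP_ENCAPS_BYTES = 4,   /* ELS encapsulation descriptor */
	FIP_MAC_BYTES    = 8,   /* trailing MAC descriptor */
};

_Static_assert(ETH_HDR_BYTES + FIP_HDR_BYTES + FIP_ENCAPS_BYTES == 28,
	       "FIP ELS headroom");
_Static_assert(FIP_MAC_BYTES == 8, "FIP ELS tailroom");
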
549 554
550/* 555/**
551 * fcoe_ctlr_age_fcfs() - Reset and free all old FCFs for a controller. 556 * fcoe_ctlr_age_fcfs() - Reset and free all old FCFs for a controller
552 * @fip: FCoE controller. 557 * @fip: The FCoE controller to free FCFs on
553 * 558 *
554 * Called with lock held. 559 * Called with lock held.
555 * 560 *
@@ -558,14 +563,28 @@ EXPORT_SYMBOL(fcoe_ctlr_els_send);
558 * times its keep-alive period including fuzz. 563 * times its keep-alive period including fuzz.
559 * 564 *
560 * In addition, determine the time when an FCF selection can occur. 565 * In addition, determine the time when an FCF selection can occur.
566 *
567 * Also, increment the MissDiscAdvCount when no advertisement is received
568 * for the corresponding FCF for 1.5 * FKA_ADV_PERIOD (FC-BB-5 LESB).
561 */ 569 */
562static void fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip) 570static void fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
563{ 571{
564 struct fcoe_fcf *fcf; 572 struct fcoe_fcf *fcf;
565 struct fcoe_fcf *next; 573 struct fcoe_fcf *next;
566 unsigned long sel_time = 0; 574 unsigned long sel_time = 0;
575 unsigned long mda_time = 0;
567 576
568 list_for_each_entry_safe(fcf, next, &fip->fcfs, list) { 577 list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
578 mda_time = fcf->fka_period + (fcf->fka_period >> 1);
579 if ((fip->sel_fcf == fcf) &&
580 (time_after(jiffies, fcf->time + mda_time))) {
581 mod_timer(&fip->timer, jiffies + mda_time);
582 fc_lport_get_stats(fip->lp)->MissDiscAdvCount++;
583 printk(KERN_INFO "libfcoe: host%d: Missing Discovery "
584 "Advertisement for fab %llx count %lld\n",
585 fip->lp->host->host_no, fcf->fabric_name,
586 fc_lport_get_stats(fip->lp)->MissDiscAdvCount);
587 }
569 if (time_after(jiffies, fcf->time + fcf->fka_period * 3 + 588 if (time_after(jiffies, fcf->time + fcf->fka_period * 3 +
570 msecs_to_jiffies(FIP_FCF_FUZZ * 3))) { 589 msecs_to_jiffies(FIP_FCF_FUZZ * 3))) {
571 if (fip->sel_fcf == fcf) 590 if (fip->sel_fcf == fcf)
@@ -574,6 +593,7 @@ static void fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
574 WARN_ON(!fip->fcf_count); 593 WARN_ON(!fip->fcf_count);
575 fip->fcf_count--; 594 fip->fcf_count--;
576 kfree(fcf); 595 kfree(fcf);
596 fc_lport_get_stats(fip->lp)->VLinkFailureCount++;
577 } else if (fcoe_ctlr_mtu_valid(fcf) && 597 } else if (fcoe_ctlr_mtu_valid(fcf) &&
578 (!sel_time || time_before(sel_time, fcf->time))) { 598 (!sel_time || time_before(sel_time, fcf->time))) {
579 sel_time = fcf->time; 599 sel_time = fcf->time;
@@ -590,14 +610,16 @@ static void fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
590} 610}
591 611
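
The MissDiscAdvCount check added above fires when no advertisement has been
seen for 1.5 keep-alive periods, computed without floating point as
fka_period + (fka_period >> 1). A small standalone sketch:

#include <stdio.h>

/* 1.5 * t in integer arithmetic, as in fcoe_ctlr_age_fcfs(). */
static unsigned long fka_miss_window(unsigned long fka_period)
{
	return fka_period + (fka_period >> 1);
}

int main(void)
{
	/* an 8000 ms FKA period gives a 12000 ms miss window */
	printf("%lu\n", fka_miss_window(8000));
	return 0;
}
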
592/** 612/**
593 * fcoe_ctlr_parse_adv() - Decode a FIP advertisement into a new FCF entry. 613 * fcoe_ctlr_parse_adv() - Decode a FIP advertisement into a new FCF entry
594 * @skb: received FIP advertisement frame 614 * @fip: The FCoE controller receiving the advertisement
595 * @fcf: resulting FCF entry. 615 * @skb: The received FIP advertisement frame
616 * @fcf: The resulting FCF entry
596 * 617 *
597 * Returns zero on a valid parsed advertisement, 618 * Returns zero on a valid parsed advertisement,
598 * otherwise returns a non-zero value. 619 * otherwise returns a non-zero value.
599 */ 620 */
600static int fcoe_ctlr_parse_adv(struct sk_buff *skb, struct fcoe_fcf *fcf) 621static int fcoe_ctlr_parse_adv(struct fcoe_ctlr *fip,
622 struct sk_buff *skb, struct fcoe_fcf *fcf)
601{ 623{
602 struct fip_header *fiph; 624 struct fip_header *fiph;
603 struct fip_desc *desc = NULL; 625 struct fip_desc *desc = NULL;
@@ -636,7 +658,7 @@ static int fcoe_ctlr_parse_adv(struct sk_buff *skb, struct fcoe_fcf *fcf)
636 ((struct fip_mac_desc *)desc)->fd_mac, 658 ((struct fip_mac_desc *)desc)->fd_mac,
637 ETH_ALEN); 659 ETH_ALEN);
638 if (!is_valid_ether_addr(fcf->fcf_mac)) { 660 if (!is_valid_ether_addr(fcf->fcf_mac)) {
639 LIBFCOE_FIP_DBG("Invalid MAC address " 661 LIBFCOE_FIP_DBG(fip, "Invalid MAC address "
640 "in FIP adv\n"); 662 "in FIP adv\n");
641 return -EINVAL; 663 return -EINVAL;
642 } 664 }
@@ -659,6 +681,8 @@ static int fcoe_ctlr_parse_adv(struct sk_buff *skb, struct fcoe_fcf *fcf)
659 if (dlen != sizeof(struct fip_fka_desc)) 681 if (dlen != sizeof(struct fip_fka_desc))
660 goto len_err; 682 goto len_err;
661 fka = (struct fip_fka_desc *)desc; 683 fka = (struct fip_fka_desc *)desc;
684 if (fka->fd_flags & FIP_FKA_ADV_D)
685 fcf->fd_flags = 1;
662 t = ntohl(fka->fd_fka_period); 686 t = ntohl(fka->fd_fka_period);
663 if (t >= FCOE_CTLR_MIN_FKA) 687 if (t >= FCOE_CTLR_MIN_FKA)
664 fcf->fka_period = msecs_to_jiffies(t); 688 fcf->fka_period = msecs_to_jiffies(t);
@@ -670,7 +694,7 @@ static int fcoe_ctlr_parse_adv(struct sk_buff *skb, struct fcoe_fcf *fcf)
670 case FIP_DT_LOGO: 694 case FIP_DT_LOGO:
671 case FIP_DT_ELP: 695 case FIP_DT_ELP:
672 default: 696 default:
673 LIBFCOE_FIP_DBG("unexpected descriptor type %x " 697 LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x "
674 "in FIP adv\n", desc->fip_dtype); 698 "in FIP adv\n", desc->fip_dtype);
675 /* standard says ignore unknown descriptors >= 128 */ 699 /* standard says ignore unknown descriptors >= 128 */
676 if (desc->fip_dtype < FIP_DT_VENDOR_BASE) 700 if (desc->fip_dtype < FIP_DT_VENDOR_BASE)
@@ -687,15 +711,15 @@ static int fcoe_ctlr_parse_adv(struct sk_buff *skb, struct fcoe_fcf *fcf)
687 return 0; 711 return 0;
688 712
689len_err: 713len_err:
690 LIBFCOE_FIP_DBG("FIP length error in descriptor type %x len %zu\n", 714 LIBFCOE_FIP_DBG(fip, "FIP length error in descriptor type %x len %zu\n",
691 desc->fip_dtype, dlen); 715 desc->fip_dtype, dlen);
692 return -EINVAL; 716 return -EINVAL;
693} 717}
694 718
695/** 719/**
696 * fcoe_ctlr_recv_adv() - Handle an incoming advertisement. 720 * fcoe_ctlr_recv_adv() - Handle an incoming advertisement
697 * @fip: FCoE controller. 721 * @fip: The FCoE controller receiving the advertisement
698 * @skb: Received FIP packet. 722 * @skb: The received FIP packet
699 */ 723 */
700static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb) 724static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
701{ 725{
@@ -706,7 +730,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
706 int first = 0; 730 int first = 0;
707 int mtu_valid; 731 int mtu_valid;
708 732
709 if (fcoe_ctlr_parse_adv(skb, &new)) 733 if (fcoe_ctlr_parse_adv(fip, skb, &new))
710 return; 734 return;
711 735
712 spin_lock_bh(&fip->lock); 736 spin_lock_bh(&fip->lock);
@@ -752,7 +776,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
752 mtu_valid = fcoe_ctlr_mtu_valid(fcf); 776 mtu_valid = fcoe_ctlr_mtu_valid(fcf);
753 fcf->time = jiffies; 777 fcf->time = jiffies;
754 if (!found) { 778 if (!found) {
755 LIBFCOE_FIP_DBG("New FCF for fab %llx map %x val %d\n", 779 LIBFCOE_FIP_DBG(fip, "New FCF for fab %llx map %x val %d\n",
756 fcf->fabric_name, fcf->fc_map, mtu_valid); 780 fcf->fabric_name, fcf->fc_map, mtu_valid);
757 } 781 }
758 782
@@ -778,7 +802,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
778 */ 802 */
779 if (mtu_valid && !fip->sel_time && fcoe_ctlr_fcf_usable(fcf)) { 803 if (mtu_valid && !fip->sel_time && fcoe_ctlr_fcf_usable(fcf)) {
780 fip->sel_time = jiffies + 804 fip->sel_time = jiffies +
781 msecs_to_jiffies(FCOE_CTLR_START_DELAY); 805 msecs_to_jiffies(FCOE_CTLR_START_DELAY);
782 if (!timer_pending(&fip->timer) || 806 if (!timer_pending(&fip->timer) ||
783 time_before(fip->sel_time, fip->timer.expires)) 807 time_before(fip->sel_time, fip->timer.expires))
784 mod_timer(&fip->timer, fip->sel_time); 808 mod_timer(&fip->timer, fip->sel_time);
@@ -788,15 +812,15 @@ out:
788} 812}
789 813
790/** 814/**
791 * fcoe_ctlr_recv_els() - Handle an incoming FIP-encapsulated ELS frame. 815 * fcoe_ctlr_recv_els() - Handle an incoming FIP encapsulated ELS frame
792 * @fip: FCoE controller. 816 * @fip: The FCoE controller which received the packet
793 * @skb: Received FIP packet. 817 * @skb: The received FIP packet
794 */ 818 */
795static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb) 819static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
796{ 820{
797 struct fc_lport *lp = fip->lp; 821 struct fc_lport *lport = fip->lp;
798 struct fip_header *fiph; 822 struct fip_header *fiph;
799 struct fc_frame *fp; 823 struct fc_frame *fp = (struct fc_frame *)skb;
800 struct fc_frame_header *fh = NULL; 824 struct fc_frame_header *fh = NULL;
801 struct fip_desc *desc; 825 struct fip_desc *desc;
802 struct fip_encaps *els; 826 struct fip_encaps *els;
@@ -831,10 +855,11 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
831 ((struct fip_mac_desc *)desc)->fd_mac, 855 ((struct fip_mac_desc *)desc)->fd_mac,
832 ETH_ALEN); 856 ETH_ALEN);
833 if (!is_valid_ether_addr(granted_mac)) { 857 if (!is_valid_ether_addr(granted_mac)) {
834 LIBFCOE_FIP_DBG("Invalid MAC address " 858 LIBFCOE_FIP_DBG(fip, "Invalid MAC address "
835 "in FIP ELS\n"); 859 "in FIP ELS\n");
836 goto drop; 860 goto drop;
837 } 861 }
862 memcpy(fr_cb(fp)->granted_mac, granted_mac, ETH_ALEN);
838 break; 863 break;
839 case FIP_DT_FLOGI: 864 case FIP_DT_FLOGI:
840 case FIP_DT_FDISC: 865 case FIP_DT_FDISC:
@@ -850,7 +875,7 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
850 els_dtype = desc->fip_dtype; 875 els_dtype = desc->fip_dtype;
851 break; 876 break;
852 default: 877 default:
853 LIBFCOE_FIP_DBG("unexpected descriptor type %x " 878 LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x "
854 "in FIP adv\n", desc->fip_dtype); 879 "in FIP adv\n", desc->fip_dtype);
855 /* standard says ignore unknown descriptors >= 128 */ 880 /* standard says ignore unknown descriptors >= 128 */
856 if (desc->fip_dtype < FIP_DT_VENDOR_BASE) 881 if (desc->fip_dtype < FIP_DT_VENDOR_BASE)
@@ -867,11 +892,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
867 892
868 if (els_dtype == FIP_DT_FLOGI && sub == FIP_SC_REP && 893 if (els_dtype == FIP_DT_FLOGI && sub == FIP_SC_REP &&
869 fip->flogi_oxid == ntohs(fh->fh_ox_id) && 894 fip->flogi_oxid == ntohs(fh->fh_ox_id) &&
870 els_op == ELS_LS_ACC && is_valid_ether_addr(granted_mac)) { 895 els_op == ELS_LS_ACC && is_valid_ether_addr(granted_mac))
871 fip->flogi_oxid = FC_XID_UNKNOWN; 896 fip->flogi_oxid = FC_XID_UNKNOWN;
872 fip->update_mac(fip, fip->data_src_addr, granted_mac);
873 memcpy(fip->data_src_addr, granted_mac, ETH_ALEN);
874 }
875 897
876 /* 898 /*
877 * Convert skb into an fc_frame containing only the ELS. 899 * Convert skb into an fc_frame containing only the ELS.
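The removed block used to apply the granted MAC to the port from inside the receive path; after this change the address is only recorded in the frame's control block (the granted_mac field added to fr_cb() by this series) and consumed later when libfc assigns the port ID. A hedged sketch of the consumer side, modeled on the fnic handler further down in this patch (example_update_mac is hypothetical):

    /* Sketch: a lport_set_port_id handler picking up the snooped MAC. */
    static void example_set_port_id(struct fc_lport *lport, u32 port_id,
                                    struct fc_frame *fp)
    {
            if (fp && !is_zero_ether_addr(fr_cb(fp)->granted_mac))
                    example_update_mac(lport, fr_cb(fp)->granted_mac);
    }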
@@ -882,32 +904,32 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
882 fc_frame_init(fp); 904 fc_frame_init(fp);
883 fr_sof(fp) = FC_SOF_I3; 905 fr_sof(fp) = FC_SOF_I3;
884 fr_eof(fp) = FC_EOF_T; 906 fr_eof(fp) = FC_EOF_T;
885 fr_dev(fp) = lp; 907 fr_dev(fp) = lport;
886 908
887 stats = fc_lport_get_stats(lp); 909 stats = fc_lport_get_stats(lport);
888 stats->RxFrames++; 910 stats->RxFrames++;
889 stats->RxWords += skb->len / FIP_BPW; 911 stats->RxWords += skb->len / FIP_BPW;
890 912
891 fc_exch_recv(lp, fp); 913 fc_exch_recv(lport, fp);
892 return; 914 return;
893 915
894len_err: 916len_err:
895 LIBFCOE_FIP_DBG("FIP length error in descriptor type %x len %zu\n", 917 LIBFCOE_FIP_DBG(fip, "FIP length error in descriptor type %x len %zu\n",
896 desc->fip_dtype, dlen); 918 desc->fip_dtype, dlen);
897drop: 919drop:
898 kfree_skb(skb); 920 kfree_skb(skb);
899} 921}
900 922
901/** 923/**
902 * fcoe_ctlr_recv_clr_vlink() - Handle an incoming link reset frame. 924 * fcoe_ctlr_recv_clr_vlink() - Handle an incoming link reset frame
903 * @fip: FCoE controller. 925 * @fip: The FCoE controller that received the frame
904 * @fh: Received FIP header. 926 * @fh: The received FIP header
905 * 927 *
906 * There may be multiple VN_Port descriptors. 928 * There may be multiple VN_Port descriptors.
907 * The overall length has already been checked. 929 * The overall length has already been checked.
908 */ 930 */
909static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip, 931static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
910 struct fip_header *fh) 932 struct fip_header *fh)
911{ 933{
912 struct fip_desc *desc; 934 struct fip_desc *desc;
913 struct fip_mac_desc *mp; 935 struct fip_mac_desc *mp;
@@ -916,13 +938,13 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
916 size_t rlen; 938 size_t rlen;
917 size_t dlen; 939 size_t dlen;
918 struct fcoe_fcf *fcf = fip->sel_fcf; 940 struct fcoe_fcf *fcf = fip->sel_fcf;
919 struct fc_lport *lp = fip->lp; 941 struct fc_lport *lport = fip->lp;
920 u32 desc_mask; 942 u32 desc_mask;
921 943
922 LIBFCOE_FIP_DBG("Clear Virtual Link received\n"); 944 LIBFCOE_FIP_DBG(fip, "Clear Virtual Link received\n");
923 if (!fcf) 945 if (!fcf)
924 return; 946 return;
925 if (!fcf || !fc_host_port_id(lp->host)) 947 if (!fcf || !fc_host_port_id(lport->host))
926 return; 948 return;
927 949
928 /* 950 /*
@@ -958,9 +980,10 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
958 if (dlen < sizeof(*vp)) 980 if (dlen < sizeof(*vp))
959 return; 981 return;
960 if (compare_ether_addr(vp->fd_mac, 982 if (compare_ether_addr(vp->fd_mac,
961 fip->data_src_addr) == 0 && 983 fip->get_src_addr(lport)) == 0 &&
962 get_unaligned_be64(&vp->fd_wwpn) == lp->wwpn && 984 get_unaligned_be64(&vp->fd_wwpn) == lport->wwpn &&
963 ntoh24(vp->fd_fc_id) == fc_host_port_id(lp->host)) 985 ntoh24(vp->fd_fc_id) ==
986 fc_host_port_id(lport->host))
964 desc_mask &= ~BIT(FIP_DT_VN_ID); 987 desc_mask &= ~BIT(FIP_DT_VN_ID);
965 break; 988 break;
966 default: 989 default:
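The handler validates the Clear Virtual Link frame with a required-descriptor bitmask: desc_mask starts with one bit per mandatory descriptor, and each bit is cleared only when a matching, verified descriptor is seen, so the reset below fires only once the mask reaches zero. The pattern in isolation (mac_ok and next_desc are illustrative stand-ins for the checks above):

    /* Sketch: required-descriptor accounting for a FIP request. */
    u32 mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME) | BIT(FIP_DT_VN_ID);

    while (rlen >= sizeof(*desc)) {
            switch (desc->fip_dtype) {
            case FIP_DT_MAC:
                    if (mac_ok(desc))
                            mask &= ~BIT(FIP_DT_MAC);
                    break;
            /* ... FIP_DT_NAME and FIP_DT_VN_ID likewise ... */
            }
            desc = next_desc(desc, &rlen);
    }
    if (!mask)
            perform_reset();        /* every mandatory descriptor was valid */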
@@ -977,33 +1000,39 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
977 * reset only if all required descriptors were present and valid. 1000 * reset only if all required descriptors were present and valid.
978 */ 1001 */
979 if (desc_mask) { 1002 if (desc_mask) {
980 LIBFCOE_FIP_DBG("missing descriptors mask %x\n", desc_mask); 1003 LIBFCOE_FIP_DBG(fip, "missing descriptors mask %x\n",
1004 desc_mask);
981 } else { 1005 } else {
982 LIBFCOE_FIP_DBG("performing Clear Virtual Link\n"); 1006 LIBFCOE_FIP_DBG(fip, "performing Clear Virtual Link\n");
983 fcoe_ctlr_reset(fip, FIP_ST_ENABLED); 1007
1008 spin_lock_bh(&fip->lock);
1009 fc_lport_get_stats(lport)->VLinkFailureCount++;
1010 fcoe_ctlr_reset(fip);
1011 spin_unlock_bh(&fip->lock);
1012
1013 fc_lport_reset(fip->lp);
1014 fcoe_ctlr_solicit(fip, NULL);
984 } 1015 }
985} 1016}
986 1017
987/** 1018/**
988 * fcoe_ctlr_recv() - Receive a FIP frame. 1019 * fcoe_ctlr_recv() - Receive a FIP packet
989 * @fip: FCoE controller. 1020 * @fip: The FCoE controller that received the packet
990 * @skb: Received FIP packet. 1021 * @skb: The received FIP packet
991 * 1022 *
992 * This is called from NET_RX_SOFTIRQ. 1023 * This may be called from either NET_RX_SOFTIRQ or IRQ.
993 */ 1024 */
994void fcoe_ctlr_recv(struct fcoe_ctlr *fip, struct sk_buff *skb) 1025void fcoe_ctlr_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
995{ 1026{
996 spin_lock_bh(&fip->fip_recv_list.lock); 1027 skb_queue_tail(&fip->fip_recv_list, skb);
997 __skb_queue_tail(&fip->fip_recv_list, skb);
998 spin_unlock_bh(&fip->fip_recv_list.lock);
999 schedule_work(&fip->recv_work); 1028 schedule_work(&fip->recv_work);
1000} 1029}
1001EXPORT_SYMBOL(fcoe_ctlr_recv); 1030EXPORT_SYMBOL(fcoe_ctlr_recv);
1002 1031
1003/** 1032/**
1004 * fcoe_ctlr_recv_handler() - Receive a FIP frame. 1033 * fcoe_ctlr_recv_handler() - Receive a FIP frame
1005 * @fip: FCoE controller. 1034 * @fip: The FCoE controller that received the frame
1006 * @skb: Received FIP packet. 1035 * @skb: The received FIP frame
1007 * 1036 *
1008 * Returns non-zero if the frame is dropped. 1037 * Returns non-zero if the frame is dropped.
1009 */ 1038 */
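Two things change together here: the Clear Virtual Link action now bumps VLinkFailureCount and resets under the controller lock before restarting discovery, and fcoe_ctlr_recv() sheds its open-coded queue locking. skb_queue_tail() takes the queue's internal lock with IRQs disabled, which is why the new comment can promise safety from either NET_RX_SOFTIRQ or hard-IRQ context. The producer/consumer pair reduces to the sketch below (fcoe_ctlr_recv_handler as in this file):

    /* Sketch: IRQ-safe handoff of received FIP frames to process context. */
    void example_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
    {
            skb_queue_tail(&fip->fip_recv_list, skb); /* locks internally, irqsave */
            schedule_work(&fip->recv_work);
    }

    static void example_recv_work(struct work_struct *work)
    {
            struct fcoe_ctlr *fip =
                    container_of(work, struct fcoe_ctlr, recv_work);
            struct sk_buff *skb;

            while ((skb = skb_dequeue(&fip->fip_recv_list)))
                    fcoe_ctlr_recv_handler(fip, skb);
    }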
@@ -1038,7 +1067,7 @@ static int fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb)
1038 fip->map_dest = 0; 1067 fip->map_dest = 0;
1039 fip->state = FIP_ST_ENABLED; 1068 fip->state = FIP_ST_ENABLED;
1040 state = FIP_ST_ENABLED; 1069 state = FIP_ST_ENABLED;
1041 LIBFCOE_FIP_DBG("Using FIP mode\n"); 1070 LIBFCOE_FIP_DBG(fip, "Using FIP mode\n");
1042 } 1071 }
1043 spin_unlock_bh(&fip->lock); 1072 spin_unlock_bh(&fip->lock);
1044 if (state != FIP_ST_ENABLED) 1073 if (state != FIP_ST_ENABLED)
@@ -1060,8 +1089,8 @@ drop:
1060} 1089}
1061 1090
1062/** 1091/**
1063 * fcoe_ctlr_select() - Select the best FCF, if possible. 1092 * fcoe_ctlr_select() - Select the best FCF (if possible)
1064 * @fip: FCoE controller. 1093 * @fip: The FCoE controller
1065 * 1094 *
1066 * If there are conflicting advertisements, no FCF can be chosen. 1095 * If there are conflicting advertisements, no FCF can be chosen.
1067 * 1096 *
@@ -1073,11 +1102,11 @@ static void fcoe_ctlr_select(struct fcoe_ctlr *fip)
1073 struct fcoe_fcf *best = NULL; 1102 struct fcoe_fcf *best = NULL;
1074 1103
1075 list_for_each_entry(fcf, &fip->fcfs, list) { 1104 list_for_each_entry(fcf, &fip->fcfs, list) {
1076 LIBFCOE_FIP_DBG("consider FCF for fab %llx VFID %d map %x " 1105 LIBFCOE_FIP_DBG(fip, "consider FCF for fab %llx VFID %d map %x "
1077 "val %d\n", fcf->fabric_name, fcf->vfid, 1106 "val %d\n", fcf->fabric_name, fcf->vfid,
1078 fcf->fc_map, fcoe_ctlr_mtu_valid(fcf)); 1107 fcf->fc_map, fcoe_ctlr_mtu_valid(fcf));
1079 if (!fcoe_ctlr_fcf_usable(fcf)) { 1108 if (!fcoe_ctlr_fcf_usable(fcf)) {
1080 LIBFCOE_FIP_DBG("FCF for fab %llx map %x %svalid " 1109 LIBFCOE_FIP_DBG(fip, "FCF for fab %llx map %x %svalid "
1081 "%savailable\n", fcf->fabric_name, 1110 "%savailable\n", fcf->fabric_name,
1082 fcf->fc_map, (fcf->flags & FIP_FL_SOL) 1111 fcf->fc_map, (fcf->flags & FIP_FL_SOL)
1083 ? "" : "in", (fcf->flags & FIP_FL_AVAIL) 1112 ? "" : "in", (fcf->flags & FIP_FL_AVAIL)
@@ -1091,7 +1120,7 @@ static void fcoe_ctlr_select(struct fcoe_ctlr *fip)
1091 if (fcf->fabric_name != best->fabric_name || 1120 if (fcf->fabric_name != best->fabric_name ||
1092 fcf->vfid != best->vfid || 1121 fcf->vfid != best->vfid ||
1093 fcf->fc_map != best->fc_map) { 1122 fcf->fc_map != best->fc_map) {
1094 LIBFCOE_FIP_DBG("Conflicting fabric, VFID, " 1123 LIBFCOE_FIP_DBG(fip, "Conflicting fabric, VFID, "
1095 "or FC-MAP\n"); 1124 "or FC-MAP\n");
1096 return; 1125 return;
1097 } 1126 }
@@ -1102,8 +1131,8 @@ static void fcoe_ctlr_select(struct fcoe_ctlr *fip)
1102} 1131}
1103 1132
1104/** 1133/**
1105 * fcoe_ctlr_timeout() - FIP timer function. 1134 * fcoe_ctlr_timeout() - FIP timeout handler
1106 * @arg: &fcoe_ctlr pointer. 1135 * @arg: The FCoE controller that timed out
1107 * 1136 *
1108 * Ages FCFs. Triggers FCF selection if possible. Sends keep-alives. 1137 * Ages FCFs. Triggers FCF selection if possible. Sends keep-alives.
1109 */ 1138 */
@@ -1113,8 +1142,6 @@ static void fcoe_ctlr_timeout(unsigned long arg)
1113 struct fcoe_fcf *sel; 1142 struct fcoe_fcf *sel;
1114 struct fcoe_fcf *fcf; 1143 struct fcoe_fcf *fcf;
1115 unsigned long next_timer = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD); 1144 unsigned long next_timer = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD);
1116 u8 send_ctlr_ka;
1117 u8 send_port_ka;
1118 1145
1119 spin_lock_bh(&fip->lock); 1146 spin_lock_bh(&fip->lock);
1120 if (fip->state == FIP_ST_DISABLED) { 1147 if (fip->state == FIP_ST_DISABLED) {
@@ -1140,53 +1167,47 @@ static void fcoe_ctlr_timeout(unsigned long arg)
1140 fip->lp->host->host_no, sel->fcf_mac); 1167 fip->lp->host->host_no, sel->fcf_mac);
1141 memcpy(fip->dest_addr, sel->fcf_mac, ETH_ALEN); 1168 memcpy(fip->dest_addr, sel->fcf_mac, ETH_ALEN);
1142 fip->port_ka_time = jiffies + 1169 fip->port_ka_time = jiffies +
1143 msecs_to_jiffies(FIP_VN_KA_PERIOD); 1170 msecs_to_jiffies(FIP_VN_KA_PERIOD);
1144 fip->ctlr_ka_time = jiffies + sel->fka_period; 1171 fip->ctlr_ka_time = jiffies + sel->fka_period;
1145 fip->link = 1;
1146 } else { 1172 } else {
1147 printk(KERN_NOTICE "libfcoe: host%d: " 1173 printk(KERN_NOTICE "libfcoe: host%d: "
1148 "FIP Fibre-Channel Forwarder timed out. " 1174 "FIP Fibre-Channel Forwarder timed out. "
1149 "Starting FCF discovery.\n", 1175 "Starting FCF discovery.\n",
1150 fip->lp->host->host_no); 1176 fip->lp->host->host_no);
1151 fip->link = 0; 1177 fip->reset_req = 1;
1178 schedule_work(&fip->link_work);
1152 } 1179 }
1153 schedule_work(&fip->link_work);
1154 } 1180 }
1155 1181
1156 send_ctlr_ka = 0; 1182 if (sel && !sel->fd_flags) {
1157 send_port_ka = 0;
1158 if (sel) {
1159 if (time_after_eq(jiffies, fip->ctlr_ka_time)) { 1183 if (time_after_eq(jiffies, fip->ctlr_ka_time)) {
1160 fip->ctlr_ka_time = jiffies + sel->fka_period; 1184 fip->ctlr_ka_time = jiffies + sel->fka_period;
1161 send_ctlr_ka = 1; 1185 fip->send_ctlr_ka = 1;
1162 } 1186 }
1163 if (time_after(next_timer, fip->ctlr_ka_time)) 1187 if (time_after(next_timer, fip->ctlr_ka_time))
1164 next_timer = fip->ctlr_ka_time; 1188 next_timer = fip->ctlr_ka_time;
1165 1189
1166 if (time_after_eq(jiffies, fip->port_ka_time)) { 1190 if (time_after_eq(jiffies, fip->port_ka_time)) {
1167 fip->port_ka_time += jiffies + 1191 fip->port_ka_time = jiffies +
1168 msecs_to_jiffies(FIP_VN_KA_PERIOD); 1192 msecs_to_jiffies(FIP_VN_KA_PERIOD);
1169 send_port_ka = 1; 1193 fip->send_port_ka = 1;
1170 } 1194 }
1171 if (time_after(next_timer, fip->port_ka_time)) 1195 if (time_after(next_timer, fip->port_ka_time))
1172 next_timer = fip->port_ka_time; 1196 next_timer = fip->port_ka_time;
1173 mod_timer(&fip->timer, next_timer); 1197 mod_timer(&fip->timer, next_timer);
1174 } else if (fip->sel_time) { 1198 } else if (fip->sel_time) {
1175 next_timer = fip->sel_time + 1199 next_timer = fip->sel_time +
1176 msecs_to_jiffies(FCOE_CTLR_START_DELAY); 1200 msecs_to_jiffies(FCOE_CTLR_START_DELAY);
1177 mod_timer(&fip->timer, next_timer); 1201 mod_timer(&fip->timer, next_timer);
1178 } 1202 }
1203 if (fip->send_ctlr_ka || fip->send_port_ka)
1204 schedule_work(&fip->link_work);
1179 spin_unlock_bh(&fip->lock); 1205 spin_unlock_bh(&fip->lock);
1180
1181 if (send_ctlr_ka)
1182 fcoe_ctlr_send_keep_alive(fip, 0, fip->ctl_src_addr);
1183 if (send_port_ka)
1184 fcoe_ctlr_send_keep_alive(fip, 1, fip->data_src_addr);
1185} 1206}
1186 1207
1187/** 1208/**
1188 * fcoe_ctlr_link_work() - worker thread function for link changes. 1209 * fcoe_ctlr_link_work() - Worker thread function for link changes
1189 * @work: pointer to link_work member inside &fcoe_ctlr. 1210 * @work: Handle to a FCoE controller
1190 * 1211 *
1191 * See if the link status has changed and if so, report it. 1212 * See if the link status has changed and if so, report it.
1192 * 1213 *
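The timer no longer transmits keep-alives itself: the per-port pass now has to walk the vport list under lp_mutex, and a mutex cannot be taken from timer (softirq) context. So fcoe_ctlr_timeout() only records what is due under the spinlock and kicks link_work, and the worker does the sending, as the next hunk shows. The split, reduced to a sketch:

    /* Sketch: timer marks work as due; a sleepable worker performs it. */
    static void example_timeout(unsigned long arg)  /* softirq context */
    {
            struct fcoe_ctlr *fip = (struct fcoe_ctlr *)arg;

            spin_lock_bh(&fip->lock);
            fip->send_ctlr_ka = 1;                  /* just note the event */
            spin_unlock_bh(&fip->lock);
            schedule_work(&fip->link_work);         /* worker may sleep */
    }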
@@ -1196,27 +1217,49 @@ static void fcoe_ctlr_timeout(unsigned long arg)
1196static void fcoe_ctlr_link_work(struct work_struct *work) 1217static void fcoe_ctlr_link_work(struct work_struct *work)
1197{ 1218{
1198 struct fcoe_ctlr *fip; 1219 struct fcoe_ctlr *fip;
1220 struct fc_lport *vport;
1221 u8 *mac;
1199 int link; 1222 int link;
1200 int last_link; 1223 int last_link;
1224 int reset;
1201 1225
1202 fip = container_of(work, struct fcoe_ctlr, link_work); 1226 fip = container_of(work, struct fcoe_ctlr, link_work);
1203 spin_lock_bh(&fip->lock); 1227 spin_lock_bh(&fip->lock);
1204 last_link = fip->last_link; 1228 last_link = fip->last_link;
1205 link = fip->link; 1229 link = fip->link;
1206 fip->last_link = link; 1230 fip->last_link = link;
1231 reset = fip->reset_req;
1232 fip->reset_req = 0;
1207 spin_unlock_bh(&fip->lock); 1233 spin_unlock_bh(&fip->lock);
1208 1234
1209 if (last_link != link) { 1235 if (last_link != link) {
1210 if (link) 1236 if (link)
1211 fc_linkup(fip->lp); 1237 fc_linkup(fip->lp);
1212 else 1238 else
1213 fcoe_ctlr_reset(fip, FIP_ST_LINK_WAIT); 1239 fc_linkdown(fip->lp);
1240 } else if (reset && link)
1241 fc_lport_reset(fip->lp);
1242
1243 if (fip->send_ctlr_ka) {
1244 fip->send_ctlr_ka = 0;
1245 fcoe_ctlr_send_keep_alive(fip, NULL, 0, fip->ctl_src_addr);
1246 }
1247 if (fip->send_port_ka) {
1248 fip->send_port_ka = 0;
1249 mutex_lock(&fip->lp->lp_mutex);
1250 mac = fip->get_src_addr(fip->lp);
1251 fcoe_ctlr_send_keep_alive(fip, fip->lp, 1, mac);
1252 list_for_each_entry(vport, &fip->lp->vports, list) {
1253 mac = fip->get_src_addr(vport);
1254 fcoe_ctlr_send_keep_alive(fip, vport, 1, mac);
1255 }
1256 mutex_unlock(&fip->lp->lp_mutex);
1214 } 1257 }
1215} 1258}
1216 1259
1217/** 1260/**
1218 * fcoe_ctlr_recv_work() - Worker thread function for receiving FIP frames. 1261 * fcoe_ctlr_recv_work() - Worker thread function for receiving FIP frames
1219 * @recv_work: pointer to recv_work member inside &fcoe_ctlr. 1262 * @recv_work: Handle to a FCoE controller
1220 */ 1263 */
1221static void fcoe_ctlr_recv_work(struct work_struct *recv_work) 1264static void fcoe_ctlr_recv_work(struct work_struct *recv_work)
1222{ 1265{
@@ -1224,20 +1267,14 @@ static void fcoe_ctlr_recv_work(struct work_struct *recv_work)
1224 struct sk_buff *skb; 1267 struct sk_buff *skb;
1225 1268
1226 fip = container_of(recv_work, struct fcoe_ctlr, recv_work); 1269 fip = container_of(recv_work, struct fcoe_ctlr, recv_work);
1227 spin_lock_bh(&fip->fip_recv_list.lock); 1270 while ((skb = skb_dequeue(&fip->fip_recv_list)))
1228 while ((skb = __skb_dequeue(&fip->fip_recv_list))) {
1229 spin_unlock_bh(&fip->fip_recv_list.lock);
1230 fcoe_ctlr_recv_handler(fip, skb); 1271 fcoe_ctlr_recv_handler(fip, skb);
1231 spin_lock_bh(&fip->fip_recv_list.lock);
1232 }
1233 spin_unlock_bh(&fip->fip_recv_list.lock);
1234} 1272}
1235 1273
1236/** 1274/**
1237 * fcoe_ctlr_recv_flogi() - snoop Pre-FIP receipt of FLOGI response or request. 1275 * fcoe_ctlr_recv_flogi() - Snoop pre-FIP receipt of FLOGI response
1238 * @fip: FCoE controller. 1276 * @fip: The FCoE controller
1239 * @fp: FC frame. 1277 * @fp: The FC frame to snoop
1240 * @sa: Ethernet source MAC address from received FCoE frame.
1241 * 1278 *
1242 * Snoop potential response to FLOGI or even incoming FLOGI. 1279 * Snoop potential response to FLOGI or even incoming FLOGI.
1243 * 1280 *
@@ -1245,15 +1282,18 @@ static void fcoe_ctlr_recv_work(struct work_struct *recv_work)
1245 * by fip->flogi_oxid != FC_XID_UNKNOWN. 1282 * by fip->flogi_oxid != FC_XID_UNKNOWN.
1246 * 1283 *
1247 * The caller is responsible for freeing the frame. 1284 * The caller is responsible for freeing the frame.
1285 * Fill in the granted_mac address.
1248 * 1286 *
1249 * Return non-zero if the frame should not be delivered to libfc. 1287 * Return non-zero if the frame should not be delivered to libfc.
1250 */ 1288 */
1251int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_frame *fp, u8 *sa) 1289int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_lport *lport,
1290 struct fc_frame *fp)
1252{ 1291{
1253 struct fc_frame_header *fh; 1292 struct fc_frame_header *fh;
1254 u8 op; 1293 u8 op;
1255 u8 mac[ETH_ALEN]; 1294 u8 *sa;
1256 1295
1296 sa = eth_hdr(&fp->skb)->h_source;
1257 fh = fc_frame_header_get(fp); 1297 fh = fc_frame_header_get(fp);
1258 if (fh->fh_type != FC_TYPE_ELS) 1298 if (fh->fh_type != FC_TYPE_ELS)
1259 return 0; 1299 return 0;
@@ -1268,7 +1308,8 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_frame *fp, u8 *sa)
1268 return -EINVAL; 1308 return -EINVAL;
1269 } 1309 }
1270 fip->state = FIP_ST_NON_FIP; 1310 fip->state = FIP_ST_NON_FIP;
1271 LIBFCOE_FIP_DBG("received FLOGI LS_ACC using non-FIP mode\n"); 1311 LIBFCOE_FIP_DBG(fip,
1312 "received FLOGI LS_ACC using non-FIP mode\n");
1272 1313
1273 /* 1314 /*
1274 * FLOGI accepted. 1315 * FLOGI accepted.
@@ -1283,11 +1324,8 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_frame *fp, u8 *sa)
1283 fip->map_dest = 0; 1324 fip->map_dest = 0;
1284 } 1325 }
1285 fip->flogi_oxid = FC_XID_UNKNOWN; 1326 fip->flogi_oxid = FC_XID_UNKNOWN;
1286 memcpy(mac, fip->data_src_addr, ETH_ALEN);
1287 fc_fcoe_set_mac(fip->data_src_addr, fh->fh_d_id);
1288 spin_unlock_bh(&fip->lock); 1327 spin_unlock_bh(&fip->lock);
1289 1328 fc_fcoe_set_mac(fr_cb(fp)->granted_mac, fh->fh_d_id);
1290 fip->update_mac(fip, mac, fip->data_src_addr);
1291 } else if (op == ELS_FLOGI && fh->fh_r_ctl == FC_RCTL_ELS_REQ && sa) { 1329 } else if (op == ELS_FLOGI && fh->fh_r_ctl == FC_RCTL_ELS_REQ && sa) {
1292 /* 1330 /*
1293 * Save source MAC for point-to-point responses. 1331 * Save source MAC for point-to-point responses.
@@ -1297,7 +1335,7 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_frame *fp, u8 *sa)
1297 memcpy(fip->dest_addr, sa, ETH_ALEN); 1335 memcpy(fip->dest_addr, sa, ETH_ALEN);
1298 fip->map_dest = 0; 1336 fip->map_dest = 0;
1299 if (fip->state == FIP_ST_NON_FIP) 1337 if (fip->state == FIP_ST_NON_FIP)
1300 LIBFCOE_FIP_DBG("received FLOGI REQ, " 1338 LIBFCOE_FIP_DBG(fip, "received FLOGI REQ, "
1301 "using non-FIP mode\n"); 1339 "using non-FIP mode\n");
1302 fip->state = FIP_ST_NON_FIP; 1340 fip->state = FIP_ST_NON_FIP;
1303 } 1341 }
@@ -1308,10 +1346,10 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_frame *fp, u8 *sa)
1308EXPORT_SYMBOL(fcoe_ctlr_recv_flogi); 1346EXPORT_SYMBOL(fcoe_ctlr_recv_flogi);
1309 1347
1310/** 1348/**
1311 * fcoe_wwn_from_mac() - Converts 48-bit IEEE MAC address to 64-bit FC WWN. 1349 * fcoe_wwn_from_mac() - Converts a 48-bit IEEE MAC address to a 64-bit FC WWN
1312 * @mac: mac address 1350 * @mac: The MAC address to convert
1313 * @scheme: check port 1351 * @scheme: The scheme to use when converting
1314 * @port: port indicator for converting 1352 * @port: The port indicator for converting
1315 * 1353 *
1316 * Returns: u64 fc world wide name 1354 * Returns: u64 fc world wide name
1317 */ 1355 */
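For reference, fcoe_wwn_from_mac() (its body is not shown in this hunk) composes the name from the NAA scheme in the top nibble, an optional 12-bit port number, and the 48-bit MAC in the low bits. A sketch of that layout, mirroring the upstream helper:

    /* Sketch: 64-bit WWN = NAA nibble | 12-bit port (scheme 2) | 48-bit MAC. */
    static u64 example_wwn_from_mac(const u8 mac[6], unsigned int scheme,
                                    unsigned int port)
    {
            u64 wwn = ((u64)mac[0] << 40) | ((u64)mac[1] << 32) |
                      ((u64)mac[2] << 24) | ((u64)mac[3] << 16) |
                      ((u64)mac[4] << 8)  |  (u64)mac[5];

            wwn |= (u64)scheme << 60;
            if (scheme == 2)        /* scheme 2 carries a port number */
                    wwn |= (u64)port << 48;
            return wwn;
    }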
@@ -1349,24 +1387,26 @@ u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN],
1349EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac); 1387EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac);
1350 1388
1351/** 1389/**
1352 * fcoe_libfc_config() - sets up libfc related properties for lport 1390 * fcoe_libfc_config() - Sets up libfc related properties for local port
1353 * @lp: ptr to the fc_lport 1391 * @lport: The local port to configure libfc for
1354 * @tt: libfc function template 1392 * @tt: The libfc function template
1355 * 1393 *
1356 * Returns : 0 for success 1394 * Returns : 0 for success
1357 */ 1395 */
1358int fcoe_libfc_config(struct fc_lport *lp, struct libfc_function_template *tt) 1396int fcoe_libfc_config(struct fc_lport *lport,
1397 struct libfc_function_template *tt)
1359{ 1398{
1360 /* Set the function pointers set by the LLDD */ 1399 /* Set the function pointers set by the LLDD */
1361 memcpy(&lp->tt, tt, sizeof(*tt)); 1400 memcpy(&lport->tt, tt, sizeof(*tt));
1362 if (fc_fcp_init(lp)) 1401 if (fc_fcp_init(lport))
1363 return -ENOMEM; 1402 return -ENOMEM;
1364 fc_exch_init(lp); 1403 fc_exch_init(lport);
1365 fc_elsct_init(lp); 1404 fc_elsct_init(lport);
1366 fc_lport_init(lp); 1405 fc_lport_init(lport);
1367 fc_rport_init(lp); 1406 fc_rport_init(lport);
1368 fc_disc_init(lp); 1407 fc_disc_init(lport);
1369 1408
1370 return 0; 1409 return 0;
1371} 1410}
1372EXPORT_SYMBOL_GPL(fcoe_libfc_config); 1411EXPORT_SYMBOL_GPL(fcoe_libfc_config);
1412
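The initialization order in fcoe_libfc_config() matters: the LLDD's template is copied first so that fc_exch_init() and the other init routines can fill in any hooks left NULL. A minimal caller, roughly what the fcoe and fnic drivers do at probe time (example_frame_send is a driver-supplied hook; error handling elided):

    /* Sketch: probe-time use of fcoe_libfc_config() by an FCoE LLDD. */
    static int example_probe_lport(struct fc_lport *lport)
    {
            static struct libfc_function_template tt = {
                    .frame_send = example_frame_send,  /* hypothetical hook */
            };

            if (fcoe_libfc_config(lport, &tt))
                    return -ENOMEM;
            /* ...then fc_lport_config(lport), transport setup, fabric login... */
            return 0;
    }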
diff --git a/drivers/scsi/fd_mcs.c b/drivers/scsi/fd_mcs.c
index 85bd54c77b50..2ad95aa8f585 100644
--- a/drivers/scsi/fd_mcs.c
+++ b/drivers/scsi/fd_mcs.c
@@ -88,6 +88,7 @@
88#include <linux/delay.h> 88#include <linux/delay.h>
89#include <linux/mca.h> 89#include <linux/mca.h>
90#include <linux/spinlock.h> 90#include <linux/spinlock.h>
91#include <linux/slab.h>
91#include <scsi/scsicam.h> 92#include <scsi/scsicam.h>
92#include <linux/mca-legacy.h> 93#include <linux/mca-legacy.h>
93 94
diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c
index 32eef66114c7..e296bcc57d5c 100644
--- a/drivers/scsi/fdomain.c
+++ b/drivers/scsi/fdomain.c
@@ -279,6 +279,7 @@
279#include <linux/stat.h> 279#include <linux/stat.h>
280#include <linux/delay.h> 280#include <linux/delay.h>
281#include <linux/io.h> 281#include <linux/io.h>
282#include <linux/slab.h>
282#include <scsi/scsicam.h> 283#include <scsi/scsicam.h>
283 284
284#include <asm/system.h> 285#include <asm/system.h>
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index e4c0a3d7d87b..3966c71d0095 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -22,6 +22,7 @@
22#include <linux/netdevice.h> 22#include <linux/netdevice.h>
23#include <linux/workqueue.h> 23#include <linux/workqueue.h>
24#include <scsi/libfc.h> 24#include <scsi/libfc.h>
25#include <scsi/libfcoe.h>
25#include "fnic_io.h" 26#include "fnic_io.h"
26#include "fnic_res.h" 27#include "fnic_res.h"
27#include "vnic_dev.h" 28#include "vnic_dev.h"
@@ -35,7 +36,7 @@
35 36
36#define DRV_NAME "fnic" 37#define DRV_NAME "fnic"
37#define DRV_DESCRIPTION "Cisco FCoE HBA Driver" 38#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
38#define DRV_VERSION "1.0.0.1121" 39#define DRV_VERSION "1.4.0.98"
39#define PFX DRV_NAME ": " 40#define PFX DRV_NAME ": "
40#define DFX DRV_NAME "%d: " 41#define DFX DRV_NAME "%d: "
41 42
@@ -44,7 +45,7 @@
44#define FNIC_IO_LOCKS 64 /* IO locks: power of 2 */ 45#define FNIC_IO_LOCKS 64 /* IO locks: power of 2 */
45#define FNIC_DFLT_QUEUE_DEPTH 32 46#define FNIC_DFLT_QUEUE_DEPTH 32
46#define FNIC_STATS_RATE_LIMIT 4 /* limit rate at which stats are pulled up */ 47#define FNIC_STATS_RATE_LIMIT 4 /* limit rate at which stats are pulled up */
47 48#define FNIC_MAX_CMD_LEN 16 /* Supported CDB length */
48/* 49/*
49 * Tag bits used for special requests. 50 * Tag bits used for special requests.
50 */ 51 */
@@ -145,6 +146,7 @@ struct mempool;
145/* Per-instance private data structure */ 146/* Per-instance private data structure */
146struct fnic { 147struct fnic {
147 struct fc_lport *lport; 148 struct fc_lport *lport;
149 struct fcoe_ctlr ctlr; /* FIP FCoE controller structure */
148 struct vnic_dev_bar bar0; 150 struct vnic_dev_bar bar0;
149 151
150 struct msix_entry msix_entry[FNIC_MSIX_INTR_MAX]; 152 struct msix_entry msix_entry[FNIC_MSIX_INTR_MAX];
@@ -162,23 +164,16 @@ struct fnic {
162 unsigned int wq_count; 164 unsigned int wq_count;
163 unsigned int cq_count; 165 unsigned int cq_count;
164 166
165 u32 fcoui_mode:1; /* use fcoui address*/
166 u32 vlan_hw_insert:1; /* let hw insert the tag */ 167 u32 vlan_hw_insert:1; /* let hw insert the tag */
167 u32 in_remove:1; /* fnic device in removal */ 168 u32 in_remove:1; /* fnic device in removal */
168 u32 stop_rx_link_events:1; /* stop proc. rx frames, link events */ 169 u32 stop_rx_link_events:1; /* stop proc. rx frames, link events */
169 170
170 struct completion *remove_wait; /* device remove thread blocks */ 171 struct completion *remove_wait; /* device remove thread blocks */
171 172
172 struct fc_frame *flogi;
173 struct fc_frame *flogi_resp;
174 u16 flogi_oxid;
175 unsigned long s_id;
176 enum fnic_state state; 173 enum fnic_state state;
177 spinlock_t fnic_lock; 174 spinlock_t fnic_lock;
178 175
179 u16 vlan_id; /* VLAN tag including priority */ 176 u16 vlan_id; /* VLAN tag including priority */
180 u8 mac_addr[ETH_ALEN];
181 u8 dest_addr[ETH_ALEN];
182 u8 data_src_addr[ETH_ALEN]; 177 u8 data_src_addr[ETH_ALEN];
183 u64 fcp_input_bytes; /* internal statistic */ 178 u64 fcp_input_bytes; /* internal statistic */
184 u64 fcp_output_bytes; /* internal statistic */ 179 u64 fcp_output_bytes; /* internal statistic */
@@ -205,6 +200,7 @@ struct fnic {
205 struct work_struct link_work; 200 struct work_struct link_work;
206 struct work_struct frame_work; 201 struct work_struct frame_work;
207 struct sk_buff_head frame_queue; 202 struct sk_buff_head frame_queue;
203 struct sk_buff_head tx_queue;
208 204
209 /* copy work queue cache line section */ 205 /* copy work queue cache line section */
210 ____cacheline_aligned struct vnic_wq_copy wq_copy[FNIC_WQ_COPY_MAX]; 206 ____cacheline_aligned struct vnic_wq_copy wq_copy[FNIC_WQ_COPY_MAX];
@@ -224,6 +220,11 @@ struct fnic {
224 ____cacheline_aligned struct vnic_intr intr[FNIC_MSIX_INTR_MAX]; 220 ____cacheline_aligned struct vnic_intr intr[FNIC_MSIX_INTR_MAX];
225}; 221};
226 222
223static inline struct fnic *fnic_from_ctlr(struct fcoe_ctlr *fip)
224{
225 return container_of(fip, struct fnic, ctlr);
226}
227
227extern struct workqueue_struct *fnic_event_queue; 228extern struct workqueue_struct *fnic_event_queue;
228extern struct device_attribute *fnic_attrs[]; 229extern struct device_attribute *fnic_attrs[];
229 230
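fnic_from_ctlr() is the usual container_of() idiom: struct fcoe_ctlr is now embedded in struct fnic, so any fip pointer handed back by a libfcoe callback maps to the owning driver instance with simple pointer arithmetic. Usage, in the shape of the callbacks declared below (body elided):

    /* Sketch: recover the driver instance inside a libfcoe callback. */
    void example_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
    {
            struct fnic *fnic = fnic_from_ctlr(fip); /* container_of() */

            /* ... queue skb on one of fnic's transmit queues ... */
    }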
@@ -239,7 +240,11 @@ void fnic_handle_link(struct work_struct *work);
239int fnic_rq_cmpl_handler(struct fnic *fnic, int); 240int fnic_rq_cmpl_handler(struct fnic *fnic, int);
240int fnic_alloc_rq_frame(struct vnic_rq *rq); 241int fnic_alloc_rq_frame(struct vnic_rq *rq);
241void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf); 242void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf);
242int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp); 243void fnic_flush_tx(struct fnic *);
244void fnic_eth_send(struct fcoe_ctlr *, struct sk_buff *skb);
245void fnic_set_port_id(struct fc_lport *, u32, struct fc_frame *);
246void fnic_update_mac(struct fc_lport *, u8 *new);
247void fnic_update_mac_locked(struct fnic *, u8 *new);
243 248
244int fnic_queuecommand(struct scsi_cmnd *, void (*done)(struct scsi_cmnd *)); 249int fnic_queuecommand(struct scsi_cmnd *, void (*done)(struct scsi_cmnd *));
245int fnic_abort_cmd(struct scsi_cmnd *); 250int fnic_abort_cmd(struct scsi_cmnd *);
@@ -252,7 +257,7 @@ void fnic_empty_scsi_cleanup(struct fc_lport *);
252void fnic_exch_mgr_reset(struct fc_lport *, u32, u32); 257void fnic_exch_mgr_reset(struct fc_lport *, u32, u32);
253int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int); 258int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int);
254int fnic_wq_cmpl_handler(struct fnic *fnic, int); 259int fnic_wq_cmpl_handler(struct fnic *fnic, int);
255int fnic_flogi_reg_handler(struct fnic *fnic); 260int fnic_flogi_reg_handler(struct fnic *fnic, u32);
256void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq, 261void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
257 struct fcpio_host_req *desc); 262 struct fcpio_host_req *desc);
258int fnic_fw_reset_handler(struct fnic *fnic); 263int fnic_fw_reset_handler(struct fnic *fnic);
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 50db3e36a619..5259888fbfb1 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -17,12 +17,14 @@
17 */ 17 */
18#include <linux/errno.h> 18#include <linux/errno.h>
19#include <linux/pci.h> 19#include <linux/pci.h>
20#include <linux/slab.h>
20#include <linux/skbuff.h> 21#include <linux/skbuff.h>
21#include <linux/interrupt.h> 22#include <linux/interrupt.h>
22#include <linux/spinlock.h> 23#include <linux/spinlock.h>
23#include <linux/if_ether.h> 24#include <linux/if_ether.h>
24#include <linux/if_vlan.h> 25#include <linux/if_vlan.h>
25#include <linux/workqueue.h> 26#include <linux/workqueue.h>
27#include <scsi/fc/fc_fip.h>
26#include <scsi/fc/fc_els.h> 28#include <scsi/fc/fc_els.h>
27#include <scsi/fc/fc_fcoe.h> 29#include <scsi/fc/fc_fcoe.h>
28#include <scsi/fc_frame.h> 30#include <scsi/fc_frame.h>
@@ -34,6 +36,8 @@
34 36
35struct workqueue_struct *fnic_event_queue; 37struct workqueue_struct *fnic_event_queue;
36 38
39static void fnic_set_eth_mode(struct fnic *);
40
37void fnic_handle_link(struct work_struct *work) 41void fnic_handle_link(struct work_struct *work)
38{ 42{
39 struct fnic *fnic = container_of(work, struct fnic, link_work); 43 struct fnic *fnic = container_of(work, struct fnic, link_work);
@@ -64,10 +68,10 @@ void fnic_handle_link(struct work_struct *work)
64 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 68 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
65 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, 69 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
66 "link down\n"); 70 "link down\n");
67 fc_linkdown(fnic->lport); 71 fcoe_ctlr_link_down(&fnic->ctlr);
68 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, 72 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
69 "link up\n"); 73 "link up\n");
70 fc_linkup(fnic->lport); 74 fcoe_ctlr_link_up(&fnic->ctlr);
71 } else 75 } else
72 /* UP -> UP */ 76 /* UP -> UP */
73 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 77 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
@@ -76,13 +80,13 @@ void fnic_handle_link(struct work_struct *work)
76 /* DOWN -> UP */ 80 /* DOWN -> UP */
77 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 81 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
78 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n"); 82 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
79 fc_linkup(fnic->lport); 83 fcoe_ctlr_link_up(&fnic->ctlr);
80 } else { 84 } else {
81 /* UP -> DOWN */ 85 /* UP -> DOWN */
82 fnic->lport->host_stats.link_failure_count++; 86 fnic->lport->host_stats.link_failure_count++;
83 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 87 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
84 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n"); 88 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
85 fc_linkdown(fnic->lport); 89 fcoe_ctlr_link_down(&fnic->ctlr);
86 } 90 }
87 91
88} 92}
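Link transitions are now reported to the FIP controller rather than straight to libfc, so libfcoe can solicit FCFs on link-up and tear down the selected FCF before libfc sees a link-down. The indirection, as a sketch:

    /* Sketch: route hardware link events through the FIP controller. */
    static void example_link_event(struct fnic *fnic, int link_up)
    {
            if (link_up)
                    fcoe_ctlr_link_up(&fnic->ctlr);   /* (re)start FCF discovery */
            else
                    fcoe_ctlr_link_down(&fnic->ctlr); /* drop FCF state first */
    }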
@@ -107,197 +111,179 @@ void fnic_handle_frame(struct work_struct *work)
107 return; 111 return;
108 } 112 }
109 fp = (struct fc_frame *)skb; 113 fp = (struct fc_frame *)skb;
110 /* if Flogi resp frame, register the address */ 114
111 if (fr_flags(fp)) { 115 /*
112 vnic_dev_add_addr(fnic->vdev, 116 * If we're in a transitional state, just re-queue and return.
113 fnic->data_src_addr); 117 * The queue will be serviced when we get to a stable state.
114 fr_flags(fp) = 0; 118 */
119 if (fnic->state != FNIC_IN_FC_MODE &&
120 fnic->state != FNIC_IN_ETH_MODE) {
121 skb_queue_head(&fnic->frame_queue, skb);
122 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
123 return;
115 } 124 }
116 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 125 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
117 126
118 fc_exch_recv(lp, fp); 127 fc_exch_recv(lp, fp);
119 } 128 }
120
121}
122
123static inline void fnic_import_rq_fc_frame(struct sk_buff *skb,
124 u32 len, u8 sof, u8 eof)
125{
126 struct fc_frame *fp = (struct fc_frame *)skb;
127
128 skb_trim(skb, len);
129 fr_eof(fp) = eof;
130 fr_sof(fp) = sof;
131} 129}
132 130
133 131/**
134static inline int fnic_import_rq_eth_pkt(struct sk_buff *skb, u32 len) 132 * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame.
133 * @fnic: fnic instance.
134 * @skb: Ethernet Frame.
135 */
136static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
135{ 137{
136 struct fc_frame *fp; 138 struct fc_frame *fp;
137 struct ethhdr *eh; 139 struct ethhdr *eh;
138 struct vlan_ethhdr *vh;
139 struct fcoe_hdr *fcoe_hdr; 140 struct fcoe_hdr *fcoe_hdr;
140 struct fcoe_crc_eof *ft; 141 struct fcoe_crc_eof *ft;
141 u32 transport_len = 0;
142 142
143 /*
144 * Undo VLAN encapsulation if present.
145 */
143 eh = (struct ethhdr *)skb->data; 146 eh = (struct ethhdr *)skb->data;
144 vh = (struct vlan_ethhdr *)skb->data; 147 if (eh->h_proto == htons(ETH_P_8021Q)) {
145 if (vh->h_vlan_proto == htons(ETH_P_8021Q) && 148 memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
146 vh->h_vlan_encapsulated_proto == htons(ETH_P_FCOE)) { 149 eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);
147 skb_pull(skb, sizeof(struct vlan_ethhdr)); 150 skb_reset_mac_header(skb);
148 transport_len += sizeof(struct vlan_ethhdr); 151 }
149 } else if (eh->h_proto == htons(ETH_P_FCOE)) { 152 if (eh->h_proto == htons(ETH_P_FIP)) {
150 transport_len += sizeof(struct ethhdr); 153 skb_pull(skb, sizeof(*eh));
151 skb_pull(skb, sizeof(struct ethhdr)); 154 fcoe_ctlr_recv(&fnic->ctlr, skb);
152 } else 155 return 1; /* let caller know packet was used */
153 return -1; 156 }
157 if (eh->h_proto != htons(ETH_P_FCOE))
158 goto drop;
159 skb_set_network_header(skb, sizeof(*eh));
160 skb_pull(skb, sizeof(*eh));
154 161
155 fcoe_hdr = (struct fcoe_hdr *)skb->data; 162 fcoe_hdr = (struct fcoe_hdr *)skb->data;
156 if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER) 163 if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER)
157 return -1; 164 goto drop;
158 165
159 fp = (struct fc_frame *)skb; 166 fp = (struct fc_frame *)skb;
160 fc_frame_init(fp); 167 fc_frame_init(fp);
161 fr_sof(fp) = fcoe_hdr->fcoe_sof; 168 fr_sof(fp) = fcoe_hdr->fcoe_sof;
162 skb_pull(skb, sizeof(struct fcoe_hdr)); 169 skb_pull(skb, sizeof(struct fcoe_hdr));
163 transport_len += sizeof(struct fcoe_hdr); 170 skb_reset_transport_header(skb);
164 171
165 ft = (struct fcoe_crc_eof *)(skb->data + len - 172 ft = (struct fcoe_crc_eof *)(skb->data + skb->len - sizeof(*ft));
166 transport_len - sizeof(*ft));
167 fr_eof(fp) = ft->fcoe_eof; 173 fr_eof(fp) = ft->fcoe_eof;
168 skb_trim(skb, len - transport_len - sizeof(*ft)); 174 skb_trim(skb, skb->len - sizeof(*ft));
169 return 0; 175 return 0;
176drop:
177 dev_kfree_skb_irq(skb);
178 return -1;
170} 179}
171 180
172static inline int fnic_handle_flogi_resp(struct fnic *fnic, 181/**
173 struct fc_frame *fp) 182 * fnic_update_mac_locked() - set data MAC address and filters.
183 * @fnic: fnic instance.
184 * @new: newly-assigned FCoE MAC address.
185 *
186 * Called with the fnic lock held.
187 */
188void fnic_update_mac_locked(struct fnic *fnic, u8 *new)
174{ 189{
175 u8 mac[ETH_ALEN] = FC_FCOE_FLOGI_MAC; 190 u8 *ctl = fnic->ctlr.ctl_src_addr;
176 struct ethhdr *eth_hdr; 191 u8 *data = fnic->data_src_addr;
177 struct fc_frame_header *fh;
178 int ret = 0;
179 unsigned long flags;
180 struct fc_frame *old_flogi_resp = NULL;
181 192
182 fh = (struct fc_frame_header *)fr_hdr(fp); 193 if (is_zero_ether_addr(new))
194 new = ctl;
195 if (!compare_ether_addr(data, new))
196 return;
197 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new);
198 if (!is_zero_ether_addr(data) && compare_ether_addr(data, ctl))
199 vnic_dev_del_addr(fnic->vdev, data);
200 memcpy(data, new, ETH_ALEN);
201 if (compare_ether_addr(new, ctl))
202 vnic_dev_add_addr(fnic->vdev, new);
203}
183 204
184 spin_lock_irqsave(&fnic->fnic_lock, flags); 205/**
206 * fnic_update_mac() - set data MAC address and filters.
207 * @lport: local port.
208 * @new: newly-assigned FCoE MAC address.
209 */
210void fnic_update_mac(struct fc_lport *lport, u8 *new)
211{
212 struct fnic *fnic = lport_priv(lport);
185 213
186 if (fnic->state == FNIC_IN_ETH_MODE) { 214 spin_lock_irq(&fnic->fnic_lock);
215 fnic_update_mac_locked(fnic, new);
216 spin_unlock_irq(&fnic->fnic_lock);
217}
187 218
188 /* 219/**
189 * Check if oxid matches on taking the lock. A new Flogi 220 * fnic_set_port_id() - set the port_ID after successful FLOGI.
190 * issued by libFC might have changed the fnic cached oxid 221 * @lport: local port.
191 */ 222 * @port_id: assigned FC_ID.
192 if (fnic->flogi_oxid != ntohs(fh->fh_ox_id)) { 223 * @fp: received frame containing the FLOGI accept or NULL.
193 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, 224 *
194 "Flogi response oxid not" 225 * This is called from libfc when a new FC_ID has been assigned.
195 " matching cached oxid, dropping frame" 226 * This causes us to reset the firmware to FC_MODE and setup the new MAC
196 "\n"); 227 * address and FC_ID.
197 ret = -1; 228 *
198 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 229 * It is also called with FC_ID 0 when we're logged off.
199 dev_kfree_skb_irq(fp_skb(fp)); 230 *
200 goto handle_flogi_resp_end; 231 * If the FC_ID is due to point-to-point, fp may be NULL.
201 } 232 */
233void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp)
234{
235 struct fnic *fnic = lport_priv(lport);
236 u8 *mac;
237 int ret;
202 238
203 /* Drop older cached flogi response frame, cache this frame */ 239 FNIC_FCS_DBG(KERN_DEBUG, lport->host, "set port_id %x fp %p\n",
204 old_flogi_resp = fnic->flogi_resp; 240 port_id, fp);
205 fnic->flogi_resp = fp;
206 fnic->flogi_oxid = FC_XID_UNKNOWN;
207 241
208 /* 242 /*
209 * this frame is part of flogi get the src mac addr from this 243 * If we're clearing the FC_ID, change to use the ctl_src_addr.
210 * frame if the src mac is fcoui based then we mark the 244 * Set ethernet mode to send FLOGI.
211 * address mode flag to use fcoui base for dst mac addr 245 */
212 * otherwise we have to store the fcoe gateway addr 246 if (!port_id) {
213 */ 247 fnic_update_mac(lport, fnic->ctlr.ctl_src_addr);
214 eth_hdr = (struct ethhdr *)skb_mac_header(fp_skb(fp)); 248 fnic_set_eth_mode(fnic);
215 memcpy(mac, eth_hdr->h_source, ETH_ALEN); 249 return;
250 }
216 251
217 if (ntoh24(mac) == FC_FCOE_OUI) 252 if (fp) {
218 fnic->fcoui_mode = 1; 253 mac = fr_cb(fp)->granted_mac;
219 else { 254 if (is_zero_ether_addr(mac)) {
220 fnic->fcoui_mode = 0; 255 /* non-FIP - FLOGI already accepted - ignore return */
221 memcpy(fnic->dest_addr, mac, ETH_ALEN); 256 fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp);
222 } 257 }
258 fnic_update_mac(lport, mac);
259 }
223 260
224 /* 261 /* Change state to reflect transition to FC mode */
225 * Except for Flogi frame, all outbound frames from us have the 262 spin_lock_irq(&fnic->fnic_lock);
226 * Eth Src address as FC_FCOE_OUI"our_sid". Flogi frame uses 263 if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE)
227 * the vnic MAC address as the Eth Src address
228 */
229 fc_fcoe_set_mac(fnic->data_src_addr, fh->fh_d_id);
230
231 /* We get our s_id from the d_id of the flogi resp frame */
232 fnic->s_id = ntoh24(fh->fh_d_id);
233
234 /* Change state to reflect transition from Eth to FC mode */
235 fnic->state = FNIC_IN_ETH_TRANS_FC_MODE; 264 fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
236 265 else {
237 } else {
238 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, 266 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
239 "Unexpected fnic state %s while" 267 "Unexpected fnic state %s while"
240 " processing flogi resp\n", 268 " processing flogi resp\n",
241 fnic_state_to_str(fnic->state)); 269 fnic_state_to_str(fnic->state));
242 ret = -1; 270 spin_unlock_irq(&fnic->fnic_lock);
243 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 271 return;
244 dev_kfree_skb_irq(fp_skb(fp));
245 goto handle_flogi_resp_end;
246 } 272 }
247 273 spin_unlock_irq(&fnic->fnic_lock);
248 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
249
250 /* Drop older cached frame */
251 if (old_flogi_resp)
252 dev_kfree_skb_irq(fp_skb(old_flogi_resp));
253 274
254 /* 275 /*
255 * send flogi reg request to firmware, this will put the fnic in 276 * Send FLOGI registration to firmware to set up FC mode.
256 * in FC mode 277 * The new address will be set up when registration completes.
257 */ 278 */
258 ret = fnic_flogi_reg_handler(fnic); 279 ret = fnic_flogi_reg_handler(fnic, port_id);
259 280
260 if (ret < 0) { 281 if (ret < 0) {
261 int free_fp = 1; 282 spin_lock_irq(&fnic->fnic_lock);
262 spin_lock_irqsave(&fnic->fnic_lock, flags);
263 /*
264 * free the frame is some other thread is not
265 * pointing to it
266 */
267 if (fnic->flogi_resp != fp)
268 free_fp = 0;
269 else
270 fnic->flogi_resp = NULL;
271
272 if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) 283 if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
273 fnic->state = FNIC_IN_ETH_MODE; 284 fnic->state = FNIC_IN_ETH_MODE;
274 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 285 spin_unlock_irq(&fnic->fnic_lock);
275 if (free_fp)
276 dev_kfree_skb_irq(fp_skb(fp));
277 } 286 }
278
279 handle_flogi_resp_end:
280 return ret;
281}
282
283/* Returns 1 for a response that matches cached flogi oxid */
284static inline int is_matching_flogi_resp_frame(struct fnic *fnic,
285 struct fc_frame *fp)
286{
287 struct fc_frame_header *fh;
288 int ret = 0;
289 u32 f_ctl;
290
291 fh = fc_frame_header_get(fp);
292 f_ctl = ntoh24(fh->fh_f_ctl);
293
294 if (fnic->flogi_oxid == ntohs(fh->fh_ox_id) &&
295 fh->fh_r_ctl == FC_RCTL_ELS_REP &&
296 (f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) == FC_FC_EX_CTX &&
297 fh->fh_type == FC_TYPE_ELS)
298 ret = 1;
299
300 return ret;
301} 287}
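Worth pulling out of the large hunk above: fnic_import_rq_eth_pkt() strips an optional 802.1Q tag in place by sliding the two MAC addresses forward over the 4-byte tag and trimming the headroom, then hands FIP frames to libfcoe and keeps FCoE frames for itself. The decapsulation and dispatch step in isolation (same calls as above, illustrative skb):

    /* Sketch: in-place VLAN decapsulation and FIP dispatch of a received frame. */
    struct ethhdr *eh = (struct ethhdr *)skb->data;

    if (eh->h_proto == htons(ETH_P_8021Q)) {
            memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2); /* move MACs up */
            eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);  /* drop the tag */
            skb_reset_mac_header(skb);
    }
    if (eh->h_proto == htons(ETH_P_FIP)) {
            skb_pull(skb, sizeof(*eh));
            fcoe_ctlr_recv(&fnic->ctlr, skb);                /* FIP to libfcoe */
    }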
302 288
303static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc 289static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
@@ -326,6 +312,7 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
326 pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len, 312 pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
327 PCI_DMA_FROMDEVICE); 313 PCI_DMA_FROMDEVICE);
328 skb = buf->os_buf; 314 skb = buf->os_buf;
315 fp = (struct fc_frame *)skb;
329 buf->os_buf = NULL; 316 buf->os_buf = NULL;
330 317
331 cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index); 318 cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
@@ -338,6 +325,9 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
338 &fcoe_enc_error, &fcs_ok, &vlan_stripped, 325 &fcoe_enc_error, &fcs_ok, &vlan_stripped,
339 &vlan); 326 &vlan);
340 eth_hdrs_stripped = 1; 327 eth_hdrs_stripped = 1;
328 skb_trim(skb, fcp_bytes_written);
329 fr_sof(fp) = sof;
330 fr_eof(fp) = eof;
341 331
342 } else if (type == CQ_DESC_TYPE_RQ_ENET) { 332 } else if (type == CQ_DESC_TYPE_RQ_ENET) {
343 cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, 333 cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
@@ -352,6 +342,14 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
352 &ipv4_csum_ok, &ipv6, &ipv4, 342 &ipv4_csum_ok, &ipv6, &ipv4,
353 &ipv4_fragment, &fcs_ok); 343 &ipv4_fragment, &fcs_ok);
354 eth_hdrs_stripped = 0; 344 eth_hdrs_stripped = 0;
345 skb_trim(skb, bytes_written);
346 if (!fcs_ok) {
347 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
348 "fcs error. dropping packet.\n");
349 goto drop;
350 }
351 if (fnic_import_rq_eth_pkt(fnic, skb))
352 return;
355 353
356 } else { 354 } else {
357 /* wrong CQ type*/ 355 /* wrong CQ type*/
@@ -370,43 +368,11 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
370 goto drop; 368 goto drop;
371 } 369 }
372 370
373 if (eth_hdrs_stripped)
374 fnic_import_rq_fc_frame(skb, fcp_bytes_written, sof, eof);
375 else if (fnic_import_rq_eth_pkt(skb, bytes_written))
376 goto drop;
377
378 fp = (struct fc_frame *)skb;
379
380 /*
381 * If frame is an ELS response that matches the cached FLOGI OX_ID,
382 * and is accept, issue flogi_reg_request copy wq request to firmware
383 * to register the S_ID and determine whether FC_OUI mode or GW mode.
384 */
385 if (is_matching_flogi_resp_frame(fnic, fp)) {
386 if (!eth_hdrs_stripped) {
387 if (fc_frame_payload_op(fp) == ELS_LS_ACC) {
388 fnic_handle_flogi_resp(fnic, fp);
389 return;
390 }
391 /*
392 * Recd. Flogi reject. No point registering
393 * with fw, but forward to libFC
394 */
395 goto forward;
396 }
397 goto drop;
398 }
399 if (!eth_hdrs_stripped)
400 goto drop;
401
402forward:
403 spin_lock_irqsave(&fnic->fnic_lock, flags); 371 spin_lock_irqsave(&fnic->fnic_lock, flags);
404 if (fnic->stop_rx_link_events) { 372 if (fnic->stop_rx_link_events) {
405 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 373 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
406 goto drop; 374 goto drop;
407 } 375 }
408 /* Use fr_flags to indicate whether succ. flogi resp or not */
409 fr_flags(fp) = 0;
410 fr_dev(fp) = fnic->lport; 376 fr_dev(fp) = fnic->lport;
411 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 377 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
412 378
@@ -494,12 +460,49 @@ void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
494 buf->os_buf = NULL; 460 buf->os_buf = NULL;
495} 461}
496 462
497static inline int is_flogi_frame(struct fc_frame_header *fh) 463/**
464 * fnic_eth_send() - Send Ethernet frame.
465 * @fip: fcoe_ctlr instance.
466 * @skb: Ethernet Frame, FIP, without VLAN encapsulation.
467 */
468void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
498{ 469{
499 return fh->fh_r_ctl == FC_RCTL_ELS_REQ && *(u8 *)(fh + 1) == ELS_FLOGI; 470 struct fnic *fnic = fnic_from_ctlr(fip);
471 struct vnic_wq *wq = &fnic->wq[0];
472 dma_addr_t pa;
473 struct ethhdr *eth_hdr;
474 struct vlan_ethhdr *vlan_hdr;
475 unsigned long flags;
476
477 if (!fnic->vlan_hw_insert) {
478 eth_hdr = (struct ethhdr *)skb_mac_header(skb);
479 vlan_hdr = (struct vlan_ethhdr *)skb_push(skb,
480 sizeof(*vlan_hdr) - sizeof(*eth_hdr));
481 memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN);
482 vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
483 vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
484 vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
485 }
486
487 pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
488
489 spin_lock_irqsave(&fnic->wq_lock[0], flags);
490 if (!vnic_wq_desc_avail(wq)) {
491 pci_unmap_single(fnic->pdev, pa, skb->len, PCI_DMA_TODEVICE);
492 spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
493 kfree_skb(skb);
494 return;
495 }
496
497 fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
498 fnic->vlan_hw_insert, fnic->vlan_id, 1);
499 spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
500} 500}
501 501
502int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp) 502/*
503 * Send FC frame.
504 */
505static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
503{ 506{
504 struct vnic_wq *wq = &fnic->wq[0]; 507 struct vnic_wq *wq = &fnic->wq[0];
505 struct sk_buff *skb; 508 struct sk_buff *skb;
@@ -515,6 +518,10 @@ int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
515 fh = fc_frame_header_get(fp); 518 fh = fc_frame_header_get(fp);
516 skb = fp_skb(fp); 519 skb = fp_skb(fp);
517 520
521 if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
522 fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb))
523 return 0;
524
518 if (!fnic->vlan_hw_insert) { 525 if (!fnic->vlan_hw_insert) {
519 eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr); 526 eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
520 vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, eth_hdr_len); 527 vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, eth_hdr_len);
@@ -530,16 +537,11 @@ int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
530 fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1); 537 fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
531 } 538 }
532 539
533 if (is_flogi_frame(fh)) { 540 if (fnic->ctlr.map_dest)
534 fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id); 541 fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
535 memcpy(eth_hdr->h_source, fnic->mac_addr, ETH_ALEN); 542 else
536 } else { 543 memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN);
537 if (fnic->fcoui_mode) 544 memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);
538 fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
539 else
540 memcpy(eth_hdr->h_dest, fnic->dest_addr, ETH_ALEN);
541 memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);
542 }
543 545
544 tot_len = skb->len; 546 tot_len = skb->len;
545 BUG_ON(tot_len % 4); 547 BUG_ON(tot_len % 4);
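The ctlr.map_dest flag chooses how the destination MAC is formed: in mapped mode it is synthesized from the FC-MAP OUI plus the frame's 24-bit D_ID, otherwise the FCF's unicast address is used. fc_fcoe_set_mac() performs the mapping; with the default FC-MAP its effect is equivalent to:

    /* Sketch: mapped FCoE MAC = FC-MAP OUI (0E:FC:00) || 24-bit FC_ID. */
    static void example_set_mapped_mac(u8 mac[6], const u8 did[3])
    {
            mac[0] = 0x0e;          /* default FC-MAP, high byte */
            mac[1] = 0xfc;
            mac[2] = 0x00;
            mac[3] = did[0];
            mac[4] = did[1];
            mac[5] = did[2];
    }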
@@ -578,109 +580,85 @@ fnic_send_frame_end:
578int fnic_send(struct fc_lport *lp, struct fc_frame *fp) 580int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
579{ 581{
580 struct fnic *fnic = lport_priv(lp); 582 struct fnic *fnic = lport_priv(lp);
581 struct fc_frame_header *fh;
582 int ret = 0;
583 enum fnic_state old_state;
584 unsigned long flags; 583 unsigned long flags;
585 struct fc_frame *old_flogi = NULL;
586 struct fc_frame *old_flogi_resp = NULL;
587 584
588 if (fnic->in_remove) { 585 if (fnic->in_remove) {
589 dev_kfree_skb(fp_skb(fp)); 586 dev_kfree_skb(fp_skb(fp));
590 ret = -1; 587 return -1;
591 goto fnic_send_end;
592 } 588 }
593 589
594 fh = fc_frame_header_get(fp); 590 /*
595 /* if not an Flogi frame, send it out, this is the common case */ 591 * Queue frame if in a transitional state.
596 if (!is_flogi_frame(fh)) 592 * This occurs while registering the Port_ID / MAC address after FLOGI.
597 return fnic_send_frame(fnic, fp); 593 */
594 spin_lock_irqsave(&fnic->fnic_lock, flags);
595 if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) {
596 skb_queue_tail(&fnic->tx_queue, fp_skb(fp));
597 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
598 return 0;
599 }
600 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
598 601
599 /* Flogi frame, now enter the state machine */ 602 return fnic_send_frame(fnic, fp);
603}
600 604
601 spin_lock_irqsave(&fnic->fnic_lock, flags); 605/**
602again: 606 * fnic_flush_tx() - send queued frames.
603 /* Get any old cached frames, free them after dropping lock */ 607 * @fnic: fnic device
604 old_flogi = fnic->flogi; 608 *
605 fnic->flogi = NULL; 609 * Send frames that were waiting to go out in FC or Ethernet mode.
606 old_flogi_resp = fnic->flogi_resp; 610 * Whenever changing modes we purge queued frames, so these frames should
607 fnic->flogi_resp = NULL; 611 * be queued for the stable mode that we're in, either FC or Ethernet.
612 *
613 * Called without fnic_lock held.
614 */
615void fnic_flush_tx(struct fnic *fnic)
616{
617 struct sk_buff *skb;
618 struct fc_frame *fp;
608 619
609 fnic->flogi_oxid = FC_XID_UNKNOWN; 620 while ((skb = skb_dequeue(&fnic->tx_queue))) {
621 fp = (struct fc_frame *)skb;
622 fnic_send_frame(fnic, fp);
623 }
624}
610 625
626/**
627 * fnic_set_eth_mode() - put fnic into ethernet mode.
628 * @fnic: fnic device
629 *
630 * Called without fnic lock held.
631 */
632static void fnic_set_eth_mode(struct fnic *fnic)
633{
634 unsigned long flags;
635 enum fnic_state old_state;
636 int ret;
637
638 spin_lock_irqsave(&fnic->fnic_lock, flags);
639again:
611 old_state = fnic->state; 640 old_state = fnic->state;
612 switch (old_state) { 641 switch (old_state) {
613 case FNIC_IN_FC_MODE: 642 case FNIC_IN_FC_MODE:
614 case FNIC_IN_ETH_TRANS_FC_MODE: 643 case FNIC_IN_ETH_TRANS_FC_MODE:
615 default: 644 default:
616 fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; 645 fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
617 vnic_dev_del_addr(fnic->vdev, fnic->data_src_addr);
618 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 646 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
619 647
620 if (old_flogi) {
621 dev_kfree_skb(fp_skb(old_flogi));
622 old_flogi = NULL;
623 }
624 if (old_flogi_resp) {
625 dev_kfree_skb(fp_skb(old_flogi_resp));
626 old_flogi_resp = NULL;
627 }
628
629 ret = fnic_fw_reset_handler(fnic); 648 ret = fnic_fw_reset_handler(fnic);
630 649
631 spin_lock_irqsave(&fnic->fnic_lock, flags); 650 spin_lock_irqsave(&fnic->fnic_lock, flags);
632 if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE) 651 if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE)
633 goto again; 652 goto again;
634 if (ret) { 653 if (ret)
635 fnic->state = old_state; 654 fnic->state = old_state;
636 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
637 dev_kfree_skb(fp_skb(fp));
638 goto fnic_send_end;
639 }
640 old_flogi = fnic->flogi;
641 fnic->flogi = fp;
642 fnic->flogi_oxid = ntohs(fh->fh_ox_id);
643 old_flogi_resp = fnic->flogi_resp;
644 fnic->flogi_resp = NULL;
645 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
646 break; 655 break;
647 656
648 case FNIC_IN_FC_TRANS_ETH_MODE: 657 case FNIC_IN_FC_TRANS_ETH_MODE:
649 /*
650 * A reset is pending with the firmware. Store the flogi
651 * and its oxid. The transition out of this state happens
652 * only when Firmware completes the reset, either with
653 * success or failed. If success, transition to
654 * FNIC_IN_ETH_MODE, if fail, then transition to
655 * FNIC_IN_FC_MODE
656 */
657 fnic->flogi = fp;
658 fnic->flogi_oxid = ntohs(fh->fh_ox_id);
659 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
660 break;
661
662 case FNIC_IN_ETH_MODE: 658 case FNIC_IN_ETH_MODE:
663 /*
664 * The fw/hw is already in eth mode. Store the oxid,
665 * and send the flogi frame out. The transition out of this
666 * state happens only we receive flogi response from the
667 * network, and the oxid matches the cached oxid when the
668 * flogi frame was sent out. If they match, then we issue
669 * a flogi_reg request and transition to state
670 * FNIC_IN_ETH_TRANS_FC_MODE
671 */
672 fnic->flogi_oxid = ntohs(fh->fh_ox_id);
673 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
674 ret = fnic_send_frame(fnic, fp);
675 break; 659 break;
676 } 660 }
677 661 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
678fnic_send_end:
679 if (old_flogi)
680 dev_kfree_skb(fp_skb(old_flogi));
681 if (old_flogi_resp)
682 dev_kfree_skb(fp_skb(old_flogi_resp));
683 return ret;
684} 662}
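fnic_send() now refuses to transmit while the firmware is between Ethernet and FC modes: under the fnic lock, frames are parked on tx_queue and replayed by fnic_flush_tx() once a stable state is reached, preserving ordering across the mode switch. The queue-then-flush pattern, reduced to a sketch (example_send_frame stands in for fnic_send_frame):

    /* Sketch: park frames during a state transition, replay afterwards. */
    static int example_send(struct fnic *fnic, struct sk_buff *skb)
    {
            unsigned long flags;

            spin_lock_irqsave(&fnic->fnic_lock, flags);
            if (fnic->state != FNIC_IN_FC_MODE &&
                fnic->state != FNIC_IN_ETH_MODE) {
                    skb_queue_tail(&fnic->tx_queue, skb);    /* replay later */
                    spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                    return 0;
            }
            spin_unlock_irqrestore(&fnic->fnic_lock, flags);
            return example_send_frame(fnic, skb);            /* hypothetical */
    }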
685 663
686static void fnic_wq_complete_frame_send(struct vnic_wq *wq, 664static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
diff --git a/drivers/scsi/fnic/fnic_isr.c b/drivers/scsi/fnic/fnic_isr.c
index 2b3064828aea..5c1f223cabce 100644
--- a/drivers/scsi/fnic/fnic_isr.c
+++ b/drivers/scsi/fnic/fnic_isr.c
@@ -48,9 +48,9 @@ static irqreturn_t fnic_isr_legacy(int irq, void *data)
 	}

 	if (pba & (1 << FNIC_INTX_WQ_RQ_COPYWQ)) {
-		work_done += fnic_wq_copy_cmpl_handler(fnic, 8);
-		work_done += fnic_wq_cmpl_handler(fnic, 4);
-		work_done += fnic_rq_cmpl_handler(fnic, 4);
+		work_done += fnic_wq_copy_cmpl_handler(fnic, -1);
+		work_done += fnic_wq_cmpl_handler(fnic, -1);
+		work_done += fnic_rq_cmpl_handler(fnic, -1);

 		vnic_intr_return_credits(&fnic->intr[FNIC_INTX_WQ_RQ_COPYWQ],
 					 work_done,
@@ -66,9 +66,9 @@ static irqreturn_t fnic_isr_msi(int irq, void *data)
 	struct fnic *fnic = data;
 	unsigned long work_done = 0;

-	work_done += fnic_wq_copy_cmpl_handler(fnic, 8);
-	work_done += fnic_wq_cmpl_handler(fnic, 4);
-	work_done += fnic_rq_cmpl_handler(fnic, 4);
+	work_done += fnic_wq_copy_cmpl_handler(fnic, -1);
+	work_done += fnic_wq_cmpl_handler(fnic, -1);
+	work_done += fnic_rq_cmpl_handler(fnic, -1);

 	vnic_intr_return_credits(&fnic->intr[0],
 				 work_done,
@@ -83,7 +83,7 @@ static irqreturn_t fnic_isr_msix_rq(int irq, void *data)
 	struct fnic *fnic = data;
 	unsigned long rq_work_done = 0;

-	rq_work_done = fnic_rq_cmpl_handler(fnic, 4);
+	rq_work_done = fnic_rq_cmpl_handler(fnic, -1);
 	vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_RQ],
 				 rq_work_done,
 				 1 /* unmask intr */,
@@ -97,7 +97,7 @@ static irqreturn_t fnic_isr_msix_wq(int irq, void *data)
 	struct fnic *fnic = data;
 	unsigned long wq_work_done = 0;

-	wq_work_done = fnic_wq_cmpl_handler(fnic, 4);
+	wq_work_done = fnic_wq_cmpl_handler(fnic, -1);
 	vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ],
 				 wq_work_done,
 				 1 /* unmask intr */,
@@ -110,7 +110,7 @@ static irqreturn_t fnic_isr_msix_wq_copy(int irq, void *data)
 	struct fnic *fnic = data;
 	unsigned long wq_copy_work_done = 0;

-	wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, 8);
+	wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, -1);
 	vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ_COPY],
 				 wq_copy_work_done,
 				 1 /* unmask intr */,
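Every handler in fnic_isr.c now passes -1 instead of a small fixed budget (8 or 4 completions per interrupt). The budget parameter is treated as an unsigned count inside the vnic completion-queue service loop, so -1 wraps to UINT_MAX and effectively means "drain everything pending before returning credits". A sketch of the loop shape this relies on, with hypothetical helper names standing in for the vnic_cq internals:

static unsigned int cq_service_sketch(struct vnic_cq *cq, unsigned int work_to_do)
{
	unsigned int work_done = 0;

	/* (unsigned int)-1 == UINT_MAX, so a -1 budget never stops the loop early */
	while (cq_desc_pending(cq) && work_done < work_to_do) {
		cq_service_one_desc(cq);	/* hypothetical per-entry handler */
		work_done++;
	}
	return work_done;
}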
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 71c7bbe26d05..97b212570bcc 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -18,6 +18,7 @@
 #include <linux/module.h>
 #include <linux/mempool.h>
 #include <linux/string.h>
+#include <linux/slab.h>
 #include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/pci.h>
@@ -25,6 +26,8 @@
 #include <linux/interrupt.h>
 #include <linux/spinlock.h>
 #include <linux/workqueue.h>
+#include <linux/if_ether.h>
+#include <scsi/fc/fc_fip.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport.h>
 #include <scsi/scsi_transport_fc.h>
@@ -68,6 +71,7 @@ MODULE_PARM_DESC(fnic_log_level, "bit mask of fnic logging levels");

 static struct libfc_function_template fnic_transport_template = {
 	.frame_send = fnic_send,
+	.lport_set_port_id = fnic_set_port_id,
 	.fcp_abort_io = fnic_empty_scsi_cleanup,
 	.fcp_cleanup = fnic_empty_scsi_cleanup,
 	.exch_mgr_reset = fnic_exch_mgr_reset
@@ -140,6 +144,7 @@ static struct fc_function_template fnic_fc_functions = {
 	.get_fc_host_stats = fnic_get_stats,
 	.dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
 	.terminate_rport_io = fnic_terminate_rport_io,
+	.bsg_request = fc_lport_bsg_request,
 };

 static void fnic_get_host_speed(struct Scsi_Host *shost)
@@ -324,9 +329,6 @@ static int fnic_cleanup(struct fnic *fnic)
 {
 	unsigned int i;
 	int err;
-	unsigned long flags;
-	struct fc_frame *flogi = NULL;
-	struct fc_frame *flogi_resp = NULL;

 	vnic_dev_disable(fnic->vdev);
 	for (i = 0; i < fnic->intr_count; i++)
@@ -367,24 +369,6 @@ static int fnic_cleanup(struct fnic *fnic)
 	for (i = 0; i < fnic->intr_count; i++)
 		vnic_intr_clean(&fnic->intr[i]);

-	/*
-	 * Remove cached flogi and flogi resp frames if any
-	 * These frames are not in any queue, and therefore queue
-	 * cleanup does not clean them. So clean them explicitly
-	 */
-	spin_lock_irqsave(&fnic->fnic_lock, flags);
-	flogi = fnic->flogi;
-	fnic->flogi = NULL;
-	flogi_resp = fnic->flogi_resp;
-	fnic->flogi_resp = NULL;
-	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-
-	if (flogi)
-		dev_kfree_skb(fp_skb(flogi));
-
-	if (flogi_resp)
-		dev_kfree_skb(fp_skb(flogi_resp));
-
 	mempool_destroy(fnic->io_req_pool);
 	for (i = 0; i < FNIC_SGL_NUM_CACHES; i++)
 		mempool_destroy(fnic->io_sgl_pool[i]);
@@ -409,6 +393,17 @@ static void *fnic_alloc_slab_dma(gfp_t gfp_mask, void *pool_data)
 	return kmem_cache_alloc(mem, gfp_mask | GFP_ATOMIC | GFP_DMA);
 }

+/**
+ * fnic_get_mac() - get assigned data MAC address for FIP code.
+ * @lport: local port.
+ */
+static u8 *fnic_get_mac(struct fc_lport *lport)
+{
+	struct fnic *fnic = lport_priv(lport);
+
+	return fnic->data_src_addr;
+}
+
 static int __devinit fnic_probe(struct pci_dev *pdev,
 				const struct pci_device_id *ent)
 {
@@ -424,17 +419,16 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
 	 * Allocate SCSI Host and set up association between host,
 	 * local port, and fnic
 	 */
-	host = scsi_host_alloc(&fnic_host_template,
-			       sizeof(struct fc_lport) + sizeof(struct fnic));
-	if (!host) {
-		printk(KERN_ERR PFX "Unable to alloc SCSI host\n");
+	lp = libfc_host_alloc(&fnic_host_template, sizeof(struct fnic));
+	if (!lp) {
+		printk(KERN_ERR PFX "Unable to alloc libfc local port\n");
 		err = -ENOMEM;
 		goto err_out;
 	}
-	lp = shost_priv(host);
-	lp->host = host;
+	host = lp->host;
 	fnic = lport_priv(lp);
 	fnic->lport = lp;
+	fnic->ctlr.lp = lp;

 	snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME,
 		 host->host_no);
@@ -543,12 +537,14 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
 		goto err_out_dev_close;
 	}

-	err = vnic_dev_mac_addr(fnic->vdev, fnic->mac_addr);
+	err = vnic_dev_mac_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
 	if (err) {
 		shost_printk(KERN_ERR, fnic->lport->host,
 			     "vNIC get MAC addr failed \n");
 		goto err_out_dev_close;
 	}
+	/* set data_src for point-to-point mode and to keep it non-zero */
+	memcpy(fnic->data_src_addr, fnic->ctlr.ctl_src_addr, ETH_ALEN);

 	/* Get vNIC configuration */
 	err = fnic_get_vnic_config(fnic);
@@ -560,6 +556,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
 	}
 	host->max_lun = fnic->config.luns_per_tgt;
 	host->max_id = FNIC_MAX_FCP_TARGET;
+	host->max_cmd_len = FNIC_MAX_CMD_LEN;

 	fnic_get_res_counts(fnic);

@@ -571,19 +568,12 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
 		goto err_out_dev_close;
 	}

-	err = fnic_request_intr(fnic);
-	if (err) {
-		shost_printk(KERN_ERR, fnic->lport->host,
-			     "Unable to request irq.\n");
-		goto err_out_clear_intr;
-	}
-
 	err = fnic_alloc_vnic_resources(fnic);
 	if (err) {
 		shost_printk(KERN_ERR, fnic->lport->host,
 			     "Failed to alloc vNIC resources, "
 			     "aborting.\n");
-		goto err_out_free_intr;
+		goto err_out_clear_intr;
 	}


@@ -623,9 +613,23 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
 	fnic->vlan_hw_insert = 1;
 	fnic->vlan_id = 0;

-	fnic->flogi_oxid = FC_XID_UNKNOWN;
-	fnic->flogi = NULL;
-	fnic->flogi_resp = NULL;
+	/* Initialize the FIP fcoe_ctrl struct */
+	fnic->ctlr.send = fnic_eth_send;
+	fnic->ctlr.update_mac = fnic_update_mac;
+	fnic->ctlr.get_src_addr = fnic_get_mac;
+	fcoe_ctlr_init(&fnic->ctlr);
+	if (fnic->config.flags & VFCF_FIP_CAPABLE) {
+		shost_printk(KERN_INFO, fnic->lport->host,
+			     "firmware supports FIP\n");
+		/* enable directed and multicast */
+		vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0);
+		vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS);
+		vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
+	} else {
+		shost_printk(KERN_INFO, fnic->lport->host,
+			     "firmware uses non-FIP mode\n");
+		fnic->ctlr.mode = FIP_ST_NON_FIP;
+	}
 	fnic->state = FNIC_IN_FC_MODE;

 	/* Enable hardware stripping of vlan header on ingress */
@@ -697,6 +701,8 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
 		goto err_out_remove_scsi_host;
 	}

+	fc_lport_init_stats(lp);
+
 	fc_lport_config(lp);

 	if (fc_set_mfs(lp, fnic->config.maxdatafieldsize +
@@ -716,6 +722,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
 	INIT_WORK(&fnic->link_work, fnic_handle_link);
 	INIT_WORK(&fnic->frame_work, fnic_handle_frame);
 	skb_queue_head_init(&fnic->frame_queue);
+	skb_queue_head_init(&fnic->tx_queue);

 	/* Enable all queues */
 	for (i = 0; i < fnic->raw_wq_count; i++)
@@ -728,6 +735,14 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
 	fc_fabric_login(lp);

 	vnic_dev_enable(fnic->vdev);
+
+	err = fnic_request_intr(fnic);
+	if (err) {
+		shost_printk(KERN_ERR, fnic->lport->host,
+			     "Unable to request irq.\n");
+		goto err_out_free_exch_mgr;
+	}
+
 	for (i = 0; i < fnic->intr_count; i++)
 		vnic_intr_unmask(&fnic->intr[i]);

@@ -738,8 +753,8 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
 err_out_free_exch_mgr:
 	fc_exch_mgr_free(lp);
 err_out_remove_scsi_host:
-	fc_remove_host(fnic->lport->host);
-	scsi_remove_host(fnic->lport->host);
+	fc_remove_host(lp->host);
+	scsi_remove_host(lp->host);
 err_out_free_rq_buf:
 	for (i = 0; i < fnic->rq_count; i++)
 		vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
@@ -752,8 +767,6 @@ err_out_free_ioreq_pool:
 	mempool_destroy(fnic->io_req_pool);
 err_out_free_resources:
 	fnic_free_vnic_resources(fnic);
-err_out_free_intr:
-	fnic_free_intr(fnic);
 err_out_clear_intr:
 	fnic_clear_intr_mode(fnic);
 err_out_dev_close:
@@ -775,6 +788,7 @@ err_out:
 static void __devexit fnic_remove(struct pci_dev *pdev)
 {
 	struct fnic *fnic = pci_get_drvdata(pdev);
+	struct fc_lport *lp = fnic->lport;
 	unsigned long flags;

 	/*
@@ -796,6 +810,7 @@ static void __devexit fnic_remove(struct pci_dev *pdev)
 	 */
 	flush_workqueue(fnic_event_queue);
 	skb_queue_purge(&fnic->frame_queue);
+	skb_queue_purge(&fnic->tx_queue);

 	/*
 	 * Log off the fabric. This stops all remote ports, dns port,
@@ -808,7 +823,8 @@ static void __devexit fnic_remove(struct pci_dev *pdev)
 	fnic->in_remove = 1;
 	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

-	fc_lport_destroy(fnic->lport);
+	fcoe_ctlr_destroy(&fnic->ctlr);
+	fc_lport_destroy(lp);

 	/*
 	 * This stops the fnic device, masks all interrupts. Completed
@@ -818,6 +834,7 @@ static void __devexit fnic_remove(struct pci_dev *pdev)
 	fnic_cleanup(fnic);

 	BUG_ON(!skb_queue_empty(&fnic->frame_queue));
+	BUG_ON(!skb_queue_empty(&fnic->tx_queue));

 	spin_lock_irqsave(&fnic_list_lock, flags);
 	list_del(&fnic->list);
@@ -827,8 +844,8 @@ static void __devexit fnic_remove(struct pci_dev *pdev)
 	scsi_remove_host(fnic->lport->host);
 	fc_exch_mgr_free(fnic->lport);
 	vnic_dev_notify_unset(fnic->vdev);
-	fnic_free_vnic_resources(fnic);
 	fnic_free_intr(fnic);
+	fnic_free_vnic_resources(fnic);
 	fnic_clear_intr_mode(fnic);
 	vnic_dev_close(fnic->vdev);
 	vnic_dev_unregister(fnic->vdev);
@@ -836,7 +853,7 @@ static void __devexit fnic_remove(struct pci_dev *pdev)
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
 	pci_set_drvdata(pdev, NULL);
-	scsi_host_put(fnic->lport->host);
+	scsi_host_put(lp->host);
 }

 static struct pci_driver fnic_driver = {
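Two ordering changes in fnic_main.c above are easy to miss: fnic_request_intr() moves from early in probe to after the queues are enabled and fabric login has started, so an interrupt can never fire against a half-initialized device, and fnic_remove() now releases the IRQs before freeing the vNIC resources for the mirror-image reason (the err_out_free_intr unwind label disappears accordingly). A sketch of the symmetry being enforced, using hypothetical stand-in helpers rather than the real vnic calls:

struct ctx;				/* hypothetical driver context */
int alloc_rings(struct ctx *c);
void enable_rings(struct ctx *c);
int request_irqs(struct ctx *c);
void disable_rings(struct ctx *c);
void free_rings(struct ctx *c);

static int probe_sketch(struct ctx *c)
{
	int err = alloc_rings(c);	/* 1. memory and rings */
	if (err)
		return err;
	enable_rings(c);		/* 2. hardware queues live */
	err = request_irqs(c);		/* 3. interrupts come last */
	if (err) {			/* unwind in reverse order */
		disable_rings(c);
		free_rings(c);
	}
	return err;
}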
diff --git a/drivers/scsi/fnic/fnic_res.c b/drivers/scsi/fnic/fnic_res.c
index 7ba61ec715d2..50488f8e169d 100644
--- a/drivers/scsi/fnic/fnic_res.c
+++ b/drivers/scsi/fnic/fnic_res.c
@@ -144,10 +144,9 @@ int fnic_get_vnic_config(struct fnic *fnic)
 	c->intr_timer_type = c->intr_timer_type;

 	shost_printk(KERN_INFO, fnic->lport->host,
-		     "vNIC MAC addr %02x:%02x:%02x:%02x:%02x:%02x "
+		     "vNIC MAC addr %pM "
 		     "wq/wq_copy/rq %d/%d/%d\n",
-		     fnic->mac_addr[0], fnic->mac_addr[1], fnic->mac_addr[2],
-		     fnic->mac_addr[3], fnic->mac_addr[4], fnic->mac_addr[5],
+		     fnic->ctlr.ctl_src_addr,
 		     c->wq_enet_desc_count, c->wq_copy_desc_count,
 		     c->rq_desc_count);
 	shost_printk(KERN_INFO, fnic->lport->host,
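The fnic_res.c hunk swaps six hand-formatted %02x arguments for the kernel's %pM printk extension, which prints a 6-byte MAC address in colon-separated form straight from the pointer. Minimal usage example (standard kernel printk API, not fnic-specific):

#include <linux/kernel.h>
#include <linux/if_ether.h>

static void print_mac_example(const u8 *mac)	/* mac points at ETH_ALEN bytes */
{
	printk(KERN_INFO "vNIC MAC addr %pM\n", mac);	/* xx:xx:xx:xx:xx:xx */
}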
diff --git a/drivers/scsi/fnic/fnic_res.h b/drivers/scsi/fnic/fnic_res.h
index b6f310262534..ef8aaf2156dd 100644
--- a/drivers/scsi/fnic/fnic_res.h
+++ b/drivers/scsi/fnic/fnic_res.h
@@ -51,6 +51,31 @@ static inline void fnic_queue_wq_desc(struct vnic_wq *wq,
 	vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop);
 }

+static inline void fnic_queue_wq_eth_desc(struct vnic_wq *wq,
+					  void *os_buf, dma_addr_t dma_addr,
+					  unsigned int len,
+					  int vlan_tag_insert,
+					  unsigned int vlan_tag,
+					  int cq_entry)
+{
+	struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
+
+	wq_enet_desc_enc(desc,
+			 (u64)dma_addr | VNIC_PADDR_TARGET,
+			 (u16)len,
+			 0, /* mss_or_csum_offset */
+			 0, /* fc_eof */
+			 0, /* offload_mode */
+			 1, /* eop */
+			 (u8)cq_entry,
+			 0, /* fcoe_encap */
+			 (u8)vlan_tag_insert,
+			 (u16)vlan_tag,
+			 0 /* loopback */);
+
+	vnic_wq_post(wq, os_buf, dma_addr, len, 1, 1);
+}
+
 static inline void fnic_queue_wq_copy_desc_icmnd_16(struct vnic_wq_copy *wq,
 						    u32 req_id,
 						    u32 lunmap_id, u8 spl_flags,
@@ -58,6 +83,7 @@ static inline void fnic_queue_wq_copy_desc_icmnd_16(struct vnic_wq_copy *wq,
 						    u64 sgl_addr, u64 sns_addr,
 						    u8 crn, u8 pri_ta,
 						    u8 flags, u8 *scsi_cdb,
+						    u8 cdb_len,
 						    u32 data_len, u8 *lun,
 						    u32 d_id, u16 mss,
 						    u32 ratov, u32 edtov)
@@ -82,7 +108,8 @@ static inline void fnic_queue_wq_copy_desc_icmnd_16(struct vnic_wq_copy *wq,
 	desc->u.icmnd_16.pri_ta = pri_ta;	/* SCSI Pri & Task attribute */
 	desc->u.icmnd_16._resvd1 = 0;		/* reserved: should be 0 */
 	desc->u.icmnd_16.flags = flags;		/* command flags */
-	memcpy(desc->u.icmnd_16.scsi_cdb, scsi_cdb, CDB_16);	/* SCSI CDB */
+	memset(desc->u.icmnd_16.scsi_cdb, 0, CDB_16);
+	memcpy(desc->u.icmnd_16.scsi_cdb, scsi_cdb, cdb_len);	/* SCSI CDB */
 	desc->u.icmnd_16.data_len = data_len;	/* length of data expected */
 	memcpy(desc->u.icmnd_16.lun, lun, LUN_ADDRESS);	/* LUN address */
 	desc->u.icmnd_16._resvd2 = 0;		/* reserved */
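The memset/memcpy pair above goes with the new cdb_len parameter: SCSI CDBs are variable length (typically 6, 10, 12, or 16 bytes), so unconditionally copying CDB_16 bytes from sc->cmnd could drag stale trailing bytes into the descriptor for shorter commands. Zero-filling the field and copying only cdb_len bytes keeps the firmware's view deterministic. A hypothetical helper showing the same pattern:

static void fill_cdb16(u8 *dst, const u8 *cdb, u8 cdb_len)
{
	memset(dst, 0, 16);		/* clear the whole 16-byte CDB field */
	memcpy(dst, cdb, cdb_len);	/* then copy only the valid bytes */
}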
@@ -132,12 +159,37 @@ static inline void fnic_queue_wq_copy_desc_flogi_reg(struct vnic_wq_copy *wq,
 	desc->hdr.tag.u.req_id = req_id;      /* id for this request */

 	desc->u.flogi_reg.format = format;
+	desc->u.flogi_reg._resvd = 0;
 	hton24(desc->u.flogi_reg.s_id, s_id);
 	memcpy(desc->u.flogi_reg.gateway_mac, gw_mac, ETH_ALEN);

 	vnic_wq_copy_post(wq);
 }

+static inline void fnic_queue_wq_copy_desc_fip_reg(struct vnic_wq_copy *wq,
+						   u32 req_id, u32 s_id,
+						   u8 *fcf_mac, u8 *ha_mac,
+						   u32 r_a_tov, u32 e_d_tov)
+{
+	struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
+
+	desc->hdr.type = FCPIO_FLOGI_FIP_REG; /* enum fcpio_type */
+	desc->hdr.status = 0;                 /* header status entry */
+	desc->hdr._resvd = 0;                 /* reserved */
+	desc->hdr.tag.u.req_id = req_id;      /* id for this request */
+
+	desc->u.flogi_fip_reg._resvd0 = 0;
+	hton24(desc->u.flogi_fip_reg.s_id, s_id);
+	memcpy(desc->u.flogi_fip_reg.fcf_mac, fcf_mac, ETH_ALEN);
+	desc->u.flogi_fip_reg._resvd1 = 0;
+	desc->u.flogi_fip_reg.r_a_tov = r_a_tov;
+	desc->u.flogi_fip_reg.e_d_tov = e_d_tov;
+	memcpy(desc->u.flogi_fip_reg.ha_mac, ha_mac, ETH_ALEN);
+	desc->u.flogi_fip_reg._resvd2 = 0;
+
+	vnic_wq_copy_post(wq);
+}
+
 static inline void fnic_queue_wq_copy_desc_fw_reset(struct vnic_wq_copy *wq,
 						    u32 req_id)
 {
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index bfc996971b81..3cc47c6e1ada 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -26,6 +26,7 @@
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
 #include <linux/delay.h>
+#include <linux/gfp.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_device.h>
@@ -174,6 +175,9 @@ int fnic_fw_reset_handler(struct fnic *fnic)
 	int ret = 0;
 	unsigned long flags;

+	skb_queue_purge(&fnic->frame_queue);
+	skb_queue_purge(&fnic->tx_queue);
+
 	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

 	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
@@ -200,9 +204,11 @@ int fnic_fw_reset_handler(struct fnic *fnic)
  * fnic_flogi_reg_handler
  * Routine to send flogi register msg to fw
  */
-int fnic_flogi_reg_handler(struct fnic *fnic)
+int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id)
 {
 	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
+	enum fcpio_flogi_reg_format_type format;
+	struct fc_lport *lp = fnic->lport;
 	u8 gw_mac[ETH_ALEN];
 	int ret = 0;
 	unsigned long flags;
@@ -217,23 +223,32 @@ int fnic_flogi_reg_handler(struct fnic *fnic)
 		goto flogi_reg_ioreq_end;
 	}

-	if (fnic->fcoui_mode)
+	if (fnic->ctlr.map_dest) {
 		memset(gw_mac, 0xff, ETH_ALEN);
-	else
-		memcpy(gw_mac, fnic->dest_addr, ETH_ALEN);
+		format = FCPIO_FLOGI_REG_DEF_DEST;
+	} else {
+		memcpy(gw_mac, fnic->ctlr.dest_addr, ETH_ALEN);
+		format = FCPIO_FLOGI_REG_GW_DEST;
+	}

-	fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG,
-					  FCPIO_FLOGI_REG_GW_DEST,
-					  fnic->s_id,
-					  gw_mac);
+	if ((fnic->config.flags & VFCF_FIP_CAPABLE) && !fnic->ctlr.map_dest) {
+		fnic_queue_wq_copy_desc_fip_reg(wq, SCSI_NO_TAG,
+						fc_id, gw_mac,
+						fnic->data_src_addr,
+						lp->r_a_tov, lp->e_d_tov);
+		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+			      "FLOGI FIP reg issued fcid %x src %pM dest %pM\n",
+			      fc_id, fnic->data_src_addr, gw_mac);
+	} else {
+		fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG,
+						  format, fc_id, gw_mac);
+		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+			      "FLOGI reg issued fcid %x map %d dest %pM\n",
+			      fc_id, fnic->ctlr.map_dest, gw_mac);
+	}

 flogi_reg_ioreq_end:
 	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
-
-	if (!ret)
-		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
-			      "flog reg issued\n");
-
 	return ret;
 }

@@ -319,7 +334,8 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
 					 0, /* scsi cmd ref, always 0 */
 					 pri_tag, /* scsi pri and tag */
 					 flags,	/* command flags */
-					 sc->cmnd, scsi_bufflen(sc),
+					 sc->cmnd, sc->cmd_len,
+					 scsi_bufflen(sc),
 					 fc_lun.scsi_lun, io_req->port_id,
 					 rport->maxframe_size, rp->r_a_tov,
 					 rp->e_d_tov);
@@ -452,7 +468,6 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
 	u8 hdr_status;
 	struct fcpio_tag tag;
 	int ret = 0;
-	struct fc_frame *flogi;
 	unsigned long flags;

 	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
@@ -462,9 +477,6 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,

 	spin_lock_irqsave(&fnic->fnic_lock, flags);

-	flogi = fnic->flogi;
-	fnic->flogi = NULL;
-
 	/* fnic should be in FC_TRANS_ETH_MODE */
 	if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) {
 		/* Check status of reset completion */
@@ -505,17 +517,14 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
 	 * free the flogi frame. Else, send it out
 	 */
 	if (fnic->remove_wait || ret) {
-		fnic->flogi_oxid = FC_XID_UNKNOWN;
 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-		if (flogi)
-			dev_kfree_skb_irq(fp_skb(flogi));
+		skb_queue_purge(&fnic->tx_queue);
 		goto reset_cmpl_handler_end;
 	}

 	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

-	if (flogi)
-		ret = fnic_send_frame(fnic, flogi);
+	fnic_flush_tx(fnic);

 reset_cmpl_handler_end:
 	return ret;
@@ -532,18 +541,13 @@ static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic,
 	u8 hdr_status;
 	struct fcpio_tag tag;
 	int ret = 0;
-	struct fc_frame *flogi_resp = NULL;
 	unsigned long flags;
-	struct sk_buff *skb;

 	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);

 	/* Update fnic state based on status of flogi reg completion */
 	spin_lock_irqsave(&fnic->fnic_lock, flags);

-	flogi_resp = fnic->flogi_resp;
-	fnic->flogi_resp = NULL;
-
 	if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) {

 		/* Check flogi registration completion status */
@@ -567,25 +571,17 @@ static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic,
 		ret = -1;
 	}

-	/* Successful flogi reg cmpl, pass frame to LibFC */
-	if (!ret && flogi_resp) {
+	if (!ret) {
 		if (fnic->stop_rx_link_events) {
 			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 			goto reg_cmpl_handler_end;
 		}
-		skb = (struct sk_buff *)flogi_resp;
-		/* Use fr_flags to indicate whether flogi resp or not */
-		fr_flags(flogi_resp) = 1;
-		fr_dev(flogi_resp) = fnic->lport;
 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

-		skb_queue_tail(&fnic->frame_queue, skb);
+		fnic_flush_tx(fnic);
 		queue_work(fnic_event_queue, &fnic->frame_work);
-
 	} else {
 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-		if (flogi_resp)
-			dev_kfree_skb_irq(fp_skb(flogi_resp));
 	}

 reg_cmpl_handler_end:
@@ -907,6 +903,7 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
 		break;

 	case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
+	case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
 		ret = fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
 		break;

@@ -1224,22 +1221,6 @@ void fnic_terminate_rport_io(struct fc_rport *rport)

 }

-static void fnic_block_error_handler(struct scsi_cmnd *sc)
-{
-	struct Scsi_Host *shost = sc->device->host;
-	struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
-	unsigned long flags;
-
-	spin_lock_irqsave(shost->host_lock, flags);
-	while (rport->port_state == FC_PORTSTATE_BLOCKED) {
-		spin_unlock_irqrestore(shost->host_lock, flags);
-		msleep(1000);
-		spin_lock_irqsave(shost->host_lock, flags);
-	}
-	spin_unlock_irqrestore(shost->host_lock, flags);
-
-}
-
 /*
  * This function is exported to SCSI for sending abort cmnds.
  * A SCSI IO is represented by a io_req in the driver.
@@ -1259,7 +1240,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
 	DECLARE_COMPLETION_ONSTACK(tm_done);

 	/* Wait for rport to unblock */
-	fnic_block_error_handler(sc);
+	fc_block_scsi_eh(sc);

 	/* Get local-port, check ready and link up */
 	lp = shost_priv(sc->device->host);
@@ -1541,7 +1522,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
 	DECLARE_COMPLETION_ONSTACK(tm_done);

 	/* Wait for rport to unblock */
-	fnic_block_error_handler(sc);
+	fc_block_scsi_eh(sc);

 	/* Get local-port, check ready and link up */
 	lp = shost_priv(sc->device->host);
@@ -1762,7 +1743,7 @@ void fnic_scsi_abort_io(struct fc_lport *lp)
 	fnic->remove_wait = &remove_wait;
 	old_state = fnic->state;
 	fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
-	vnic_dev_del_addr(fnic->vdev, fnic->data_src_addr);
+	fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
 	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

 	err = fnic_fw_reset_handler(fnic);
@@ -1802,7 +1783,7 @@ void fnic_scsi_cleanup(struct fc_lport *lp)
 	spin_lock_irqsave(&fnic->fnic_lock, flags);
 	old_state = fnic->state;
 	fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
-	vnic_dev_del_addr(fnic->vdev, fnic->data_src_addr);
+	fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
 	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

 	if (fnic_fw_reset_handler(fnic)) {
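fnic_scsi.c also drops the driver-private fnic_block_error_handler() in favor of fc_block_scsi_eh() from the FC transport class, which implements the same contract: stall the SCSI error handler while the rport is FC_PORTSTATE_BLOCKED so a target that is merely re-logging-in is not aborted prematurely. A simplified sketch of that contract (the removed helper, and the shared one, also take the host lock around each check):

static void block_eh_sketch(struct scsi_cmnd *sc)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));

	/* poll once a second until the transport unblocks the rport */
	while (rport->port_state == FC_PORTSTATE_BLOCKED)
		msleep(1000);
}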
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
index 566770645086..db710148d156 100644
--- a/drivers/scsi/fnic/vnic_dev.c
+++ b/drivers/scsi/fnic/vnic_dev.c
@@ -22,6 +22,7 @@
 #include <linux/pci.h>
 #include <linux/delay.h>
 #include <linux/if_ether.h>
+#include <linux/slab.h>
 #include "vnic_resource.h"
 #include "vnic_devcmd.h"
 #include "vnic_dev.h"
diff --git a/drivers/scsi/fnic/vnic_devcmd.h b/drivers/scsi/fnic/vnic_devcmd.h
index d62b9061bf12..7c9ccbd4134b 100644
--- a/drivers/scsi/fnic/vnic_devcmd.h
+++ b/drivers/scsi/fnic/vnic_devcmd.h
@@ -94,7 +94,7 @@ enum vnic_devcmd_cmd {
 	CMD_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 4),

 	/* set Rx packet filter: (u32)a0=filters (see CMD_PFILTER_*) */
-	CMD_PACKET_FILTER = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 7),
+	CMD_PACKET_FILTER = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 7),

 	/* hang detection notification */
 	CMD_HANG_NOTIFY = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 8),
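Widening CMD_PACKET_FILTER from _CMD_VTYPE_ENET to _CMD_VTYPE_ALL lets an FC vNIC issue the Rx packet-filter devcmd, which the FIP-capable probe path in fnic_main.c above needs in order to receive FIP multicast frames. The calls it unblocks, wrapped in a hypothetical helper for illustration (the filter arguments are directed, multicast, broadcast, promiscuous, allmulti):

static void fip_rx_setup_sketch(struct fnic *fnic)
{
	/* directed and multicast on; broadcast, promisc and allmulti off */
	vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0);
	vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS);
	vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
}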
diff --git a/drivers/scsi/fnic/vnic_rq.c b/drivers/scsi/fnic/vnic_rq.c
index bedd0d285630..fd2068f5ae16 100644
--- a/drivers/scsi/fnic/vnic_rq.c
+++ b/drivers/scsi/fnic/vnic_rq.c
@@ -20,6 +20,7 @@
 #include <linux/types.h>
 #include <linux/pci.h>
 #include <linux/delay.h>
+#include <linux/slab.h>
 #include "vnic_dev.h"
 #include "vnic_rq.h"

diff --git a/drivers/scsi/fnic/vnic_scsi.h b/drivers/scsi/fnic/vnic_scsi.h
index 46baa5254001..fbb55364e272 100644
--- a/drivers/scsi/fnic/vnic_scsi.h
+++ b/drivers/scsi/fnic/vnic_scsi.h
@@ -95,5 +95,6 @@ struct vnic_fc_config {

 #define VFCF_FCP_SEQ_LVL_ERR	0x1	/* Enable FCP-2 Error Recovery */
 #define VFCF_PERBI	0x2	/* persistent binding info available */
+#define VFCF_FIP_CAPABLE 0x4	/* firmware can handle FIP */

 #endif /* _VNIC_SCSI_H_ */
diff --git a/drivers/scsi/fnic/vnic_wq.c b/drivers/scsi/fnic/vnic_wq.c
index 1f9ea790d130..a414135460db 100644
--- a/drivers/scsi/fnic/vnic_wq.c
+++ b/drivers/scsi/fnic/vnic_wq.c
@@ -20,6 +20,7 @@
 #include <linux/types.h>
 #include <linux/pci.h>
 #include <linux/delay.h>
+#include <linux/slab.h>
 #include "vnic_dev.h"
 #include "vnic_wq.h"

diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 9e8fce0f0c1b..35a4b3073ec3 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -121,6 +121,7 @@
121#include <linux/dma-mapping.h> 121#include <linux/dma-mapping.h>
122#include <linux/list.h> 122#include <linux/list.h>
123#include <linux/smp_lock.h> 123#include <linux/smp_lock.h>
124#include <linux/slab.h>
124 125
125#ifdef GDTH_RTC 126#ifdef GDTH_RTC
126#include <linux/mc146818rtc.h> 127#include <linux/mc146818rtc.h>
@@ -140,40 +141,40 @@
140#include "gdth.h" 141#include "gdth.h"
141 142
142static void gdth_delay(int milliseconds); 143static void gdth_delay(int milliseconds);
143static void gdth_eval_mapping(ulong32 size, ulong32 *cyls, int *heads, int *secs); 144static void gdth_eval_mapping(u32 size, u32 *cyls, int *heads, int *secs);
144static irqreturn_t gdth_interrupt(int irq, void *dev_id); 145static irqreturn_t gdth_interrupt(int irq, void *dev_id);
145static irqreturn_t __gdth_interrupt(gdth_ha_str *ha, 146static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
146 int gdth_from_wait, int* pIndex); 147 int gdth_from_wait, int* pIndex);
147static int gdth_sync_event(gdth_ha_str *ha, int service, unchar index, 148static int gdth_sync_event(gdth_ha_str *ha, int service, u8 index,
148 Scsi_Cmnd *scp); 149 Scsi_Cmnd *scp);
149static int gdth_async_event(gdth_ha_str *ha); 150static int gdth_async_event(gdth_ha_str *ha);
150static void gdth_log_event(gdth_evt_data *dvr, char *buffer); 151static void gdth_log_event(gdth_evt_data *dvr, char *buffer);
151 152
152static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar priority); 153static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 priority);
153static void gdth_next(gdth_ha_str *ha); 154static void gdth_next(gdth_ha_str *ha);
154static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar b); 155static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 b);
155static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp); 156static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp);
156static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, ushort source, 157static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, u16 source,
157 ushort idx, gdth_evt_data *evt); 158 u16 idx, gdth_evt_data *evt);
158static int gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr); 159static int gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr);
159static void gdth_readapp_event(gdth_ha_str *ha, unchar application, 160static void gdth_readapp_event(gdth_ha_str *ha, u8 application,
160 gdth_evt_str *estr); 161 gdth_evt_str *estr);
161static void gdth_clear_events(void); 162static void gdth_clear_events(void);
162 163
163static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp, 164static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
164 char *buffer, ushort count); 165 char *buffer, u16 count);
165static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp); 166static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp);
166static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive); 167static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u16 hdrive);
167 168
168static void gdth_enable_int(gdth_ha_str *ha); 169static void gdth_enable_int(gdth_ha_str *ha);
169static int gdth_test_busy(gdth_ha_str *ha); 170static int gdth_test_busy(gdth_ha_str *ha);
170static int gdth_get_cmd_index(gdth_ha_str *ha); 171static int gdth_get_cmd_index(gdth_ha_str *ha);
171static void gdth_release_event(gdth_ha_str *ha); 172static void gdth_release_event(gdth_ha_str *ha);
172static int gdth_wait(gdth_ha_str *ha, int index,ulong32 time); 173static int gdth_wait(gdth_ha_str *ha, int index,u32 time);
173static int gdth_internal_cmd(gdth_ha_str *ha, unchar service, ushort opcode, 174static int gdth_internal_cmd(gdth_ha_str *ha, u8 service, u16 opcode,
174 ulong32 p1, ulong64 p2,ulong64 p3); 175 u32 p1, u64 p2,u64 p3);
175static int gdth_search_drives(gdth_ha_str *ha); 176static int gdth_search_drives(gdth_ha_str *ha);
176static int gdth_analyse_hdrive(gdth_ha_str *ha, ushort hdrive); 177static int gdth_analyse_hdrive(gdth_ha_str *ha, u16 hdrive);
177 178
178static const char *gdth_ctr_name(gdth_ha_str *ha); 179static const char *gdth_ctr_name(gdth_ha_str *ha);
179 180
@@ -189,7 +190,7 @@ static int __gdth_queuecommand(gdth_ha_str *ha, struct scsi_cmnd *scp,
189static void gdth_scsi_done(struct scsi_cmnd *scp); 190static void gdth_scsi_done(struct scsi_cmnd *scp);
190 191
191#ifdef DEBUG_GDTH 192#ifdef DEBUG_GDTH
192static unchar DebugState = DEBUG_GDTH; 193static u8 DebugState = DEBUG_GDTH;
193 194
194#ifdef __SERIAL__ 195#ifdef __SERIAL__
195#define MAX_SERBUF 160 196#define MAX_SERBUF 160
@@ -270,30 +271,30 @@ static int ser_printk(const char *fmt, ...)
270#endif 271#endif
271 272
272#ifdef GDTH_STATISTICS 273#ifdef GDTH_STATISTICS
273static ulong32 max_rq=0, max_index=0, max_sg=0; 274static u32 max_rq=0, max_index=0, max_sg=0;
274#ifdef INT_COAL 275#ifdef INT_COAL
275static ulong32 max_int_coal=0; 276static u32 max_int_coal=0;
276#endif 277#endif
277static ulong32 act_ints=0, act_ios=0, act_stats=0, act_rq=0; 278static u32 act_ints=0, act_ios=0, act_stats=0, act_rq=0;
278static struct timer_list gdth_timer; 279static struct timer_list gdth_timer;
279#endif 280#endif
280 281
281#define PTR2USHORT(a) (ushort)(ulong)(a) 282#define PTR2USHORT(a) (u16)(unsigned long)(a)
282#define GDTOFFSOF(a,b) (size_t)&(((a*)0)->b) 283#define GDTOFFSOF(a,b) (size_t)&(((a*)0)->b)
283#define INDEX_OK(i,t) ((i)<ARRAY_SIZE(t)) 284#define INDEX_OK(i,t) ((i)<ARRAY_SIZE(t))
284 285
285#define BUS_L2P(a,b) ((b)>(a)->virt_bus ? (b-1):(b)) 286#define BUS_L2P(a,b) ((b)>(a)->virt_bus ? (b-1):(b))
286 287
287#ifdef CONFIG_ISA 288#ifdef CONFIG_ISA
288static unchar gdth_drq_tab[4] = {5,6,7,7}; /* DRQ table */ 289static u8 gdth_drq_tab[4] = {5,6,7,7}; /* DRQ table */
289#endif 290#endif
290#if defined(CONFIG_EISA) || defined(CONFIG_ISA) 291#if defined(CONFIG_EISA) || defined(CONFIG_ISA)
291static unchar gdth_irq_tab[6] = {0,10,11,12,14,0}; /* IRQ table */ 292static u8 gdth_irq_tab[6] = {0,10,11,12,14,0}; /* IRQ table */
292#endif 293#endif
293static unchar gdth_polling; /* polling if TRUE */ 294static u8 gdth_polling; /* polling if TRUE */
294static int gdth_ctr_count = 0; /* controller count */ 295static int gdth_ctr_count = 0; /* controller count */
295static LIST_HEAD(gdth_instances); /* controller list */ 296static LIST_HEAD(gdth_instances); /* controller list */
296static unchar gdth_write_through = FALSE; /* write through */ 297static u8 gdth_write_through = FALSE; /* write through */
297static gdth_evt_str ebuffer[MAX_EVENTS]; /* event buffer */ 298static gdth_evt_str ebuffer[MAX_EVENTS]; /* event buffer */
298static int elastidx; 299static int elastidx;
299static int eoldidx; 300static int eoldidx;
@@ -303,7 +304,7 @@ static int major;
303#define DOU 2 /* OUT data direction */ 304#define DOU 2 /* OUT data direction */
304#define DNO DIN /* no data transfer */ 305#define DNO DIN /* no data transfer */
305#define DUN DIN /* unknown data direction */ 306#define DUN DIN /* unknown data direction */
306static unchar gdth_direction_tab[0x100] = { 307static u8 gdth_direction_tab[0x100] = {
307 DNO,DNO,DIN,DIN,DOU,DIN,DIN,DOU,DIN,DUN,DOU,DOU,DUN,DUN,DUN,DIN, 308 DNO,DNO,DIN,DIN,DOU,DIN,DIN,DOU,DIN,DUN,DOU,DOU,DUN,DUN,DUN,DIN,
308 DNO,DIN,DIN,DOU,DIN,DOU,DNO,DNO,DOU,DNO,DIN,DNO,DIN,DOU,DNO,DUN, 309 DNO,DIN,DIN,DOU,DIN,DOU,DNO,DNO,DOU,DNO,DIN,DNO,DIN,DOU,DNO,DUN,
309 DIN,DUN,DIN,DUN,DOU,DIN,DUN,DUN,DIN,DIN,DOU,DNO,DUN,DIN,DOU,DOU, 310 DIN,DUN,DIN,DUN,DOU,DIN,DUN,DUN,DIN,DIN,DOU,DNO,DUN,DIN,DOU,DOU,
@@ -390,7 +391,7 @@ static gdth_ha_str *gdth_find_ha(int hanum)
390static struct gdth_cmndinfo *gdth_get_cmndinfo(gdth_ha_str *ha) 391static struct gdth_cmndinfo *gdth_get_cmndinfo(gdth_ha_str *ha)
391{ 392{
392 struct gdth_cmndinfo *priv = NULL; 393 struct gdth_cmndinfo *priv = NULL;
393 ulong flags; 394 unsigned long flags;
394 int i; 395 int i;
395 396
396 spin_lock_irqsave(&ha->smp_lock, flags); 397 spin_lock_irqsave(&ha->smp_lock, flags);
@@ -493,7 +494,7 @@ int gdth_execute(struct Scsi_Host *shost, gdth_cmd_str *gdtcmd, char *cmnd,
493 return rval; 494 return rval;
494} 495}
495 496
496static void gdth_eval_mapping(ulong32 size, ulong32 *cyls, int *heads, int *secs) 497static void gdth_eval_mapping(u32 size, u32 *cyls, int *heads, int *secs)
497{ 498{
498 *cyls = size /HEADS/SECS; 499 *cyls = size /HEADS/SECS;
499 if (*cyls <= MAXCYLS) { 500 if (*cyls <= MAXCYLS) {
@@ -514,9 +515,9 @@ static void gdth_eval_mapping(ulong32 size, ulong32 *cyls, int *heads, int *secs
514 515
515/* controller search and initialization functions */ 516/* controller search and initialization functions */
516#ifdef CONFIG_EISA 517#ifdef CONFIG_EISA
517static int __init gdth_search_eisa(ushort eisa_adr) 518static int __init gdth_search_eisa(u16 eisa_adr)
518{ 519{
519 ulong32 id; 520 u32 id;
520 521
521 TRACE(("gdth_search_eisa() adr. %x\n",eisa_adr)); 522 TRACE(("gdth_search_eisa() adr. %x\n",eisa_adr));
522 id = inl(eisa_adr+ID0REG); 523 id = inl(eisa_adr+ID0REG);
@@ -533,13 +534,13 @@ static int __init gdth_search_eisa(ushort eisa_adr)
533#endif /* CONFIG_EISA */ 534#endif /* CONFIG_EISA */
534 535
535#ifdef CONFIG_ISA 536#ifdef CONFIG_ISA
536static int __init gdth_search_isa(ulong32 bios_adr) 537static int __init gdth_search_isa(u32 bios_adr)
537{ 538{
538 void __iomem *addr; 539 void __iomem *addr;
539 ulong32 id; 540 u32 id;
540 541
541 TRACE(("gdth_search_isa() bios adr. %x\n",bios_adr)); 542 TRACE(("gdth_search_isa() bios adr. %x\n",bios_adr));
542 if ((addr = ioremap(bios_adr+BIOS_ID_OFFS, sizeof(ulong32))) != NULL) { 543 if ((addr = ioremap(bios_adr+BIOS_ID_OFFS, sizeof(u32))) != NULL) {
543 id = readl(addr); 544 id = readl(addr);
544 iounmap(addr); 545 iounmap(addr);
545 if (id == GDT2_ID) /* GDT2000 */ 546 if (id == GDT2_ID) /* GDT2000 */
@@ -551,7 +552,7 @@ static int __init gdth_search_isa(ulong32 bios_adr)
551 552
552#ifdef CONFIG_PCI 553#ifdef CONFIG_PCI
553 554
554static bool gdth_search_vortex(ushort device) 555static bool gdth_search_vortex(u16 device)
555{ 556{
556 if (device <= PCI_DEVICE_ID_VORTEX_GDT6555) 557 if (device <= PCI_DEVICE_ID_VORTEX_GDT6555)
557 return true; 558 return true;
@@ -603,9 +604,9 @@ static void __devexit gdth_pci_remove_one(struct pci_dev *pdev)
603static int __devinit gdth_pci_init_one(struct pci_dev *pdev, 604static int __devinit gdth_pci_init_one(struct pci_dev *pdev,
604 const struct pci_device_id *ent) 605 const struct pci_device_id *ent)
605{ 606{
606 ushort vendor = pdev->vendor; 607 u16 vendor = pdev->vendor;
607 ushort device = pdev->device; 608 u16 device = pdev->device;
608 ulong base0, base1, base2; 609 unsigned long base0, base1, base2;
609 int rc; 610 int rc;
610 gdth_pci_str gdth_pcistr; 611 gdth_pci_str gdth_pcistr;
611 gdth_ha_str *ha = NULL; 612 gdth_ha_str *ha = NULL;
@@ -658,10 +659,10 @@ static int __devinit gdth_pci_init_one(struct pci_dev *pdev,
658#endif /* CONFIG_PCI */ 659#endif /* CONFIG_PCI */
659 660
660#ifdef CONFIG_EISA 661#ifdef CONFIG_EISA
661static int __init gdth_init_eisa(ushort eisa_adr,gdth_ha_str *ha) 662static int __init gdth_init_eisa(u16 eisa_adr,gdth_ha_str *ha)
662{ 663{
663 ulong32 retries,id; 664 u32 retries,id;
664 unchar prot_ver,eisacf,i,irq_found; 665 u8 prot_ver,eisacf,i,irq_found;
665 666
666 TRACE(("gdth_init_eisa() adr. %x\n",eisa_adr)); 667 TRACE(("gdth_init_eisa() adr. %x\n",eisa_adr));
667 668
@@ -688,7 +689,7 @@ static int __init gdth_init_eisa(ushort eisa_adr,gdth_ha_str *ha)
688 return 0; 689 return 0;
689 } 690 }
690 ha->bmic = eisa_adr; 691 ha->bmic = eisa_adr;
691 ha->brd_phys = (ulong32)eisa_adr >> 12; 692 ha->brd_phys = (u32)eisa_adr >> 12;
692 693
693 outl(0,eisa_adr+MAILBOXREG); 694 outl(0,eisa_adr+MAILBOXREG);
694 outl(0,eisa_adr+MAILBOXREG+4); 695 outl(0,eisa_adr+MAILBOXREG+4);
@@ -752,12 +753,12 @@ static int __init gdth_init_eisa(ushort eisa_adr,gdth_ha_str *ha)
752#endif /* CONFIG_EISA */ 753#endif /* CONFIG_EISA */
753 754
754#ifdef CONFIG_ISA 755#ifdef CONFIG_ISA
755static int __init gdth_init_isa(ulong32 bios_adr,gdth_ha_str *ha) 756static int __init gdth_init_isa(u32 bios_adr,gdth_ha_str *ha)
756{ 757{
757 register gdt2_dpram_str __iomem *dp2_ptr; 758 register gdt2_dpram_str __iomem *dp2_ptr;
758 int i; 759 int i;
759 unchar irq_drq,prot_ver; 760 u8 irq_drq,prot_ver;
760 ulong32 retries; 761 u32 retries;
761 762
762 TRACE(("gdth_init_isa() bios adr. %x\n",bios_adr)); 763 TRACE(("gdth_init_isa() bios adr. %x\n",bios_adr));
763 764
@@ -812,7 +813,7 @@ static int __init gdth_init_isa(ulong32 bios_adr,gdth_ha_str *ha)
812 } 813 }
813 gdth_delay(1); 814 gdth_delay(1);
814 } 815 }
815 prot_ver = (unchar)readl(&dp2_ptr->u.ic.S_Info[0]); 816 prot_ver = (u8)readl(&dp2_ptr->u.ic.S_Info[0]);
816 writeb(0, &dp2_ptr->u.ic.Status); 817 writeb(0, &dp2_ptr->u.ic.Status);
817 writeb(0xff, &dp2_ptr->io.irqdel); 818 writeb(0xff, &dp2_ptr->io.irqdel);
818 if (prot_ver != PROTOCOL_VERSION) { 819 if (prot_ver != PROTOCOL_VERSION) {
@@ -859,9 +860,9 @@ static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
859 register gdt6_dpram_str __iomem *dp6_ptr; 860 register gdt6_dpram_str __iomem *dp6_ptr;
860 register gdt6c_dpram_str __iomem *dp6c_ptr; 861 register gdt6c_dpram_str __iomem *dp6c_ptr;
861 register gdt6m_dpram_str __iomem *dp6m_ptr; 862 register gdt6m_dpram_str __iomem *dp6m_ptr;
862 ulong32 retries; 863 u32 retries;
863 unchar prot_ver; 864 u8 prot_ver;
864 ushort command; 865 u16 command;
865 int i, found = FALSE; 866 int i, found = FALSE;
866 867
867 TRACE(("gdth_init_pci()\n")); 868 TRACE(("gdth_init_pci()\n"));
@@ -871,7 +872,7 @@ static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
871 else 872 else
872 ha->oem_id = OEM_ID_ICP; 873 ha->oem_id = OEM_ID_ICP;
873 ha->brd_phys = (pdev->bus->number << 8) | (pdev->devfn & 0xf8); 874 ha->brd_phys = (pdev->bus->number << 8) | (pdev->devfn & 0xf8);
874 ha->stype = (ulong32)pdev->device; 875 ha->stype = (u32)pdev->device;
875 ha->irq = pdev->irq; 876 ha->irq = pdev->irq;
876 ha->pdev = pdev; 877 ha->pdev = pdev;
877 878
@@ -891,7 +892,7 @@ static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
891 found = FALSE; 892 found = FALSE;
892 for (i = 0xC8000; i < 0xE8000; i += 0x4000) { 893 for (i = 0xC8000; i < 0xE8000; i += 0x4000) {
893 iounmap(ha->brd); 894 iounmap(ha->brd);
894 ha->brd = ioremap(i, sizeof(ushort)); 895 ha->brd = ioremap(i, sizeof(u16));
895 if (ha->brd == NULL) { 896 if (ha->brd == NULL) {
896 printk("GDT-PCI: Initialization error (DPMEM remap error)\n"); 897 printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
897 return 0; 898 return 0;
@@ -947,7 +948,7 @@ static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
947 } 948 }
948 gdth_delay(1); 949 gdth_delay(1);
949 } 950 }
950 prot_ver = (unchar)readl(&dp6_ptr->u.ic.S_Info[0]); 951 prot_ver = (u8)readl(&dp6_ptr->u.ic.S_Info[0]);
951 writeb(0, &dp6_ptr->u.ic.S_Status); 952 writeb(0, &dp6_ptr->u.ic.S_Status);
952 writeb(0xff, &dp6_ptr->io.irqdel); 953 writeb(0xff, &dp6_ptr->io.irqdel);
953 if (prot_ver != PROTOCOL_VERSION) { 954 if (prot_ver != PROTOCOL_VERSION) {
@@ -1000,7 +1001,7 @@ static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
1000 found = FALSE; 1001 found = FALSE;
1001 for (i = 0xC8000; i < 0xE8000; i += 0x4000) { 1002 for (i = 0xC8000; i < 0xE8000; i += 0x4000) {
1002 iounmap(ha->brd); 1003 iounmap(ha->brd);
1003 ha->brd = ioremap(i, sizeof(ushort)); 1004 ha->brd = ioremap(i, sizeof(u16));
1004 if (ha->brd == NULL) { 1005 if (ha->brd == NULL) {
1005 printk("GDT-PCI: Initialization error (DPMEM remap error)\n"); 1006 printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
1006 return 0; 1007 return 0;
@@ -1059,7 +1060,7 @@ static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
1059 } 1060 }
1060 gdth_delay(1); 1061 gdth_delay(1);
1061 } 1062 }
1062 prot_ver = (unchar)readl(&dp6c_ptr->u.ic.S_Info[0]); 1063 prot_ver = (u8)readl(&dp6c_ptr->u.ic.S_Info[0]);
1063 writeb(0, &dp6c_ptr->u.ic.Status); 1064 writeb(0, &dp6c_ptr->u.ic.Status);
1064 if (prot_ver != PROTOCOL_VERSION) { 1065 if (prot_ver != PROTOCOL_VERSION) {
1065 printk("GDT-PCI: Illegal protocol version\n"); 1066 printk("GDT-PCI: Illegal protocol version\n");
@@ -1128,7 +1129,7 @@ static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
1128 found = FALSE; 1129 found = FALSE;
1129 for (i = 0xC8000; i < 0xE8000; i += 0x4000) { 1130 for (i = 0xC8000; i < 0xE8000; i += 0x4000) {
1130 iounmap(ha->brd); 1131 iounmap(ha->brd);
1131 ha->brd = ioremap(i, sizeof(ushort)); 1132 ha->brd = ioremap(i, sizeof(u16));
1132 if (ha->brd == NULL) { 1133 if (ha->brd == NULL) {
1133 printk("GDT-PCI: Initialization error (DPMEM remap error)\n"); 1134 printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
1134 return 0; 1135 return 0;
@@ -1180,7 +1181,7 @@ static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
1180 } 1181 }
1181 gdth_delay(1); 1182 gdth_delay(1);
1182 } 1183 }
1183 prot_ver = (unchar)readl(&dp6m_ptr->u.ic.S_Info[0]); 1184 prot_ver = (u8)readl(&dp6m_ptr->u.ic.S_Info[0]);
1184 writeb(0, &dp6m_ptr->u.ic.S_Status); 1185 writeb(0, &dp6m_ptr->u.ic.S_Status);
1185 if (prot_ver != PROTOCOL_VERSION) { 1186 if (prot_ver != PROTOCOL_VERSION) {
1186 printk("GDT-PCI: Illegal protocol version\n"); 1187 printk("GDT-PCI: Illegal protocol version\n");
@@ -1223,7 +1224,7 @@ static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
1223 } 1224 }
1224 gdth_delay(1); 1225 gdth_delay(1);
1225 } 1226 }
1226 prot_ver = (unchar)(readl(&dp6m_ptr->u.ic.S_Info[0]) >> 16); 1227 prot_ver = (u8)(readl(&dp6m_ptr->u.ic.S_Info[0]) >> 16);
1227 writeb(0, &dp6m_ptr->u.ic.S_Status); 1228 writeb(0, &dp6m_ptr->u.ic.S_Status);
1228 if (prot_ver < 0x2b) /* FW < x.43: no 64-bit DMA support */ 1229 if (prot_ver < 0x2b) /* FW < x.43: no 64-bit DMA support */
1229 ha->dma64_support = 0; 1230 ha->dma64_support = 0;
@@ -1239,7 +1240,7 @@ static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
1239 1240
1240static void __devinit gdth_enable_int(gdth_ha_str *ha) 1241static void __devinit gdth_enable_int(gdth_ha_str *ha)
1241{ 1242{
1242 ulong flags; 1243 unsigned long flags;
1243 gdt2_dpram_str __iomem *dp2_ptr; 1244 gdt2_dpram_str __iomem *dp2_ptr;
1244 gdt6_dpram_str __iomem *dp6_ptr; 1245 gdt6_dpram_str __iomem *dp6_ptr;
1245 gdt6m_dpram_str __iomem *dp6m_ptr; 1246 gdt6m_dpram_str __iomem *dp6m_ptr;
@@ -1274,14 +1275,14 @@ static void __devinit gdth_enable_int(gdth_ha_str *ha)
1274} 1275}
1275 1276
1276/* return IStatus if interrupt was from this card else 0 */ 1277/* return IStatus if interrupt was from this card else 0 */
1277static unchar gdth_get_status(gdth_ha_str *ha) 1278static u8 gdth_get_status(gdth_ha_str *ha)
1278{ 1279{
1279 unchar IStatus = 0; 1280 u8 IStatus = 0;
1280 1281
1281 TRACE(("gdth_get_status() irq %d ctr_count %d\n", ha->irq, gdth_ctr_count)); 1282 TRACE(("gdth_get_status() irq %d ctr_count %d\n", ha->irq, gdth_ctr_count));
1282 1283
1283 if (ha->type == GDT_EISA) 1284 if (ha->type == GDT_EISA)
1284 IStatus = inb((ushort)ha->bmic + EDOORREG); 1285 IStatus = inb((u16)ha->bmic + EDOORREG);
1285 else if (ha->type == GDT_ISA) 1286 else if (ha->type == GDT_ISA)
1286 IStatus = 1287 IStatus =
1287 readb(&((gdt2_dpram_str __iomem *)ha->brd)->u.ic.Cmd_Index); 1288 readb(&((gdt2_dpram_str __iomem *)ha->brd)->u.ic.Cmd_Index);
@@ -1329,7 +1330,7 @@ static int gdth_get_cmd_index(gdth_ha_str *ha)
1329 if (ha->cmd_tab[i].cmnd == UNUSED_CMND) { 1330 if (ha->cmd_tab[i].cmnd == UNUSED_CMND) {
1330 ha->cmd_tab[i].cmnd = ha->pccb->RequestBuffer; 1331 ha->cmd_tab[i].cmnd = ha->pccb->RequestBuffer;
1331 ha->cmd_tab[i].service = ha->pccb->Service; 1332 ha->cmd_tab[i].service = ha->pccb->Service;
1332 ha->pccb->CommandIndex = (ulong32)i+2; 1333 ha->pccb->CommandIndex = (u32)i+2;
1333 return (i+2); 1334 return (i+2);
1334 } 1335 }
1335 } 1336 }
@@ -1362,7 +1363,7 @@ static void gdth_copy_command(gdth_ha_str *ha)
1362 register gdt6c_dpram_str __iomem *dp6c_ptr; 1363 register gdt6c_dpram_str __iomem *dp6c_ptr;
1363 gdt6_dpram_str __iomem *dp6_ptr; 1364 gdt6_dpram_str __iomem *dp6_ptr;
1364 gdt2_dpram_str __iomem *dp2_ptr; 1365 gdt2_dpram_str __iomem *dp2_ptr;
1365 ushort cp_count,dp_offset,cmd_no; 1366 u16 cp_count,dp_offset,cmd_no;
1366 1367
1367 TRACE(("gdth_copy_command() hanum %d\n", ha->hanum)); 1368 TRACE(("gdth_copy_command() hanum %d\n", ha->hanum));
1368 1369
@@ -1386,28 +1387,28 @@ static void gdth_copy_command(gdth_ha_str *ha)
1386 dp2_ptr = ha->brd; 1387 dp2_ptr = ha->brd;
1387 writew(dp_offset + DPMEM_COMMAND_OFFSET, 1388 writew(dp_offset + DPMEM_COMMAND_OFFSET,
1388 &dp2_ptr->u.ic.comm_queue[cmd_no].offset); 1389 &dp2_ptr->u.ic.comm_queue[cmd_no].offset);
1389 writew((ushort)cmd_ptr->Service, 1390 writew((u16)cmd_ptr->Service,
1390 &dp2_ptr->u.ic.comm_queue[cmd_no].serv_id); 1391 &dp2_ptr->u.ic.comm_queue[cmd_no].serv_id);
1391 memcpy_toio(&dp2_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count); 1392 memcpy_toio(&dp2_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
1392 } else if (ha->type == GDT_PCI) { 1393 } else if (ha->type == GDT_PCI) {
1393 dp6_ptr = ha->brd; 1394 dp6_ptr = ha->brd;
1394 writew(dp_offset + DPMEM_COMMAND_OFFSET, 1395 writew(dp_offset + DPMEM_COMMAND_OFFSET,
1395 &dp6_ptr->u.ic.comm_queue[cmd_no].offset); 1396 &dp6_ptr->u.ic.comm_queue[cmd_no].offset);
1396 writew((ushort)cmd_ptr->Service, 1397 writew((u16)cmd_ptr->Service,
1397 &dp6_ptr->u.ic.comm_queue[cmd_no].serv_id); 1398 &dp6_ptr->u.ic.comm_queue[cmd_no].serv_id);
1398 memcpy_toio(&dp6_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count); 1399 memcpy_toio(&dp6_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
1399 } else if (ha->type == GDT_PCINEW) { 1400 } else if (ha->type == GDT_PCINEW) {
1400 dp6c_ptr = ha->brd; 1401 dp6c_ptr = ha->brd;
1401 writew(dp_offset + DPMEM_COMMAND_OFFSET, 1402 writew(dp_offset + DPMEM_COMMAND_OFFSET,
1402 &dp6c_ptr->u.ic.comm_queue[cmd_no].offset); 1403 &dp6c_ptr->u.ic.comm_queue[cmd_no].offset);
1403 writew((ushort)cmd_ptr->Service, 1404 writew((u16)cmd_ptr->Service,
1404 &dp6c_ptr->u.ic.comm_queue[cmd_no].serv_id); 1405 &dp6c_ptr->u.ic.comm_queue[cmd_no].serv_id);
1405 memcpy_toio(&dp6c_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count); 1406 memcpy_toio(&dp6c_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
1406 } else if (ha->type == GDT_PCIMPR) { 1407 } else if (ha->type == GDT_PCIMPR) {
1407 dp6m_ptr = ha->brd; 1408 dp6m_ptr = ha->brd;
1408 writew(dp_offset + DPMEM_COMMAND_OFFSET, 1409 writew(dp_offset + DPMEM_COMMAND_OFFSET,
1409 &dp6m_ptr->u.ic.comm_queue[cmd_no].offset); 1410 &dp6m_ptr->u.ic.comm_queue[cmd_no].offset);
1410 writew((ushort)cmd_ptr->Service, 1411 writew((u16)cmd_ptr->Service,
1411 &dp6m_ptr->u.ic.comm_queue[cmd_no].serv_id); 1412 &dp6m_ptr->u.ic.comm_queue[cmd_no].serv_id);
1412 memcpy_toio(&dp6m_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count); 1413 memcpy_toio(&dp6m_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
1413 } 1414 }
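Note on the block above: the board's dual-ported RAM is an ioremap()ed __iomem region, so every access goes through writew()/memcpy_toio() rather than plain stores or memcpy(), which lets Sparse check address-space correctness. A minimal sketch of one queue-slot post (QUEUE_REG and CMD_AREA are hypothetical stand-ins for the per-board struct fields):

    void __iomem *dp = ha->brd;                     /* mapped DPRAM */
    writew(dp_offset + DPMEM_COMMAND_OFFSET, dp + QUEUE_REG);
    writew((u16)cmd_ptr->Service, dp + QUEUE_REG + 2);
    memcpy_toio(dp + CMD_AREA + dp_offset, cmd_ptr, cp_count);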
@@ -1420,14 +1421,14 @@ static void gdth_release_event(gdth_ha_str *ha)
1420 1421
1421#ifdef GDTH_STATISTICS 1422#ifdef GDTH_STATISTICS
1422 { 1423 {
1423 ulong32 i,j; 1424 u32 i,j;
1424 for (i=0,j=0; j<GDTH_MAXCMDS; ++j) { 1425 for (i=0,j=0; j<GDTH_MAXCMDS; ++j) {
1425 if (ha->cmd_tab[j].cmnd != UNUSED_CMND) 1426 if (ha->cmd_tab[j].cmnd != UNUSED_CMND)
1426 ++i; 1427 ++i;
1427 } 1428 }
1428 if (max_index < i) { 1429 if (max_index < i) {
1429 max_index = i; 1430 max_index = i;
1430 TRACE3(("GDT: max_index = %d\n",(ushort)i)); 1431 TRACE3(("GDT: max_index = %d\n",(u16)i));
1431 } 1432 }
1432 } 1433 }
1433#endif 1434#endif
@@ -1450,7 +1451,7 @@ static void gdth_release_event(gdth_ha_str *ha)
1450 } 1451 }
1451} 1452}
1452 1453
1453static int gdth_wait(gdth_ha_str *ha, int index, ulong32 time) 1454static int gdth_wait(gdth_ha_str *ha, int index, u32 time)
1454{ 1455{
1455 int answer_found = FALSE; 1456 int answer_found = FALSE;
1456 int wait_index = 0; 1457 int wait_index = 0;
@@ -1476,8 +1477,8 @@ static int gdth_wait(gdth_ha_str *ha, int index, ulong32 time)
1476} 1477}
1477 1478
1478 1479
1479static int gdth_internal_cmd(gdth_ha_str *ha, unchar service, ushort opcode, 1480static int gdth_internal_cmd(gdth_ha_str *ha, u8 service, u16 opcode,
1480 ulong32 p1, ulong64 p2, ulong64 p3) 1481 u32 p1, u64 p2, u64 p3)
1481{ 1482{
1482 register gdth_cmd_str *cmd_ptr; 1483 register gdth_cmd_str *cmd_ptr;
1483 int retries,index; 1484 int retries,index;
@@ -1501,35 +1502,35 @@ static int gdth_internal_cmd(gdth_ha_str *ha, unchar service, ushort opcode,
1501 if (service == CACHESERVICE) { 1502 if (service == CACHESERVICE) {
1502 if (opcode == GDT_IOCTL) { 1503 if (opcode == GDT_IOCTL) {
1503 cmd_ptr->u.ioctl.subfunc = p1; 1504 cmd_ptr->u.ioctl.subfunc = p1;
1504 cmd_ptr->u.ioctl.channel = (ulong32)p2; 1505 cmd_ptr->u.ioctl.channel = (u32)p2;
1505 cmd_ptr->u.ioctl.param_size = (ushort)p3; 1506 cmd_ptr->u.ioctl.param_size = (u16)p3;
1506 cmd_ptr->u.ioctl.p_param = ha->scratch_phys; 1507 cmd_ptr->u.ioctl.p_param = ha->scratch_phys;
1507 } else { 1508 } else {
1508 if (ha->cache_feat & GDT_64BIT) { 1509 if (ha->cache_feat & GDT_64BIT) {
1509 cmd_ptr->u.cache64.DeviceNo = (ushort)p1; 1510 cmd_ptr->u.cache64.DeviceNo = (u16)p1;
1510 cmd_ptr->u.cache64.BlockNo = p2; 1511 cmd_ptr->u.cache64.BlockNo = p2;
1511 } else { 1512 } else {
1512 cmd_ptr->u.cache.DeviceNo = (ushort)p1; 1513 cmd_ptr->u.cache.DeviceNo = (u16)p1;
1513 cmd_ptr->u.cache.BlockNo = (ulong32)p2; 1514 cmd_ptr->u.cache.BlockNo = (u32)p2;
1514 } 1515 }
1515 } 1516 }
1516 } else if (service == SCSIRAWSERVICE) { 1517 } else if (service == SCSIRAWSERVICE) {
1517 if (ha->raw_feat & GDT_64BIT) { 1518 if (ha->raw_feat & GDT_64BIT) {
1518 cmd_ptr->u.raw64.direction = p1; 1519 cmd_ptr->u.raw64.direction = p1;
1519 cmd_ptr->u.raw64.bus = (unchar)p2; 1520 cmd_ptr->u.raw64.bus = (u8)p2;
1520 cmd_ptr->u.raw64.target = (unchar)p3; 1521 cmd_ptr->u.raw64.target = (u8)p3;
1521 cmd_ptr->u.raw64.lun = (unchar)(p3 >> 8); 1522 cmd_ptr->u.raw64.lun = (u8)(p3 >> 8);
1522 } else { 1523 } else {
1523 cmd_ptr->u.raw.direction = p1; 1524 cmd_ptr->u.raw.direction = p1;
1524 cmd_ptr->u.raw.bus = (unchar)p2; 1525 cmd_ptr->u.raw.bus = (u8)p2;
1525 cmd_ptr->u.raw.target = (unchar)p3; 1526 cmd_ptr->u.raw.target = (u8)p3;
1526 cmd_ptr->u.raw.lun = (unchar)(p3 >> 8); 1527 cmd_ptr->u.raw.lun = (u8)(p3 >> 8);
1527 } 1528 }
1528 } else if (service == SCREENSERVICE) { 1529 } else if (service == SCREENSERVICE) {
1529 if (opcode == GDT_REALTIME) { 1530 if (opcode == GDT_REALTIME) {
1530 *(ulong32 *)&cmd_ptr->u.screen.su.data[0] = p1; 1531 *(u32 *)&cmd_ptr->u.screen.su.data[0] = p1;
1531 *(ulong32 *)&cmd_ptr->u.screen.su.data[4] = (ulong32)p2; 1532 *(u32 *)&cmd_ptr->u.screen.su.data[4] = (u32)p2;
1532 *(ulong32 *)&cmd_ptr->u.screen.su.data[8] = (ulong32)p3; 1533 *(u32 *)&cmd_ptr->u.screen.su.data[8] = (u32)p3;
1533 } 1534 }
1534 } 1535 }
1535 ha->cmd_len = sizeof(gdth_cmd_str); 1536 ha->cmd_len = sizeof(gdth_cmd_str);
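The p1/p2/p3 scalars above are overloaded per service; for SCSIRAWSERVICE, p3 carries both target and LUN. A sketch of the packing a caller would use, mirroring the unpacking shown (illustrative):

    u64 p3 = ((u64)lun << 8) | target;  /* target: low byte, LUN: next byte */
    /* ...unpacked by gdth_internal_cmd() as: */
    u8 t = (u8)p3;
    u8 l = (u8)(p3 >> 8);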
@@ -1555,9 +1556,9 @@ static int gdth_internal_cmd(gdth_ha_str *ha, unchar service, ushort opcode,
1555 1556
1556static int __devinit gdth_search_drives(gdth_ha_str *ha) 1557static int __devinit gdth_search_drives(gdth_ha_str *ha)
1557{ 1558{
1558 ushort cdev_cnt, i; 1559 u16 cdev_cnt, i;
1559 int ok; 1560 int ok;
1560 ulong32 bus_no, drv_cnt, drv_no, j; 1561 u32 bus_no, drv_cnt, drv_no, j;
1561 gdth_getch_str *chn; 1562 gdth_getch_str *chn;
1562 gdth_drlist_str *drl; 1563 gdth_drlist_str *drl;
1563 gdth_iochan_str *ioc; 1564 gdth_iochan_str *ioc;
@@ -1570,8 +1571,8 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha)
1570#endif 1571#endif
1571 1572
1572#ifdef GDTH_RTC 1573#ifdef GDTH_RTC
1573 unchar rtc[12]; 1574 u8 rtc[12];
1574 ulong flags; 1575 unsigned long flags;
1575#endif 1576#endif
1576 1577
1577 TRACE(("gdth_search_drives() hanum %d\n", ha->hanum)); 1578 TRACE(("gdth_search_drives() hanum %d\n", ha->hanum));
@@ -1584,7 +1585,7 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha)
1584 if (ok) 1585 if (ok)
1585 ha->screen_feat = GDT_64BIT; 1586 ha->screen_feat = GDT_64BIT;
1586 } 1587 }
1587 if (force_dma32 || (!ok && ha->status == (ushort)S_NOFUNC)) 1588 if (force_dma32 || (!ok && ha->status == (u16)S_NOFUNC))
1588 ok = gdth_internal_cmd(ha, SCREENSERVICE, GDT_INIT, 0, 0, 0); 1589 ok = gdth_internal_cmd(ha, SCREENSERVICE, GDT_INIT, 0, 0, 0);
1589 if (!ok) { 1590 if (!ok) {
1590 printk("GDT-HA %d: Initialization error screen service (code %d)\n", 1591 printk("GDT-HA %d: Initialization error screen service (code %d)\n",
@@ -1609,11 +1610,11 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha)
1609 rtc[j] = CMOS_READ(j); 1610 rtc[j] = CMOS_READ(j);
1610 } while (rtc[0] != CMOS_READ(0)); 1611 } while (rtc[0] != CMOS_READ(0));
1611 spin_unlock_irqrestore(&rtc_lock, flags); 1612 spin_unlock_irqrestore(&rtc_lock, flags);
1612 TRACE2(("gdth_search_drives(): RTC: %x/%x/%x\n",*(ulong32 *)&rtc[0], 1613 TRACE2(("gdth_search_drives(): RTC: %x/%x/%x\n",*(u32 *)&rtc[0],
1613 *(ulong32 *)&rtc[4], *(ulong32 *)&rtc[8])); 1614 *(u32 *)&rtc[4], *(u32 *)&rtc[8]));
1614 /* 3. send to controller firmware */ 1615 /* 3. send to controller firmware */
1615 gdth_internal_cmd(ha, SCREENSERVICE, GDT_REALTIME, *(ulong32 *)&rtc[0], 1616 gdth_internal_cmd(ha, SCREENSERVICE, GDT_REALTIME, *(u32 *)&rtc[0],
1616 *(ulong32 *)&rtc[4], *(ulong32 *)&rtc[8]); 1617 *(u32 *)&rtc[4], *(u32 *)&rtc[8]);
1617#endif 1618#endif
1618 1619
1619 /* unfreeze all IOs */ 1620 /* unfreeze all IOs */
@@ -1627,7 +1628,7 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha)
1627 if (ok) 1628 if (ok)
1628 ha->cache_feat = GDT_64BIT; 1629 ha->cache_feat = GDT_64BIT;
1629 } 1630 }
1630 if (force_dma32 || (!ok && ha->status == (ushort)S_NOFUNC)) 1631 if (force_dma32 || (!ok && ha->status == (u16)S_NOFUNC))
1631 ok = gdth_internal_cmd(ha, CACHESERVICE, GDT_INIT, LINUX_OS, 0, 0); 1632 ok = gdth_internal_cmd(ha, CACHESERVICE, GDT_INIT, LINUX_OS, 0, 0);
1632 if (!ok) { 1633 if (!ok) {
1633 printk("GDT-HA %d: Initialization error cache service (code %d)\n", 1634 printk("GDT-HA %d: Initialization error cache service (code %d)\n",
@@ -1635,7 +1636,7 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha)
1635 return 0; 1636 return 0;
1636 } 1637 }
1637 TRACE2(("gdth_search_drives(): CACHESERVICE initialized\n")); 1638 TRACE2(("gdth_search_drives(): CACHESERVICE initialized\n"));
1638 cdev_cnt = (ushort)ha->info; 1639 cdev_cnt = (u16)ha->info;
1639 ha->fw_vers = ha->service; 1640 ha->fw_vers = ha->service;
1640 1641
1641#ifdef INT_COAL 1642#ifdef INT_COAL
@@ -1644,7 +1645,7 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha)
1644 pmod = (gdth_perf_modes *)ha->pscratch; 1645 pmod = (gdth_perf_modes *)ha->pscratch;
1645 pmod->version = 1; 1646 pmod->version = 1;
1646 pmod->st_mode = 1; /* enable one status buffer */ 1647 pmod->st_mode = 1; /* enable one status buffer */
1647 *((ulong64 *)&pmod->st_buff_addr1) = ha->coal_stat_phys; 1648 *((u64 *)&pmod->st_buff_addr1) = ha->coal_stat_phys;
1648 pmod->st_buff_indx1 = COALINDEX; 1649 pmod->st_buff_indx1 = COALINDEX;
1649 pmod->st_buff_addr2 = 0; 1650 pmod->st_buff_addr2 = 0;
1650 pmod->st_buff_u_addr2 = 0; 1651 pmod->st_buff_u_addr2 = 0;
@@ -1705,7 +1706,7 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha)
1705 else 1706 else
1706 ha->bus_id[bus_no] = 0xff; 1707 ha->bus_id[bus_no] = 0xff;
1707 } 1708 }
1708 ha->bus_cnt = (unchar)bus_no; 1709 ha->bus_cnt = (u8)bus_no;
1709 } 1710 }
1710 TRACE2(("gdth_search_drives() %d channels\n",ha->bus_cnt)); 1711 TRACE2(("gdth_search_drives() %d channels\n",ha->bus_cnt));
1711 1712
@@ -1789,12 +1790,12 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha)
1789 1790
1790 /* logical drives */ 1791 /* logical drives */
1791 if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, CACHE_DRV_CNT, 1792 if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, CACHE_DRV_CNT,
1792 INVALID_CHANNEL,sizeof(ulong32))) { 1793 INVALID_CHANNEL,sizeof(u32))) {
1793 drv_cnt = *(ulong32 *)ha->pscratch; 1794 drv_cnt = *(u32 *)ha->pscratch;
1794 if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, CACHE_DRV_LIST, 1795 if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, CACHE_DRV_LIST,
1795 INVALID_CHANNEL,drv_cnt * sizeof(ulong32))) { 1796 INVALID_CHANNEL,drv_cnt * sizeof(u32))) {
1796 for (j = 0; j < drv_cnt; ++j) { 1797 for (j = 0; j < drv_cnt; ++j) {
1797 drv_no = ((ulong32 *)ha->pscratch)[j]; 1798 drv_no = ((u32 *)ha->pscratch)[j];
1798 if (drv_no < MAX_LDRIVES) { 1799 if (drv_no < MAX_LDRIVES) {
1799 ha->hdr[drv_no].is_logdrv = TRUE; 1800 ha->hdr[drv_no].is_logdrv = TRUE;
1800 TRACE2(("Drive %d is log. drive\n",drv_no)); 1801 TRACE2(("Drive %d is log. drive\n",drv_no));
@@ -1838,7 +1839,7 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha)
1838 if (ok) 1839 if (ok)
1839 ha->raw_feat = GDT_64BIT; 1840 ha->raw_feat = GDT_64BIT;
1840 } 1841 }
1841 if (force_dma32 || (!ok && ha->status == (ushort)S_NOFUNC)) 1842 if (force_dma32 || (!ok && ha->status == (u16)S_NOFUNC))
1842 ok = gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_INIT, 0, 0, 0); 1843 ok = gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_INIT, 0, 0, 0);
1843 if (!ok) { 1844 if (!ok) {
1844 printk("GDT-HA %d: Initialization error raw service (code %d)\n", 1845 printk("GDT-HA %d: Initialization error raw service (code %d)\n",
@@ -1854,7 +1855,7 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha)
1854 if (gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_GET_FEAT, 0, 0, 0)) { 1855 if (gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_GET_FEAT, 0, 0, 0)) {
1855 TRACE2(("gdth_search_dr(): get feat RAWSERVICE %d\n", 1856 TRACE2(("gdth_search_dr(): get feat RAWSERVICE %d\n",
1856 ha->info)); 1857 ha->info));
1857 ha->raw_feat |= (ushort)ha->info; 1858 ha->raw_feat |= (u16)ha->info;
1858 } 1859 }
1859 } 1860 }
1860 1861
@@ -1865,7 +1866,7 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha)
1865 if (gdth_internal_cmd(ha, CACHESERVICE, GDT_GET_FEAT, 0, 0, 0)) { 1866 if (gdth_internal_cmd(ha, CACHESERVICE, GDT_GET_FEAT, 0, 0, 0)) {
1866 TRACE2(("gdth_search_dr(): get feat CACHESERV. %d\n", 1867 TRACE2(("gdth_search_dr(): get feat CACHESERV. %d\n",
1867 ha->info)); 1868 ha->info));
1868 ha->cache_feat |= (ushort)ha->info; 1869 ha->cache_feat |= (u16)ha->info;
1869 } 1870 }
1870 } 1871 }
1871 1872
@@ -1923,9 +1924,9 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha)
1923 return 1; 1924 return 1;
1924} 1925}
1925 1926
1926static int gdth_analyse_hdrive(gdth_ha_str *ha, ushort hdrive) 1927static int gdth_analyse_hdrive(gdth_ha_str *ha, u16 hdrive)
1927{ 1928{
1928 ulong32 drv_cyls; 1929 u32 drv_cyls;
1929 int drv_hds, drv_secs; 1930 int drv_hds, drv_secs;
1930 1931
1931 TRACE(("gdth_analyse_hdrive() hanum %d drive %d\n", ha->hanum, hdrive)); 1932 TRACE(("gdth_analyse_hdrive() hanum %d drive %d\n", ha->hanum, hdrive));
@@ -1944,17 +1945,17 @@ static int gdth_analyse_hdrive(gdth_ha_str *ha, ushort hdrive)
1944 } else { 1945 } else {
1945 drv_hds = ha->info2 & 0xff; 1946 drv_hds = ha->info2 & 0xff;
1946 drv_secs = (ha->info2 >> 8) & 0xff; 1947 drv_secs = (ha->info2 >> 8) & 0xff;
1947 drv_cyls = (ulong32)ha->hdr[hdrive].size / drv_hds / drv_secs; 1948 drv_cyls = (u32)ha->hdr[hdrive].size / drv_hds / drv_secs;
1948 } 1949 }
1949 ha->hdr[hdrive].heads = (unchar)drv_hds; 1950 ha->hdr[hdrive].heads = (u8)drv_hds;
1950 ha->hdr[hdrive].secs = (unchar)drv_secs; 1951 ha->hdr[hdrive].secs = (u8)drv_secs;
1951 /* round size */ 1952 /* round size */
1952 ha->hdr[hdrive].size = drv_cyls * drv_hds * drv_secs; 1953 ha->hdr[hdrive].size = drv_cyls * drv_hds * drv_secs;
1953 1954
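Worked example of the rounding above, assuming the common translated geometry of 255 heads and 63 sectors (16065 blocks per cylinder):

    /* size = 1000005 blocks:
     *   drv_cyls = 1000005 / 255 / 63 = 62          (integer division)
     *   rounded  = 62 * 255 * 63      = 996030 blocks
     * i.e. capacity is truncated to whole cylinders. */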
1954 if (ha->cache_feat & GDT_64BIT) { 1955 if (ha->cache_feat & GDT_64BIT) {
1955 if (gdth_internal_cmd(ha, CACHESERVICE, GDT_X_INFO, hdrive, 0, 0) 1956 if (gdth_internal_cmd(ha, CACHESERVICE, GDT_X_INFO, hdrive, 0, 0)
1956 && ha->info2 != 0) { 1957 && ha->info2 != 0) {
1957 ha->hdr[hdrive].size = ((ulong64)ha->info2 << 32) | ha->info; 1958 ha->hdr[hdrive].size = ((u64)ha->info2 << 32) | ha->info;
1958 } 1959 }
1959 } 1960 }
1960 TRACE2(("gdth_search_dr() cdr. %d size %d hds %d scs %d\n", 1961 TRACE2(("gdth_search_dr() cdr. %d size %d hds %d scs %d\n",
@@ -1964,7 +1965,7 @@ static int gdth_analyse_hdrive(gdth_ha_str *ha, ushort hdrive)
1964 if (gdth_internal_cmd(ha, CACHESERVICE, GDT_DEVTYPE, hdrive, 0, 0)) { 1965 if (gdth_internal_cmd(ha, CACHESERVICE, GDT_DEVTYPE, hdrive, 0, 0)) {
1965 TRACE2(("gdth_search_dr() cache drive %d devtype %d\n", 1966 TRACE2(("gdth_search_dr() cache drive %d devtype %d\n",
1966 hdrive,ha->info)); 1967 hdrive,ha->info));
1967 ha->hdr[hdrive].devtype = (ushort)ha->info; 1968 ha->hdr[hdrive].devtype = (u16)ha->info;
1968 } 1969 }
1969 1970
1970 /* cluster info */ 1971 /* cluster info */
@@ -1972,14 +1973,14 @@ static int gdth_analyse_hdrive(gdth_ha_str *ha, ushort hdrive)
1972 TRACE2(("gdth_search_dr() cache drive %d cluster info %d\n", 1973 TRACE2(("gdth_search_dr() cache drive %d cluster info %d\n",
1973 hdrive,ha->info)); 1974 hdrive,ha->info));
1974 if (!shared_access) 1975 if (!shared_access)
1975 ha->hdr[hdrive].cluster_type = (unchar)ha->info; 1976 ha->hdr[hdrive].cluster_type = (u8)ha->info;
1976 } 1977 }
1977 1978
1978 /* R/W attributes */ 1979 /* R/W attributes */
1979 if (gdth_internal_cmd(ha, CACHESERVICE, GDT_RW_ATTRIBS, hdrive, 0, 0)) { 1980 if (gdth_internal_cmd(ha, CACHESERVICE, GDT_RW_ATTRIBS, hdrive, 0, 0)) {
1980 TRACE2(("gdth_search_dr() cache drive %d r/w attrib. %d\n", 1981 TRACE2(("gdth_search_dr() cache drive %d r/w attrib. %d\n",
1981 hdrive,ha->info)); 1982 hdrive,ha->info));
1982 ha->hdr[hdrive].rw_attribs = (unchar)ha->info; 1983 ha->hdr[hdrive].rw_attribs = (u8)ha->info;
1983 } 1984 }
1984 1985
1985 return 1; 1986 return 1;
@@ -1988,12 +1989,12 @@ static int gdth_analyse_hdrive(gdth_ha_str *ha, ushort hdrive)
1988 1989
1989/* command queueing/sending functions */ 1990/* command queueing/sending functions */
1990 1991
1991static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar priority) 1992static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 priority)
1992{ 1993{
1993 struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp); 1994 struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
1994 register Scsi_Cmnd *pscp; 1995 register Scsi_Cmnd *pscp;
1995 register Scsi_Cmnd *nscp; 1996 register Scsi_Cmnd *nscp;
1996 ulong flags; 1997 unsigned long flags;
1997 1998
1998 TRACE(("gdth_putq() priority %d\n",priority)); 1999 TRACE(("gdth_putq() priority %d\n",priority));
1999 spin_lock_irqsave(&ha->smp_lock, flags); 2000 spin_lock_irqsave(&ha->smp_lock, flags);
@@ -2023,7 +2024,7 @@ static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar priority)
2023 ++flags; 2024 ++flags;
2024 if (max_rq < flags) { 2025 if (max_rq < flags) {
2025 max_rq = flags; 2026 max_rq = flags;
2026 TRACE3(("GDT: max_rq = %d\n",(ushort)max_rq)); 2027 TRACE3(("GDT: max_rq = %d\n",(u16)max_rq));
2027 } 2028 }
2028#endif 2029#endif
2029} 2030}
@@ -2032,9 +2033,9 @@ static void gdth_next(gdth_ha_str *ha)
2032{ 2033{
2033 register Scsi_Cmnd *pscp; 2034 register Scsi_Cmnd *pscp;
2034 register Scsi_Cmnd *nscp; 2035 register Scsi_Cmnd *nscp;
2035 unchar b, t, l, firsttime; 2036 u8 b, t, l, firsttime;
2036 unchar this_cmd, next_cmd; 2037 u8 this_cmd, next_cmd;
2037 ulong flags = 0; 2038 unsigned long flags = 0;
2038 int cmd_index; 2039 int cmd_index;
2039 2040
2040 TRACE(("gdth_next() hanum %d\n", ha->hanum)); 2041 TRACE(("gdth_next() hanum %d\n", ha->hanum));
@@ -2282,20 +2283,20 @@ static void gdth_next(gdth_ha_str *ha)
2282 * buffers, kmap_atomic() as needed. 2283 * buffers, kmap_atomic() as needed.
2283 */ 2284 */
2284static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp, 2285static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
2285 char *buffer, ushort count) 2286 char *buffer, u16 count)
2286{ 2287{
2287 ushort cpcount,i, max_sg = scsi_sg_count(scp); 2288 u16 cpcount,i, max_sg = scsi_sg_count(scp);
2288 ushort cpsum,cpnow; 2289 u16 cpsum,cpnow;
2289 struct scatterlist *sl; 2290 struct scatterlist *sl;
2290 char *address; 2291 char *address;
2291 2292
2292 cpcount = min_t(ushort, count, scsi_bufflen(scp)); 2293 cpcount = min_t(u16, count, scsi_bufflen(scp));
2293 2294
2294 if (cpcount) { 2295 if (cpcount) {
2295 cpsum=0; 2296 cpsum=0;
2296 scsi_for_each_sg(scp, sl, max_sg, i) { 2297 scsi_for_each_sg(scp, sl, max_sg, i) {
2297 unsigned long flags; 2298 unsigned long flags;
2298 cpnow = (ushort)sl->length; 2299 cpnow = (u16)sl->length;
2299 TRACE(("copy_internal() now %d sum %d count %d %d\n", 2300 TRACE(("copy_internal() now %d sum %d count %d %d\n",
2300 cpnow, cpsum, cpcount, scsi_bufflen(scp))); 2301 cpnow, cpsum, cpcount, scsi_bufflen(scp)));
2301 if (cpsum+cpnow > cpcount) 2302 if (cpsum+cpnow > cpcount)
@@ -2325,7 +2326,7 @@ static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
2325 2326
2326static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp) 2327static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
2327{ 2328{
2328 unchar t; 2329 u8 t;
2329 gdth_inq_data inq; 2330 gdth_inq_data inq;
2330 gdth_rdcap_data rdc; 2331 gdth_rdcap_data rdc;
2331 gdth_sense_data sd; 2332 gdth_sense_data sd;
@@ -2389,7 +2390,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
2389 2390
2390 case READ_CAPACITY: 2391 case READ_CAPACITY:
2391 TRACE2(("Read capacity hdrive %d\n",t)); 2392 TRACE2(("Read capacity hdrive %d\n",t));
2392 if (ha->hdr[t].size > (ulong64)0xffffffff) 2393 if (ha->hdr[t].size > (u64)0xffffffff)
2393 rdc.last_block_no = 0xffffffff; 2394 rdc.last_block_no = 0xffffffff;
2394 else 2395 else
2395 rdc.last_block_no = cpu_to_be32(ha->hdr[t].size-1); 2396 rdc.last_block_no = cpu_to_be32(ha->hdr[t].size-1);
@@ -2425,12 +2426,12 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
2425 return 0; 2426 return 0;
2426} 2427}
2427 2428
2428static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive) 2429static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u16 hdrive)
2429{ 2430{
2430 register gdth_cmd_str *cmdp; 2431 register gdth_cmd_str *cmdp;
2431 struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp); 2432 struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
2432 ulong32 cnt, blockcnt; 2433 u32 cnt, blockcnt;
2433 ulong64 no, blockno; 2434 u64 no, blockno;
2434 int i, cmd_index, read_write, sgcnt, mode64; 2435 int i, cmd_index, read_write, sgcnt, mode64;
2435 2436
2436 cmdp = ha->pccb; 2437 cmdp = ha->pccb;
@@ -2498,17 +2499,17 @@ static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive)
2498 2499
2499 if (read_write) { 2500 if (read_write) {
2500 if (scp->cmd_len == 16) { 2501 if (scp->cmd_len == 16) {
2501 memcpy(&no, &scp->cmnd[2], sizeof(ulong64)); 2502 memcpy(&no, &scp->cmnd[2], sizeof(u64));
2502 blockno = be64_to_cpu(no); 2503 blockno = be64_to_cpu(no);
2503 memcpy(&cnt, &scp->cmnd[10], sizeof(ulong32)); 2504 memcpy(&cnt, &scp->cmnd[10], sizeof(u32));
2504 blockcnt = be32_to_cpu(cnt); 2505 blockcnt = be32_to_cpu(cnt);
2505 } else if (scp->cmd_len == 10) { 2506 } else if (scp->cmd_len == 10) {
2506 memcpy(&no, &scp->cmnd[2], sizeof(ulong32)); 2507 memcpy(&no, &scp->cmnd[2], sizeof(u32));
2507 blockno = be32_to_cpu(no); 2508 blockno = be32_to_cpu(no);
2508 memcpy(&cnt, &scp->cmnd[7], sizeof(ushort)); 2509 memcpy(&cnt, &scp->cmnd[7], sizeof(u16));
2509 blockcnt = be16_to_cpu(cnt); 2510 blockcnt = be16_to_cpu(cnt);
2510 } else { 2511 } else {
2511 memcpy(&no, &scp->cmnd[0], sizeof(ulong32)); 2512 memcpy(&no, &scp->cmnd[0], sizeof(u32));
2512 blockno = be32_to_cpu(no) & 0x001fffffUL; 2513 blockno = be32_to_cpu(no) & 0x001fffffUL;
2513 blockcnt= scp->cmnd[4]==0 ? 0x100 : scp->cmnd[4]; 2514 blockcnt= scp->cmnd[4]==0 ? 0x100 : scp->cmnd[4];
2514 } 2515 }
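The three branches above decode the big-endian LBA and transfer-length fields of the 16-, 10- and 6-byte read/write CDBs:

    /* READ/WRITE(16): LBA = bytes 2..9 (be64), count = bytes 10..13 (be32)
     * READ/WRITE(10): LBA = bytes 2..5 (be32), count = bytes 7..8   (be16)
     * READ/WRITE(6):  LBA = be32(bytes 0..3) & 0x001fffff, count = byte 4,
     *                 where a count of 0 means 256 blocks                 */

The memcpy-then-swab pairs could equally be written with the unaligned-load helpers (a hedged alternative, not what this driver uses):

    #include <asm/unaligned.h>
    u32 blockno10  = get_unaligned_be32(&scp->cmnd[2]);  /* READ(10) LBA   */
    u16 blockcnt10 = get_unaligned_be16(&scp->cmnd[7]);  /* READ(10) count */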
@@ -2516,7 +2517,7 @@ static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive)
2516 cmdp->u.cache64.BlockNo = blockno; 2517 cmdp->u.cache64.BlockNo = blockno;
2517 cmdp->u.cache64.BlockCnt = blockcnt; 2518 cmdp->u.cache64.BlockCnt = blockcnt;
2518 } else { 2519 } else {
2519 cmdp->u.cache.BlockNo = (ulong32)blockno; 2520 cmdp->u.cache.BlockNo = (u32)blockno;
2520 cmdp->u.cache.BlockCnt = blockcnt; 2521 cmdp->u.cache.BlockCnt = blockcnt;
2521 } 2522 }
2522 2523
@@ -2528,12 +2529,12 @@ static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive)
2528 if (mode64) { 2529 if (mode64) {
2529 struct scatterlist *sl; 2530 struct scatterlist *sl;
2530 2531
2531 cmdp->u.cache64.DestAddr= (ulong64)-1; 2532 cmdp->u.cache64.DestAddr= (u64)-1;
2532 cmdp->u.cache64.sg_canz = sgcnt; 2533 cmdp->u.cache64.sg_canz = sgcnt;
2533 scsi_for_each_sg(scp, sl, sgcnt, i) { 2534 scsi_for_each_sg(scp, sl, sgcnt, i) {
2534 cmdp->u.cache64.sg_lst[i].sg_ptr = sg_dma_address(sl); 2535 cmdp->u.cache64.sg_lst[i].sg_ptr = sg_dma_address(sl);
2535#ifdef GDTH_DMA_STATISTICS 2536#ifdef GDTH_DMA_STATISTICS
2536 if (cmdp->u.cache64.sg_lst[i].sg_ptr > (ulong64)0xffffffff) 2537 if (cmdp->u.cache64.sg_lst[i].sg_ptr > (u64)0xffffffff)
2537 ha->dma64_cnt++; 2538 ha->dma64_cnt++;
2538 else 2539 else
2539 ha->dma32_cnt++; 2540 ha->dma32_cnt++;
@@ -2555,8 +2556,8 @@ static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive)
2555 } 2556 }
2556 2557
2557#ifdef GDTH_STATISTICS 2558#ifdef GDTH_STATISTICS
2558 if (max_sg < (ulong32)sgcnt) { 2559 if (max_sg < (u32)sgcnt) {
2559 max_sg = (ulong32)sgcnt; 2560 max_sg = (u32)sgcnt;
2560 TRACE3(("GDT: max_sg = %d\n",max_sg)); 2561 TRACE3(("GDT: max_sg = %d\n",max_sg));
2561 } 2562 }
2562#endif 2563#endif
@@ -2572,7 +2573,7 @@ static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive)
2572 TRACE(("cache cmd: cmd %d blockno. %d, blockcnt %d\n", 2573 TRACE(("cache cmd: cmd %d blockno. %d, blockcnt %d\n",
2573 cmdp->OpCode,cmdp->u.cache64.BlockNo,cmdp->u.cache64.BlockCnt)); 2574 cmdp->OpCode,cmdp->u.cache64.BlockNo,cmdp->u.cache64.BlockCnt));
2574 ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.cache64.sg_lst) + 2575 ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.cache64.sg_lst) +
2575 (ushort)cmdp->u.cache64.sg_canz * sizeof(gdth_sg64_str); 2576 (u16)cmdp->u.cache64.sg_canz * sizeof(gdth_sg64_str);
2576 } else { 2577 } else {
2577 TRACE(("cache cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n", 2578 TRACE(("cache cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n",
2578 cmdp->u.cache.DestAddr,cmdp->u.cache.sg_canz, 2579 cmdp->u.cache.DestAddr,cmdp->u.cache.sg_canz,
@@ -2581,7 +2582,7 @@ static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive)
2581 TRACE(("cache cmd: cmd %d blockno. %d, blockcnt %d\n", 2582 TRACE(("cache cmd: cmd %d blockno. %d, blockcnt %d\n",
2582 cmdp->OpCode,cmdp->u.cache.BlockNo,cmdp->u.cache.BlockCnt)); 2583 cmdp->OpCode,cmdp->u.cache.BlockNo,cmdp->u.cache.BlockCnt));
2583 ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.cache.sg_lst) + 2584 ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.cache.sg_lst) +
2584 (ushort)cmdp->u.cache.sg_canz * sizeof(gdth_sg_str); 2585 (u16)cmdp->u.cache.sg_canz * sizeof(gdth_sg_str);
2585 } 2586 }
2586 if (ha->cmd_len & 3) 2587 if (ha->cmd_len & 3)
2587 ha->cmd_len += (4 - (ha->cmd_len & 3)); 2588 ha->cmd_len += (4 - (ha->cmd_len & 3));
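The two lines above round cmd_len up to a 4-byte multiple; the kernel's ALIGN() macro is the usual one-line spelling of the same thing (equivalent, shown for reference):

    ha->cmd_len = ALIGN(ha->cmd_len, 4);    /* round up to a multiple of 4 */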
@@ -2600,15 +2601,15 @@ static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive)
2600 return cmd_index; 2601 return cmd_index;
2601} 2602}
2602 2603
2603static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar b) 2604static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 b)
2604{ 2605{
2605 register gdth_cmd_str *cmdp; 2606 register gdth_cmd_str *cmdp;
2606 ushort i; 2607 u16 i;
2607 dma_addr_t sense_paddr; 2608 dma_addr_t sense_paddr;
2608 int cmd_index, sgcnt, mode64; 2609 int cmd_index, sgcnt, mode64;
2609 unchar t,l; 2610 u8 t,l;
2610 struct page *page; 2611 struct page *page;
2611 ulong offset; 2612 unsigned long offset;
2612 struct gdth_cmndinfo *cmndinfo; 2613 struct gdth_cmndinfo *cmndinfo;
2613 2614
2614 t = scp->device->id; 2615 t = scp->device->id;
@@ -2654,7 +2655,7 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar b)
2654 2655
2655 } else { 2656 } else {
2656 page = virt_to_page(scp->sense_buffer); 2657 page = virt_to_page(scp->sense_buffer);
2657 offset = (ulong)scp->sense_buffer & ~PAGE_MASK; 2658 offset = (unsigned long)scp->sense_buffer & ~PAGE_MASK;
2658 sense_paddr = pci_map_page(ha->pdev,page,offset, 2659 sense_paddr = pci_map_page(ha->pdev,page,offset,
2659 16,PCI_DMA_FROMDEVICE); 2660 16,PCI_DMA_FROMDEVICE);
2660 2661
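The sense buffer above is mapped with the page+offset form because it is an arbitrary kernel-virtual address inside another structure; the generic 2.6.3x pattern (sketch, names generic):

    struct page *pg   = virt_to_page(buf);                 /* owning page */
    unsigned long off = (unsigned long)buf & ~PAGE_MASK;   /* intra-page  */
    dma_addr_t dma    = pci_map_page(pdev, pg, off, len,
                                     PCI_DMA_FROMDEVICE);  /* device writes */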
@@ -2703,12 +2704,12 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar b)
2703 if (mode64) { 2704 if (mode64) {
2704 struct scatterlist *sl; 2705 struct scatterlist *sl;
2705 2706
2706 cmdp->u.raw64.sdata = (ulong64)-1; 2707 cmdp->u.raw64.sdata = (u64)-1;
2707 cmdp->u.raw64.sg_ranz = sgcnt; 2708 cmdp->u.raw64.sg_ranz = sgcnt;
2708 scsi_for_each_sg(scp, sl, sgcnt, i) { 2709 scsi_for_each_sg(scp, sl, sgcnt, i) {
2709 cmdp->u.raw64.sg_lst[i].sg_ptr = sg_dma_address(sl); 2710 cmdp->u.raw64.sg_lst[i].sg_ptr = sg_dma_address(sl);
2710#ifdef GDTH_DMA_STATISTICS 2711#ifdef GDTH_DMA_STATISTICS
2711 if (cmdp->u.raw64.sg_lst[i].sg_ptr > (ulong64)0xffffffff) 2712 if (cmdp->u.raw64.sg_lst[i].sg_ptr > (u64)0xffffffff)
2712 ha->dma64_cnt++; 2713 ha->dma64_cnt++;
2713 else 2714 else
2714 ha->dma32_cnt++; 2715 ha->dma32_cnt++;
@@ -2744,7 +2745,7 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar b)
2744 cmdp->u.raw64.sg_lst[0].sg_len)); 2745 cmdp->u.raw64.sg_lst[0].sg_len));
2745 /* evaluate command size */ 2746 /* evaluate command size */
2746 ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw64.sg_lst) + 2747 ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw64.sg_lst) +
2747 (ushort)cmdp->u.raw64.sg_ranz * sizeof(gdth_sg64_str); 2748 (u16)cmdp->u.raw64.sg_ranz * sizeof(gdth_sg64_str);
2748 } else { 2749 } else {
2749 TRACE(("raw cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n", 2750 TRACE(("raw cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n",
2750 cmdp->u.raw.sdata,cmdp->u.raw.sg_ranz, 2751 cmdp->u.raw.sdata,cmdp->u.raw.sg_ranz,
@@ -2752,7 +2753,7 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar b)
2752 cmdp->u.raw.sg_lst[0].sg_len)); 2753 cmdp->u.raw.sg_lst[0].sg_len));
2753 /* evaluate command size */ 2754 /* evaluate command size */
2754 ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw.sg_lst) + 2755 ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw.sg_lst) +
2755 (ushort)cmdp->u.raw.sg_ranz * sizeof(gdth_sg_str); 2756 (u16)cmdp->u.raw.sg_ranz * sizeof(gdth_sg_str);
2756 } 2757 }
2757 } 2758 }
2758 /* check space */ 2759 /* check space */
@@ -2802,7 +2803,7 @@ static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
2802 if (cmdp->OpCode == GDT_IOCTL) { 2803 if (cmdp->OpCode == GDT_IOCTL) {
2803 TRACE2(("IOCTL\n")); 2804 TRACE2(("IOCTL\n"));
2804 ha->cmd_len = 2805 ha->cmd_len =
2805 GDTOFFSOF(gdth_cmd_str,u.ioctl.p_param) + sizeof(ulong64); 2806 GDTOFFSOF(gdth_cmd_str,u.ioctl.p_param) + sizeof(u64);
2806 } else if (cmdp->Service == CACHESERVICE) { 2807 } else if (cmdp->Service == CACHESERVICE) {
2807 TRACE2(("cache command %d\n",cmdp->OpCode)); 2808 TRACE2(("cache command %d\n",cmdp->OpCode));
2808 if (ha->cache_feat & GDT_64BIT) 2809 if (ha->cache_feat & GDT_64BIT)
@@ -2840,8 +2841,8 @@ static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
2840 2841
2841 2842
2842/* Controller event handling functions */ 2843/* Controller event handling functions */
2843static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, ushort source, 2844static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, u16 source,
2844 ushort idx, gdth_evt_data *evt) 2845 u16 idx, gdth_evt_data *evt)
2845{ 2846{
2846 gdth_evt_str *e; 2847 gdth_evt_str *e;
2847 struct timeval tv; 2848 struct timeval tv;
@@ -2890,7 +2891,7 @@ static int gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr)
2890{ 2891{
2891 gdth_evt_str *e; 2892 gdth_evt_str *e;
2892 int eindex; 2893 int eindex;
2893 ulong flags; 2894 unsigned long flags;
2894 2895
2895 TRACE2(("gdth_read_event() handle %d\n", handle)); 2896 TRACE2(("gdth_read_event() handle %d\n", handle));
2896 spin_lock_irqsave(&ha->smp_lock, flags); 2897 spin_lock_irqsave(&ha->smp_lock, flags);
@@ -2919,12 +2920,12 @@ static int gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr)
2919} 2920}
2920 2921
2921static void gdth_readapp_event(gdth_ha_str *ha, 2922static void gdth_readapp_event(gdth_ha_str *ha,
2922 unchar application, gdth_evt_str *estr) 2923 u8 application, gdth_evt_str *estr)
2923{ 2924{
2924 gdth_evt_str *e; 2925 gdth_evt_str *e;
2925 int eindex; 2926 int eindex;
2926 ulong flags; 2927 unsigned long flags;
2927 unchar found = FALSE; 2928 u8 found = FALSE;
2928 2929
2929 TRACE2(("gdth_readapp_event() app. %d\n", application)); 2930 TRACE2(("gdth_readapp_event() app. %d\n", application));
2930 spin_lock_irqsave(&ha->smp_lock, flags); 2931 spin_lock_irqsave(&ha->smp_lock, flags);
@@ -2969,9 +2970,9 @@ static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
2969 gdt2_dpram_str __iomem *dp2_ptr; 2970 gdt2_dpram_str __iomem *dp2_ptr;
2970 Scsi_Cmnd *scp; 2971 Scsi_Cmnd *scp;
2971 int rval, i; 2972 int rval, i;
2972 unchar IStatus; 2973 u8 IStatus;
2973 ushort Service; 2974 u16 Service;
2974 ulong flags = 0; 2975 unsigned long flags = 0;
2975#ifdef INT_COAL 2976#ifdef INT_COAL
2976 int coalesced = FALSE; 2977 int coalesced = FALSE;
2977 int next = FALSE; 2978 int next = FALSE;
@@ -3018,7 +3019,7 @@ static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
3018 if (coalesced) { 3019 if (coalesced) {
3019 /* For coalesced requests all status 3020 /* For coalesced requests all status
3020 information is found in the status buffer */ 3021 information is found in the status buffer */
3021 IStatus = (unchar)(pcs->status & 0xff); 3022 IStatus = (u8)(pcs->status & 0xff);
3022 } 3023 }
3023#endif 3024#endif
3024 3025
@@ -3197,7 +3198,7 @@ static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
3197 ++act_int_coal; 3198 ++act_int_coal;
3198 if (act_int_coal > max_int_coal) { 3199 if (act_int_coal > max_int_coal) {
3199 max_int_coal = act_int_coal; 3200 max_int_coal = act_int_coal;
3200 printk("GDT: max_int_coal = %d\n",(ushort)max_int_coal); 3201 printk("GDT: max_int_coal = %d\n",(u16)max_int_coal);
3201 } 3202 }
3202#endif 3203#endif
3203 /* see if there is another status */ 3204 /* see if there is another status */
@@ -3225,12 +3226,12 @@ static irqreturn_t gdth_interrupt(int irq, void *dev_id)
3225 return __gdth_interrupt(ha, false, NULL); 3226 return __gdth_interrupt(ha, false, NULL);
3226} 3227}
3227 3228
3228static int gdth_sync_event(gdth_ha_str *ha, int service, unchar index, 3229static int gdth_sync_event(gdth_ha_str *ha, int service, u8 index,
3229 Scsi_Cmnd *scp) 3230 Scsi_Cmnd *scp)
3230{ 3231{
3231 gdth_msg_str *msg; 3232 gdth_msg_str *msg;
3232 gdth_cmd_str *cmdp; 3233 gdth_cmd_str *cmdp;
3233 unchar b, t; 3234 u8 b, t;
3234 struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp); 3235 struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
3235 3236
3236 cmdp = ha->pccb; 3237 cmdp = ha->pccb;
@@ -3263,7 +3264,7 @@ static int gdth_sync_event(gdth_ha_str *ha, int service, unchar index,
3263 cmdp->u.screen.su.msg.msg_addr = ha->msg_phys; 3264 cmdp->u.screen.su.msg.msg_addr = ha->msg_phys;
3264 ha->cmd_offs_dpmem = 0; 3265 ha->cmd_offs_dpmem = 0;
3265 ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr) 3266 ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr)
3266 + sizeof(ulong64); 3267 + sizeof(u64);
3267 ha->cmd_cnt = 0; 3268 ha->cmd_cnt = 0;
3268 gdth_copy_command(ha); 3269 gdth_copy_command(ha);
3269 gdth_release_event(ha); 3270 gdth_release_event(ha);
@@ -3297,7 +3298,7 @@ static int gdth_sync_event(gdth_ha_str *ha, int service, unchar index,
3297 cmdp->u.screen.su.msg.msg_addr = ha->msg_phys; 3298 cmdp->u.screen.su.msg.msg_addr = ha->msg_phys;
3298 ha->cmd_offs_dpmem = 0; 3299 ha->cmd_offs_dpmem = 0;
3299 ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr) 3300 ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr)
3300 + sizeof(ulong64); 3301 + sizeof(u64);
3301 ha->cmd_cnt = 0; 3302 ha->cmd_cnt = 0;
3302 gdth_copy_command(ha); 3303 gdth_copy_command(ha);
3303 gdth_release_event(ha); 3304 gdth_release_event(ha);
@@ -3335,7 +3336,7 @@ static int gdth_sync_event(gdth_ha_str *ha, int service, unchar index,
3335 cmndinfo->OpCode)); 3336 cmndinfo->OpCode));
3336 /* special commands GDT_CLUST_INFO/GDT_MOUNT ? */ 3337 /* special commands GDT_CLUST_INFO/GDT_MOUNT ? */
3337 if (cmndinfo->OpCode == GDT_CLUST_INFO) { 3338 if (cmndinfo->OpCode == GDT_CLUST_INFO) {
3338 ha->hdr[t].cluster_type = (unchar)ha->info; 3339 ha->hdr[t].cluster_type = (u8)ha->info;
3339 if (!(ha->hdr[t].cluster_type & 3340 if (!(ha->hdr[t].cluster_type &
3340 CLUSTER_MOUNTED)) { 3341 CLUSTER_MOUNTED)) {
3341 /* NOT MOUNTED -> MOUNT */ 3342 /* NOT MOUNTED -> MOUNT */
@@ -3397,7 +3398,7 @@ static int gdth_sync_event(gdth_ha_str *ha, int service, unchar index,
3397 ha->hdr[t].cluster_type &= ~CLUSTER_RESERVED; 3398 ha->hdr[t].cluster_type &= ~CLUSTER_RESERVED;
3398 } 3399 }
3399 memset((char*)scp->sense_buffer,0,16); 3400 memset((char*)scp->sense_buffer,0,16);
3400 if (ha->status == (ushort)S_CACHE_RESERV) { 3401 if (ha->status == (u16)S_CACHE_RESERV) {
3401 scp->result = (DID_OK << 16) | (RESERVATION_CONFLICT << 1); 3402 scp->result = (DID_OK << 16) | (RESERVATION_CONFLICT << 1);
3402 } else { 3403 } else {
3403 scp->sense_buffer[0] = 0x70; 3404 scp->sense_buffer[0] = 0x70;
@@ -3614,16 +3615,16 @@ static int gdth_async_event(gdth_ha_str *ha)
3614 cmdp->u.screen.su.msg.msg_addr = ha->msg_phys; 3615 cmdp->u.screen.su.msg.msg_addr = ha->msg_phys;
3615 ha->cmd_offs_dpmem = 0; 3616 ha->cmd_offs_dpmem = 0;
3616 ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr) 3617 ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr)
3617 + sizeof(ulong64); 3618 + sizeof(u64);
3618 ha->cmd_cnt = 0; 3619 ha->cmd_cnt = 0;
3619 gdth_copy_command(ha); 3620 gdth_copy_command(ha);
3620 if (ha->type == GDT_EISA) 3621 if (ha->type == GDT_EISA)
3621 printk("[EISA slot %d] ",(ushort)ha->brd_phys); 3622 printk("[EISA slot %d] ",(u16)ha->brd_phys);
3622 else if (ha->type == GDT_ISA) 3623 else if (ha->type == GDT_ISA)
3623 printk("[DPMEM 0x%4X] ",(ushort)ha->brd_phys); 3624 printk("[DPMEM 0x%4X] ",(u16)ha->brd_phys);
3624 else 3625 else
3625 printk("[PCI %d/%d] ",(ushort)(ha->brd_phys>>8), 3626 printk("[PCI %d/%d] ",(u16)(ha->brd_phys>>8),
3626 (ushort)((ha->brd_phys>>3)&0x1f)); 3627 (u16)((ha->brd_phys>>3)&0x1f));
3627 gdth_release_event(ha); 3628 gdth_release_event(ha);
3628 } 3629 }
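The printk above decodes ha->brd_phys, which for PCI boards packs the device's location; the field layout is inferred from the print itself, so treat this sketch as an assumption:

    u16 bus  = (u16)(ha->brd_phys >> 8);           /* PCI bus number       */
    u16 slot = (u16)((ha->brd_phys >> 3) & 0x1f);  /* device part of devfn */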
3629 3630
@@ -3640,7 +3641,7 @@ static int gdth_async_event(gdth_ha_str *ha)
3640 ha->dvr.eu.async.service = ha->service; 3641 ha->dvr.eu.async.service = ha->service;
3641 ha->dvr.eu.async.status = ha->status; 3642 ha->dvr.eu.async.status = ha->status;
3642 ha->dvr.eu.async.info = ha->info; 3643 ha->dvr.eu.async.info = ha->info;
3643 *(ulong32 *)ha->dvr.eu.async.scsi_coord = ha->info2; 3644 *(u32 *)ha->dvr.eu.async.scsi_coord = ha->info2;
3644 } 3645 }
3645 gdth_store_event( ha, ES_ASYNC, ha->service, &ha->dvr ); 3646 gdth_store_event( ha, ES_ASYNC, ha->service, &ha->dvr );
3646 gdth_log_event( &ha->dvr, NULL ); 3647 gdth_log_event( &ha->dvr, NULL );
@@ -3648,8 +3649,8 @@ static int gdth_async_event(gdth_ha_str *ha)
3648 /* new host drive from expand? */ 3649 /* new host drive from expand? */
3649 if (ha->service == CACHESERVICE && ha->status == 56) { 3650 if (ha->service == CACHESERVICE && ha->status == 56) {
3650 TRACE2(("gdth_async_event(): new host drive %d created\n", 3651 TRACE2(("gdth_async_event(): new host drive %d created\n",
3651 (ushort)ha->info)); 3652 (u16)ha->info));
3652 /* gdth_analyse_hdrive(hanum, (ushort)ha->info); */ 3653 /* gdth_analyse_hdrive(hanum, (u16)ha->info); */
3653 } 3654 }
3654 } 3655 }
3655 return 1; 3656 return 1;
@@ -3680,13 +3681,13 @@ static void gdth_log_event(gdth_evt_data *dvr, char *buffer)
3680 for (j=0,i=1; i < f[0]; i+=2) { 3681 for (j=0,i=1; i < f[0]; i+=2) {
3681 switch (f[i+1]) { 3682 switch (f[i+1]) {
3682 case 4: 3683 case 4:
3683 stack.b[j++] = *(ulong32*)&dvr->eu.stream[(int)f[i]]; 3684 stack.b[j++] = *(u32*)&dvr->eu.stream[(int)f[i]];
3684 break; 3685 break;
3685 case 2: 3686 case 2:
3686 stack.b[j++] = *(ushort*)&dvr->eu.stream[(int)f[i]]; 3687 stack.b[j++] = *(u16*)&dvr->eu.stream[(int)f[i]];
3687 break; 3688 break;
3688 case 1: 3689 case 1:
3689 stack.b[j++] = *(unchar*)&dvr->eu.stream[(int)f[i]]; 3690 stack.b[j++] = *(u8*)&dvr->eu.stream[(int)f[i]];
3690 break; 3691 break;
3691 default: 3692 default:
3692 break; 3693 break;
@@ -3712,14 +3713,14 @@ static void gdth_log_event(gdth_evt_data *dvr, char *buffer)
3712} 3713}
3713 3714
3714#ifdef GDTH_STATISTICS 3715#ifdef GDTH_STATISTICS
3715static unchar gdth_timer_running; 3716static u8 gdth_timer_running;
3716 3717
3717static void gdth_timeout(ulong data) 3718static void gdth_timeout(unsigned long data)
3718{ 3719{
3719 ulong32 i; 3720 u32 i;
3720 Scsi_Cmnd *nscp; 3721 Scsi_Cmnd *nscp;
3721 gdth_ha_str *ha; 3722 gdth_ha_str *ha;
3722 ulong flags; 3723 unsigned long flags;
3723 3724
3724 if(unlikely(list_empty(&gdth_instances))) { 3725 if(unlikely(list_empty(&gdth_instances))) {
3725 gdth_timer_running = 0; 3726 gdth_timer_running = 0;
@@ -3891,8 +3892,8 @@ static enum blk_eh_timer_return gdth_timed_out(struct scsi_cmnd *scp)
3891{ 3892{
3892 gdth_ha_str *ha = shost_priv(scp->device->host); 3893 gdth_ha_str *ha = shost_priv(scp->device->host);
3893 struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp); 3894 struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
3894 unchar b, t; 3895 u8 b, t;
3895 ulong flags; 3896 unsigned long flags;
3896 enum blk_eh_timer_return retval = BLK_EH_NOT_HANDLED; 3897 enum blk_eh_timer_return retval = BLK_EH_NOT_HANDLED;
3897 3898
3898 TRACE(("%s() cmd 0x%x\n", __func__, scp->cmnd[0])); 3899 TRACE(("%s() cmd 0x%x\n", __func__, scp->cmnd[0]));
@@ -3924,9 +3925,9 @@ static int gdth_eh_bus_reset(Scsi_Cmnd *scp)
3924{ 3925{
3925 gdth_ha_str *ha = shost_priv(scp->device->host); 3926 gdth_ha_str *ha = shost_priv(scp->device->host);
3926 int i; 3927 int i;
3927 ulong flags; 3928 unsigned long flags;
3928 Scsi_Cmnd *cmnd; 3929 Scsi_Cmnd *cmnd;
3929 unchar b; 3930 u8 b;
3930 3931
3931 TRACE2(("gdth_eh_bus_reset()\n")); 3932 TRACE2(("gdth_eh_bus_reset()\n"));
3932 3933
@@ -3974,7 +3975,7 @@ static int gdth_eh_bus_reset(Scsi_Cmnd *scp)
3974 3975
3975static int gdth_bios_param(struct scsi_device *sdev,struct block_device *bdev,sector_t cap,int *ip) 3976static int gdth_bios_param(struct scsi_device *sdev,struct block_device *bdev,sector_t cap,int *ip)
3976{ 3977{
3977 unchar b, t; 3978 u8 b, t;
3978 gdth_ha_str *ha = shost_priv(sdev->host); 3979 gdth_ha_str *ha = shost_priv(sdev->host);
3979 struct scsi_device *sd; 3980 struct scsi_device *sd;
3980 unsigned capacity; 3981 unsigned capacity;
@@ -4062,7 +4063,7 @@ static int ioc_event(void __user *arg)
4062{ 4063{
4063 gdth_ioctl_event evt; 4064 gdth_ioctl_event evt;
4064 gdth_ha_str *ha; 4065 gdth_ha_str *ha;
4065 ulong flags; 4066 unsigned long flags;
4066 4067
4067 if (copy_from_user(&evt, arg, sizeof(gdth_ioctl_event))) 4068 if (copy_from_user(&evt, arg, sizeof(gdth_ioctl_event)))
4068 return -EFAULT; 4069 return -EFAULT;
@@ -4098,8 +4099,8 @@ static int ioc_event(void __user *arg)
4098static int ioc_lockdrv(void __user *arg) 4099static int ioc_lockdrv(void __user *arg)
4099{ 4100{
4100 gdth_ioctl_lockdrv ldrv; 4101 gdth_ioctl_lockdrv ldrv;
4101 unchar i, j; 4102 u8 i, j;
4102 ulong flags; 4103 unsigned long flags;
4103 gdth_ha_str *ha; 4104 gdth_ha_str *ha;
4104 4105
4105 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv))) 4106 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
@@ -4165,7 +4166,7 @@ static int ioc_general(void __user *arg, char *cmnd)
4165{ 4166{
4166 gdth_ioctl_general gen; 4167 gdth_ioctl_general gen;
4167 char *buf = NULL; 4168 char *buf = NULL;
4168 ulong64 paddr; 4169 u64 paddr;
4169 gdth_ha_str *ha; 4170 gdth_ha_str *ha;
4170 int rval; 4171 int rval;
4171 4172
@@ -4194,7 +4195,7 @@ static int ioc_general(void __user *arg, char *cmnd)
4194 gen.command.u.cache64.DeviceNo = gen.command.u.cache.DeviceNo; 4195 gen.command.u.cache64.DeviceNo = gen.command.u.cache.DeviceNo;
4195 /* addresses */ 4196 /* addresses */
4196 if (ha->cache_feat & SCATTER_GATHER) { 4197 if (ha->cache_feat & SCATTER_GATHER) {
4197 gen.command.u.cache64.DestAddr = (ulong64)-1; 4198 gen.command.u.cache64.DestAddr = (u64)-1;
4198 gen.command.u.cache64.sg_canz = 1; 4199 gen.command.u.cache64.sg_canz = 1;
4199 gen.command.u.cache64.sg_lst[0].sg_ptr = paddr; 4200 gen.command.u.cache64.sg_lst[0].sg_ptr = paddr;
4200 gen.command.u.cache64.sg_lst[0].sg_len = gen.data_len; 4201 gen.command.u.cache64.sg_lst[0].sg_len = gen.data_len;
@@ -4207,7 +4208,7 @@ static int ioc_general(void __user *arg, char *cmnd)
4207 if (ha->cache_feat & SCATTER_GATHER) { 4208 if (ha->cache_feat & SCATTER_GATHER) {
4208 gen.command.u.cache.DestAddr = 0xffffffff; 4209 gen.command.u.cache.DestAddr = 0xffffffff;
4209 gen.command.u.cache.sg_canz = 1; 4210 gen.command.u.cache.sg_canz = 1;
4210 gen.command.u.cache.sg_lst[0].sg_ptr = (ulong32)paddr; 4211 gen.command.u.cache.sg_lst[0].sg_ptr = (u32)paddr;
4211 gen.command.u.cache.sg_lst[0].sg_len = gen.data_len; 4212 gen.command.u.cache.sg_lst[0].sg_len = gen.data_len;
4212 gen.command.u.cache.sg_lst[1].sg_len = 0; 4213 gen.command.u.cache.sg_lst[1].sg_len = 0;
4213 } else { 4214 } else {
@@ -4230,7 +4231,7 @@ static int ioc_general(void __user *arg, char *cmnd)
4230 gen.command.u.raw64.direction = gen.command.u.raw.direction; 4231 gen.command.u.raw64.direction = gen.command.u.raw.direction;
4231 /* addresses */ 4232 /* addresses */
4232 if (ha->raw_feat & SCATTER_GATHER) { 4233 if (ha->raw_feat & SCATTER_GATHER) {
4233 gen.command.u.raw64.sdata = (ulong64)-1; 4234 gen.command.u.raw64.sdata = (u64)-1;
4234 gen.command.u.raw64.sg_ranz = 1; 4235 gen.command.u.raw64.sg_ranz = 1;
4235 gen.command.u.raw64.sg_lst[0].sg_ptr = paddr; 4236 gen.command.u.raw64.sg_lst[0].sg_ptr = paddr;
4236 gen.command.u.raw64.sg_lst[0].sg_len = gen.data_len; 4237 gen.command.u.raw64.sg_lst[0].sg_len = gen.data_len;
@@ -4244,14 +4245,14 @@ static int ioc_general(void __user *arg, char *cmnd)
4244 if (ha->raw_feat & SCATTER_GATHER) { 4245 if (ha->raw_feat & SCATTER_GATHER) {
4245 gen.command.u.raw.sdata = 0xffffffff; 4246 gen.command.u.raw.sdata = 0xffffffff;
4246 gen.command.u.raw.sg_ranz = 1; 4247 gen.command.u.raw.sg_ranz = 1;
4247 gen.command.u.raw.sg_lst[0].sg_ptr = (ulong32)paddr; 4248 gen.command.u.raw.sg_lst[0].sg_ptr = (u32)paddr;
4248 gen.command.u.raw.sg_lst[0].sg_len = gen.data_len; 4249 gen.command.u.raw.sg_lst[0].sg_len = gen.data_len;
4249 gen.command.u.raw.sg_lst[1].sg_len = 0; 4250 gen.command.u.raw.sg_lst[1].sg_len = 0;
4250 } else { 4251 } else {
4251 gen.command.u.raw.sdata = paddr; 4252 gen.command.u.raw.sdata = paddr;
4252 gen.command.u.raw.sg_ranz = 0; 4253 gen.command.u.raw.sg_ranz = 0;
4253 } 4254 }
4254 gen.command.u.raw.sense_data = (ulong32)paddr + gen.data_len; 4255 gen.command.u.raw.sense_data = (u32)paddr + gen.data_len;
4255 } 4256 }
4256 } else { 4257 } else {
4257 gdth_ioctl_free(ha, gen.data_len+gen.sense_len, buf, paddr); 4258 gdth_ioctl_free(ha, gen.data_len+gen.sense_len, buf, paddr);
@@ -4283,7 +4284,7 @@ static int ioc_hdrlist(void __user *arg, char *cmnd)
4283 gdth_ioctl_rescan *rsc; 4284 gdth_ioctl_rescan *rsc;
4284 gdth_cmd_str *cmd; 4285 gdth_cmd_str *cmd;
4285 gdth_ha_str *ha; 4286 gdth_ha_str *ha;
4286 unchar i; 4287 u8 i;
4287 int rc = -ENOMEM; 4288 int rc = -ENOMEM;
4288 u32 cluster_type = 0; 4289 u32 cluster_type = 0;
4289 4290
@@ -4335,11 +4336,11 @@ static int ioc_rescan(void __user *arg, char *cmnd)
4335{ 4336{
4336 gdth_ioctl_rescan *rsc; 4337 gdth_ioctl_rescan *rsc;
4337 gdth_cmd_str *cmd; 4338 gdth_cmd_str *cmd;
4338 ushort i, status, hdr_cnt; 4339 u16 i, status, hdr_cnt;
4339 ulong32 info; 4340 u32 info;
4340 int cyls, hds, secs; 4341 int cyls, hds, secs;
4341 int rc = -ENOMEM; 4342 int rc = -ENOMEM;
4342 ulong flags; 4343 unsigned long flags;
4343 gdth_ha_str *ha; 4344 gdth_ha_str *ha;
4344 4345
4345 rsc = kmalloc(sizeof(*rsc), GFP_KERNEL); 4346 rsc = kmalloc(sizeof(*rsc), GFP_KERNEL);
@@ -4367,7 +4368,7 @@ static int ioc_rescan(void __user *arg, char *cmnd)
4367 4368
4368 status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info); 4369 status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);
4369 i = 0; 4370 i = 0;
4370 hdr_cnt = (status == S_OK ? (ushort)info : 0); 4371 hdr_cnt = (status == S_OK ? (u16)info : 0);
4371 } else { 4372 } else {
4372 i = rsc->hdr_no; 4373 i = rsc->hdr_no;
4373 hdr_cnt = i + 1; 4374 hdr_cnt = i + 1;
@@ -4418,7 +4419,7 @@ static int ioc_rescan(void __user *arg, char *cmnd)
4418 status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info); 4419 status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);
4419 4420
4420 spin_lock_irqsave(&ha->smp_lock, flags); 4421 spin_lock_irqsave(&ha->smp_lock, flags);
4421 ha->hdr[i].devtype = (status == S_OK ? (ushort)info : 0); 4422 ha->hdr[i].devtype = (status == S_OK ? (u16)info : 0);
4422 spin_unlock_irqrestore(&ha->smp_lock, flags); 4423 spin_unlock_irqrestore(&ha->smp_lock, flags);
4423 4424
4424 cmd->Service = CACHESERVICE; 4425 cmd->Service = CACHESERVICE;
@@ -4432,7 +4433,7 @@ static int ioc_rescan(void __user *arg, char *cmnd)
4432 4433
4433 spin_lock_irqsave(&ha->smp_lock, flags); 4434 spin_lock_irqsave(&ha->smp_lock, flags);
4434 ha->hdr[i].cluster_type = 4435 ha->hdr[i].cluster_type =
4435 ((status == S_OK && !shared_access) ? (ushort)info : 0); 4436 ((status == S_OK && !shared_access) ? (u16)info : 0);
4436 spin_unlock_irqrestore(&ha->smp_lock, flags); 4437 spin_unlock_irqrestore(&ha->smp_lock, flags);
4437 rsc->hdr_list[i].cluster_type = ha->hdr[i].cluster_type; 4438 rsc->hdr_list[i].cluster_type = ha->hdr[i].cluster_type;
4438 4439
@@ -4446,7 +4447,7 @@ static int ioc_rescan(void __user *arg, char *cmnd)
4446 status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info); 4447 status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);
4447 4448
4448 spin_lock_irqsave(&ha->smp_lock, flags); 4449 spin_lock_irqsave(&ha->smp_lock, flags);
4449 ha->hdr[i].rw_attribs = (status == S_OK ? (ushort)info : 0); 4450 ha->hdr[i].rw_attribs = (status == S_OK ? (u16)info : 0);
4450 spin_unlock_irqrestore(&ha->smp_lock, flags); 4451 spin_unlock_irqrestore(&ha->smp_lock, flags);
4451 } 4452 }
4452 4453
@@ -4466,7 +4467,7 @@ static int gdth_ioctl(struct inode *inode, struct file *filep,
4466{ 4467{
4467 gdth_ha_str *ha; 4468 gdth_ha_str *ha;
4468 Scsi_Cmnd *scp; 4469 Scsi_Cmnd *scp;
4469 ulong flags; 4470 unsigned long flags;
4470 char cmnd[MAX_COMMAND_SIZE]; 4471 char cmnd[MAX_COMMAND_SIZE];
4471 void __user *argp = (void __user *)arg; 4472 void __user *argp = (void __user *)arg;
4472 4473
@@ -4495,9 +4496,9 @@ static int gdth_ioctl(struct inode *inode, struct file *filep,
4495 { 4496 {
4496 gdth_ioctl_osvers osv; 4497 gdth_ioctl_osvers osv;
4497 4498
4498 osv.version = (unchar)(LINUX_VERSION_CODE >> 16); 4499 osv.version = (u8)(LINUX_VERSION_CODE >> 16);
4499 osv.subversion = (unchar)(LINUX_VERSION_CODE >> 8); 4500 osv.subversion = (u8)(LINUX_VERSION_CODE >> 8);
4500 osv.revision = (ushort)(LINUX_VERSION_CODE & 0xff); 4501 osv.revision = (u16)(LINUX_VERSION_CODE & 0xff);
4501 if (copy_to_user(argp, &osv, sizeof(gdth_ioctl_osvers))) 4502 if (copy_to_user(argp, &osv, sizeof(gdth_ioctl_osvers)))
4502 return -EFAULT; 4503 return -EFAULT;
4503 break; 4504 break;
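GDTIOCTL_OSVERS above unpacks LINUX_VERSION_CODE, which is KERNEL_VERSION(v, p, s) = (v << 16) + (p << 8) + s; a worked example:

    /* KERNEL_VERSION(2, 6, 34) = (2 << 16) + (6 << 8) + 34 = 0x020622,
     * so version = 0x02, subversion = 0x06, revision = 0x22 (= 34). */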
@@ -4512,10 +4513,10 @@ static int gdth_ioctl(struct inode *inode, struct file *filep,
4512 return -EFAULT; 4513 return -EFAULT;
4513 4514
4514 if (ha->type == GDT_ISA || ha->type == GDT_EISA) { 4515 if (ha->type == GDT_ISA || ha->type == GDT_EISA) {
4515 ctrt.type = (unchar)((ha->stype>>20) - 0x10); 4516 ctrt.type = (u8)((ha->stype>>20) - 0x10);
4516 } else { 4517 } else {
4517 if (ha->type != GDT_PCIMPR) { 4518 if (ha->type != GDT_PCIMPR) {
4518 ctrt.type = (unchar)((ha->stype<<4) + 6); 4519 ctrt.type = (u8)((ha->stype<<4) + 6);
4519 } else { 4520 } else {
4520 ctrt.type = 4521 ctrt.type =
4521 (ha->oem_id == OEM_ID_INTEL ? 0xfd : 0xfe); 4522 (ha->oem_id == OEM_ID_INTEL ? 0xfd : 0xfe);
@@ -4546,7 +4547,7 @@ static int gdth_ioctl(struct inode *inode, struct file *filep,
4546 case GDTIOCTL_LOCKCHN: 4547 case GDTIOCTL_LOCKCHN:
4547 { 4548 {
4548 gdth_ioctl_lockchn lchn; 4549 gdth_ioctl_lockchn lchn;
4549 unchar i, j; 4550 u8 i, j;
4550 4551
4551 if (copy_from_user(&lchn, argp, sizeof(gdth_ioctl_lockchn)) || 4552 if (copy_from_user(&lchn, argp, sizeof(gdth_ioctl_lockchn)) ||
4552 (NULL == (ha = gdth_find_ha(lchn.ionode)))) 4553 (NULL == (ha = gdth_find_ha(lchn.ionode))))
@@ -4670,7 +4671,7 @@ static struct scsi_host_template gdth_template = {
4670}; 4671};
4671 4672
4672#ifdef CONFIG_ISA 4673#ifdef CONFIG_ISA
4673static int __init gdth_isa_probe_one(ulong32 isa_bios) 4674static int __init gdth_isa_probe_one(u32 isa_bios)
4674{ 4675{
4675 struct Scsi_Host *shp; 4676 struct Scsi_Host *shp;
4676 gdth_ha_str *ha; 4677 gdth_ha_str *ha;
@@ -4802,7 +4803,7 @@ static int __init gdth_isa_probe_one(ulong32 isa_bios)
4802#endif /* CONFIG_ISA */ 4803#endif /* CONFIG_ISA */
4803 4804
4804#ifdef CONFIG_EISA 4805#ifdef CONFIG_EISA
4805static int __init gdth_eisa_probe_one(ushort eisa_slot) 4806static int __init gdth_eisa_probe_one(u16 eisa_slot)
4806{ 4807{
4807 struct Scsi_Host *shp; 4808 struct Scsi_Host *shp;
4808 gdth_ha_str *ha; 4809 gdth_ha_str *ha;
@@ -5120,7 +5121,7 @@ static void gdth_remove_one(gdth_ha_str *ha)
5120 scsi_host_put(shp); 5121 scsi_host_put(shp);
5121} 5122}
5122 5123
5123static int gdth_halt(struct notifier_block *nb, ulong event, void *buf) 5124static int gdth_halt(struct notifier_block *nb, unsigned long event, void *buf)
5124{ 5125{
5125 gdth_ha_str *ha; 5126 gdth_ha_str *ha;
5126 5127
@@ -5158,14 +5159,14 @@ static int __init gdth_init(void)
5158 if (probe_eisa_isa) { 5159 if (probe_eisa_isa) {
5159 /* scanning for controllers, at first: ISA controller */ 5160 /* scanning for controllers, at first: ISA controller */
5160#ifdef CONFIG_ISA 5161#ifdef CONFIG_ISA
5161 ulong32 isa_bios; 5162 u32 isa_bios;
5162 for (isa_bios = 0xc8000UL; isa_bios <= 0xd8000UL; 5163 for (isa_bios = 0xc8000UL; isa_bios <= 0xd8000UL;
5163 isa_bios += 0x8000UL) 5164 isa_bios += 0x8000UL)
5164 gdth_isa_probe_one(isa_bios); 5165 gdth_isa_probe_one(isa_bios);
5165#endif 5166#endif
5166#ifdef CONFIG_EISA 5167#ifdef CONFIG_EISA
5167 { 5168 {
5168 ushort eisa_slot; 5169 u16 eisa_slot;
5169 for (eisa_slot = 0x1000; eisa_slot <= 0x8000; 5170 for (eisa_slot = 0x1000; eisa_slot <= 0x8000;
5170 eisa_slot += 0x1000) 5171 eisa_slot += 0x1000)
5171 gdth_eisa_probe_one(eisa_slot); 5172 gdth_eisa_probe_one(eisa_slot);
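For reference, the legacy scan ranges used above (these boards predate generic bus discovery): the ISA loop probes the three BIOS windows, and the EISA loop walks the standard per-slot I/O bases (slot n at n << 12, a property of the EISA spec):

    /* ISA : isa_bios  = 0xc8000, 0xd0000, 0xd8000  (step 0x8000) */
    /* EISA: eisa_slot = 0x1000 .. 0x8000           (step 0x1000,
     *       i.e. slots 1..8)                                     */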
diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h
index 1646444e9bd5..120a0625a7b5 100644
--- a/drivers/scsi/gdth.h
+++ b/drivers/scsi/gdth.h
@@ -321,524 +321,524 @@
321 321
322/* screenservice message */ 322/* screenservice message */
323typedef struct { 323typedef struct {
324 ulong32 msg_handle; /* message handle */ 324 u32 msg_handle; /* message handle */
325 ulong32 msg_len; /* size of message */ 325 u32 msg_len; /* size of message */
326 ulong32 msg_alen; /* answer length */ 326 u32 msg_alen; /* answer length */
327 unchar msg_answer; /* answer flag */ 327 u8 msg_answer; /* answer flag */
328 unchar msg_ext; /* more messages */ 328 u8 msg_ext; /* more messages */
329 unchar msg_reserved[2]; 329 u8 msg_reserved[2];
330 char msg_text[MSGLEN+2]; /* the message text */ 330 char msg_text[MSGLEN+2]; /* the message text */
331} PACKED gdth_msg_str; 331} __attribute__((packed)) gdth_msg_str;
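The diff spells the old PACKED macro out as the GCC attribute. A hedged aside: the kernel also provides the __packed shorthand (from <linux/compiler.h>), which would express the same layout guarantee:

    struct example {
            u32 a;
            u8  b;      /* no interior/tail padding is inserted */
    } __packed;         /* same as __attribute__((packed)) */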
332 332
333 333
334/* IOCTL data structures */ 334/* IOCTL data structures */
335 335
336/* Status coalescing buffer for returning multiple requests per interrupt */ 336/* Status coalescing buffer for returning multiple requests per interrupt */
337typedef struct { 337typedef struct {
338 ulong32 status; 338 u32 status;
339 ulong32 ext_status; 339 u32 ext_status;
340 ulong32 info0; 340 u32 info0;
341 ulong32 info1; 341 u32 info1;
342} PACKED gdth_coal_status; 342} __attribute__((packed)) gdth_coal_status;
343 343
344/* performance mode data structure */ 344/* performance mode data structure */
345typedef struct { 345typedef struct {
346 ulong32 version; /* The version of this IOCTL structure. */ 346 u32 version; /* The version of this IOCTL structure. */
347 ulong32 st_mode; /* 0=dis., 1=st_buf_addr1 valid, 2=both */ 347 u32 st_mode; /* 0=dis., 1=st_buf_addr1 valid, 2=both */
348 ulong32 st_buff_addr1; /* physical address of status buffer 1 */ 348 u32 st_buff_addr1; /* physical address of status buffer 1 */
349 ulong32 st_buff_u_addr1; /* reserved for 64 bit addressing */ 349 u32 st_buff_u_addr1; /* reserved for 64 bit addressing */
350 ulong32 st_buff_indx1; /* reserved command idx. for this buffer */ 350 u32 st_buff_indx1; /* reserved command idx. for this buffer */
351 ulong32 st_buff_addr2; /* physical address of status buffer 2 */ 351 u32 st_buff_addr2; /* physical address of status buffer 2 */
352 ulong32 st_buff_u_addr2; /* reserved for 64 bit addressing */ 352 u32 st_buff_u_addr2; /* reserved for 64 bit addressing */
353 ulong32 st_buff_indx2; /* reserved command idx. for this buffer */ 353 u32 st_buff_indx2; /* reserved command idx. for this buffer */
354 ulong32 st_buff_size; /* size of each buffer in bytes */ 354 u32 st_buff_size; /* size of each buffer in bytes */
355 ulong32 cmd_mode; /* 0 = mode disabled, 1 = cmd_buff_addr1 */ 355 u32 cmd_mode; /* 0 = mode disabled, 1 = cmd_buff_addr1 */
356 ulong32 cmd_buff_addr1; /* physical address of cmd buffer 1 */ 356 u32 cmd_buff_addr1; /* physical address of cmd buffer 1 */
357 ulong32 cmd_buff_u_addr1; /* reserved for 64 bit addressing */ 357 u32 cmd_buff_u_addr1; /* reserved for 64 bit addressing */
358 ulong32 cmd_buff_indx1; /* cmd buf addr1 unique identifier */ 358 u32 cmd_buff_indx1; /* cmd buf addr1 unique identifier */
359 ulong32 cmd_buff_addr2; /* physical address of cmd buffer 2 */ 359 u32 cmd_buff_addr2; /* physical address of cmd buffer 2 */
360 ulong32 cmd_buff_u_addr2; /* reserved for 64 bit addressing */ 360 u32 cmd_buff_u_addr2; /* reserved for 64 bit addressing */
361 ulong32 cmd_buff_indx2; /* cmd buf addr1 unique identifier */ 361 u32 cmd_buff_indx2; /* cmd buf addr1 unique identifier */
362 ulong32 cmd_buff_size; /* size of each cmd bufer in bytes */ 362 u32 cmd_buff_size; /* size of each cmd bufer in bytes */
363 ulong32 reserved1; 363 u32 reserved1;
364 ulong32 reserved2; 364 u32 reserved2;
365} PACKED gdth_perf_modes; 365} __attribute__((packed)) gdth_perf_modes;
366 366
367/* SCSI drive info */ 367/* SCSI drive info */
368typedef struct { 368typedef struct {
369 unchar vendor[8]; /* vendor string */ 369 u8 vendor[8]; /* vendor string */
370 unchar product[16]; /* product string */ 370 u8 product[16]; /* product string */
371 unchar revision[4]; /* revision */ 371 u8 revision[4]; /* revision */
372 ulong32 sy_rate; /* current rate for sync. tr. */ 372 u32 sy_rate; /* current rate for sync. tr. */
373 ulong32 sy_max_rate; /* max. rate for sync. tr. */ 373 u32 sy_max_rate; /* max. rate for sync. tr. */
374 ulong32 no_ldrive; /* belongs to this log. drv.*/ 374 u32 no_ldrive; /* belongs to this log. drv.*/
375 ulong32 blkcnt; /* number of blocks */ 375 u32 blkcnt; /* number of blocks */
376 ushort blksize; /* size of block in bytes */ 376 u16 blksize; /* size of block in bytes */
377 unchar available; /* flag: access is available */ 377 u8 available; /* flag: access is available */
378 unchar init; /* medium is initialized */ 378 u8 init; /* medium is initialized */
379 unchar devtype; /* SCSI devicetype */ 379 u8 devtype; /* SCSI devicetype */
380 unchar rm_medium; /* medium is removable */ 380 u8 rm_medium; /* medium is removable */
381 unchar wp_medium; /* medium is write protected */ 381 u8 wp_medium; /* medium is write protected */
382 unchar ansi; /* SCSI I/II or III? */ 382 u8 ansi; /* SCSI I/II or III? */
383 unchar protocol; /* same as ansi */ 383 u8 protocol; /* same as ansi */
384 unchar sync; /* flag: sync. transfer enab. */ 384 u8 sync; /* flag: sync. transfer enab. */
385 unchar disc; /* flag: disconnect enabled */ 385 u8 disc; /* flag: disconnect enabled */
 386 unchar queueing; /* flag: command queuing enab. */ 386 u8 queueing; /* flag: command queuing enab. */
387 unchar cached; /* flag: caching enabled */ 387 u8 cached; /* flag: caching enabled */
388 unchar target_id; /* target ID of device */ 388 u8 target_id; /* target ID of device */
389 unchar lun; /* LUN id of device */ 389 u8 lun; /* LUN id of device */
390 unchar orphan; /* flag: drive fragment */ 390 u8 orphan; /* flag: drive fragment */
391 ulong32 last_error; /* sense key or drive state */ 391 u32 last_error; /* sense key or drive state */
392 ulong32 last_result; /* result of last command */ 392 u32 last_result; /* result of last command */
393 ulong32 check_errors; /* err. in last surface check */ 393 u32 check_errors; /* err. in last surface check */
394 unchar percent; /* progress for surface check */ 394 u8 percent; /* progress for surface check */
395 unchar last_check; /* IOCTRL operation */ 395 u8 last_check; /* IOCTRL operation */
396 unchar res[2]; 396 u8 res[2];
397 ulong32 flags; /* from 1.19/2.19: raw reserv.*/ 397 u32 flags; /* from 1.19/2.19: raw reserv.*/
398 unchar multi_bus; /* multi bus dev? (fibre ch.) */ 398 u8 multi_bus; /* multi bus dev? (fibre ch.) */
399 unchar mb_status; /* status: available? */ 399 u8 mb_status; /* status: available? */
400 unchar res2[2]; 400 u8 res2[2];
401 unchar mb_alt_status; /* status on second bus */ 401 u8 mb_alt_status; /* status on second bus */
402 unchar mb_alt_bid; /* number of second bus */ 402 u8 mb_alt_bid; /* number of second bus */
403 unchar mb_alt_tid; /* target id on second bus */ 403 u8 mb_alt_tid; /* target id on second bus */
404 unchar res3; 404 u8 res3;
405 unchar fc_flag; /* from 1.22/2.22: info valid?*/ 405 u8 fc_flag; /* from 1.22/2.22: info valid?*/
406 unchar res4; 406 u8 res4;
407 ushort fc_frame_size; /* frame size (bytes) */ 407 u16 fc_frame_size; /* frame size (bytes) */
408 char wwn[8]; /* world wide name */ 408 char wwn[8]; /* world wide name */
409} PACKED gdth_diskinfo_str; 409} __attribute__((packed)) gdth_diskinfo_str;
410 410
411/* get SCSI channel count */ 411/* get SCSI channel count */
412typedef struct { 412typedef struct {
413 ulong32 channel_no; /* number of channel */ 413 u32 channel_no; /* number of channel */
414 ulong32 drive_cnt; /* drive count */ 414 u32 drive_cnt; /* drive count */
415 unchar siop_id; /* SCSI processor ID */ 415 u8 siop_id; /* SCSI processor ID */
416 unchar siop_state; /* SCSI processor state */ 416 u8 siop_state; /* SCSI processor state */
417} PACKED gdth_getch_str; 417} __attribute__((packed)) gdth_getch_str;
418 418
419/* get SCSI drive numbers */ 419/* get SCSI drive numbers */
420typedef struct { 420typedef struct {
421 ulong32 sc_no; /* SCSI channel */ 421 u32 sc_no; /* SCSI channel */
422 ulong32 sc_cnt; /* sc_list[] elements */ 422 u32 sc_cnt; /* sc_list[] elements */
423 ulong32 sc_list[MAXID]; /* minor device numbers */ 423 u32 sc_list[MAXID]; /* minor device numbers */
424} PACKED gdth_drlist_str; 424} __attribute__((packed)) gdth_drlist_str;
425 425
426/* get grown/primary defect count */ 426/* get grown/primary defect count */
427typedef struct { 427typedef struct {
428 unchar sddc_type; /* 0x08: grown, 0x10: prim. */ 428 u8 sddc_type; /* 0x08: grown, 0x10: prim. */
429 unchar sddc_format; /* list entry format */ 429 u8 sddc_format; /* list entry format */
430 unchar sddc_len; /* list entry length */ 430 u8 sddc_len; /* list entry length */
431 unchar sddc_res; 431 u8 sddc_res;
432 ulong32 sddc_cnt; /* entry count */ 432 u32 sddc_cnt; /* entry count */
433} PACKED gdth_defcnt_str; 433} __attribute__((packed)) gdth_defcnt_str;
434 434
435/* disk statistics */ 435/* disk statistics */
436typedef struct { 436typedef struct {
437 ulong32 bid; /* SCSI channel */ 437 u32 bid; /* SCSI channel */
438 ulong32 first; /* first SCSI disk */ 438 u32 first; /* first SCSI disk */
439 ulong32 entries; /* number of elements */ 439 u32 entries; /* number of elements */
440 ulong32 count; /* (R) number of init. el. */ 440 u32 count; /* (R) number of init. el. */
441 ulong32 mon_time; /* time stamp */ 441 u32 mon_time; /* time stamp */
442 struct { 442 struct {
443 unchar tid; /* target ID */ 443 u8 tid; /* target ID */
444 unchar lun; /* LUN */ 444 u8 lun; /* LUN */
445 unchar res[2]; 445 u8 res[2];
446 ulong32 blk_size; /* block size in bytes */ 446 u32 blk_size; /* block size in bytes */
447 ulong32 rd_count; /* bytes read */ 447 u32 rd_count; /* bytes read */
448 ulong32 wr_count; /* bytes written */ 448 u32 wr_count; /* bytes written */
449 ulong32 rd_blk_count; /* blocks read */ 449 u32 rd_blk_count; /* blocks read */
450 ulong32 wr_blk_count; /* blocks written */ 450 u32 wr_blk_count; /* blocks written */
451 ulong32 retries; /* retries */ 451 u32 retries; /* retries */
452 ulong32 reassigns; /* reassigns */ 452 u32 reassigns; /* reassigns */
453 } PACKED list[1]; 453 } __attribute__((packed)) list[1];
454} PACKED gdth_dskstat_str; 454} __attribute__((packed)) gdth_dskstat_str;
455 455
456/* IO channel header */ 456/* IO channel header */
457typedef struct { 457typedef struct {
458 ulong32 version; /* version (-1UL: newest) */ 458 u32 version; /* version (-1UL: newest) */
459 unchar list_entries; /* list entry count */ 459 u8 list_entries; /* list entry count */
460 unchar first_chan; /* first channel number */ 460 u8 first_chan; /* first channel number */
461 unchar last_chan; /* last channel number */ 461 u8 last_chan; /* last channel number */
462 unchar chan_count; /* (R) channel count */ 462 u8 chan_count; /* (R) channel count */
463 ulong32 list_offset; /* offset of list[0] */ 463 u32 list_offset; /* offset of list[0] */
464} PACKED gdth_iochan_header; 464} __attribute__((packed)) gdth_iochan_header;
465 465
466/* get IO channel description */ 466/* get IO channel description */
467typedef struct { 467typedef struct {
468 gdth_iochan_header hdr; 468 gdth_iochan_header hdr;
469 struct { 469 struct {
470 ulong32 address; /* channel address */ 470 u32 address; /* channel address */
471 unchar type; /* type (SCSI, FCAL) */ 471 u8 type; /* type (SCSI, FCAL) */
472 unchar local_no; /* local number */ 472 u8 local_no; /* local number */
473 ushort features; /* channel features */ 473 u16 features; /* channel features */
474 } PACKED list[MAXBUS]; 474 } __attribute__((packed)) list[MAXBUS];
475} PACKED gdth_iochan_str; 475} __attribute__((packed)) gdth_iochan_str;
476 476
477/* get raw IO channel description */ 477/* get raw IO channel description */
478typedef struct { 478typedef struct {
479 gdth_iochan_header hdr; 479 gdth_iochan_header hdr;
480 struct { 480 struct {
481 unchar proc_id; /* processor id */ 481 u8 proc_id; /* processor id */
482 unchar proc_defect; /* defect ? */ 482 u8 proc_defect; /* defect ? */
483 unchar reserved[2]; 483 u8 reserved[2];
484 } PACKED list[MAXBUS]; 484 } __attribute__((packed)) list[MAXBUS];
485} PACKED gdth_raw_iochan_str; 485} __attribute__((packed)) gdth_raw_iochan_str;
486 486
487/* array drive component */ 487/* array drive component */
488typedef struct { 488typedef struct {
489 ulong32 al_controller; /* controller ID */ 489 u32 al_controller; /* controller ID */
490 unchar al_cache_drive; /* cache drive number */ 490 u8 al_cache_drive; /* cache drive number */
491 unchar al_status; /* cache drive state */ 491 u8 al_status; /* cache drive state */
492 unchar al_res[2]; 492 u8 al_res[2];
493} PACKED gdth_arraycomp_str; 493} __attribute__((packed)) gdth_arraycomp_str;
494 494
495/* array drive information */ 495/* array drive information */
496typedef struct { 496typedef struct {
497 unchar ai_type; /* array type (RAID0,4,5) */ 497 u8 ai_type; /* array type (RAID0,4,5) */
498 unchar ai_cache_drive_cnt; /* active cachedrives */ 498 u8 ai_cache_drive_cnt; /* active cachedrives */
499 unchar ai_state; /* array drive state */ 499 u8 ai_state; /* array drive state */
500 unchar ai_master_cd; /* master cachedrive */ 500 u8 ai_master_cd; /* master cachedrive */
501 ulong32 ai_master_controller; /* ID of master controller */ 501 u32 ai_master_controller; /* ID of master controller */
502 ulong32 ai_size; /* user capacity [sectors] */ 502 u32 ai_size; /* user capacity [sectors] */
503 ulong32 ai_striping_size; /* striping size [sectors] */ 503 u32 ai_striping_size; /* striping size [sectors] */
504 ulong32 ai_secsize; /* sector size [bytes] */ 504 u32 ai_secsize; /* sector size [bytes] */
505 ulong32 ai_err_info; /* failed cache drive */ 505 u32 ai_err_info; /* failed cache drive */
506 unchar ai_name[8]; /* name of the array drive */ 506 u8 ai_name[8]; /* name of the array drive */
507 unchar ai_controller_cnt; /* number of controllers */ 507 u8 ai_controller_cnt; /* number of controllers */
508 unchar ai_removable; /* flag: removable */ 508 u8 ai_removable; /* flag: removable */
509 unchar ai_write_protected; /* flag: write protected */ 509 u8 ai_write_protected; /* flag: write protected */
510 unchar ai_devtype; /* type: always direct access */ 510 u8 ai_devtype; /* type: always direct access */
511 gdth_arraycomp_str ai_drives[35]; /* drive components: */ 511 gdth_arraycomp_str ai_drives[35]; /* drive components: */
512 unchar ai_drive_entries; /* number of drive components */ 512 u8 ai_drive_entries; /* number of drive components */
513 unchar ai_protected; /* protection flag */ 513 u8 ai_protected; /* protection flag */
514 unchar ai_verify_state; /* state of a parity verify */ 514 u8 ai_verify_state; /* state of a parity verify */
515 unchar ai_ext_state; /* extended array drive state */ 515 u8 ai_ext_state; /* extended array drive state */
516 unchar ai_expand_state; /* array expand state (>=2.18)*/ 516 u8 ai_expand_state; /* array expand state (>=2.18)*/
517 unchar ai_reserved[3]; 517 u8 ai_reserved[3];
518} PACKED gdth_arrayinf_str; 518} __attribute__((packed)) gdth_arrayinf_str;
519 519
520/* get array drive list */ 520/* get array drive list */
521typedef struct { 521typedef struct {
522 ulong32 controller_no; /* controller no. */ 522 u32 controller_no; /* controller no. */
523 unchar cd_handle; /* master cachedrive */ 523 u8 cd_handle; /* master cachedrive */
524 unchar is_arrayd; /* Flag: is array drive? */ 524 u8 is_arrayd; /* Flag: is array drive? */
525 unchar is_master; /* Flag: is array master? */ 525 u8 is_master; /* Flag: is array master? */
526 unchar is_parity; /* Flag: is parity drive? */ 526 u8 is_parity; /* Flag: is parity drive? */
527 unchar is_hotfix; /* Flag: is hotfix drive? */ 527 u8 is_hotfix; /* Flag: is hotfix drive? */
528 unchar res[3]; 528 u8 res[3];
529} PACKED gdth_alist_str; 529} __attribute__((packed)) gdth_alist_str;
530 530
531typedef struct { 531typedef struct {
532 ulong32 entries_avail; /* allocated entries */ 532 u32 entries_avail; /* allocated entries */
533 ulong32 entries_init; /* returned entries */ 533 u32 entries_init; /* returned entries */
534 ulong32 first_entry; /* first entry number */ 534 u32 first_entry; /* first entry number */
535 ulong32 list_offset; /* offset of following list */ 535 u32 list_offset; /* offset of following list */
536 gdth_alist_str list[1]; /* list */ 536 gdth_alist_str list[1]; /* list */
537} PACKED gdth_arcdl_str; 537} __attribute__((packed)) gdth_arcdl_str;
538 538
539/* cache info/config IOCTL */ 539/* cache info/config IOCTL */
540typedef struct { 540typedef struct {
541 ulong32 version; /* firmware version */ 541 u32 version; /* firmware version */
542 ushort state; /* cache state (on/off) */ 542 u16 state; /* cache state (on/off) */
543 ushort strategy; /* cache strategy */ 543 u16 strategy; /* cache strategy */
544 ushort write_back; /* write back state (on/off) */ 544 u16 write_back; /* write back state (on/off) */
545 ushort block_size; /* cache block size */ 545 u16 block_size; /* cache block size */
546} PACKED gdth_cpar_str; 546} __attribute__((packed)) gdth_cpar_str;
547 547
548typedef struct { 548typedef struct {
549 ulong32 csize; /* cache size */ 549 u32 csize; /* cache size */
550 ulong32 read_cnt; /* read/write counter */ 550 u32 read_cnt; /* read/write counter */
551 ulong32 write_cnt; 551 u32 write_cnt;
552 ulong32 tr_hits; /* hits */ 552 u32 tr_hits; /* hits */
553 ulong32 sec_hits; 553 u32 sec_hits;
554 ulong32 sec_miss; /* misses */ 554 u32 sec_miss; /* misses */
555} PACKED gdth_cstat_str; 555} __attribute__((packed)) gdth_cstat_str;
556 556
557typedef struct { 557typedef struct {
558 gdth_cpar_str cpar; 558 gdth_cpar_str cpar;
559 gdth_cstat_str cstat; 559 gdth_cstat_str cstat;
560} PACKED gdth_cinfo_str; 560} __attribute__((packed)) gdth_cinfo_str;
561 561
562/* cache drive info */ 562/* cache drive info */
563typedef struct { 563typedef struct {
564 unchar cd_name[8]; /* cache drive name */ 564 u8 cd_name[8]; /* cache drive name */
565 ulong32 cd_devtype; /* SCSI devicetype */ 565 u32 cd_devtype; /* SCSI devicetype */
566 ulong32 cd_ldcnt; /* number of log. drives */ 566 u32 cd_ldcnt; /* number of log. drives */
567 ulong32 cd_last_error; /* last error */ 567 u32 cd_last_error; /* last error */
568 unchar cd_initialized; /* drive is initialized */ 568 u8 cd_initialized; /* drive is initialized */
569 unchar cd_removable; /* media is removable */ 569 u8 cd_removable; /* media is removable */
570 unchar cd_write_protected; /* write protected */ 570 u8 cd_write_protected; /* write protected */
571 unchar cd_flags; /* Pool Hot Fix? */ 571 u8 cd_flags; /* Pool Hot Fix? */
572 ulong32 ld_blkcnt; /* number of blocks */ 572 u32 ld_blkcnt; /* number of blocks */
573 ulong32 ld_blksize; /* blocksize */ 573 u32 ld_blksize; /* blocksize */
574 ulong32 ld_dcnt; /* number of disks */ 574 u32 ld_dcnt; /* number of disks */
575 ulong32 ld_slave; /* log. drive index */ 575 u32 ld_slave; /* log. drive index */
576 ulong32 ld_dtype; /* type of logical drive */ 576 u32 ld_dtype; /* type of logical drive */
577 ulong32 ld_last_error; /* last error */ 577 u32 ld_last_error; /* last error */
578 unchar ld_name[8]; /* log. drive name */ 578 u8 ld_name[8]; /* log. drive name */
579 unchar ld_error; /* error */ 579 u8 ld_error; /* error */
580} PACKED gdth_cdrinfo_str; 580} __attribute__((packed)) gdth_cdrinfo_str;
581 581
582/* OEM string */ 582/* OEM string */
583typedef struct { 583typedef struct {
584 ulong32 ctl_version; 584 u32 ctl_version;
585 ulong32 file_major_version; 585 u32 file_major_version;
586 ulong32 file_minor_version; 586 u32 file_minor_version;
587 ulong32 buffer_size; 587 u32 buffer_size;
588 ulong32 cpy_count; 588 u32 cpy_count;
589 ulong32 ext_error; 589 u32 ext_error;
590 ulong32 oem_id; 590 u32 oem_id;
591 ulong32 board_id; 591 u32 board_id;
592} PACKED gdth_oem_str_params; 592} __attribute__((packed)) gdth_oem_str_params;
593 593
594typedef struct { 594typedef struct {
595 unchar product_0_1_name[16]; 595 u8 product_0_1_name[16];
596 unchar product_4_5_name[16]; 596 u8 product_4_5_name[16];
597 unchar product_cluster_name[16]; 597 u8 product_cluster_name[16];
598 unchar product_reserved[16]; 598 u8 product_reserved[16];
599 unchar scsi_cluster_target_vendor_id[16]; 599 u8 scsi_cluster_target_vendor_id[16];
600 unchar cluster_raid_fw_name[16]; 600 u8 cluster_raid_fw_name[16];
601 unchar oem_brand_name[16]; 601 u8 oem_brand_name[16];
602 unchar oem_raid_type[16]; 602 u8 oem_raid_type[16];
603 unchar bios_type[13]; 603 u8 bios_type[13];
604 unchar bios_title[50]; 604 u8 bios_title[50];
605 unchar oem_company_name[37]; 605 u8 oem_company_name[37];
606 ulong32 pci_id_1; 606 u32 pci_id_1;
607 ulong32 pci_id_2; 607 u32 pci_id_2;
608 unchar validation_status[80]; 608 u8 validation_status[80];
609 unchar reserved_1[4]; 609 u8 reserved_1[4];
610 unchar scsi_host_drive_inquiry_vendor_id[16]; 610 u8 scsi_host_drive_inquiry_vendor_id[16];
611 unchar library_file_template[16]; 611 u8 library_file_template[16];
612 unchar reserved_2[16]; 612 u8 reserved_2[16];
613 unchar tool_name_1[32]; 613 u8 tool_name_1[32];
614 unchar tool_name_2[32]; 614 u8 tool_name_2[32];
615 unchar tool_name_3[32]; 615 u8 tool_name_3[32];
616 unchar oem_contact_1[84]; 616 u8 oem_contact_1[84];
617 unchar oem_contact_2[84]; 617 u8 oem_contact_2[84];
618 unchar oem_contact_3[84]; 618 u8 oem_contact_3[84];
619} PACKED gdth_oem_str; 619} __attribute__((packed)) gdth_oem_str;
620 620
621typedef struct { 621typedef struct {
622 gdth_oem_str_params params; 622 gdth_oem_str_params params;
623 gdth_oem_str text; 623 gdth_oem_str text;
624} PACKED gdth_oem_str_ioctl; 624} __attribute__((packed)) gdth_oem_str_ioctl;
625 625
626/* board features */ 626/* board features */
627typedef struct { 627typedef struct {
628 unchar chaining; /* Chaining supported */ 628 u8 chaining; /* Chaining supported */
629 unchar striping; /* Striping (RAID-0) supp. */ 629 u8 striping; /* Striping (RAID-0) supp. */
630 unchar mirroring; /* Mirroring (RAID-1) supp. */ 630 u8 mirroring; /* Mirroring (RAID-1) supp. */
631 unchar raid; /* RAID-4/5/10 supported */ 631 u8 raid; /* RAID-4/5/10 supported */
632} PACKED gdth_bfeat_str; 632} __attribute__((packed)) gdth_bfeat_str;
633 633
634/* board info IOCTL */ 634/* board info IOCTL */
635typedef struct { 635typedef struct {
636 ulong32 ser_no; /* serial no. */ 636 u32 ser_no; /* serial no. */
637 unchar oem_id[2]; /* OEM ID */ 637 u8 oem_id[2]; /* OEM ID */
638 ushort ep_flags; /* eprom flags */ 638 u16 ep_flags; /* eprom flags */
639 ulong32 proc_id; /* processor ID */ 639 u32 proc_id; /* processor ID */
640 ulong32 memsize; /* memory size (bytes) */ 640 u32 memsize; /* memory size (bytes) */
641 unchar mem_banks; /* memory banks */ 641 u8 mem_banks; /* memory banks */
642 unchar chan_type; /* channel type */ 642 u8 chan_type; /* channel type */
643 unchar chan_count; /* channel count */ 643 u8 chan_count; /* channel count */
644 unchar rdongle_pres; /* dongle present? */ 644 u8 rdongle_pres; /* dongle present? */
645 ulong32 epr_fw_ver; /* (eprom) firmware version */ 645 u32 epr_fw_ver; /* (eprom) firmware version */
646 ulong32 upd_fw_ver; /* (update) firmware version */ 646 u32 upd_fw_ver; /* (update) firmware version */
647 ulong32 upd_revision; /* update revision */ 647 u32 upd_revision; /* update revision */
648 char type_string[16]; /* controller name */ 648 char type_string[16]; /* controller name */
649 char raid_string[16]; /* RAID firmware name */ 649 char raid_string[16]; /* RAID firmware name */
650 unchar update_pres; /* update present? */ 650 u8 update_pres; /* update present? */
651 unchar xor_pres; /* XOR engine present? */ 651 u8 xor_pres; /* XOR engine present? */
652 unchar prom_type; /* ROM type (eprom/flash) */ 652 u8 prom_type; /* ROM type (eprom/flash) */
653 unchar prom_count; /* number of ROM devices */ 653 u8 prom_count; /* number of ROM devices */
654 ulong32 dup_pres; /* duplexing module present? */ 654 u32 dup_pres; /* duplexing module present? */
655 ulong32 chan_pres; /* number of expansion chn. */ 655 u32 chan_pres; /* number of expansion chn. */
656 ulong32 mem_pres; /* memory expansion inst. ? */ 656 u32 mem_pres; /* memory expansion inst. ? */
657 unchar ft_bus_system; /* fault bus supported? */ 657 u8 ft_bus_system; /* fault bus supported? */
658 unchar subtype_valid; /* board_subtype valid? */ 658 u8 subtype_valid; /* board_subtype valid? */
659 unchar board_subtype; /* subtype/hardware level */ 659 u8 board_subtype; /* subtype/hardware level */
660 unchar ramparity_pres; /* RAM parity check hardware? */ 660 u8 ramparity_pres; /* RAM parity check hardware? */
661} PACKED gdth_binfo_str; 661} __attribute__((packed)) gdth_binfo_str;
662 662
663/* get host drive info */ 663/* get host drive info */
664typedef struct { 664typedef struct {
665 char name[8]; /* host drive name */ 665 char name[8]; /* host drive name */
666 ulong32 size; /* size (sectors) */ 666 u32 size; /* size (sectors) */
667 unchar host_drive; /* host drive number */ 667 u8 host_drive; /* host drive number */
668 unchar log_drive; /* log. drive (master) */ 668 u8 log_drive; /* log. drive (master) */
669 unchar reserved; 669 u8 reserved;
670 unchar rw_attribs; /* r/w attribs */ 670 u8 rw_attribs; /* r/w attribs */
671 ulong32 start_sec; /* start sector */ 671 u32 start_sec; /* start sector */
672} PACKED gdth_hentry_str; 672} __attribute__((packed)) gdth_hentry_str;
673 673
674typedef struct { 674typedef struct {
675 ulong32 entries; /* entry count */ 675 u32 entries; /* entry count */
676 ulong32 offset; /* offset of entries */ 676 u32 offset; /* offset of entries */
677 unchar secs_p_head; /* sectors/head */ 677 u8 secs_p_head; /* sectors/head */
678 unchar heads_p_cyl; /* heads/cylinder */ 678 u8 heads_p_cyl; /* heads/cylinder */
679 unchar reserved; 679 u8 reserved;
680 unchar clust_drvtype; /* cluster drive type */ 680 u8 clust_drvtype; /* cluster drive type */
681 ulong32 location; /* controller number */ 681 u32 location; /* controller number */
682 gdth_hentry_str entry[MAX_HDRIVES]; /* entries */ 682 gdth_hentry_str entry[MAX_HDRIVES]; /* entries */
683} PACKED gdth_hget_str; 683} __attribute__((packed)) gdth_hget_str;
684 684
685 685
686/* DPRAM structures */ 686/* DPRAM structures */
687 687
688/* interface area ISA/PCI */ 688/* interface area ISA/PCI */
689typedef struct { 689typedef struct {
690 unchar S_Cmd_Indx; /* special command */ 690 u8 S_Cmd_Indx; /* special command */
691 unchar volatile S_Status; /* status special command */ 691 u8 volatile S_Status; /* status special command */
692 ushort reserved1; 692 u16 reserved1;
693 ulong32 S_Info[4]; /* add. info special command */ 693 u32 S_Info[4]; /* add. info special command */
694 unchar volatile Sema0; /* command semaphore */ 694 u8 volatile Sema0; /* command semaphore */
695 unchar reserved2[3]; 695 u8 reserved2[3];
696 unchar Cmd_Index; /* command number */ 696 u8 Cmd_Index; /* command number */
697 unchar reserved3[3]; 697 u8 reserved3[3];
698 ushort volatile Status; /* command status */ 698 u16 volatile Status; /* command status */
699 ushort Service; /* service(for async.events) */ 699 u16 Service; /* service(for async.events) */
700 ulong32 Info[2]; /* additional info */ 700 u32 Info[2]; /* additional info */
701 struct { 701 struct {
702 ushort offset; /* command offs. in the DPRAM*/ 702 u16 offset; /* command offs. in the DPRAM*/
703 ushort serv_id; /* service */ 703 u16 serv_id; /* service */
704 } PACKED comm_queue[MAXOFFSETS]; /* command queue */ 704 } __attribute__((packed)) comm_queue[MAXOFFSETS]; /* command queue */
705 ulong32 bios_reserved[2]; 705 u32 bios_reserved[2];
706 unchar gdt_dpr_cmd[1]; /* commands */ 706 u8 gdt_dpr_cmd[1]; /* commands */
707} PACKED gdt_dpr_if; 707} __attribute__((packed)) gdt_dpr_if;
708 708
709/* SRAM structure PCI controllers */ 709/* SRAM structure PCI controllers */
710typedef struct { 710typedef struct {
711 ulong32 magic; /* controller ID from BIOS */ 711 u32 magic; /* controller ID from BIOS */
712 ushort need_deinit; /* switch betw. BIOS/driver */ 712 u16 need_deinit; /* switch betw. BIOS/driver */
713 unchar switch_support; /* see need_deinit */ 713 u8 switch_support; /* see need_deinit */
714 unchar padding[9]; 714 u8 padding[9];
715 unchar os_used[16]; /* OS code per service */ 715 u8 os_used[16]; /* OS code per service */
716 unchar unused[28]; 716 u8 unused[28];
717 unchar fw_magic; /* contr. ID from firmware */ 717 u8 fw_magic; /* contr. ID from firmware */
718} PACKED gdt_pci_sram; 718} __attribute__((packed)) gdt_pci_sram;
719 719
720/* SRAM structure EISA controllers (but NOT GDT3000/3020) */ 720/* SRAM structure EISA controllers (but NOT GDT3000/3020) */
721typedef struct { 721typedef struct {
722 unchar os_used[16]; /* OS code per service */ 722 u8 os_used[16]; /* OS code per service */
723 ushort need_deinit; /* switch betw. BIOS/driver */ 723 u16 need_deinit; /* switch betw. BIOS/driver */
724 unchar switch_support; /* see need_deinit */ 724 u8 switch_support; /* see need_deinit */
725 unchar padding; 725 u8 padding;
726} PACKED gdt_eisa_sram; 726} __attribute__((packed)) gdt_eisa_sram;
727 727
728 728
729/* DPRAM ISA controllers */ 729/* DPRAM ISA controllers */
730typedef struct { 730typedef struct {
731 union { 731 union {
732 struct { 732 struct {
733 unchar bios_used[0x3c00-32]; /* 15KB - 32Bytes BIOS */ 733 u8 bios_used[0x3c00-32]; /* 15KB - 32Bytes BIOS */
734 ulong32 magic; /* controller (EISA) ID */ 734 u32 magic; /* controller (EISA) ID */
735 ushort need_deinit; /* switch betw. BIOS/driver */ 735 u16 need_deinit; /* switch betw. BIOS/driver */
736 unchar switch_support; /* see need_deinit */ 736 u8 switch_support; /* see need_deinit */
737 unchar padding[9]; 737 u8 padding[9];
738 unchar os_used[16]; /* OS code per service */ 738 u8 os_used[16]; /* OS code per service */
739 } PACKED dp_sram; 739 } __attribute__((packed)) dp_sram;
740 unchar bios_area[0x4000]; /* 16KB reserved for BIOS */ 740 u8 bios_area[0x4000]; /* 16KB reserved for BIOS */
741 } bu; 741 } bu;
742 union { 742 union {
743 gdt_dpr_if ic; /* interface area */ 743 gdt_dpr_if ic; /* interface area */
744 unchar if_area[0x3000]; /* 12KB for interface */ 744 u8 if_area[0x3000]; /* 12KB for interface */
745 } u; 745 } u;
746 struct { 746 struct {
747 unchar memlock; /* write protection DPRAM */ 747 u8 memlock; /* write protection DPRAM */
748 unchar event; /* release event */ 748 u8 event; /* release event */
749 unchar irqen; /* board interrupts enable */ 749 u8 irqen; /* board interrupts enable */
750 unchar irqdel; /* acknowledge board int. */ 750 u8 irqdel; /* acknowledge board int. */
751 unchar volatile Sema1; /* status semaphore */ 751 u8 volatile Sema1; /* status semaphore */
752 unchar rq; /* IRQ/DRQ configuration */ 752 u8 rq; /* IRQ/DRQ configuration */
753 } PACKED io; 753 } __attribute__((packed)) io;
754} PACKED gdt2_dpram_str; 754} __attribute__((packed)) gdt2_dpram_str;
755 755
756/* DPRAM PCI controllers */ 756/* DPRAM PCI controllers */
757typedef struct { 757typedef struct {
758 union { 758 union {
759 gdt_dpr_if ic; /* interface area */ 759 gdt_dpr_if ic; /* interface area */
760 unchar if_area[0xff0-sizeof(gdt_pci_sram)]; 760 u8 if_area[0xff0-sizeof(gdt_pci_sram)];
761 } u; 761 } u;
762 gdt_pci_sram gdt6sr; /* SRAM structure */ 762 gdt_pci_sram gdt6sr; /* SRAM structure */
763 struct { 763 struct {
764 unchar unused0[1]; 764 u8 unused0[1];
765 unchar volatile Sema1; /* command semaphore */ 765 u8 volatile Sema1; /* command semaphore */
766 unchar unused1[3]; 766 u8 unused1[3];
767 unchar irqen; /* board interrupts enable */ 767 u8 irqen; /* board interrupts enable */
768 unchar unused2[2]; 768 u8 unused2[2];
769 unchar event; /* release event */ 769 u8 event; /* release event */
770 unchar unused3[3]; 770 u8 unused3[3];
771 unchar irqdel; /* acknowledge board int. */ 771 u8 irqdel; /* acknowledge board int. */
772 unchar unused4[3]; 772 u8 unused4[3];
773 } PACKED io; 773 } __attribute__((packed)) io;
774} PACKED gdt6_dpram_str; 774} __attribute__((packed)) gdt6_dpram_str;
775 775
776/* PLX register structure (new PCI controllers) */ 776/* PLX register structure (new PCI controllers) */
777typedef struct { 777typedef struct {
778 unchar cfg_reg; /* DPRAM cfg.(2:below 1MB,0:anywhere)*/ 778 u8 cfg_reg; /* DPRAM cfg.(2:below 1MB,0:anywhere)*/
779 unchar unused1[0x3f]; 779 u8 unused1[0x3f];
780 unchar volatile sema0_reg; /* command semaphore */ 780 u8 volatile sema0_reg; /* command semaphore */
781 unchar volatile sema1_reg; /* status semaphore */ 781 u8 volatile sema1_reg; /* status semaphore */
782 unchar unused2[2]; 782 u8 unused2[2];
783 ushort volatile status; /* command status */ 783 u16 volatile status; /* command status */
784 ushort service; /* service */ 784 u16 service; /* service */
785 ulong32 info[2]; /* additional info */ 785 u32 info[2]; /* additional info */
786 unchar unused3[0x10]; 786 u8 unused3[0x10];
787 unchar ldoor_reg; /* PCI to local doorbell */ 787 u8 ldoor_reg; /* PCI to local doorbell */
788 unchar unused4[3]; 788 u8 unused4[3];
789 unchar volatile edoor_reg; /* local to PCI doorbell */ 789 u8 volatile edoor_reg; /* local to PCI doorbell */
790 unchar unused5[3]; 790 u8 unused5[3];
791 unchar control0; /* control0 register(unused) */ 791 u8 control0; /* control0 register(unused) */
792 unchar control1; /* board interrupts enable */ 792 u8 control1; /* board interrupts enable */
793 unchar unused6[0x16]; 793 u8 unused6[0x16];
794} PACKED gdt6c_plx_regs; 794} __attribute__((packed)) gdt6c_plx_regs;
795 795
796/* DPRAM new PCI controllers */ 796/* DPRAM new PCI controllers */
797typedef struct { 797typedef struct {
798 union { 798 union {
799 gdt_dpr_if ic; /* interface area */ 799 gdt_dpr_if ic; /* interface area */
800 unchar if_area[0x4000-sizeof(gdt_pci_sram)]; 800 u8 if_area[0x4000-sizeof(gdt_pci_sram)];
801 } u; 801 } u;
802 gdt_pci_sram gdt6sr; /* SRAM structure */ 802 gdt_pci_sram gdt6sr; /* SRAM structure */
803} PACKED gdt6c_dpram_str; 803} __attribute__((packed)) gdt6c_dpram_str;
804 804
805/* i960 register structure (PCI MPR controllers) */ 805/* i960 register structure (PCI MPR controllers) */
806typedef struct { 806typedef struct {
807 unchar unused1[16]; 807 u8 unused1[16];
808 unchar volatile sema0_reg; /* command semaphore */ 808 u8 volatile sema0_reg; /* command semaphore */
809 unchar unused2; 809 u8 unused2;
810 unchar volatile sema1_reg; /* status semaphore */ 810 u8 volatile sema1_reg; /* status semaphore */
811 unchar unused3; 811 u8 unused3;
812 ushort volatile status; /* command status */ 812 u16 volatile status; /* command status */
813 ushort service; /* service */ 813 u16 service; /* service */
814 ulong32 info[2]; /* additional info */ 814 u32 info[2]; /* additional info */
815 unchar ldoor_reg; /* PCI to local doorbell */ 815 u8 ldoor_reg; /* PCI to local doorbell */
816 unchar unused4[11]; 816 u8 unused4[11];
817 unchar volatile edoor_reg; /* local to PCI doorbell */ 817 u8 volatile edoor_reg; /* local to PCI doorbell */
818 unchar unused5[7]; 818 u8 unused5[7];
819 unchar edoor_en_reg; /* board interrupts enable */ 819 u8 edoor_en_reg; /* board interrupts enable */
820 unchar unused6[27]; 820 u8 unused6[27];
821 ulong32 unused7[939]; 821 u32 unused7[939];
822 ulong32 severity; 822 u32 severity;
823 char evt_str[256]; /* event string */ 823 char evt_str[256]; /* event string */
824} PACKED gdt6m_i960_regs; 824} __attribute__((packed)) gdt6m_i960_regs;
825 825
826/* DPRAM PCI MPR controllers */ 826/* DPRAM PCI MPR controllers */
827typedef struct { 827typedef struct {
828 gdt6m_i960_regs i960r; /* 4KB i960 registers */ 828 gdt6m_i960_regs i960r; /* 4KB i960 registers */
829 union { 829 union {
830 gdt_dpr_if ic; /* interface area */ 830 gdt_dpr_if ic; /* interface area */
831 unchar if_area[0x3000-sizeof(gdt_pci_sram)]; 831 u8 if_area[0x3000-sizeof(gdt_pci_sram)];
832 } u; 832 } u;
833 gdt_pci_sram gdt6sr; /* SRAM structure */ 833 gdt_pci_sram gdt6sr; /* SRAM structure */
834} PACKED gdt6m_dpram_str; 834} __attribute__((packed)) gdt6m_dpram_str;
835 835
836 836
837/* PCI resources */ 837/* PCI resources */
838typedef struct { 838typedef struct {
839 struct pci_dev *pdev; 839 struct pci_dev *pdev;
840 ulong dpmem; /* DPRAM address */ 840 unsigned long dpmem; /* DPRAM address */
841 ulong io; /* IO address */ 841 unsigned long io; /* IO address */
842} gdth_pci_str; 842} gdth_pci_str;
843 843
844 844
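
Note that gdth_pci_str just above keeps dpmem and io as plain longs (the spelled-out unsigned long replacing the driver's old ulong) rather than converting them to u32/u64: they hold ioremap cookies and I/O addresses whose width follows the CPU, not the firmware wire format. A one-line illustration of the assumption that makes unsigned long the right choice here (a hypothetical check, not driver code; holds on Linux's ILP32/LP64 ABIs):

/* unsigned long is pointer-sized on the ABIs Linux supports, so it can
 * carry an address cookie on both 32- and 64-bit builds. */
_Static_assert(sizeof(unsigned long) == sizeof(void *),
               "unsigned long is pointer-sized on ILP32/LP64");
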
@@ -846,93 +846,93 @@ typedef struct {
846typedef struct { 846typedef struct {
847 struct Scsi_Host *shost; 847 struct Scsi_Host *shost;
848 struct list_head list; 848 struct list_head list;
849 ushort hanum; 849 u16 hanum;
850 ushort oem_id; /* OEM */ 850 u16 oem_id; /* OEM */
851 ushort type; /* controller class */ 851 u16 type; /* controller class */
852 ulong32 stype; /* subtype (PCI: device ID) */ 852 u32 stype; /* subtype (PCI: device ID) */
853 ushort fw_vers; /* firmware version */ 853 u16 fw_vers; /* firmware version */
854 ushort cache_feat; /* feat. cache serv. (s/g,..)*/ 854 u16 cache_feat; /* feat. cache serv. (s/g,..)*/
855 ushort raw_feat; /* feat. raw service (s/g,..)*/ 855 u16 raw_feat; /* feat. raw service (s/g,..)*/
 856 ushort screen_feat; /* feat. screen serv. (s/g,..)*/ 856 u16 screen_feat; /* feat. screen serv. (s/g,..)*/
857 ushort bmic; /* BMIC address (EISA) */ 857 u16 bmic; /* BMIC address (EISA) */
858 void __iomem *brd; /* DPRAM address */ 858 void __iomem *brd; /* DPRAM address */
859 ulong32 brd_phys; /* slot number/BIOS address */ 859 u32 brd_phys; /* slot number/BIOS address */
860 gdt6c_plx_regs *plx; /* PLX regs (new PCI contr.) */ 860 gdt6c_plx_regs *plx; /* PLX regs (new PCI contr.) */
861 gdth_cmd_str cmdext; 861 gdth_cmd_str cmdext;
862 gdth_cmd_str *pccb; /* address command structure */ 862 gdth_cmd_str *pccb; /* address command structure */
863 ulong32 ccb_phys; /* phys. address */ 863 u32 ccb_phys; /* phys. address */
864#ifdef INT_COAL 864#ifdef INT_COAL
865 gdth_coal_status *coal_stat; /* buffer for coalescing int.*/ 865 gdth_coal_status *coal_stat; /* buffer for coalescing int.*/
866 ulong64 coal_stat_phys; /* phys. address */ 866 u64 coal_stat_phys; /* phys. address */
867#endif 867#endif
868 char *pscratch; /* scratch (DMA) buffer */ 868 char *pscratch; /* scratch (DMA) buffer */
869 ulong64 scratch_phys; /* phys. address */ 869 u64 scratch_phys; /* phys. address */
870 unchar scratch_busy; /* in use? */ 870 u8 scratch_busy; /* in use? */
871 unchar dma64_support; /* 64-bit DMA supported? */ 871 u8 dma64_support; /* 64-bit DMA supported? */
872 gdth_msg_str *pmsg; /* message buffer */ 872 gdth_msg_str *pmsg; /* message buffer */
873 ulong64 msg_phys; /* phys. address */ 873 u64 msg_phys; /* phys. address */
874 unchar scan_mode; /* current scan mode */ 874 u8 scan_mode; /* current scan mode */
875 unchar irq; /* IRQ */ 875 u8 irq; /* IRQ */
876 unchar drq; /* DRQ (ISA controllers) */ 876 u8 drq; /* DRQ (ISA controllers) */
877 ushort status; /* command status */ 877 u16 status; /* command status */
878 ushort service; /* service/firmware ver./.. */ 878 u16 service; /* service/firmware ver./.. */
879 ulong32 info; 879 u32 info;
880 ulong32 info2; /* additional info */ 880 u32 info2; /* additional info */
881 Scsi_Cmnd *req_first; /* top of request queue */ 881 Scsi_Cmnd *req_first; /* top of request queue */
882 struct { 882 struct {
883 unchar present; /* Flag: host drive present? */ 883 u8 present; /* Flag: host drive present? */
884 unchar is_logdrv; /* Flag: log. drive (master)? */ 884 u8 is_logdrv; /* Flag: log. drive (master)? */
885 unchar is_arraydrv; /* Flag: array drive? */ 885 u8 is_arraydrv; /* Flag: array drive? */
886 unchar is_master; /* Flag: array drive master? */ 886 u8 is_master; /* Flag: array drive master? */
887 unchar is_parity; /* Flag: parity drive? */ 887 u8 is_parity; /* Flag: parity drive? */
888 unchar is_hotfix; /* Flag: hotfix drive? */ 888 u8 is_hotfix; /* Flag: hotfix drive? */
889 unchar master_no; /* number of master drive */ 889 u8 master_no; /* number of master drive */
890 unchar lock; /* drive locked? (hot plug) */ 890 u8 lock; /* drive locked? (hot plug) */
891 unchar heads; /* mapping */ 891 u8 heads; /* mapping */
892 unchar secs; 892 u8 secs;
893 ushort devtype; /* further information */ 893 u16 devtype; /* further information */
894 ulong64 size; /* capacity */ 894 u64 size; /* capacity */
895 unchar ldr_no; /* log. drive no. */ 895 u8 ldr_no; /* log. drive no. */
896 unchar rw_attribs; /* r/w attributes */ 896 u8 rw_attribs; /* r/w attributes */
897 unchar cluster_type; /* cluster properties */ 897 u8 cluster_type; /* cluster properties */
 898 unchar media_changed; /* Flag: MOUNT/UNMOUNT occurred */ 898 u8 media_changed; /* Flag: MOUNT/UNMOUNT occurred */
899 ulong32 start_sec; /* start sector */ 899 u32 start_sec; /* start sector */
900 } hdr[MAX_LDRIVES]; /* host drives */ 900 } hdr[MAX_LDRIVES]; /* host drives */
901 struct { 901 struct {
902 unchar lock; /* channel locked? (hot plug) */ 902 u8 lock; /* channel locked? (hot plug) */
903 unchar pdev_cnt; /* physical device count */ 903 u8 pdev_cnt; /* physical device count */
904 unchar local_no; /* local channel number */ 904 u8 local_no; /* local channel number */
905 unchar io_cnt[MAXID]; /* current IO count */ 905 u8 io_cnt[MAXID]; /* current IO count */
906 ulong32 address; /* channel address */ 906 u32 address; /* channel address */
907 ulong32 id_list[MAXID]; /* IDs of the phys. devices */ 907 u32 id_list[MAXID]; /* IDs of the phys. devices */
908 } raw[MAXBUS]; /* SCSI channels */ 908 } raw[MAXBUS]; /* SCSI channels */
909 struct { 909 struct {
910 Scsi_Cmnd *cmnd; /* pending request */ 910 Scsi_Cmnd *cmnd; /* pending request */
911 ushort service; /* service */ 911 u16 service; /* service */
912 } cmd_tab[GDTH_MAXCMDS]; /* table of pend. requests */ 912 } cmd_tab[GDTH_MAXCMDS]; /* table of pend. requests */
913 struct gdth_cmndinfo { /* per-command private info */ 913 struct gdth_cmndinfo { /* per-command private info */
914 int index; 914 int index;
915 int internal_command; /* don't call scsi_done */ 915 int internal_command; /* don't call scsi_done */
 916 gdth_cmd_str *internal_cmd_str; /* carrier for internal messages*/ 916 gdth_cmd_str *internal_cmd_str; /* carrier for internal messages*/
917 dma_addr_t sense_paddr; /* sense dma-addr */ 917 dma_addr_t sense_paddr; /* sense dma-addr */
918 unchar priority; 918 u8 priority;
919 int timeout_count; /* # of timeout calls */ 919 int timeout_count; /* # of timeout calls */
920 volatile int wait_for_completion; 920 volatile int wait_for_completion;
921 ushort status; 921 u16 status;
922 ulong32 info; 922 u32 info;
923 enum dma_data_direction dma_dir; 923 enum dma_data_direction dma_dir;
924 int phase; /* ???? */ 924 int phase; /* ???? */
925 int OpCode; 925 int OpCode;
926 } cmndinfo[GDTH_MAXCMDS]; /* index==0 is free */ 926 } cmndinfo[GDTH_MAXCMDS]; /* index==0 is free */
927 unchar bus_cnt; /* SCSI bus count */ 927 u8 bus_cnt; /* SCSI bus count */
928 unchar tid_cnt; /* Target ID count */ 928 u8 tid_cnt; /* Target ID count */
929 unchar bus_id[MAXBUS]; /* IOP IDs */ 929 u8 bus_id[MAXBUS]; /* IOP IDs */
930 unchar virt_bus; /* number of virtual bus */ 930 u8 virt_bus; /* number of virtual bus */
931 unchar more_proc; /* more /proc info supported */ 931 u8 more_proc; /* more /proc info supported */
932 ushort cmd_cnt; /* command count in DPRAM */ 932 u16 cmd_cnt; /* command count in DPRAM */
933 ushort cmd_len; /* length of actual command */ 933 u16 cmd_len; /* length of actual command */
934 ushort cmd_offs_dpmem; /* actual offset in DPRAM */ 934 u16 cmd_offs_dpmem; /* actual offset in DPRAM */
935 ushort ic_all_size; /* sizeof DPRAM interf. area */ 935 u16 ic_all_size; /* sizeof DPRAM interf. area */
936 gdth_cpar_str cpar; /* controller cache par. */ 936 gdth_cpar_str cpar; /* controller cache par. */
937 gdth_bfeat_str bfeat; /* controller features */ 937 gdth_bfeat_str bfeat; /* controller features */
938 gdth_binfo_str binfo; /* controller info */ 938 gdth_binfo_str binfo; /* controller info */
@@ -941,7 +941,7 @@ typedef struct {
941 struct pci_dev *pdev; 941 struct pci_dev *pdev;
942 char oem_name[8]; 942 char oem_name[8];
943#ifdef GDTH_DMA_STATISTICS 943#ifdef GDTH_DMA_STATISTICS
944 ulong dma32_cnt, dma64_cnt; /* statistics: DMA buffer */ 944 unsigned long dma32_cnt, dma64_cnt; /* statistics: DMA buffer */
945#endif 945#endif
946 struct scsi_device *sdev; 946 struct scsi_device *sdev;
947} gdth_ha_str; 947} gdth_ha_str;
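
The hunk header below quotes gdth_cmnd_priv(), the inline accessor that recovers the per-command gdth_cmndinfo slot (declared in the cmndinfo[] array above) from a scsi_cmnd. Its body is context here, not part of the change; a sketch of the usual shape of such an accessor, assuming the driver stashes the pointer in the command's host_scribble field:

/* Sketch (assumed, not shown in this diff): per-command private data
 * is parked in the scsi_cmnd's host_scribble pointer and recovered
 * with a cast. */
static inline struct gdth_cmndinfo *gdth_cmnd_priv(struct scsi_cmnd *cmd)
{
        return (struct gdth_cmndinfo *)cmd->host_scribble;
}
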
@@ -953,65 +953,65 @@ static inline struct gdth_cmndinfo *gdth_cmnd_priv(struct scsi_cmnd* cmd)
953 953
954/* INQUIRY data format */ 954/* INQUIRY data format */
955typedef struct { 955typedef struct {
956 unchar type_qual; 956 u8 type_qual;
957 unchar modif_rmb; 957 u8 modif_rmb;
958 unchar version; 958 u8 version;
959 unchar resp_aenc; 959 u8 resp_aenc;
960 unchar add_length; 960 u8 add_length;
961 unchar reserved1; 961 u8 reserved1;
962 unchar reserved2; 962 u8 reserved2;
963 unchar misc; 963 u8 misc;
964 unchar vendor[8]; 964 u8 vendor[8];
965 unchar product[16]; 965 u8 product[16];
966 unchar revision[4]; 966 u8 revision[4];
967} PACKED gdth_inq_data; 967} __attribute__((packed)) gdth_inq_data;
968 968
969/* READ_CAPACITY data format */ 969/* READ_CAPACITY data format */
970typedef struct { 970typedef struct {
971 ulong32 last_block_no; 971 u32 last_block_no;
972 ulong32 block_length; 972 u32 block_length;
973} PACKED gdth_rdcap_data; 973} __attribute__((packed)) gdth_rdcap_data;
974 974
975/* READ_CAPACITY (16) data format */ 975/* READ_CAPACITY (16) data format */
976typedef struct { 976typedef struct {
977 ulong64 last_block_no; 977 u64 last_block_no;
978 ulong32 block_length; 978 u32 block_length;
979} PACKED gdth_rdcap16_data; 979} __attribute__((packed)) gdth_rdcap16_data;
980 980
981/* REQUEST_SENSE data format */ 981/* REQUEST_SENSE data format */
982typedef struct { 982typedef struct {
983 unchar errorcode; 983 u8 errorcode;
984 unchar segno; 984 u8 segno;
985 unchar key; 985 u8 key;
986 ulong32 info; 986 u32 info;
987 unchar add_length; 987 u8 add_length;
988 ulong32 cmd_info; 988 u32 cmd_info;
989 unchar adsc; 989 u8 adsc;
990 unchar adsq; 990 u8 adsq;
991 unchar fruc; 991 u8 fruc;
992 unchar key_spec[3]; 992 u8 key_spec[3];
993} PACKED gdth_sense_data; 993} __attribute__((packed)) gdth_sense_data;
994 994
995/* MODE_SENSE data format */ 995/* MODE_SENSE data format */
996typedef struct { 996typedef struct {
997 struct { 997 struct {
998 unchar data_length; 998 u8 data_length;
999 unchar med_type; 999 u8 med_type;
1000 unchar dev_par; 1000 u8 dev_par;
1001 unchar bd_length; 1001 u8 bd_length;
1002 } PACKED hd; 1002 } __attribute__((packed)) hd;
1003 struct { 1003 struct {
1004 unchar dens_code; 1004 u8 dens_code;
1005 unchar block_count[3]; 1005 u8 block_count[3];
1006 unchar reserved; 1006 u8 reserved;
1007 unchar block_length[3]; 1007 u8 block_length[3];
1008 } PACKED bd; 1008 } __attribute__((packed)) bd;
1009} PACKED gdth_modep_data; 1009} __attribute__((packed)) gdth_modep_data;
1010 1010
1011/* stack frame */ 1011/* stack frame */
1012typedef struct { 1012typedef struct {
1013 ulong b[10]; /* 32/64 bit compiler ! */ 1013 unsigned long b[10]; /* 32/64 bit compiler ! */
1014} PACKED gdth_stackframe; 1014} __attribute__((packed)) gdth_stackframe;
1015 1015
1016 1016
1017/* function prototyping */ 1017/* function prototyping */
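
Both headers in this commit make the same two substitutions: the driver-private ulong32/ulong64/unchar/ushort typedefs give way to the kernel's fixed-width u32/u64/u8/u16, and the PACKED macro (whose definition is deleted from gdth_ioctl.h below) is spelled out as __attribute__((packed)). The attribute is load-bearing: these structs mirror the controller's DPRAM layouts byte for byte, and without it the compiler would pad u16/u32 members to their natural alignment. A minimal standalone demonstration with a hypothetical struct (not driver code):

#include <stdint.h>

struct natural {                 /* default alignment: compiler may pad */
        uint8_t  msg_answer;
        uint32_t msg_len;        /* typically preceded by 3 pad bytes */
};                               /* sizeof is usually 8 on common ABIs */

struct on_wire {                 /* firmware-visible layout */
        uint8_t  msg_answer;
        uint32_t msg_len;
} __attribute__((packed));

_Static_assert(sizeof(struct on_wire) == 5, "packed layout must not pad");
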
diff --git a/drivers/scsi/gdth_ioctl.h b/drivers/scsi/gdth_ioctl.h
index 783fae737f17..b004c6165887 100644
--- a/drivers/scsi/gdth_ioctl.h
+++ b/drivers/scsi/gdth_ioctl.h
@@ -32,109 +32,101 @@
32#define MAX_HDRIVES MAX_LDRIVES /* max. host drive count */ 32#define MAX_HDRIVES MAX_LDRIVES /* max. host drive count */
33#endif 33#endif
34 34
35/* typedefs */
36#ifdef __KERNEL__
37typedef u32 ulong32;
38typedef u64 ulong64;
39#endif
40
41#define PACKED __attribute__((packed))
42
43/* scatter/gather element */ 35/* scatter/gather element */
44typedef struct { 36typedef struct {
45 ulong32 sg_ptr; /* address */ 37 u32 sg_ptr; /* address */
46 ulong32 sg_len; /* length */ 38 u32 sg_len; /* length */
47} PACKED gdth_sg_str; 39} __attribute__((packed)) gdth_sg_str;
48 40
49/* scatter/gather element - 64bit addresses */ 41/* scatter/gather element - 64bit addresses */
50typedef struct { 42typedef struct {
51 ulong64 sg_ptr; /* address */ 43 u64 sg_ptr; /* address */
52 ulong32 sg_len; /* length */ 44 u32 sg_len; /* length */
53} PACKED gdth_sg64_str; 45} __attribute__((packed)) gdth_sg64_str;
54 46
55/* command structure */ 47/* command structure */
56typedef struct { 48typedef struct {
57 ulong32 BoardNode; /* board node (always 0) */ 49 u32 BoardNode; /* board node (always 0) */
58 ulong32 CommandIndex; /* command number */ 50 u32 CommandIndex; /* command number */
59 ushort OpCode; /* the command (READ,..) */ 51 u16 OpCode; /* the command (READ,..) */
60 union { 52 union {
61 struct { 53 struct {
62 ushort DeviceNo; /* number of cache drive */ 54 u16 DeviceNo; /* number of cache drive */
63 ulong32 BlockNo; /* block number */ 55 u32 BlockNo; /* block number */
64 ulong32 BlockCnt; /* block count */ 56 u32 BlockCnt; /* block count */
65 ulong32 DestAddr; /* dest. addr. (if s/g: -1) */ 57 u32 DestAddr; /* dest. addr. (if s/g: -1) */
66 ulong32 sg_canz; /* s/g element count */ 58 u32 sg_canz; /* s/g element count */
67 gdth_sg_str sg_lst[GDTH_MAXSG]; /* s/g list */ 59 gdth_sg_str sg_lst[GDTH_MAXSG]; /* s/g list */
68 } PACKED cache; /* cache service cmd. str. */ 60 } __attribute__((packed)) cache; /* cache service cmd. str. */
69 struct { 61 struct {
70 ushort DeviceNo; /* number of cache drive */ 62 u16 DeviceNo; /* number of cache drive */
71 ulong64 BlockNo; /* block number */ 63 u64 BlockNo; /* block number */
72 ulong32 BlockCnt; /* block count */ 64 u32 BlockCnt; /* block count */
73 ulong64 DestAddr; /* dest. addr. (if s/g: -1) */ 65 u64 DestAddr; /* dest. addr. (if s/g: -1) */
74 ulong32 sg_canz; /* s/g element count */ 66 u32 sg_canz; /* s/g element count */
75 gdth_sg64_str sg_lst[GDTH_MAXSG]; /* s/g list */ 67 gdth_sg64_str sg_lst[GDTH_MAXSG]; /* s/g list */
76 } PACKED cache64; /* cache service cmd. str. */ 68 } __attribute__((packed)) cache64; /* cache service cmd. str. */
77 struct { 69 struct {
78 ushort param_size; /* size of p_param buffer */ 70 u16 param_size; /* size of p_param buffer */
79 ulong32 subfunc; /* IOCTL function */ 71 u32 subfunc; /* IOCTL function */
80 ulong32 channel; /* device */ 72 u32 channel; /* device */
81 ulong64 p_param; /* buffer */ 73 u64 p_param; /* buffer */
82 } PACKED ioctl; /* IOCTL command structure */ 74 } __attribute__((packed)) ioctl; /* IOCTL command structure */
83 struct { 75 struct {
84 ushort reserved; 76 u16 reserved;
85 union { 77 union {
86 struct { 78 struct {
87 ulong32 msg_handle; /* message handle */ 79 u32 msg_handle; /* message handle */
88 ulong64 msg_addr; /* message buffer address */ 80 u64 msg_addr; /* message buffer address */
89 } PACKED msg; 81 } __attribute__((packed)) msg;
90 unchar data[12]; /* buffer for rtc data, ... */ 82 u8 data[12]; /* buffer for rtc data, ... */
91 } su; 83 } su;
92 } PACKED screen; /* screen service cmd. str. */ 84 } __attribute__((packed)) screen; /* screen service cmd. str. */
93 struct { 85 struct {
94 ushort reserved; 86 u16 reserved;
95 ulong32 direction; /* data direction */ 87 u32 direction; /* data direction */
96 ulong32 mdisc_time; /* disc. time (0: no timeout)*/ 88 u32 mdisc_time; /* disc. time (0: no timeout)*/
97 ulong32 mcon_time; /* connect time(0: no to.) */ 89 u32 mcon_time; /* connect time(0: no to.) */
98 ulong32 sdata; /* dest. addr. (if s/g: -1) */ 90 u32 sdata; /* dest. addr. (if s/g: -1) */
99 ulong32 sdlen; /* data length (bytes) */ 91 u32 sdlen; /* data length (bytes) */
100 ulong32 clen; /* SCSI cmd. length(6,10,12) */ 92 u32 clen; /* SCSI cmd. length(6,10,12) */
101 unchar cmd[12]; /* SCSI command */ 93 u8 cmd[12]; /* SCSI command */
102 unchar target; /* target ID */ 94 u8 target; /* target ID */
103 unchar lun; /* LUN */ 95 u8 lun; /* LUN */
104 unchar bus; /* SCSI bus number */ 96 u8 bus; /* SCSI bus number */
105 unchar priority; /* only 0 used */ 97 u8 priority; /* only 0 used */
106 ulong32 sense_len; /* sense data length */ 98 u32 sense_len; /* sense data length */
107 ulong32 sense_data; /* sense data addr. */ 99 u32 sense_data; /* sense data addr. */
108 ulong32 link_p; /* linked cmds (not supp.) */ 100 u32 link_p; /* linked cmds (not supp.) */
109 ulong32 sg_ranz; /* s/g element count */ 101 u32 sg_ranz; /* s/g element count */
110 gdth_sg_str sg_lst[GDTH_MAXSG]; /* s/g list */ 102 gdth_sg_str sg_lst[GDTH_MAXSG]; /* s/g list */
111 } PACKED raw; /* raw service cmd. struct. */ 103 } __attribute__((packed)) raw; /* raw service cmd. struct. */
112 struct { 104 struct {
113 ushort reserved; 105 u16 reserved;
114 ulong32 direction; /* data direction */ 106 u32 direction; /* data direction */
115 ulong32 mdisc_time; /* disc. time (0: no timeout)*/ 107 u32 mdisc_time; /* disc. time (0: no timeout)*/
116 ulong32 mcon_time; /* connect time(0: no to.) */ 108 u32 mcon_time; /* connect time(0: no to.) */
117 ulong64 sdata; /* dest. addr. (if s/g: -1) */ 109 u64 sdata; /* dest. addr. (if s/g: -1) */
118 ulong32 sdlen; /* data length (bytes) */ 110 u32 sdlen; /* data length (bytes) */
119 ulong32 clen; /* SCSI cmd. length(6,..,16) */ 111 u32 clen; /* SCSI cmd. length(6,..,16) */
120 unchar cmd[16]; /* SCSI command */ 112 u8 cmd[16]; /* SCSI command */
121 unchar target; /* target ID */ 113 u8 target; /* target ID */
122 unchar lun; /* LUN */ 114 u8 lun; /* LUN */
123 unchar bus; /* SCSI bus number */ 115 u8 bus; /* SCSI bus number */
124 unchar priority; /* only 0 used */ 116 u8 priority; /* only 0 used */
125 ulong32 sense_len; /* sense data length */ 117 u32 sense_len; /* sense data length */
126 ulong64 sense_data; /* sense data addr. */ 118 u64 sense_data; /* sense data addr. */
127 ulong32 sg_ranz; /* s/g element count */ 119 u32 sg_ranz; /* s/g element count */
128 gdth_sg64_str sg_lst[GDTH_MAXSG]; /* s/g list */ 120 gdth_sg64_str sg_lst[GDTH_MAXSG]; /* s/g list */
129 } PACKED raw64; /* raw service cmd. struct. */ 121 } __attribute__((packed)) raw64; /* raw service cmd. struct. */
130 } u; 122 } u;
131 /* additional variables */ 123 /* additional variables */
132 unchar Service; /* controller service */ 124 u8 Service; /* controller service */
133 unchar reserved; 125 u8 reserved;
134 ushort Status; /* command result */ 126 u16 Status; /* command result */
135 ulong32 Info; /* additional information */ 127 u32 Info; /* additional information */
136 void *RequestBuffer; /* request buffer */ 128 void *RequestBuffer; /* request buffer */
137} PACKED gdth_cmd_str; 129} __attribute__((packed)) gdth_cmd_str;
138 130
139/* controller event structure */ 131/* controller event structure */
140#define ES_ASYNC 1 132#define ES_ASYNC 1
@@ -142,129 +134,129 @@ typedef struct {
142#define ES_TEST 3 134#define ES_TEST 3
143#define ES_SYNC 4 135#define ES_SYNC 4
144typedef struct { 136typedef struct {
145 ushort size; /* size of structure */ 137 u16 size; /* size of structure */
146 union { 138 union {
147 char stream[16]; 139 char stream[16];
148 struct { 140 struct {
149 ushort ionode; 141 u16 ionode;
150 ushort service; 142 u16 service;
151 ulong32 index; 143 u32 index;
152 } PACKED driver; 144 } __attribute__((packed)) driver;
153 struct { 145 struct {
154 ushort ionode; 146 u16 ionode;
155 ushort service; 147 u16 service;
156 ushort status; 148 u16 status;
157 ulong32 info; 149 u32 info;
158 unchar scsi_coord[3]; 150 u8 scsi_coord[3];
159 } PACKED async; 151 } __attribute__((packed)) async;
160 struct { 152 struct {
161 ushort ionode; 153 u16 ionode;
162 ushort service; 154 u16 service;
163 ushort status; 155 u16 status;
164 ulong32 info; 156 u32 info;
165 ushort hostdrive; 157 u16 hostdrive;
166 unchar scsi_coord[3]; 158 u8 scsi_coord[3];
167 unchar sense_key; 159 u8 sense_key;
168 } PACKED sync; 160 } __attribute__((packed)) sync;
169 struct { 161 struct {
170 ulong32 l1, l2, l3, l4; 162 u32 l1, l2, l3, l4;
171 } PACKED test; 163 } __attribute__((packed)) test;
172 } eu; 164 } eu;
173 ulong32 severity; 165 u32 severity;
174 unchar event_string[256]; 166 u8 event_string[256];
175} PACKED gdth_evt_data; 167} __attribute__((packed)) gdth_evt_data;
176 168
177typedef struct { 169typedef struct {
178 ulong32 first_stamp; 170 u32 first_stamp;
179 ulong32 last_stamp; 171 u32 last_stamp;
180 ushort same_count; 172 u16 same_count;
181 ushort event_source; 173 u16 event_source;
182 ushort event_idx; 174 u16 event_idx;
183 unchar application; 175 u8 application;
184 unchar reserved; 176 u8 reserved;
185 gdth_evt_data event_data; 177 gdth_evt_data event_data;
186} PACKED gdth_evt_str; 178} __attribute__((packed)) gdth_evt_str;
187 179
188 180
189#ifdef GDTH_IOCTL_PROC 181#ifdef GDTH_IOCTL_PROC
190/* IOCTL structure (write) */ 182/* IOCTL structure (write) */
191typedef struct { 183typedef struct {
192 ulong32 magic; /* IOCTL magic */ 184 u32 magic; /* IOCTL magic */
193 ushort ioctl; /* IOCTL */ 185 u16 ioctl; /* IOCTL */
194 ushort ionode; /* controller number */ 186 u16 ionode; /* controller number */
195 ushort service; /* controller service */ 187 u16 service; /* controller service */
196 ushort timeout; /* timeout */ 188 u16 timeout; /* timeout */
197 union { 189 union {
198 struct { 190 struct {
199 unchar command[512]; /* controller command */ 191 u8 command[512]; /* controller command */
200 unchar data[1]; /* add. data */ 192 u8 data[1]; /* add. data */
201 } general; 193 } general;
202 struct { 194 struct {
203 unchar lock; /* lock/unlock */ 195 u8 lock; /* lock/unlock */
204 unchar drive_cnt; /* drive count */ 196 u8 drive_cnt; /* drive count */
205 ushort drives[MAX_HDRIVES];/* drives */ 197 u16 drives[MAX_HDRIVES];/* drives */
206 } lockdrv; 198 } lockdrv;
207 struct { 199 struct {
208 unchar lock; /* lock/unlock */ 200 u8 lock; /* lock/unlock */
209 unchar channel; /* channel */ 201 u8 channel; /* channel */
210 } lockchn; 202 } lockchn;
211 struct { 203 struct {
212 int erase; /* erase event ? */ 204 int erase; /* erase event ? */
213 int handle; 205 int handle;
214 unchar evt[EVENT_SIZE]; /* event structure */ 206 u8 evt[EVENT_SIZE]; /* event structure */
215 } event; 207 } event;
216 struct { 208 struct {
217 unchar bus; /* SCSI bus */ 209 u8 bus; /* SCSI bus */
218 unchar target; /* target ID */ 210 u8 target; /* target ID */
219 unchar lun; /* LUN */ 211 u8 lun; /* LUN */
220 unchar cmd_len; /* command length */ 212 u8 cmd_len; /* command length */
221 unchar cmd[12]; /* SCSI command */ 213 u8 cmd[12]; /* SCSI command */
222 } scsi; 214 } scsi;
223 struct { 215 struct {
224 ushort hdr_no; /* host drive number */ 216 u16 hdr_no; /* host drive number */
225 unchar flag; /* old meth./add/remove */ 217 u8 flag; /* old meth./add/remove */
226 } rescan; 218 } rescan;
227 } iu; 219 } iu;
228} gdth_iowr_str; 220} gdth_iowr_str;
229 221
230/* IOCTL structure (read) */ 222/* IOCTL structure (read) */
231typedef struct { 223typedef struct {
232 ulong32 size; /* buffer size */ 224 u32 size; /* buffer size */
233 ulong32 status; /* IOCTL error code */ 225 u32 status; /* IOCTL error code */
234 union { 226 union {
235 struct { 227 struct {
236 unchar data[1]; /* data */ 228 u8 data[1]; /* data */
237 } general; 229 } general;
238 struct { 230 struct {
239 ushort version; /* driver version */ 231 u16 version; /* driver version */
240 } drvers; 232 } drvers;
241 struct { 233 struct {
242 unchar type; /* controller type */ 234 u8 type; /* controller type */
243 ushort info; /* slot etc. */ 235 u16 info; /* slot etc. */
244 ushort oem_id; /* OEM ID */ 236 u16 oem_id; /* OEM ID */
245 ushort bios_ver; /* not used */ 237 u16 bios_ver; /* not used */
246 ushort access; /* not used */ 238 u16 access; /* not used */
247 ushort ext_type; /* extended type */ 239 u16 ext_type; /* extended type */
248 ushort device_id; /* device ID */ 240 u16 device_id; /* device ID */
249 ushort sub_device_id; /* sub device ID */ 241 u16 sub_device_id; /* sub device ID */
250 } ctrtype; 242 } ctrtype;
251 struct { 243 struct {
252 unchar version; /* OS version */ 244 u8 version; /* OS version */
253 unchar subversion; /* OS subversion */ 245 u8 subversion; /* OS subversion */
254 ushort revision; /* revision */ 246 u16 revision; /* revision */
255 } osvers; 247 } osvers;
256 struct { 248 struct {
257 ushort count; /* controller count */ 249 u16 count; /* controller count */
258 } ctrcnt; 250 } ctrcnt;
259 struct { 251 struct {
260 int handle; 252 int handle;
261 unchar evt[EVENT_SIZE]; /* event structure */ 253 u8 evt[EVENT_SIZE]; /* event structure */
262 } event; 254 } event;
263 struct { 255 struct {
264 unchar bus; /* SCSI bus, 0xff: invalid */ 256 u8 bus; /* SCSI bus, 0xff: invalid */
265 unchar target; /* target ID */ 257 u8 target; /* target ID */
266 unchar lun; /* LUN */ 258 u8 lun; /* LUN */
267 unchar cluster_type; /* cluster properties */ 259 u8 cluster_type; /* cluster properties */
268 } hdr_list[MAX_HDRIVES]; /* index is host drive number */ 260 } hdr_list[MAX_HDRIVES]; /* index is host drive number */
269 } iu; 261 } iu;
270} gdth_iord_str; 262} gdth_iord_str;
@@ -272,53 +264,53 @@ typedef struct {
272 264
273/* GDTIOCTL_GENERAL */ 265/* GDTIOCTL_GENERAL */
274typedef struct { 266typedef struct {
275 ushort ionode; /* controller number */ 267 u16 ionode; /* controller number */
276 ushort timeout; /* timeout */ 268 u16 timeout; /* timeout */
277 ulong32 info; /* error info */ 269 u32 info; /* error info */
278 ushort status; /* status */ 270 u16 status; /* status */
279 ulong data_len; /* data buffer size */ 271 unsigned long data_len; /* data buffer size */
280 ulong sense_len; /* sense buffer size */ 272 unsigned long sense_len; /* sense buffer size */
281 gdth_cmd_str command; /* command */ 273 gdth_cmd_str command; /* command */
282} gdth_ioctl_general; 274} gdth_ioctl_general;
283 275
284/* GDTIOCTL_LOCKDRV */ 276/* GDTIOCTL_LOCKDRV */
285typedef struct { 277typedef struct {
286 ushort ionode; /* controller number */ 278 u16 ionode; /* controller number */
287 unchar lock; /* lock/unlock */ 279 u8 lock; /* lock/unlock */
288 unchar drive_cnt; /* drive count */ 280 u8 drive_cnt; /* drive count */
289 ushort drives[MAX_HDRIVES]; /* drives */ 281 u16 drives[MAX_HDRIVES]; /* drives */
290} gdth_ioctl_lockdrv; 282} gdth_ioctl_lockdrv;
291 283
292/* GDTIOCTL_LOCKCHN */ 284/* GDTIOCTL_LOCKCHN */
293typedef struct { 285typedef struct {
294 ushort ionode; /* controller number */ 286 u16 ionode; /* controller number */
295 unchar lock; /* lock/unlock */ 287 u8 lock; /* lock/unlock */
296 unchar channel; /* channel */ 288 u8 channel; /* channel */
297} gdth_ioctl_lockchn; 289} gdth_ioctl_lockchn;
298 290
299/* GDTIOCTL_OSVERS */ 291/* GDTIOCTL_OSVERS */
300typedef struct { 292typedef struct {
301 unchar version; /* OS version */ 293 u8 version; /* OS version */
302 unchar subversion; /* OS subversion */ 294 u8 subversion; /* OS subversion */
303 ushort revision; /* revision */ 295 u16 revision; /* revision */
304} gdth_ioctl_osvers; 296} gdth_ioctl_osvers;
305 297
306/* GDTIOCTL_CTRTYPE */ 298/* GDTIOCTL_CTRTYPE */
307typedef struct { 299typedef struct {
308 ushort ionode; /* controller number */ 300 u16 ionode; /* controller number */
309 unchar type; /* controller type */ 301 u8 type; /* controller type */
310 ushort info; /* slot etc. */ 302 u16 info; /* slot etc. */
311 ushort oem_id; /* OEM ID */ 303 u16 oem_id; /* OEM ID */
312 ushort bios_ver; /* not used */ 304 u16 bios_ver; /* not used */
313 ushort access; /* not used */ 305 u16 access; /* not used */
314 ushort ext_type; /* extended type */ 306 u16 ext_type; /* extended type */
315 ushort device_id; /* device ID */ 307 u16 device_id; /* device ID */
316 ushort sub_device_id; /* sub device ID */ 308 u16 sub_device_id; /* sub device ID */
317} gdth_ioctl_ctrtype; 309} gdth_ioctl_ctrtype;
318 310
319/* GDTIOCTL_EVENT */ 311/* GDTIOCTL_EVENT */
320typedef struct { 312typedef struct {
321 ushort ionode; 313 u16 ionode;
322 int erase; /* erase event? */ 314 int erase; /* erase event? */
323 int handle; /* event handle */ 315 int handle; /* event handle */
324 gdth_evt_str event; 316 gdth_evt_str event;
@@ -326,22 +318,22 @@ typedef struct {
326 318
327/* GDTIOCTL_RESCAN/GDTIOCTL_HDRLIST */ 319/* GDTIOCTL_RESCAN/GDTIOCTL_HDRLIST */
328typedef struct { 320typedef struct {
329 ushort ionode; /* controller number */ 321 u16 ionode; /* controller number */
330 unchar flag; /* add/remove */ 322 u8 flag; /* add/remove */
331 ushort hdr_no; /* drive no. */ 323 u16 hdr_no; /* drive no. */
332 struct { 324 struct {
333 unchar bus; /* SCSI bus */ 325 u8 bus; /* SCSI bus */
334 unchar target; /* target ID */ 326 u8 target; /* target ID */
335 unchar lun; /* LUN */ 327 u8 lun; /* LUN */
336 unchar cluster_type; /* cluster properties */ 328 u8 cluster_type; /* cluster properties */
337 } hdr_list[MAX_HDRIVES]; /* index is host drive number */ 329 } hdr_list[MAX_HDRIVES]; /* index is host drive number */
338} gdth_ioctl_rescan; 330} gdth_ioctl_rescan;
339 331
340/* GDTIOCTL_RESET_BUS/GDTIOCTL_RESET_DRV */ 332/* GDTIOCTL_RESET_BUS/GDTIOCTL_RESET_DRV */
341typedef struct { 333typedef struct {
342 ushort ionode; /* controller number */ 334 u16 ionode; /* controller number */
343 ushort number; /* bus/host drive number */ 335 u16 number; /* bus/host drive number */
344 ushort status; /* status */ 336 u16 status; /* status */
345} gdth_ioctl_reset; 337} gdth_ioctl_reset;
346 338
347#endif 339#endif
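
The gdth hunks above and below are a mechanical migration from the driver's private integer typedefs to the kernel's fixed-width types. Summarizing the mapping as it appears in this diff (the pre-patch typedef lines are a sketch, since gdth.h's typedef block itself is not part of the excerpt):

    /* Presumed pre-patch gdth-private types and their replacements: */
    typedef unsigned char      unchar;     /* -> u8  */
    typedef unsigned short     ushort;     /* -> u16 */
    typedef unsigned int       ulong32;    /* -> u32 */
    typedef unsigned long long ulong64;    /* -> u64 */
    /* bare "ulong" becomes plain "unsigned long" (pointer-sized, kept
     * for irq-flag variables), and the PACKED macro becomes the
     * compiler attribute __attribute__((packed)). */

Fixed-width types make the ioctl structure layouts unambiguous across 32- and 64-bit builds, which is presumably why only "ulong" keeps a variable width.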
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
index 1258da34fbc2..0572b9bf4bd6 100644
--- a/drivers/scsi/gdth_proc.c
+++ b/drivers/scsi/gdth_proc.c
@@ -3,6 +3,7 @@
3 */ 3 */
4 4
5#include <linux/completion.h> 5#include <linux/completion.h>
6#include <linux/slab.h>
6 7
7int gdth_proc_info(struct Scsi_Host *host, char *buffer,char **start,off_t offset,int length, 8int gdth_proc_info(struct Scsi_Host *host, char *buffer,char **start,off_t offset,int length,
8 int inout) 9 int inout)
@@ -43,7 +44,7 @@ static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
43 int i, found; 44 int i, found;
44 gdth_cmd_str gdtcmd; 45 gdth_cmd_str gdtcmd;
45 gdth_cpar_str *pcpar; 46 gdth_cpar_str *pcpar;
46 ulong64 paddr; 47 u64 paddr;
47 48
48 char cmnd[MAX_COMMAND_SIZE]; 49 char cmnd[MAX_COMMAND_SIZE];
49 memset(cmnd, 0xff, 12); 50 memset(cmnd, 0xff, 12);
@@ -156,8 +157,8 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
156 off_t begin = 0,pos = 0; 157 off_t begin = 0,pos = 0;
157 int id, i, j, k, sec, flag; 158 int id, i, j, k, sec, flag;
158 int no_mdrv = 0, drv_no, is_mirr; 159 int no_mdrv = 0, drv_no, is_mirr;
159 ulong32 cnt; 160 u32 cnt;
160 ulong64 paddr; 161 u64 paddr;
161 int rc = -ENOMEM; 162 int rc = -ENOMEM;
162 163
163 gdth_cmd_str *gdtcmd; 164 gdth_cmd_str *gdtcmd;
@@ -220,14 +221,14 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
220 221
221 if (ha->more_proc) 222 if (ha->more_proc)
222 sprintf(hrec, "%d.%02d.%02d-%c%03X", 223 sprintf(hrec, "%d.%02d.%02d-%c%03X",
223 (unchar)(ha->binfo.upd_fw_ver>>24), 224 (u8)(ha->binfo.upd_fw_ver>>24),
224 (unchar)(ha->binfo.upd_fw_ver>>16), 225 (u8)(ha->binfo.upd_fw_ver>>16),
225 (unchar)(ha->binfo.upd_fw_ver), 226 (u8)(ha->binfo.upd_fw_ver),
226 ha->bfeat.raid ? 'R':'N', 227 ha->bfeat.raid ? 'R':'N',
227 ha->binfo.upd_revision); 228 ha->binfo.upd_revision);
228 else 229 else
229 sprintf(hrec, "%d.%02d", (unchar)(ha->cpar.version>>8), 230 sprintf(hrec, "%d.%02d", (u8)(ha->cpar.version>>8),
230 (unchar)(ha->cpar.version)); 231 (u8)(ha->cpar.version));
231 232
232 size = sprintf(buffer+len, 233 size = sprintf(buffer+len,
233 " Driver Ver.: \t%-10s\tFirmware Ver.: \t%s\n", 234 " Driver Ver.: \t%-10s\tFirmware Ver.: \t%s\n",
@@ -281,7 +282,7 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
281 pds->bid = ha->raw[i].local_no; 282 pds->bid = ha->raw[i].local_no;
282 pds->first = 0; 283 pds->first = 0;
283 pds->entries = ha->raw[i].pdev_cnt; 284 pds->entries = ha->raw[i].pdev_cnt;
284 cnt = (3*GDTH_SCRATCH/4 - 5 * sizeof(ulong32)) / 285 cnt = (3*GDTH_SCRATCH/4 - 5 * sizeof(u32)) /
285 sizeof(pds->list[0]); 286 sizeof(pds->list[0]);
286 if (pds->entries > cnt) 287 if (pds->entries > cnt)
287 pds->entries = cnt; 288 pds->entries = cnt;
@@ -604,7 +605,7 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
604 605
605 size = sprintf(buffer+len, 606 size = sprintf(buffer+len,
606 " Capacity [MB]:\t%-6d \tStart Sector: \t%d\n", 607 " Capacity [MB]:\t%-6d \tStart Sector: \t%d\n",
607 (ulong32)(ha->hdr[i].size/2048), ha->hdr[i].start_sec); 608 (u32)(ha->hdr[i].size/2048), ha->hdr[i].start_sec);
608 len += size; pos = begin + len; 609 len += size; pos = begin + len;
609 if (pos < offset) { 610 if (pos < offset) {
610 len = 0; 611 len = 0;
@@ -664,9 +665,9 @@ free_fail:
664} 665}
665 666
666static char *gdth_ioctl_alloc(gdth_ha_str *ha, int size, int scratch, 667static char *gdth_ioctl_alloc(gdth_ha_str *ha, int size, int scratch,
667 ulong64 *paddr) 668 u64 *paddr)
668{ 669{
669 ulong flags; 670 unsigned long flags;
670 char *ret_val; 671 char *ret_val;
671 672
672 if (size == 0) 673 if (size == 0)
@@ -691,9 +692,9 @@ static char *gdth_ioctl_alloc(gdth_ha_str *ha, int size, int scratch,
691 return ret_val; 692 return ret_val;
692} 693}
693 694
694static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, ulong64 paddr) 695static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, u64 paddr)
695{ 696{
696 ulong flags; 697 unsigned long flags;
697 698
698 if (buf == ha->pscratch) { 699 if (buf == ha->pscratch) {
699 spin_lock_irqsave(&ha->smp_lock, flags); 700 spin_lock_irqsave(&ha->smp_lock, flags);
@@ -705,16 +706,16 @@ static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, ulong64 paddr)
705} 706}
706 707
707#ifdef GDTH_IOCTL_PROC 708#ifdef GDTH_IOCTL_PROC
708static int gdth_ioctl_check_bin(gdth_ha_str *ha, ushort size) 709static int gdth_ioctl_check_bin(gdth_ha_str *ha, u16 size)
709{ 710{
710 ulong flags; 711 unsigned long flags;
711 int ret_val; 712 int ret_val;
712 713
713 spin_lock_irqsave(&ha->smp_lock, flags); 714 spin_lock_irqsave(&ha->smp_lock, flags);
714 715
715 ret_val = FALSE; 716 ret_val = FALSE;
716 if (ha->scratch_busy) { 717 if (ha->scratch_busy) {
717 if (((gdth_iord_str *)ha->pscratch)->size == (ulong32)size) 718 if (((gdth_iord_str *)ha->pscratch)->size == (u32)size)
718 ret_val = TRUE; 719 ret_val = TRUE;
719 } 720 }
720 spin_unlock_irqrestore(&ha->smp_lock, flags); 721 spin_unlock_irqrestore(&ha->smp_lock, flags);
@@ -724,11 +725,11 @@ static int gdth_ioctl_check_bin(gdth_ha_str *ha, ushort size)
724 725
725static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id) 726static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id)
726{ 727{
727 ulong flags; 728 unsigned long flags;
728 int i; 729 int i;
729 Scsi_Cmnd *scp; 730 Scsi_Cmnd *scp;
730 struct gdth_cmndinfo *cmndinfo; 731 struct gdth_cmndinfo *cmndinfo;
731 unchar b, t; 732 u8 b, t;
732 733
733 spin_lock_irqsave(&ha->smp_lock, flags); 734 spin_lock_irqsave(&ha->smp_lock, flags);
734 735
@@ -738,8 +739,8 @@ static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id)
738 739
739 b = scp->device->channel; 740 b = scp->device->channel;
740 t = scp->device->id; 741 t = scp->device->id;
741 if (!SPECIAL_SCP(scp) && t == (unchar)id && 742 if (!SPECIAL_SCP(scp) && t == (u8)id &&
742 b == (unchar)busnum) { 743 b == (u8)busnum) {
743 cmndinfo->wait_for_completion = 0; 744 cmndinfo->wait_for_completion = 0;
744 spin_unlock_irqrestore(&ha->smp_lock, flags); 745 spin_unlock_irqrestore(&ha->smp_lock, flags);
745 while (!cmndinfo->wait_for_completion) 746 while (!cmndinfo->wait_for_completion)
diff --git a/drivers/scsi/gdth_proc.h b/drivers/scsi/gdth_proc.h
index 9b900cc9ebe8..dab15f59f2cc 100644
--- a/drivers/scsi/gdth_proc.h
+++ b/drivers/scsi/gdth_proc.h
@@ -17,8 +17,8 @@ static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
17 int length, gdth_ha_str *ha); 17 int length, gdth_ha_str *ha);
18 18
19static char *gdth_ioctl_alloc(gdth_ha_str *ha, int size, int scratch, 19static char *gdth_ioctl_alloc(gdth_ha_str *ha, int size, int scratch,
20 ulong64 *paddr); 20 u64 *paddr);
21static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, ulong64 paddr); 21static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, u64 paddr);
22static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id); 22static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id);
23 23
24#endif 24#endif
diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c
index 5d1bf7e3d245..48f406850c65 100644
--- a/drivers/scsi/gvp11.c
+++ b/drivers/scsi/gvp11.c
@@ -1,5 +1,6 @@
1#include <linux/types.h> 1#include <linux/types.h>
2#include <linux/mm.h> 2#include <linux/mm.h>
3#include <linux/slab.h>
3#include <linux/blkdev.h> 4#include <linux/blkdev.h>
4#include <linux/init.h> 5#include <linux/init.h>
5#include <linux/interrupt.h> 6#include <linux/interrupt.h>
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index c968cc31cd86..6660fa92ffa1 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -24,6 +24,7 @@
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/blkdev.h> 25#include <linux/blkdev.h>
26#include <linux/kernel.h> 26#include <linux/kernel.h>
27#include <linux/slab.h>
27#include <linux/kthread.h> 28#include <linux/kthread.h>
28#include <linux/string.h> 29#include <linux/string.h>
29#include <linux/mm.h> 30#include <linux/mm.h>
@@ -180,14 +181,20 @@ void scsi_remove_host(struct Scsi_Host *shost)
180EXPORT_SYMBOL(scsi_remove_host); 181EXPORT_SYMBOL(scsi_remove_host);
181 182
182/** 183/**
183 * scsi_add_host - add a scsi host 184 * scsi_add_host_with_dma - add a scsi host with dma device
184 * @shost: scsi host pointer to add 185 * @shost: scsi host pointer to add
185 * @dev: a struct device of type scsi class 186 * @dev: a struct device of type scsi class
187 * @dma_dev: dma device for the host
188 *
189 * Note: You rarely need to worry about this unless you're in a
190 * virtualised host environment, so use the simpler scsi_add_host()
191 * function instead.
186 * 192 *
187 * Return value: 193 * Return value:
188 * 0 on success / != 0 for error 194 * 0 on success / != 0 for error
189 **/ 195 **/
190int scsi_add_host(struct Scsi_Host *shost, struct device *dev) 196int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
197 struct device *dma_dev)
191{ 198{
192 struct scsi_host_template *sht = shost->hostt; 199 struct scsi_host_template *sht = shost->hostt;
193 int error = -EINVAL; 200 int error = -EINVAL;
@@ -207,6 +214,9 @@ int scsi_add_host(struct Scsi_Host *shost, struct device *dev)
207 214
208 if (!shost->shost_gendev.parent) 215 if (!shost->shost_gendev.parent)
209 shost->shost_gendev.parent = dev ? dev : &platform_bus; 216 shost->shost_gendev.parent = dev ? dev : &platform_bus;
217 shost->dma_dev = dma_dev;
218
219 device_enable_async_suspend(&shost->shost_gendev);
210 220
211 error = device_add(&shost->shost_gendev); 221 error = device_add(&shost->shost_gendev);
212 if (error) 222 if (error)
@@ -215,6 +225,8 @@ int scsi_add_host(struct Scsi_Host *shost, struct device *dev)
215 scsi_host_set_state(shost, SHOST_RUNNING); 225 scsi_host_set_state(shost, SHOST_RUNNING);
216 get_device(shost->shost_gendev.parent); 226 get_device(shost->shost_gendev.parent);
217 227
228 device_enable_async_suspend(&shost->shost_dev);
229
218 error = device_add(&shost->shost_dev); 230 error = device_add(&shost->shost_dev);
219 if (error) 231 if (error)
220 goto out_del_gendev; 232 goto out_del_gendev;
@@ -262,7 +274,7 @@ int scsi_add_host(struct Scsi_Host *shost, struct device *dev)
262 fail: 274 fail:
263 return error; 275 return error;
264} 276}
265EXPORT_SYMBOL(scsi_add_host); 277EXPORT_SYMBOL(scsi_add_host_with_dma);
266 278
267static void scsi_host_dev_release(struct device *dev) 279static void scsi_host_dev_release(struct device *dev)
268{ 280{
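
With the export renamed to scsi_add_host_with_dma(), the common scsi_add_host() entry point presumably survives as a trivial inline wrapper (in include/scsi/scsi_host.h, not in this hunk) that passes one device for both roles:

    /* Sketch of the presumed wrapper: most drivers have a single struct
     * device that acts as both the SCSI parent and the DMA device, so
     * they pass it twice; virtualised transports (e.g. FC vports) can
     * name the physical HBA as dma_dev instead. */
    static inline int scsi_add_host(struct Scsi_Host *host, struct device *dev)
    {
        return scsi_add_host_with_dma(host, dev, dev);
    }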
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
new file mode 100644
index 000000000000..183d3a43c280
--- /dev/null
+++ b/drivers/scsi/hpsa.c
@@ -0,0 +1,3854 @@
1/*
2 * Disk Array driver for HP Smart Array SAS controllers
3 * Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
12 * NON INFRINGEMENT. See the GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 *
18 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
19 *
20 */
21
22#include <linux/module.h>
23#include <linux/interrupt.h>
24#include <linux/types.h>
25#include <linux/pci.h>
26#include <linux/kernel.h>
27#include <linux/slab.h>
28#include <linux/delay.h>
29#include <linux/fs.h>
30#include <linux/timer.h>
31#include <linux/seq_file.h>
32#include <linux/init.h>
33#include <linux/spinlock.h>
34#include <linux/smp_lock.h>
35#include <linux/compat.h>
36#include <linux/blktrace_api.h>
37#include <linux/uaccess.h>
38#include <linux/io.h>
39#include <linux/dma-mapping.h>
40#include <linux/completion.h>
41#include <linux/moduleparam.h>
42#include <scsi/scsi.h>
43#include <scsi/scsi_cmnd.h>
44#include <scsi/scsi_device.h>
45#include <scsi/scsi_host.h>
46#include <scsi/scsi_tcq.h>
47#include <linux/cciss_ioctl.h>
48#include <linux/string.h>
49#include <linux/bitmap.h>
50#include <asm/atomic.h>
51#include <linux/kthread.h>
52#include "hpsa_cmd.h"
53#include "hpsa.h"
54
55/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
56#define HPSA_DRIVER_VERSION "2.0.2-1"
57#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
58
59/* How long to wait (in milliseconds) for board to go into simple mode */
60#define MAX_CONFIG_WAIT 30000
61#define MAX_IOCTL_CONFIG_WAIT 1000
62
63/* define how many times we will try a command because of bus resets */
64#define MAX_CMD_RETRIES 3
65
66/* Embedded module documentation macros - see modules.h */
67MODULE_AUTHOR("Hewlett-Packard Company");
68MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
69 HPSA_DRIVER_VERSION);
70MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
71MODULE_VERSION(HPSA_DRIVER_VERSION);
72MODULE_LICENSE("GPL");
73
74static int hpsa_allow_any;
75module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
76MODULE_PARM_DESC(hpsa_allow_any,
77 "Allow hpsa driver to access unknown HP Smart Array hardware");
78
79/* define the PCI info for the cards we can control */
80static const struct pci_device_id hpsa_pci_device_id[] = {
81 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
82 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
83 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
84 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
85 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
86 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a},
87 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b},
88 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
89#define PCI_DEVICE_ID_HP_CISSF 0x333f
90 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x333F},
91 {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
92 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
93 {0,}
94};
95
96MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
97
98/* board_id = Subsystem Device ID & Vendor ID
99 * product = Marketing Name for the board
100 * access = Address of the struct of function pointers
101 */
102static struct board_type products[] = {
103 {0x3241103C, "Smart Array P212", &SA5_access},
104 {0x3243103C, "Smart Array P410", &SA5_access},
105 {0x3245103C, "Smart Array P410i", &SA5_access},
106 {0x3247103C, "Smart Array P411", &SA5_access},
107 {0x3249103C, "Smart Array P812", &SA5_access},
108 {0x324a103C, "Smart Array P712m", &SA5_access},
109 {0x324b103C, "Smart Array P711m", &SA5_access},
110 {0x3233103C, "StorageWorks P1210m", &SA5_access},
111 {0x333F103C, "StorageWorks P1210m", &SA5_access},
112 {0xFFFF103C, "Unknown Smart Array", &SA5_access},
113};
114
115static int number_of_controllers;
116
117static irqreturn_t do_hpsa_intr(int irq, void *dev_id);
118static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
119static void start_io(struct ctlr_info *h);
120
121#ifdef CONFIG_COMPAT
122static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg);
123#endif
124
125static void cmd_free(struct ctlr_info *h, struct CommandList *c);
126static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
127static struct CommandList *cmd_alloc(struct ctlr_info *h);
128static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
129static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
130 void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
131 int cmd_type);
132
133static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd,
134 void (*done)(struct scsi_cmnd *));
135static void hpsa_scan_start(struct Scsi_Host *);
136static int hpsa_scan_finished(struct Scsi_Host *sh,
137 unsigned long elapsed_time);
138static int hpsa_change_queue_depth(struct scsi_device *sdev,
139 int qdepth, int reason);
140
141static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
142static int hpsa_slave_alloc(struct scsi_device *sdev);
143static void hpsa_slave_destroy(struct scsi_device *sdev);
144
145static ssize_t raid_level_show(struct device *dev,
146 struct device_attribute *attr, char *buf);
147static ssize_t lunid_show(struct device *dev,
148 struct device_attribute *attr, char *buf);
149static ssize_t unique_id_show(struct device *dev,
150 struct device_attribute *attr, char *buf);
151static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
152static ssize_t host_store_rescan(struct device *dev,
153 struct device_attribute *attr, const char *buf, size_t count);
154static int check_for_unit_attention(struct ctlr_info *h,
155 struct CommandList *c);
156static void check_ioctl_unit_attention(struct ctlr_info *h,
157 struct CommandList *c);
158/* performant mode helper functions */
159static void calc_bucket_map(int *bucket, int num_buckets,
160 int nsgs, int *bucket_map);
161static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
162static inline u32 next_command(struct ctlr_info *h);
163
164static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
165static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
166static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
167static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
168
169static struct device_attribute *hpsa_sdev_attrs[] = {
170 &dev_attr_raid_level,
171 &dev_attr_lunid,
172 &dev_attr_unique_id,
173 NULL,
174};
175
176static struct device_attribute *hpsa_shost_attrs[] = {
177 &dev_attr_rescan,
178 NULL,
179};
180
181static struct scsi_host_template hpsa_driver_template = {
182 .module = THIS_MODULE,
183 .name = "hpsa",
184 .proc_name = "hpsa",
185 .queuecommand = hpsa_scsi_queue_command,
186 .scan_start = hpsa_scan_start,
187 .scan_finished = hpsa_scan_finished,
188 .change_queue_depth = hpsa_change_queue_depth,
189 .this_id = -1,
190 .use_clustering = ENABLE_CLUSTERING,
191 .eh_device_reset_handler = hpsa_eh_device_reset_handler,
192 .ioctl = hpsa_ioctl,
193 .slave_alloc = hpsa_slave_alloc,
194 .slave_destroy = hpsa_slave_destroy,
195#ifdef CONFIG_COMPAT
196 .compat_ioctl = hpsa_compat_ioctl,
197#endif
198 .sdev_attrs = hpsa_sdev_attrs,
199 .shost_attrs = hpsa_shost_attrs,
200};
201
202static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
203{
204 unsigned long *priv = shost_priv(sdev->host);
205 return (struct ctlr_info *) *priv;
206}
207
208static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
209{
210 unsigned long *priv = shost_priv(sh);
211 return (struct ctlr_info *) *priv;
212}
213
214static int check_for_unit_attention(struct ctlr_info *h,
215 struct CommandList *c)
216{
217 if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
218 return 0;
219
220 switch (c->err_info->SenseInfo[12]) {
221 case STATE_CHANGED:
222 dev_warn(&h->pdev->dev, "hpsa%d: a state change "
223 "detected, command retried\n", h->ctlr);
224 break;
225 case LUN_FAILED:
226 dev_warn(&h->pdev->dev, "hpsa%d: LUN failure "
227 "detected, action required\n", h->ctlr);
228 break;
229 case REPORT_LUNS_CHANGED:
230 dev_warn(&h->pdev->dev, "hpsa%d: report LUN data "
231 "changed, action required\n", h->ctlr);
232 /*
233 * Note: this REPORT_LUNS_CHANGED condition only occurs on the MSA2012.
234 */
235 break;
236 case POWER_OR_RESET:
237 dev_warn(&h->pdev->dev, "hpsa%d: a power on "
238 "or device reset detected\n", h->ctlr);
239 break;
240 case UNIT_ATTENTION_CLEARED:
241 dev_warn(&h->pdev->dev, "hpsa%d: unit attention "
242 "cleared by another initiator\n", h->ctlr);
243 break;
244 default:
245 dev_warn(&h->pdev->dev, "hpsa%d: unknown "
246 "unit attention detected\n", h->ctlr);
247 break;
248 }
249 return 1;
250}
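
check_for_unit_attention() indexes straight into fixed-format sense data: byte 2 (low nibble) holds the sense key, byte 12 the additional sense code (ASC), and byte 13 the qualifier (ASCQ). As a minimal sketch of that layout (the helper name is illustrative, not from the driver):

    /* Decode the fields of fixed-format SCSI sense data used above. */
    static void decode_fixed_sense(const u8 *sense, u8 *key, u8 *asc, u8 *ascq)
    {
        *key  = sense[2] & 0x0f;   /* sense key: low nibble of byte 2 */
        *asc  = sense[12];         /* additional sense code           */
        *ascq = sense[13];         /* additional sense code qualifier */
    }

A unit attention is thus SenseInfo[2] == UNIT_ATTENTION with SenseInfo[12] selecting the specific cause, exactly as the switch above dispatches.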
251
252static ssize_t host_store_rescan(struct device *dev,
253 struct device_attribute *attr,
254 const char *buf, size_t count)
255{
256 struct ctlr_info *h;
257 struct Scsi_Host *shost = class_to_shost(dev);
258 h = shost_to_hba(shost);
259 hpsa_scan_start(h->scsi_host);
260 return count;
261}
262
263/* Enqueuing and dequeuing functions for cmdlists. */
264static inline void addQ(struct hlist_head *list, struct CommandList *c)
265{
266 hlist_add_head(&c->list, list);
267}
268
269static inline u32 next_command(struct ctlr_info *h)
270{
271 u32 a;
272
273 if (unlikely(h->transMethod != CFGTBL_Trans_Performant))
274 return h->access.command_completed(h);
275
276 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
277 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
278 (h->reply_pool_head)++;
279 h->commands_outstanding--;
280 } else {
281 a = FIFO_EMPTY;
282 }
283 /* Check for wraparound */
284 if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
285 h->reply_pool_head = h->reply_pool;
286 h->reply_pool_wraparound ^= 1;
287 }
288 return a;
289}
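
next_command() consumes a reply ring in which bit 0 of every entry carries the producer's lap parity: the controller flips the bit it writes each time it wraps, and the host flips reply_pool_wraparound each time its head pointer wraps, so an entry is fresh exactly when the two parities agree. A self-contained model of that handshake (a sketch, not driver code):

    /* Pop one entry from a parity-tagged ring; returns the sentinel when
     * the entry at head was written on a previous lap (ring empty). */
    #define RING_EMPTY 0xffffffffU
    static u32 ring_pop(u32 *ring, unsigned int size,
                        unsigned int *head, u8 *expect)
    {
        u32 v = ring[*head];
        if ((v & 1) != *expect)
            return RING_EMPTY;        /* stale entry: nothing new yet */
        if (++(*head) == size) {      /* wrapped: next lap, flip parity */
            *head = 0;
            *expect ^= 1;
        }
        return v;
    }

The scheme needs no index register shared with the hardware: freshness is encoded in the data itself.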
290
291/* set_performant_mode: Modify the tag for cciss performant
292 * set bit 0 for pull model, bits 3-1 for block fetch
293 * register number
294 */
295static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
296{
297 if (likely(h->transMethod == CFGTBL_Trans_Performant))
298 c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
299}
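
The performant-mode tag packs two fields into the low bits of busaddr: bit 0 selects the pull model, and bits 3-1 carry a block-fetch register number taken from blockFetchTable[], indexed by the command's SG count. Assuming that layout (the macro names here are illustrative):

    #define PERF_PULL_MODEL   0x01    /* bit 0                        */
    #define PERF_FETCH_SHIFT  1       /* bits 3-1: fetch register no. */
    #define PERF_FETCH_MASK   0x07

    static u32 perf_tag_encode(u32 busaddr, u8 fetch_reg)
    {
        return busaddr | PERF_PULL_MODEL |
               ((u32)(fetch_reg & PERF_FETCH_MASK) << PERF_FETCH_SHIFT);
    }

    static u8 perf_tag_fetch_reg(u32 tag)
    {
        return (tag >> PERF_FETCH_SHIFT) & PERF_FETCH_MASK;
    }

This relies on command buffers being aligned well enough that the low bits of busaddr are free for flags.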
300
301static void enqueue_cmd_and_start_io(struct ctlr_info *h,
302 struct CommandList *c)
303{
304 unsigned long flags;
305
306 set_performant_mode(h, c);
307 spin_lock_irqsave(&h->lock, flags);
308 addQ(&h->reqQ, c);
309 h->Qdepth++;
310 start_io(h);
311 spin_unlock_irqrestore(&h->lock, flags);
312}
313
314static inline void removeQ(struct CommandList *c)
315{
316 if (WARN_ON(hlist_unhashed(&c->list)))
317 return;
318 hlist_del_init(&c->list);
319}
320
321static inline int is_hba_lunid(unsigned char scsi3addr[])
322{
323 return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
324}
325
326static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
327{
328 return (scsi3addr[3] & 0xC0) == 0x40;
329}
330
331static inline int is_scsi_rev_5(struct ctlr_info *h)
332{
333 if (!h->hba_inquiry_data)
334 return 0;
335 if ((h->hba_inquiry_data[2] & 0x07) == 5)
336 return 1;
337 return 0;
338}
339
340static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
341 "UNKNOWN"
342};
343#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)
344
345static ssize_t raid_level_show(struct device *dev,
346 struct device_attribute *attr, char *buf)
347{
348 ssize_t l = 0;
349 unsigned char rlevel;
350 struct ctlr_info *h;
351 struct scsi_device *sdev;
352 struct hpsa_scsi_dev_t *hdev;
353 unsigned long flags;
354
355 sdev = to_scsi_device(dev);
356 h = sdev_to_hba(sdev);
357 spin_lock_irqsave(&h->lock, flags);
358 hdev = sdev->hostdata;
359 if (!hdev) {
360 spin_unlock_irqrestore(&h->lock, flags);
361 return -ENODEV;
362 }
363
364 /* Is this even a logical drive? */
365 if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
366 spin_unlock_irqrestore(&h->lock, flags);
367 l = snprintf(buf, PAGE_SIZE, "N/A\n");
368 return l;
369 }
370
371 rlevel = hdev->raid_level;
372 spin_unlock_irqrestore(&h->lock, flags);
373 if (rlevel > RAID_UNKNOWN)
374 rlevel = RAID_UNKNOWN;
375 l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
376 return l;
377}
378
379static ssize_t lunid_show(struct device *dev,
380 struct device_attribute *attr, char *buf)
381{
382 struct ctlr_info *h;
383 struct scsi_device *sdev;
384 struct hpsa_scsi_dev_t *hdev;
385 unsigned long flags;
386 unsigned char lunid[8];
387
388 sdev = to_scsi_device(dev);
389 h = sdev_to_hba(sdev);
390 spin_lock_irqsave(&h->lock, flags);
391 hdev = sdev->hostdata;
392 if (!hdev) {
393 spin_unlock_irqrestore(&h->lock, flags);
394 return -ENODEV;
395 }
396 memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
397 spin_unlock_irqrestore(&h->lock, flags);
398 return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
399 lunid[0], lunid[1], lunid[2], lunid[3],
400 lunid[4], lunid[5], lunid[6], lunid[7]);
401}
402
403static ssize_t unique_id_show(struct device *dev,
404 struct device_attribute *attr, char *buf)
405{
406 struct ctlr_info *h;
407 struct scsi_device *sdev;
408 struct hpsa_scsi_dev_t *hdev;
409 unsigned long flags;
410 unsigned char sn[16];
411
412 sdev = to_scsi_device(dev);
413 h = sdev_to_hba(sdev);
414 spin_lock_irqsave(&h->lock, flags);
415 hdev = sdev->hostdata;
416 if (!hdev) {
417 spin_unlock_irqrestore(&h->lock, flags);
418 return -ENODEV;
419 }
420 memcpy(sn, hdev->device_id, sizeof(sn));
421 spin_unlock_irqrestore(&h->lock, flags);
422 return snprintf(buf, 16 * 2 + 2,
423 "%02X%02X%02X%02X%02X%02X%02X%02X"
424 "%02X%02X%02X%02X%02X%02X%02X%02X\n",
425 sn[0], sn[1], sn[2], sn[3],
426 sn[4], sn[5], sn[6], sn[7],
427 sn[8], sn[9], sn[10], sn[11],
428 sn[12], sn[13], sn[14], sn[15]);
429}
430
431static int hpsa_find_target_lun(struct ctlr_info *h,
432 unsigned char scsi3addr[], int bus, int *target, int *lun)
433{
434 /* finds an unused bus, target, lun for a new physical device
435 * assumes h->devlock is held
436 */
437 int i, found = 0;
438 DECLARE_BITMAP(lun_taken, HPSA_MAX_SCSI_DEVS_PER_HBA);
439
440 memset(&lun_taken[0], 0, HPSA_MAX_SCSI_DEVS_PER_HBA >> 3);
441
442 for (i = 0; i < h->ndevices; i++) {
443 if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
444 set_bit(h->dev[i]->target, lun_taken);
445 }
446
447 for (i = 0; i < HPSA_MAX_SCSI_DEVS_PER_HBA; i++) {
448 if (!test_bit(i, lun_taken)) {
449 /* *bus = 1; */
450 *target = i;
451 *lun = 0;
452 found = 1;
453 break;
454 }
455 }
456 return !found;
457}
458
459/* Add an entry into h->dev[] array. */
460static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
461 struct hpsa_scsi_dev_t *device,
462 struct hpsa_scsi_dev_t *added[], int *nadded)
463{
464 /* assumes h->devlock is held */
465 int n = h->ndevices;
466 int i;
467 unsigned char addr1[8], addr2[8];
468 struct hpsa_scsi_dev_t *sd;
469
470 if (n >= HPSA_MAX_SCSI_DEVS_PER_HBA) {
471 dev_err(&h->pdev->dev, "too many devices, some will be "
472 "inaccessible.\n");
473 return -1;
474 }
475
476 /* physical devices do not have lun or target assigned until now. */
477 if (device->lun != -1)
478 /* Logical device, lun is already assigned. */
479 goto lun_assigned;
480
481 /* If this device is a non-zero lun of a multi-lun device,
482 * byte 4 of the 8-byte LUN addr will contain the logical
483 * unit no, zero otherwise.
484 */
485 if (device->scsi3addr[4] == 0) {
486 /* This is not a non-zero lun of a multi-lun device */
487 if (hpsa_find_target_lun(h, device->scsi3addr,
488 device->bus, &device->target, &device->lun) != 0)
489 return -1;
490 goto lun_assigned;
491 }
492
493 /* This is a non-zero lun of a multi-lun device.
494 * Search through our list and find the device which
495 * has the same 8 byte LUN address, excepting byte 4.
496 * Assign the same bus and target for this new LUN.
497 * Use the logical unit number from the firmware.
498 */
499 memcpy(addr1, device->scsi3addr, 8);
500 addr1[4] = 0;
501 for (i = 0; i < n; i++) {
502 sd = h->dev[i];
503 memcpy(addr2, sd->scsi3addr, 8);
504 addr2[4] = 0;
505 /* differ only in byte 4? */
506 if (memcmp(addr1, addr2, 8) == 0) {
507 device->bus = sd->bus;
508 device->target = sd->target;
509 device->lun = device->scsi3addr[4];
510 break;
511 }
512 }
513 if (device->lun == -1) {
514 dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
515 " suspect firmware bug or unsupported hardware "
516 "configuration.\n");
517 return -1;
518 }
519
520lun_assigned:
521
522 h->dev[n] = device;
523 h->ndevices++;
524 added[*nadded] = device;
525 (*nadded)++;
526
527 /* initially, (before registering with scsi layer) we don't
528 * know our hostno and we don't want to print anything first
529 * time anyway (the scsi layer's inquiries will show that info)
530 */
531 /* if (hostno != -1) */
532 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
533 scsi_device_type(device->devtype), hostno,
534 device->bus, device->target, device->lun);
535 return 0;
536}
537
538/* Replace an entry from h->dev[] array. */
539static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
540 int entry, struct hpsa_scsi_dev_t *new_entry,
541 struct hpsa_scsi_dev_t *added[], int *nadded,
542 struct hpsa_scsi_dev_t *removed[], int *nremoved)
543{
544 /* assumes h->devlock is held */
545 BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);
546 removed[*nremoved] = h->dev[entry];
547 (*nremoved)++;
548 h->dev[entry] = new_entry;
549 added[*nadded] = new_entry;
550 (*nadded)++;
551 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
552 scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
553 new_entry->target, new_entry->lun);
554}
555
556/* Remove an entry from h->dev[] array. */
557static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
558 struct hpsa_scsi_dev_t *removed[], int *nremoved)
559{
560 /* assumes h->devlock is held */
561 int i;
562 struct hpsa_scsi_dev_t *sd;
563
564 BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);
565
566 sd = h->dev[entry];
567 removed[*nremoved] = h->dev[entry];
568 (*nremoved)++;
569
570 for (i = entry; i < h->ndevices-1; i++)
571 h->dev[i] = h->dev[i+1];
572 h->ndevices--;
573 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
574 scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
575 sd->lun);
576}
577
578#define SCSI3ADDR_EQ(a, b) ( \
579 (a)[7] == (b)[7] && \
580 (a)[6] == (b)[6] && \
581 (a)[5] == (b)[5] && \
582 (a)[4] == (b)[4] && \
583 (a)[3] == (b)[3] && \
584 (a)[2] == (b)[2] && \
585 (a)[1] == (b)[1] && \
586 (a)[0] == (b)[0])
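
SCSI3ADDR_EQ() is an unrolled byte-wise equality test over the 8-byte LUN address; functionally it matches a memcmp(), as in this sketch:

    /* Equivalent formulation of SCSI3ADDR_EQ() (sketch). */
    static inline int scsi3addr_eq(const unsigned char a[8],
                                   const unsigned char b[8])
    {
        return memcmp(a, b, 8) == 0;
    }

The macro form works on any array expression without a function call, which is presumably why it is spelled out.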
587
588static void fixup_botched_add(struct ctlr_info *h,
589 struct hpsa_scsi_dev_t *added)
590{
591 /* called when scsi_add_device fails in order to re-adjust
592 * h->dev[] to match the mid layer's view.
593 */
594 unsigned long flags;
595 int i, j;
596
597 spin_lock_irqsave(&h->lock, flags);
598 for (i = 0; i < h->ndevices; i++) {
599 if (h->dev[i] == added) {
600 for (j = i; j < h->ndevices-1; j++)
601 h->dev[j] = h->dev[j+1];
602 h->ndevices--;
603 break;
604 }
605 }
606 spin_unlock_irqrestore(&h->lock, flags);
607 kfree(added);
608}
609
610static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
611 struct hpsa_scsi_dev_t *dev2)
612{
613 if ((is_logical_dev_addr_mode(dev1->scsi3addr) ||
614 (dev1->lun != -1 && dev2->lun != -1)) &&
615 dev1->devtype != 0x0C)
616 return (memcmp(dev1, dev2, sizeof(*dev1)) == 0);
617
618 /* we compare everything except lun and target as these
619 * are not yet assigned. Compare parts likely
620 * to differ first
621 */
622 if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
623 sizeof(dev1->scsi3addr)) != 0)
624 return 0;
625 if (memcmp(dev1->device_id, dev2->device_id,
626 sizeof(dev1->device_id)) != 0)
627 return 0;
628 if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
629 return 0;
630 if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
631 return 0;
632 if (memcmp(dev1->revision, dev2->revision, sizeof(dev1->revision)) != 0)
633 return 0;
634 if (dev1->devtype != dev2->devtype)
635 return 0;
636 if (dev1->raid_level != dev2->raid_level)
637 return 0;
638 if (dev1->bus != dev2->bus)
639 return 0;
640 return 1;
641}
642
643/* Find needle in haystack. If exact match found, return DEVICE_SAME,
644 * and return needle location in *index. If scsi3addr matches, but not
645 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
646 * location in *index. If needle not found, return DEVICE_NOT_FOUND.
647 */
648static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
649 struct hpsa_scsi_dev_t *haystack[], int haystack_size,
650 int *index)
651{
652 int i;
653#define DEVICE_NOT_FOUND 0
654#define DEVICE_CHANGED 1
655#define DEVICE_SAME 2
656 for (i = 0; i < haystack_size; i++) {
657 if (haystack[i] == NULL) /* previously removed. */
658 continue;
659 if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
660 *index = i;
661 if (device_is_the_same(needle, haystack[i]))
662 return DEVICE_SAME;
663 else
664 return DEVICE_CHANGED;
665 }
666 }
667 *index = -1;
668 return DEVICE_NOT_FOUND;
669}
670
671static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
672 struct hpsa_scsi_dev_t *sd[], int nsds)
673{
674 /* sd contains scsi3 addresses and devtypes, and inquiry
675 * data. This function takes what's in sd to be the current
676 * reality and updates h->dev[] to reflect that reality.
677 */
678 int i, entry, device_change, changes = 0;
679 struct hpsa_scsi_dev_t *csd;
680 unsigned long flags;
681 struct hpsa_scsi_dev_t **added, **removed;
682 int nadded, nremoved;
683 struct Scsi_Host *sh = NULL;
684
685 added = kzalloc(sizeof(*added) * HPSA_MAX_SCSI_DEVS_PER_HBA,
686 GFP_KERNEL);
687 removed = kzalloc(sizeof(*removed) * HPSA_MAX_SCSI_DEVS_PER_HBA,
688 GFP_KERNEL);
689
690 if (!added || !removed) {
691 dev_warn(&h->pdev->dev, "out of memory in "
692 "adjust_hpsa_scsi_table\n");
693 goto free_and_out;
694 }
695
696 spin_lock_irqsave(&h->devlock, flags);
697
698 /* find any devices in h->dev[] that are not in
699 * sd[] and remove them from h->dev[], and for any
700 * devices which have changed, remove the old device
701 * info and add the new device info.
702 */
703 i = 0;
704 nremoved = 0;
705 nadded = 0;
706 while (i < h->ndevices) {
707 csd = h->dev[i];
708 device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
709 if (device_change == DEVICE_NOT_FOUND) {
710 changes++;
711 hpsa_scsi_remove_entry(h, hostno, i,
712 removed, &nremoved);
713 continue; /* remove ^^^, hence i not incremented */
714 } else if (device_change == DEVICE_CHANGED) {
715 changes++;
716 hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
717 added, &nadded, removed, &nremoved);
718 /* Set it to NULL to prevent it from being freed
719 * at the bottom of hpsa_update_scsi_devices()
720 */
721 sd[entry] = NULL;
722 }
723 i++;
724 }
725
726 /* Now, make sure every device listed in sd[] is also
727 * listed in h->dev[], adding them if they aren't found
728 */
729
730 for (i = 0; i < nsds; i++) {
731 if (!sd[i]) /* if already added above. */
732 continue;
733 device_change = hpsa_scsi_find_entry(sd[i], h->dev,
734 h->ndevices, &entry);
735 if (device_change == DEVICE_NOT_FOUND) {
736 changes++;
737 if (hpsa_scsi_add_entry(h, hostno, sd[i],
738 added, &nadded) != 0)
739 break;
740 sd[i] = NULL; /* prevent from being freed later. */
741 } else if (device_change == DEVICE_CHANGED) {
742 /* should never happen... */
743 changes++;
744 dev_warn(&h->pdev->dev,
745 "device unexpectedly changed.\n");
746 /* but if it does happen, we just ignore that device */
747 }
748 }
749 spin_unlock_irqrestore(&h->devlock, flags);
750
751 /* Don't notify scsi mid layer of any changes the first time through
752 * (or if there are no changes); scsi_scan_host will do it later the
753 * first time through.
754 */
755 if (hostno == -1 || !changes)
756 goto free_and_out;
757
758 sh = h->scsi_host;
759 /* Notify scsi mid layer of any removed devices */
760 for (i = 0; i < nremoved; i++) {
761 struct scsi_device *sdev =
762 scsi_device_lookup(sh, removed[i]->bus,
763 removed[i]->target, removed[i]->lun);
764 if (sdev != NULL) {
765 scsi_remove_device(sdev);
766 scsi_device_put(sdev);
767 } else {
768 /* We don't expect to get here.
769 * future cmds to this device will get selection
770 * timeout as if the device was gone.
771 */
772 dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
773 " for removal.", hostno, removed[i]->bus,
774 removed[i]->target, removed[i]->lun);
775 }
776 kfree(removed[i]);
777 removed[i] = NULL;
778 }
779
780 /* Notify scsi mid layer of any added devices */
781 for (i = 0; i < nadded; i++) {
782 if (scsi_add_device(sh, added[i]->bus,
783 added[i]->target, added[i]->lun) == 0)
784 continue;
785 dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
786 "device not added.\n", hostno, added[i]->bus,
787 added[i]->target, added[i]->lun);
788 /* now we have to remove it from h->dev,
789 * since it didn't get added to scsi mid layer
790 */
791 fixup_botched_add(h, added[i]);
792 }
793
794free_and_out:
795 kfree(added);
796 kfree(removed);
797}
798
799/*
800 * Lookup bus/target/lun and return the corresponding struct hpsa_scsi_dev_t *.
801 * Assumes h->devlock is held.
802 */
803static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
804 int bus, int target, int lun)
805{
806 int i;
807 struct hpsa_scsi_dev_t *sd;
808
809 for (i = 0; i < h->ndevices; i++) {
810 sd = h->dev[i];
811 if (sd->bus == bus && sd->target == target && sd->lun == lun)
812 return sd;
813 }
814 return NULL;
815}
816
817/* link sdev->hostdata to our per-device structure. */
818static int hpsa_slave_alloc(struct scsi_device *sdev)
819{
820 struct hpsa_scsi_dev_t *sd;
821 unsigned long flags;
822 struct ctlr_info *h;
823
824 h = sdev_to_hba(sdev);
825 spin_lock_irqsave(&h->devlock, flags);
826 sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
827 sdev_id(sdev), sdev->lun);
828 if (sd != NULL)
829 sdev->hostdata = sd;
830 spin_unlock_irqrestore(&h->devlock, flags);
831 return 0;
832}
833
834static void hpsa_slave_destroy(struct scsi_device *sdev)
835{
836 /* nothing to do. */
837}
838
839static void hpsa_scsi_setup(struct ctlr_info *h)
840{
841 h->ndevices = 0;
842 h->scsi_host = NULL;
843 spin_lock_init(&h->devlock);
844}
845
846static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
847{
848 int i;
849
850 if (!h->cmd_sg_list)
851 return;
852 for (i = 0; i < h->nr_cmds; i++) {
853 kfree(h->cmd_sg_list[i]);
854 h->cmd_sg_list[i] = NULL;
855 }
856 kfree(h->cmd_sg_list);
857 h->cmd_sg_list = NULL;
858}
859
860static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
861{
862 int i;
863
864 if (h->chainsize <= 0)
865 return 0;
866
867 h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
868 GFP_KERNEL);
869 if (!h->cmd_sg_list)
870 return -ENOMEM;
871 for (i = 0; i < h->nr_cmds; i++) {
872 h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
873 h->chainsize, GFP_KERNEL);
874 if (!h->cmd_sg_list[i])
875 goto clean;
876 }
877 return 0;
878
879clean:
880 hpsa_free_sg_chain_blocks(h);
881 return -ENOMEM;
882}
883
884static void hpsa_map_sg_chain_block(struct ctlr_info *h,
885 struct CommandList *c)
886{
887 struct SGDescriptor *chain_sg, *chain_block;
888 u64 temp64;
889
890 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
891 chain_block = h->cmd_sg_list[c->cmdindex];
892 chain_sg->Ext = HPSA_SG_CHAIN;
893 chain_sg->Len = sizeof(*chain_sg) *
894 (c->Header.SGTotal - h->max_cmd_sg_entries);
895 temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len,
896 PCI_DMA_TODEVICE);
897 chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL);
898 chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
899}
900
901static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
902 struct CommandList *c)
903{
904 struct SGDescriptor *chain_sg;
905 union u64bit temp64;
906
907 if (c->Header.SGTotal <= h->max_cmd_sg_entries)
908 return;
909
910 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
911 temp64.val32.lower = chain_sg->Addr.lower;
912 temp64.val32.upper = chain_sg->Addr.upper;
913 pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
914}
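
Together these two helpers implement SG chaining: when a request needs more scatter-gather descriptors than the h->max_cmd_sg_entries that fit inline in the CommandList, the last inline slot is converted into an HPSA_SG_CHAIN pointer to a DMA-mapped external block holding the rest, and the unmap path reassembles the 64-bit bus address from its lower/upper halves before pci_unmap_single(). The chain block size falls straight out of the Len computation above (a sketch using the same names):

    /* Bytes occupied by the descriptors that spilled out of the inline
     * area, matching chain_sg->Len in hpsa_map_sg_chain_block(). */
    static size_t chain_block_len(u16 sg_total, int max_inline)
    {
        return sizeof(struct SGDescriptor) * (sg_total - max_inline);
    }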
915
916static void complete_scsi_command(struct CommandList *cp,
917 int timeout, u32 tag)
918{
919 struct scsi_cmnd *cmd;
920 struct ctlr_info *h;
921 struct ErrorInfo *ei;
922
923 unsigned char sense_key;
924 unsigned char asc; /* additional sense code */
925 unsigned char ascq; /* additional sense code qualifier */
926
927 ei = cp->err_info;
928 cmd = (struct scsi_cmnd *) cp->scsi_cmd;
929 h = cp->h;
930
931 scsi_dma_unmap(cmd); /* undo the DMA mappings */
932 if (cp->Header.SGTotal > h->max_cmd_sg_entries)
933 hpsa_unmap_sg_chain_block(h, cp);
934
935 cmd->result = (DID_OK << 16); /* host byte */
936 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
937 cmd->result |= ei->ScsiStatus;
938
939 /* copy the sense data whether we need to or not. */
940 memcpy(cmd->sense_buffer, ei->SenseInfo,
941 ei->SenseLen > SCSI_SENSE_BUFFERSIZE ?
942 SCSI_SENSE_BUFFERSIZE :
943 ei->SenseLen);
944 scsi_set_resid(cmd, ei->ResidualCnt);
945
946 if (ei->CommandStatus == 0) {
947 cmd->scsi_done(cmd);
948 cmd_free(h, cp);
949 return;
950 }
951
952 /* an error has occurred */
953 switch (ei->CommandStatus) {
954
955 case CMD_TARGET_STATUS:
956 if (ei->ScsiStatus) {
957 /* Get sense key */
958 sense_key = 0xf & ei->SenseInfo[2];
959 /* Get additional sense code */
960 asc = ei->SenseInfo[12];
961 /* Get additional sense code qualifier */
962 ascq = ei->SenseInfo[13];
963 }
964
965 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
966 if (check_for_unit_attention(h, cp)) {
967 cmd->result = DID_SOFT_ERROR << 16;
968 break;
969 }
970 if (sense_key == ILLEGAL_REQUEST) {
971 /*
972 * SCSI REPORT_LUNS is commonly unsupported on
973 * Smart Array. Suppress noisy complaint.
974 */
975 if (cp->Request.CDB[0] == REPORT_LUNS)
976 break;
977
978 /* If ASC/ASCQ indicate Logical Unit
979 * Not Supported condition.
980 */
981 if ((asc == 0x25) && (ascq == 0x0)) {
982 dev_warn(&h->pdev->dev, "cp %p "
983 "has check condition\n", cp);
984 break;
985 }
986 }
987
988 if (sense_key == NOT_READY) {
989 /* If Sense is Not Ready, Logical Unit
990 * Not ready, Manual Intervention
991 * required
992 */
993 if ((asc == 0x04) && (ascq == 0x03)) {
994 dev_warn(&h->pdev->dev, "cp %p "
995 "has check condition: unit "
996 "not ready, manual "
997 "intervention required\n", cp);
998 break;
999 }
1000 }
1001 if (sense_key == ABORTED_COMMAND) {
1002 /* Aborted command is retryable */
1003 dev_warn(&h->pdev->dev, "cp %p "
1004 "has check condition: aborted command: "
1005 "ASC: 0x%x, ASCQ: 0x%x\n",
1006 cp, asc, ascq);
1007 cmd->result = DID_SOFT_ERROR << 16;
1008 break;
1009 }
1010 /* Must be some other type of check condition */
1011 dev_warn(&h->pdev->dev, "cp %p has check condition: "
1012 "unknown type: "
1013 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
1014 "Returning result: 0x%x, "
1015 "cmd=[%02x %02x %02x %02x %02x "
1016 "%02x %02x %02x %02x %02x %02x "
1017 "%02x %02x %02x %02x %02x]\n",
1018 cp, sense_key, asc, ascq,
1019 cmd->result,
1020 cmd->cmnd[0], cmd->cmnd[1],
1021 cmd->cmnd[2], cmd->cmnd[3],
1022 cmd->cmnd[4], cmd->cmnd[5],
1023 cmd->cmnd[6], cmd->cmnd[7],
1024 cmd->cmnd[8], cmd->cmnd[9],
1025 cmd->cmnd[10], cmd->cmnd[11],
1026 cmd->cmnd[12], cmd->cmnd[13],
1027 cmd->cmnd[14], cmd->cmnd[15]);
1028 break;
1029 }
1030
1031
1032 /* Problem was not a check condition
1033 * Pass it up to the upper layers...
1034 */
1035 if (ei->ScsiStatus) {
1036 dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
1037 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
1038 "Returning result: 0x%x\n",
1039 cp, ei->ScsiStatus,
1040 sense_key, asc, ascq,
1041 cmd->result);
1042 } else { /* scsi status is zero??? How??? */
1043 dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
1044 "Returning no connection.\n", cp),
1045
1046 /* Ordinarily, this case should never happen,
1047 * but there is a bug in some released firmware
1048 * revisions that allows it to happen if, for
1049 * example, a 4100 backplane loses power and
1050 * the tape drive is in it. We assume that
1051 * it's a fatal error of some kind because we
1052 * can't show that it wasn't. We will make it
1053 * look like selection timeout since that is
1054 * the most common reason for this to occur,
1055 * and it's severe enough.
1056 */
1057
1058 cmd->result = DID_NO_CONNECT << 16;
1059 }
1060 break;
1061
1062 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
1063 break;
1064 case CMD_DATA_OVERRUN:
1065 dev_warn(&h->pdev->dev, "cp %p has"
1066 " completed with data overrun "
1067 "reported\n", cp);
1068 break;
1069 case CMD_INVALID: {
1070 /* print_bytes(cp, sizeof(*cp), 1, 0);
1071 print_cmd(cp); */
1072 /* We get CMD_INVALID if you address a non-existent device
1073 * instead of a selection timeout (no response). You will
1074 * see this if you yank out a drive, then try to access it.
1075 * This is kind of a shame because it means that any other
1076 * CMD_INVALID (e.g. driver bug) will get interpreted as a
1077 * missing target. */
1078 cmd->result = DID_NO_CONNECT << 16;
1079 }
1080 break;
1081 case CMD_PROTOCOL_ERR:
1082 dev_warn(&h->pdev->dev, "cp %p has "
1083 "protocol error \n", cp);
1084 break;
1085 case CMD_HARDWARE_ERR:
1086 cmd->result = DID_ERROR << 16;
1087 dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp);
1088 break;
1089 case CMD_CONNECTION_LOST:
1090 cmd->result = DID_ERROR << 16;
1091 dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
1092 break;
1093 case CMD_ABORTED:
1094 cmd->result = DID_ABORT << 16;
1095 dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
1096 cp, ei->ScsiStatus);
1097 break;
1098 case CMD_ABORT_FAILED:
1099 cmd->result = DID_ERROR << 16;
1100 dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
1101 break;
1102 case CMD_UNSOLICITED_ABORT:
1103 cmd->result = DID_RESET << 16;
1104 dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
1105 "abort\n", cp);
1106 break;
1107 case CMD_TIMEOUT:
1108 cmd->result = DID_TIME_OUT << 16;
1109 dev_warn(&h->pdev->dev, "cp %p timed out\n", cp);
1110 break;
1111 default:
1112 cmd->result = DID_ERROR << 16;
1113 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
1114 cp, ei->CommandStatus);
1115 }
1116 cmd->scsi_done(cmd);
1117 cmd_free(h, cp);
1118}
1119
1120static int hpsa_scsi_detect(struct ctlr_info *h)
1121{
1122 struct Scsi_Host *sh;
1123 int error;
1124
1125 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
1126 if (sh == NULL)
1127 goto fail;
1128
1129 sh->io_port = 0;
1130 sh->n_io_port = 0;
1131 sh->this_id = -1;
1132 sh->max_channel = 3;
1133 sh->max_cmd_len = MAX_COMMAND_SIZE;
1134 sh->max_lun = HPSA_MAX_LUN;
1135 sh->max_id = HPSA_MAX_LUN;
1136 sh->can_queue = h->nr_cmds;
1137 sh->cmd_per_lun = h->nr_cmds;
1138 sh->sg_tablesize = h->maxsgentries;
1139 h->scsi_host = sh;
1140 sh->hostdata[0] = (unsigned long) h;
1141 sh->irq = h->intr[PERF_MODE_INT];
1142 sh->unique_id = sh->irq;
1143 error = scsi_add_host(sh, &h->pdev->dev);
1144 if (error)
1145 goto fail_host_put;
1146 scsi_scan_host(sh);
1147 return 0;
1148
1149 fail_host_put:
1150 dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_add_host"
1151 " failed for controller %d\n", h->ctlr);
1152 scsi_host_put(sh);
1153 return error;
1154 fail:
1155 dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_host_alloc"
1156 " failed for controller %d\n", h->ctlr);
1157 return -ENOMEM;
1158}
1159
1160static void hpsa_pci_unmap(struct pci_dev *pdev,
1161 struct CommandList *c, int sg_used, int data_direction)
1162{
1163 int i;
1164 union u64bit addr64;
1165
1166 for (i = 0; i < sg_used; i++) {
1167 addr64.val32.lower = c->SG[i].Addr.lower;
1168 addr64.val32.upper = c->SG[i].Addr.upper;
1169 pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len,
1170 data_direction);
1171 }
1172}
1173
1174static void hpsa_map_one(struct pci_dev *pdev,
1175 struct CommandList *cp,
1176 unsigned char *buf,
1177 size_t buflen,
1178 int data_direction)
1179{
1180 u64 addr64;
1181
1182 if (buflen == 0 || data_direction == PCI_DMA_NONE) {
1183 cp->Header.SGList = 0;
1184 cp->Header.SGTotal = 0;
1185 return;
1186 }
1187
1188 addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction);
1189 cp->SG[0].Addr.lower =
1190 (u32) (addr64 & (u64) 0x00000000FFFFFFFF);
1191 cp->SG[0].Addr.upper =
1192 (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
1193 cp->SG[0].Len = buflen;
1194 cp->Header.SGList = (u8) 1; /* no. SGs contig in this cmd */
1195 cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
1196}
1197
1198static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
1199 struct CommandList *c)
1200{
1201 DECLARE_COMPLETION_ONSTACK(wait);
1202
1203 c->waiting = &wait;
1204 enqueue_cmd_and_start_io(h, c);
1205 wait_for_completion(&wait);
1206}
1207
1208static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
1209 struct CommandList *c, int data_direction)
1210{
1211 int retry_count = 0;
1212
1213 do {
1214 memset(c->err_info, 0, sizeof(*c->err_info));
1215 hpsa_scsi_do_simple_cmd_core(h, c);
1216 retry_count++;
1217 } while (check_for_unit_attention(h, c) && retry_count <= 3);
1218 hpsa_pci_unmap(h->pdev, c, 1, data_direction);
1219}
1220
1221static void hpsa_scsi_interpret_error(struct CommandList *cp)
1222{
1223 struct ErrorInfo *ei;
1224 struct device *d = &cp->h->pdev->dev;
1225
1226 ei = cp->err_info;
1227 switch (ei->CommandStatus) {
1228 case CMD_TARGET_STATUS:
1229 dev_warn(d, "cmd %p has completed with errors\n", cp);
1230 dev_warn(d, "cmd %p has SCSI Status = %x\n", cp,
1231 ei->ScsiStatus);
1232 if (ei->ScsiStatus == 0)
1233 dev_warn(d, "SCSI status is abnormally zero. "
1234 "(probably indicates selection timeout "
1235 "reported incorrectly due to a known "
1236 "firmware bug, circa July, 2001.)\n");
1237 break;
1238 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
1239 dev_info(d, "UNDERRUN\n");
1240 break;
1241 case CMD_DATA_OVERRUN:
1242 dev_warn(d, "cp %p has completed with data overrun\n", cp);
1243 break;
1244 case CMD_INVALID: {
1245 /* controller unfortunately reports SCSI passthru's
1246 * to non-existent targets as invalid commands.
1247 */
1248 dev_warn(d, "cp %p is reported invalid (probably means "
1249 "target device no longer present)\n", cp);
1250 /* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0);
1251 print_cmd(cp); */
1252 }
1253 break;
1254 case CMD_PROTOCOL_ERR:
1255 dev_warn(d, "cp %p has protocol error\n", cp);
1256 break;
1257 case CMD_HARDWARE_ERR:
1258 /* cmd->result = DID_ERROR << 16; */
1259 dev_warn(d, "cp %p had hardware error\n", cp);
1260 break;
1261 case CMD_CONNECTION_LOST:
1262 dev_warn(d, "cp %p had connection lost\n", cp);
1263 break;
1264 case CMD_ABORTED:
1265 dev_warn(d, "cp %p was aborted\n", cp);
1266 break;
1267 case CMD_ABORT_FAILED:
1268 dev_warn(d, "cp %p reports abort failed\n", cp);
1269 break;
1270 case CMD_UNSOLICITED_ABORT:
1271 dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp);
1272 break;
1273 case CMD_TIMEOUT:
1274 dev_warn(d, "cp %p timed out\n", cp);
1275 break;
1276 default:
1277 dev_warn(d, "cp %p returned unknown status %x\n", cp,
1278 ei->CommandStatus);
1279 }
1280}
1281
1282static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
1283 unsigned char page, unsigned char *buf,
1284 unsigned char bufsize)
1285{
1286 int rc = IO_OK;
1287 struct CommandList *c;
1288 struct ErrorInfo *ei;
1289
1290 c = cmd_special_alloc(h);
1291
1292 if (c == NULL) { /* trouble... */
1293 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
1294 return -ENOMEM;
1295 }
1296
1297 fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD);
1298 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
1299 ei = c->err_info;
1300 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
1301 hpsa_scsi_interpret_error(c);
1302 rc = -1;
1303 }
1304 cmd_special_free(h, c);
1305 return rc;
1306}
1307
1308static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
1309{
1310 int rc = IO_OK;
1311 struct CommandList *c;
1312 struct ErrorInfo *ei;
1313
1314 c = cmd_special_alloc(h);
1315
1316 if (c == NULL) { /* trouble... */
1317 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
1318 return -ENOMEM;
1319 }
1320
1321 fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG);
1322 hpsa_scsi_do_simple_cmd_core(h, c);
1323 /* no unmap needed here because no data xfer. */
1324
1325 ei = c->err_info;
1326 if (ei->CommandStatus != 0) {
1327 hpsa_scsi_interpret_error(c);
1328 rc = -1;
1329 }
1330 cmd_special_free(h, c);
1331 return rc;
1332}
1333
1334static void hpsa_get_raid_level(struct ctlr_info *h,
1335 unsigned char *scsi3addr, unsigned char *raid_level)
1336{
1337 int rc;
1338 unsigned char *buf;
1339
1340 *raid_level = RAID_UNKNOWN;
1341 buf = kzalloc(64, GFP_KERNEL);
1342 if (!buf)
1343 return;
1344 rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64);
1345 if (rc == 0)
1346 *raid_level = buf[8];
1347 if (*raid_level > RAID_UNKNOWN)
1348 *raid_level = RAID_UNKNOWN;
1349 kfree(buf);
1350 return;
1351}
1352
1353/* Get the device id from inquiry page 0x83 */
1354static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
1355 unsigned char *device_id, int buflen)
1356{
1357 int rc;
1358 unsigned char *buf;
1359
1360 if (buflen > 16)
1361 buflen = 16;
1362 buf = kzalloc(64, GFP_KERNEL);
1363 if (!buf)
1364 return -1;
1365 rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64);
1366 if (rc == 0)
1367 memcpy(device_id, &buf[8], buflen);
1368 kfree(buf);
1369 return rc != 0;
1370}
1371
1372static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
1373 struct ReportLUNdata *buf, int bufsize,
1374 int extended_response)
1375{
1376 int rc = IO_OK;
1377 struct CommandList *c;
1378 unsigned char scsi3addr[8];
1379 struct ErrorInfo *ei;
1380
1381 c = cmd_special_alloc(h);
1382 if (c == NULL) { /* trouble... */
1383 dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
1384 return -1;
1385 }
1386 /* address the controller */
1387 memset(scsi3addr, 0, sizeof(scsi3addr));
1388 fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
1389 buf, bufsize, 0, scsi3addr, TYPE_CMD);
1390 if (extended_response)
1391 c->Request.CDB[1] = extended_response;
1392 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
1393 ei = c->err_info;
1394 if (ei->CommandStatus != 0 &&
1395 ei->CommandStatus != CMD_DATA_UNDERRUN) {
1396 hpsa_scsi_interpret_error(c);
1397 rc = -1;
1398 }
1399 cmd_special_free(h, c);
1400 return rc;
1401}
1402
1403static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
1404 struct ReportLUNdata *buf,
1405 int bufsize, int extended_response)
1406{
1407 return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response);
1408}
1409
1410static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
1411 struct ReportLUNdata *buf, int bufsize)
1412{
1413 return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
1414}
1415
1416static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
1417 int bus, int target, int lun)
1418{
1419 device->bus = bus;
1420 device->target = target;
1421 device->lun = lun;
1422}
1423
1424static int hpsa_update_device_info(struct ctlr_info *h,
1425 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device)
1426{
1427#define OBDR_TAPE_INQ_SIZE 49
1428 unsigned char *inq_buff;
1429
1430 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
1431 if (!inq_buff)
1432 goto bail_out;
1433
1434 /* Do an inquiry to the device to see what it is. */
1435 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
1436 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
1437 /* Inquiry failed (msg printed already) */
1438 dev_err(&h->pdev->dev,
1439 "hpsa_update_device_info: inquiry failed\n");
1440 goto bail_out;
1441 }
1442
1443 /* As a side effect, record the firmware version number
1444 * if we happen to be talking to the RAID controller.
1445 */
1446 if (is_hba_lunid(scsi3addr))
1447 memcpy(h->firm_ver, &inq_buff[32], 4);
1448
1449 this_device->devtype = (inq_buff[0] & 0x1f);
1450 memcpy(this_device->scsi3addr, scsi3addr, 8);
1451 memcpy(this_device->vendor, &inq_buff[8],
1452 sizeof(this_device->vendor));
1453 memcpy(this_device->model, &inq_buff[16],
1454 sizeof(this_device->model));
1455 memcpy(this_device->revision, &inq_buff[32],
1456 sizeof(this_device->revision));
1457 memset(this_device->device_id, 0,
1458 sizeof(this_device->device_id));
1459 hpsa_get_device_id(h, scsi3addr, this_device->device_id,
1460 sizeof(this_device->device_id));
1461
1462 if (this_device->devtype == TYPE_DISK &&
1463 is_logical_dev_addr_mode(scsi3addr))
1464 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
1465 else
1466 this_device->raid_level = RAID_UNKNOWN;
1467
1468 kfree(inq_buff);
1469 return 0;
1470
1471bail_out:
1472 kfree(inq_buff);
1473 return 1;
1474}
1475
1476static unsigned char *msa2xxx_model[] = {
1477 "MSA2012",
1478 "MSA2024",
1479 "MSA2312",
1480 "MSA2324",
1481 NULL,
1482};
1483
1484static int is_msa2xxx(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
1485{
1486 int i;
1487
1488 for (i = 0; msa2xxx_model[i]; i++)
1489 if (strncmp(device->model, msa2xxx_model[i],
1490 strlen(msa2xxx_model[i])) == 0)
1491 return 1;
1492 return 0;
1493}
1494
1495/* Helper function to assign bus, target, lun mapping of devices.
1496 * Puts non-msa2xxx logical volumes on bus 0, msa2xxx logical
1497  * volumes on bus 1, physical devices on bus 2, and the HBA on bus 3.
1498 * Logical drive target and lun are assigned at this time, but
1499 * physical device lun and target assignment are deferred (assigned
1500 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
1501 */
1502static void figure_bus_target_lun(struct ctlr_info *h,
1503 u8 *lunaddrbytes, int *bus, int *target, int *lun,
1504 struct hpsa_scsi_dev_t *device)
1505{
1506 u32 lunid;
1507
1508 if (is_logical_dev_addr_mode(lunaddrbytes)) {
1509 /* logical device */
1510 if (unlikely(is_scsi_rev_5(h))) {
1511 /* p1210m, logical drives lun assignments
1512 * match SCSI REPORT LUNS data.
1513 */
1514 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
1515 *bus = 0;
1516 *target = 0;
1517 *lun = (lunid & 0x3fff) + 1;
1518 } else {
1519 /* not p1210m... */
1520 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
1521 if (is_msa2xxx(h, device)) {
1522 /* msa2xxx way, put logicals on bus 1
1523 * and match target/lun numbers box
1524 * reports.
1525 */
1526 *bus = 1;
1527 *target = (lunid >> 16) & 0x3fff;
1528 *lun = lunid & 0x00ff;
1529 } else {
1530 /* Traditional smart array way. */
1531 *bus = 0;
1532 *lun = 0;
1533 *target = lunid & 0x3fff;
1534 }
1535 }
1536 } else {
1537 /* physical device */
1538 if (is_hba_lunid(lunaddrbytes))
1539 if (unlikely(is_scsi_rev_5(h))) {
1540 *bus = 0; /* put p1210m ctlr at 0,0,0 */
1541 *target = 0;
1542 *lun = 0;
1543 return;
1544 } else
1545 *bus = 3; /* traditional smartarray */
1546 else
1547 *bus = 2; /* physical disk */
1548 *target = -1;
1549 *lun = -1; /* we will fill these in later. */
1550 }
1551}
1552
1553/*
1554 * If there is no lun 0 on a target, linux won't find any devices.
1555 * For the MSA2xxx boxes, we have to manually detect the enclosure
1556 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
1557 * it for some reason. *tmpdevice is the target we're adding,
1558 * this_device is a pointer into the current element of currentsd[]
1559 * that we're building up in update_scsi_devices(), below.
1560 * lunzerobits is a bitmap that tracks which targets already have a
1561 * lun 0 assigned.
1562 * Returns 1 if an enclosure was added, 0 if not.
1563 */
1564static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
1565 struct hpsa_scsi_dev_t *tmpdevice,
1566 struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
1567 int bus, int target, int lun, unsigned long lunzerobits[],
1568 int *nmsa2xxx_enclosures)
1569{
1570 unsigned char scsi3addr[8];
1571
1572 if (test_bit(target, lunzerobits))
1573 return 0; /* There is already a lun 0 on this target. */
1574
1575 if (!is_logical_dev_addr_mode(lunaddrbytes))
1576 return 0; /* It's the logical targets that may lack lun 0. */
1577
1578 if (!is_msa2xxx(h, tmpdevice))
1579 return 0; /* It's only the MSA2xxx that have this problem. */
1580
1581 if (lun == 0) /* if lun is 0, then obviously we have a lun 0. */
1582 return 0;
1583
1584 	/* Build the enclosure's address first; it is used below. */
1585 	memset(scsi3addr, 0, 8);
1586 	scsi3addr[3] = target;
1587 	if (is_hba_lunid(scsi3addr))
1588 		return 0; /* Don't add the RAID controller here. */
1589 
1590 	if (is_scsi_rev_5(h))
1591 		return 0; /* p1210m doesn't need to do this. */
1592 
1593 #define MAX_MSA2XXX_ENCLOSURES 32
1594 	if (*nmsa2xxx_enclosures >= MAX_MSA2XXX_ENCLOSURES) {
1595 		dev_warn(&h->pdev->dev, "Maximum number of MSA2XXX "
1596 			"enclosures exceeded.  Check your hardware "
1597 			"configuration.\n");
1598 		return 0;
1599 	}
1600 if (hpsa_update_device_info(h, scsi3addr, this_device))
1601 return 0;
1602 (*nmsa2xxx_enclosures)++;
1603 hpsa_set_bus_target_lun(this_device, bus, target, 0);
1604 set_bit(target, lunzerobits);
1605 return 1;
1606}
1607
1608/*
1609 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev,
1610 * logdev. The number of luns in physdev and logdev are returned in
1611 * *nphysicals and *nlogicals, respectively.
1612 * Returns 0 on success, -1 otherwise.
1613 */
1614static int hpsa_gather_lun_info(struct ctlr_info *h,
1615 int reportlunsize,
1616 struct ReportLUNdata *physdev, u32 *nphysicals,
1617 struct ReportLUNdata *logdev, u32 *nlogicals)
1618{
1619 if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) {
1620 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
1621 return -1;
1622 }
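	/* The REPORT LUNS payload starts with a big-endian byte count;
	 * each LUN entry that follows is 8 bytes, hence the divide by 8.
	 */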
1623 *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 8;
1624 if (*nphysicals > HPSA_MAX_PHYS_LUN) {
1625 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
1626 " %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
1627 *nphysicals - HPSA_MAX_PHYS_LUN);
1628 *nphysicals = HPSA_MAX_PHYS_LUN;
1629 }
1630 if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) {
1631 dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
1632 return -1;
1633 }
1634 *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
1635 /* Reject Logicals in excess of our max capability. */
1636 if (*nlogicals > HPSA_MAX_LUN) {
1637 dev_warn(&h->pdev->dev,
1638 "maximum logical LUNs (%d) exceeded. "
1639 "%d LUNs ignored.\n", HPSA_MAX_LUN,
1640 *nlogicals - HPSA_MAX_LUN);
1641 *nlogicals = HPSA_MAX_LUN;
1642 }
1643 if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
1644 dev_warn(&h->pdev->dev,
1645 "maximum logical + physical LUNs (%d) exceeded. "
1646 "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
1647 *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
1648 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
1649 }
1650 return 0;
1651}
1652
1653 static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,
1654 int nphysicals, int nlogicals, struct ReportLUNdata *physdev_list,
1655 struct ReportLUNdata *logdev_list)
1656{
1657 /* Helper function, figure out where the LUN ID info is coming from
1658 * given index i, lists of physical and logical devices, where in
1659 * the list the raid controller is supposed to appear (first or last)
1660 */
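	/* For example, with 2 physical and 3 logical LUNs:
	 *   SCSI rev 5 (ctlr first): ctlr, phys0, phys1, log0, log1, log2
	 *   otherwise  (ctlr last):  phys0, phys1, log0, log1, log2, ctlr
	 */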
1661
1662 int logicals_start = nphysicals + (raid_ctlr_position == 0);
1663 int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
1664
1665 if (i == raid_ctlr_position)
1666 return RAID_CTLR_LUNID;
1667
1668 if (i < logicals_start)
1669 return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0];
1670
1671 if (i < last_device)
1672 return &logdev_list->LUN[i - nphysicals -
1673 (raid_ctlr_position == 0)][0];
1674 BUG();
1675 return NULL;
1676}
1677
1678static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
1679{
1680 /* the idea here is we could get notified
1681 * that some devices have changed, so we do a report
1682 * physical luns and report logical luns cmd, and adjust
1683 * our list of devices accordingly.
1684 *
1685 * The scsi3addr's of devices won't change so long as the
1686 * adapter is not reset. That means we can rescan and
1687 * tell which devices we already know about, vs. new
1688 * devices, vs. disappearing devices.
1689 */
1690 struct ReportLUNdata *physdev_list = NULL;
1691 struct ReportLUNdata *logdev_list = NULL;
1692 unsigned char *inq_buff = NULL;
1693 u32 nphysicals = 0;
1694 u32 nlogicals = 0;
1695 u32 ndev_allocated = 0;
1696 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
1697 int ncurrent = 0;
1698 int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8;
1699 int i, nmsa2xxx_enclosures, ndevs_to_allocate;
1700 int bus, target, lun;
1701 int raid_ctlr_position;
1702 DECLARE_BITMAP(lunzerobits, HPSA_MAX_TARGETS_PER_CTLR);
1703
1704 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_SCSI_DEVS_PER_HBA,
1705 GFP_KERNEL);
1706 physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
1707 logdev_list = kzalloc(reportlunsize, GFP_KERNEL);
1708 inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
1709 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
1710
1711 if (!currentsd || !physdev_list || !logdev_list ||
1712 !inq_buff || !tmpdevice) {
1713 dev_err(&h->pdev->dev, "out of memory\n");
1714 goto out;
1715 }
1716 memset(lunzerobits, 0, sizeof(lunzerobits));
1717
1718 if (hpsa_gather_lun_info(h, reportlunsize, physdev_list, &nphysicals,
1719 logdev_list, &nlogicals))
1720 goto out;
1721
1722 	/* We might see up to 32 MSA2xxx enclosures: really only 8 of them,
1723 	 * but each shows up 4 times, once through each path.  The plus 1
1724 * is for the RAID controller.
1725 */
1726 ndevs_to_allocate = nphysicals + nlogicals + MAX_MSA2XXX_ENCLOSURES + 1;
1727
1728 /* Allocate the per device structures */
1729 for (i = 0; i < ndevs_to_allocate; i++) {
1730 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
1731 if (!currentsd[i]) {
1732 dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
1733 __FILE__, __LINE__);
1734 goto out;
1735 }
1736 ndev_allocated++;
1737 }
1738
1739 if (unlikely(is_scsi_rev_5(h)))
1740 raid_ctlr_position = 0;
1741 else
1742 raid_ctlr_position = nphysicals + nlogicals;
1743
1744 /* adjust our table of devices */
1745 nmsa2xxx_enclosures = 0;
1746 for (i = 0; i < nphysicals + nlogicals + 1; i++) {
1747 u8 *lunaddrbytes;
1748
1749 /* Figure out where the LUN ID info is coming from */
1750 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
1751 i, nphysicals, nlogicals, physdev_list, logdev_list);
1752 /* skip masked physical devices. */
1753 if (lunaddrbytes[3] & 0xC0 &&
1754 i < nphysicals + (raid_ctlr_position == 0))
1755 continue;
1756
1757 /* Get device type, vendor, model, device id */
1758 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice))
1759 continue; /* skip it if we can't talk to it. */
1760 figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun,
1761 tmpdevice);
1762 this_device = currentsd[ncurrent];
1763
1764 /*
1765 * For the msa2xxx boxes, we have to insert a LUN 0 which
1766 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
1767 * is nonetheless an enclosure device there. We have to
1768 * present that otherwise linux won't find anything if
1769 * there is no lun 0.
1770 */
1771 if (add_msa2xxx_enclosure_device(h, tmpdevice, this_device,
1772 lunaddrbytes, bus, target, lun, lunzerobits,
1773 &nmsa2xxx_enclosures)) {
1774 ncurrent++;
1775 this_device = currentsd[ncurrent];
1776 }
1777
1778 *this_device = *tmpdevice;
1779 hpsa_set_bus_target_lun(this_device, bus, target, lun);
1780
1781 switch (this_device->devtype) {
1782 case TYPE_ROM: {
1783 /* We don't *really* support actual CD-ROM devices,
1784 * just "One Button Disaster Recovery" tape drive
1785 * which temporarily pretends to be a CD-ROM drive.
1786 * So we check that the device is really an OBDR tape
1787 * device by checking for "$DR-10" in bytes 43-48 of
1788 * the inquiry data.
1789 */
1790 char obdr_sig[7];
1791#define OBDR_TAPE_SIG "$DR-10"
1792 strncpy(obdr_sig, &inq_buff[43], 6);
1793 obdr_sig[6] = '\0';
1794 if (strncmp(obdr_sig, OBDR_TAPE_SIG, 6) != 0)
1795 /* Not OBDR device, ignore it. */
1796 break;
1797 }
1798 ncurrent++;
1799 break;
1800 case TYPE_DISK:
1801 if (i < nphysicals)
1802 break;
1803 ncurrent++;
1804 break;
1805 case TYPE_TAPE:
1806 case TYPE_MEDIUM_CHANGER:
1807 ncurrent++;
1808 break;
1809 case TYPE_RAID:
1810 /* Only present the Smartarray HBA as a RAID controller.
1811 * If it's a RAID controller other than the HBA itself
1812 * (an external RAID controller, MSA500 or similar)
1813 * don't present it.
1814 */
1815 if (!is_hba_lunid(lunaddrbytes))
1816 break;
1817 ncurrent++;
1818 break;
1819 default:
1820 break;
1821 }
1822 if (ncurrent >= HPSA_MAX_SCSI_DEVS_PER_HBA)
1823 break;
1824 }
1825 adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
1826out:
1827 kfree(tmpdevice);
1828 for (i = 0; i < ndev_allocated; i++)
1829 kfree(currentsd[i]);
1830 kfree(currentsd);
1831 kfree(inq_buff);
1832 kfree(physdev_list);
1833 kfree(logdev_list);
1834}
1835
1836/* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
1837 * dma mapping and fills in the scatter gather entries of the
1838 * hpsa command, cp.
1839 */
1840static int hpsa_scatter_gather(struct ctlr_info *h,
1841 struct CommandList *cp,
1842 struct scsi_cmnd *cmd)
1843{
1844 unsigned int len;
1845 struct scatterlist *sg;
1846 u64 addr64;
1847 int use_sg, i, sg_index, chained;
1848 struct SGDescriptor *curr_sg;
1849
1850 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
1851
1852 use_sg = scsi_dma_map(cmd);
1853 if (use_sg < 0)
1854 return use_sg;
1855
1856 if (!use_sg)
1857 goto sglist_finished;
1858
1859 curr_sg = cp->SG;
1860 chained = 0;
1861 sg_index = 0;
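	/* Walk the scatterlist.  Once we reach the last SG slot that fits
	 * in the command itself and more entries remain, spill the rest
	 * into this command's preallocated chain block, which is mapped
	 * below by hpsa_map_sg_chain_block().
	 */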
1862 scsi_for_each_sg(cmd, sg, use_sg, i) {
1863 if (i == h->max_cmd_sg_entries - 1 &&
1864 use_sg > h->max_cmd_sg_entries) {
1865 chained = 1;
1866 curr_sg = h->cmd_sg_list[cp->cmdindex];
1867 sg_index = 0;
1868 }
1869 addr64 = (u64) sg_dma_address(sg);
1870 len = sg_dma_len(sg);
1871 curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
1872 curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
1873 curr_sg->Len = len;
1874 curr_sg->Ext = 0; /* we are not chaining */
1875 curr_sg++;
1876 }
1877
1878 if (use_sg + chained > h->maxSG)
1879 h->maxSG = use_sg + chained;
1880
1881 if (chained) {
1882 cp->Header.SGList = h->max_cmd_sg_entries;
1883 cp->Header.SGTotal = (u16) (use_sg + 1);
1884 hpsa_map_sg_chain_block(h, cp);
1885 return 0;
1886 }
1887
1888sglist_finished:
1889
1890 cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */
1891 cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */
1892 return 0;
1893}
1894
1895
1896static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd,
1897 void (*done)(struct scsi_cmnd *))
1898{
1899 struct ctlr_info *h;
1900 struct hpsa_scsi_dev_t *dev;
1901 unsigned char scsi3addr[8];
1902 struct CommandList *c;
1903 unsigned long flags;
1904
1905 /* Get the ptr to our adapter structure out of cmd->host. */
1906 h = sdev_to_hba(cmd->device);
1907 dev = cmd->device->hostdata;
1908 if (!dev) {
1909 cmd->result = DID_NO_CONNECT << 16;
1910 done(cmd);
1911 return 0;
1912 }
1913 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
1914
1915 /* Need a lock as this is being allocated from the pool */
1916 spin_lock_irqsave(&h->lock, flags);
1917 c = cmd_alloc(h);
1918 spin_unlock_irqrestore(&h->lock, flags);
1919 if (c == NULL) { /* trouble... */
1920 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
1921 return SCSI_MLQUEUE_HOST_BUSY;
1922 }
1923
1924 /* Fill in the command list header */
1925
1926 cmd->scsi_done = done; /* save this for use by completion code */
1927
1928 /* save c in case we have to abort it */
1929 cmd->host_scribble = (unsigned char *) c;
1930
1931 c->cmd_type = CMD_SCSI;
1932 c->scsi_cmd = cmd;
1933 c->Header.ReplyQueue = 0; /* unused in simple mode */
1934 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
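	/* Encode the command's pool index in the tag so the completion
	 * path can find it directly (see process_indexed_cmd()) instead
	 * of searching the completion queue.
	 */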
1935 c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT);
1936 c->Header.Tag.lower |= DIRECT_LOOKUP_BIT;
1937
1938 /* Fill in the request block... */
1939
1940 c->Request.Timeout = 0;
1941 memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
1942 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
1943 c->Request.CDBLen = cmd->cmd_len;
1944 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
1945 c->Request.Type.Type = TYPE_CMD;
1946 c->Request.Type.Attribute = ATTR_SIMPLE;
1947 switch (cmd->sc_data_direction) {
1948 case DMA_TO_DEVICE:
1949 c->Request.Type.Direction = XFER_WRITE;
1950 break;
1951 case DMA_FROM_DEVICE:
1952 c->Request.Type.Direction = XFER_READ;
1953 break;
1954 case DMA_NONE:
1955 c->Request.Type.Direction = XFER_NONE;
1956 break;
1957 case DMA_BIDIRECTIONAL:
1958 /* This can happen if a buggy application does a scsi passthru
1959 * and sets both inlen and outlen to non-zero. ( see
1960 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
1961 */
1962
1963 c->Request.Type.Direction = XFER_RSVD;
1964 /* This is technically wrong, and hpsa controllers should
1965 * reject it with CMD_INVALID, which is the most correct
1966 * response, but non-fibre backends appear to let it
1967 * slide by, and give the same results as if this field
1968 * were set correctly. Either way is acceptable for
1969 * our purposes here.
1970 */
1971
1972 break;
1973
1974 default:
1975 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
1976 cmd->sc_data_direction);
1977 BUG();
1978 break;
1979 }
1980
1981 if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
1982 cmd_free(h, c);
1983 return SCSI_MLQUEUE_HOST_BUSY;
1984 }
1985 enqueue_cmd_and_start_io(h, c);
1986 /* the cmd'll come back via intr handler in complete_scsi_command() */
1987 return 0;
1988}
1989
1990static void hpsa_scan_start(struct Scsi_Host *sh)
1991{
1992 struct ctlr_info *h = shost_to_hba(sh);
1993 unsigned long flags;
1994
1995 /* wait until any scan already in progress is finished. */
1996 while (1) {
1997 spin_lock_irqsave(&h->scan_lock, flags);
1998 if (h->scan_finished)
1999 break;
2000 spin_unlock_irqrestore(&h->scan_lock, flags);
2001 wait_event(h->scan_wait_queue, h->scan_finished);
2002 /* Note: We don't need to worry about a race between this
2003 * thread and driver unload because the midlayer will
2004 * have incremented the reference count, so unload won't
2005 * happen if we're in here.
2006 */
2007 }
2008 h->scan_finished = 0; /* mark scan as in progress */
2009 spin_unlock_irqrestore(&h->scan_lock, flags);
2010
2011 hpsa_update_scsi_devices(h, h->scsi_host->host_no);
2012
2013 spin_lock_irqsave(&h->scan_lock, flags);
2014 h->scan_finished = 1; /* mark scan as finished. */
2015 wake_up_all(&h->scan_wait_queue);
2016 spin_unlock_irqrestore(&h->scan_lock, flags);
2017}
2018
2019static int hpsa_scan_finished(struct Scsi_Host *sh,
2020 unsigned long elapsed_time)
2021{
2022 struct ctlr_info *h = shost_to_hba(sh);
2023 unsigned long flags;
2024 int finished;
2025
2026 spin_lock_irqsave(&h->scan_lock, flags);
2027 finished = h->scan_finished;
2028 spin_unlock_irqrestore(&h->scan_lock, flags);
2029 return finished;
2030}
2031
2032static int hpsa_change_queue_depth(struct scsi_device *sdev,
2033 int qdepth, int reason)
2034{
2035 struct ctlr_info *h = sdev_to_hba(sdev);
2036
2037 if (reason != SCSI_QDEPTH_DEFAULT)
2038 return -ENOTSUPP;
2039
2040 if (qdepth < 1)
2041 qdepth = 1;
2042 else
2043 if (qdepth > h->nr_cmds)
2044 qdepth = h->nr_cmds;
2045 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
2046 return sdev->queue_depth;
2047}
2048
2049static void hpsa_unregister_scsi(struct ctlr_info *h)
2050{
2051 /* we are being forcibly unloaded, and may not refuse. */
2052 scsi_remove_host(h->scsi_host);
2053 scsi_host_put(h->scsi_host);
2054 h->scsi_host = NULL;
2055}
2056
2057static int hpsa_register_scsi(struct ctlr_info *h)
2058{
2059 int rc;
2060
2061 rc = hpsa_scsi_detect(h);
2062 if (rc != 0)
2063 dev_err(&h->pdev->dev, "hpsa_register_scsi: failed"
2064 " hpsa_scsi_detect(), rc is %d\n", rc);
2065 return rc;
2066}
2067
2068static int wait_for_device_to_become_ready(struct ctlr_info *h,
2069 unsigned char lunaddr[])
2070{
2071 int rc = 0;
2072 int count = 0;
2073 int waittime = 1; /* seconds */
2074 struct CommandList *c;
2075
2076 c = cmd_special_alloc(h);
2077 if (!c) {
2078 dev_warn(&h->pdev->dev, "out of memory in "
2079 "wait_for_device_to_become_ready.\n");
2080 return IO_ERROR;
2081 }
2082
2083 /* Send test unit ready until device ready, or give up. */
2084 while (count < HPSA_TUR_RETRY_LIMIT) {
2085
2086 /* Wait for a bit. do this first, because if we send
2087 * the TUR right away, the reset will just abort it.
2088 */
2089 msleep(1000 * waittime);
2090 count++;
2091
2092 /* Increase wait time with each try, up to a point. */
2093 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
2094 waittime = waittime * 2;
2095
2096 /* Send the Test Unit Ready */
2097 fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD);
2098 hpsa_scsi_do_simple_cmd_core(h, c);
2099 /* no unmap needed here because no data xfer. */
2100
2101 if (c->err_info->CommandStatus == CMD_SUCCESS)
2102 break;
2103
2104 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
2105 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
2106 (c->err_info->SenseInfo[2] == NO_SENSE ||
2107 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
2108 break;
2109
2110 dev_warn(&h->pdev->dev, "waiting %d secs "
2111 "for device to become ready.\n", waittime);
2112 rc = 1; /* device not ready. */
2113 }
2114
2115 if (rc)
2116 dev_warn(&h->pdev->dev, "giving up on device.\n");
2117 else
2118 dev_warn(&h->pdev->dev, "device is ready.\n");
2119
2120 cmd_special_free(h, c);
2121 return rc;
2122}
2123
2124/* Need at least one of these error handlers to keep ../scsi/hosts.c from
2125 * complaining. Doing a host- or bus-reset can't do anything good here.
2126 */
2127static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
2128{
2129 int rc;
2130 struct ctlr_info *h;
2131 struct hpsa_scsi_dev_t *dev;
2132
2133 /* find the controller to which the command to be aborted was sent */
2134 h = sdev_to_hba(scsicmd->device);
2135 if (h == NULL) /* paranoia */
2136 return FAILED;
2137 dev = scsicmd->device->hostdata;
2138 if (!dev) {
2139 dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
2140 "device lookup failed.\n");
2141 return FAILED;
2142 }
2143 dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n",
2144 h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
2145 /* send a reset to the SCSI LUN which the command was sent to */
2146 rc = hpsa_send_reset(h, dev->scsi3addr);
2147 if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
2148 return SUCCESS;
2149
2150 dev_warn(&h->pdev->dev, "resetting device failed.\n");
2151 return FAILED;
2152}
2153
2154/*
2155 * For operations that cannot sleep, a command block is allocated at init,
2156 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
2157 * which ones are free or in use. Lock must be held when calling this.
2158 * cmd_free() is the complement.
2159 */
2160static struct CommandList *cmd_alloc(struct ctlr_info *h)
2161{
2162 struct CommandList *c;
2163 int i;
2164 union u64bit temp64;
2165 dma_addr_t cmd_dma_handle, err_dma_handle;
2166
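	/* Find a free slot in the command pool; test_and_set_bit()
	 * re-checks the bit, so a racing allocation just causes
	 * another search pass.
	 */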
2167 do {
2168 i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
2169 if (i == h->nr_cmds)
2170 return NULL;
2171 } while (test_and_set_bit
2172 (i & (BITS_PER_LONG - 1),
2173 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
2174 c = h->cmd_pool + i;
2175 memset(c, 0, sizeof(*c));
2176 cmd_dma_handle = h->cmd_pool_dhandle
2177 + i * sizeof(*c);
2178 c->err_info = h->errinfo_pool + i;
2179 memset(c->err_info, 0, sizeof(*c->err_info));
2180 err_dma_handle = h->errinfo_pool_dhandle
2181 + i * sizeof(*c->err_info);
2182 h->nr_allocs++;
2183
2184 c->cmdindex = i;
2185
2186 INIT_HLIST_NODE(&c->list);
2187 c->busaddr = (u32) cmd_dma_handle;
2188 temp64.val = (u64) err_dma_handle;
2189 c->ErrDesc.Addr.lower = temp64.val32.lower;
2190 c->ErrDesc.Addr.upper = temp64.val32.upper;
2191 c->ErrDesc.Len = sizeof(*c->err_info);
2192
2193 c->h = h;
2194 return c;
2195}
2196
2197/* For operations that can wait for kmalloc to possibly sleep,
2198 * this routine can be called. Lock need not be held to call
2199 * cmd_special_alloc. cmd_special_free() is the complement.
2200 */
2201static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
2202{
2203 struct CommandList *c;
2204 union u64bit temp64;
2205 dma_addr_t cmd_dma_handle, err_dma_handle;
2206
2207 c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle);
2208 if (c == NULL)
2209 return NULL;
2210 memset(c, 0, sizeof(*c));
2211
2212 c->cmdindex = -1;
2213
2214 c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info),
2215 &err_dma_handle);
2216
2217 if (c->err_info == NULL) {
2218 pci_free_consistent(h->pdev,
2219 sizeof(*c), c, cmd_dma_handle);
2220 return NULL;
2221 }
2222 memset(c->err_info, 0, sizeof(*c->err_info));
2223
2224 INIT_HLIST_NODE(&c->list);
2225 c->busaddr = (u32) cmd_dma_handle;
2226 temp64.val = (u64) err_dma_handle;
2227 c->ErrDesc.Addr.lower = temp64.val32.lower;
2228 c->ErrDesc.Addr.upper = temp64.val32.upper;
2229 c->ErrDesc.Len = sizeof(*c->err_info);
2230
2231 c->h = h;
2232 return c;
2233}
2234
2235static void cmd_free(struct ctlr_info *h, struct CommandList *c)
2236{
2237 int i;
2238
2239 i = c - h->cmd_pool;
2240 clear_bit(i & (BITS_PER_LONG - 1),
2241 h->cmd_pool_bits + (i / BITS_PER_LONG));
2242 h->nr_frees++;
2243}
2244
2245static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
2246{
2247 union u64bit temp64;
2248
2249 temp64.val32.lower = c->ErrDesc.Addr.lower;
2250 temp64.val32.upper = c->ErrDesc.Addr.upper;
2251 pci_free_consistent(h->pdev, sizeof(*c->err_info),
2252 c->err_info, (dma_addr_t) temp64.val);
2253 pci_free_consistent(h->pdev, sizeof(*c),
2254 c, (dma_addr_t) c->busaddr);
2255}
2256
2257#ifdef CONFIG_COMPAT
2258
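/* 32-bit compat handlers: repack the 32-bit user structures into the
 * native 64-bit layout in compat-allocated user space, call the normal
 * hpsa_ioctl(), then copy the error info back out.
 */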
2259static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
2260{
2261 IOCTL32_Command_struct __user *arg32 =
2262 (IOCTL32_Command_struct __user *) arg;
2263 IOCTL_Command_struct arg64;
2264 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
2265 int err;
2266 u32 cp;
2267
2268 err = 0;
2269 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
2270 sizeof(arg64.LUN_info));
2271 err |= copy_from_user(&arg64.Request, &arg32->Request,
2272 sizeof(arg64.Request));
2273 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
2274 sizeof(arg64.error_info));
2275 err |= get_user(arg64.buf_size, &arg32->buf_size);
2276 err |= get_user(cp, &arg32->buf);
2277 arg64.buf = compat_ptr(cp);
2278 err |= copy_to_user(p, &arg64, sizeof(arg64));
2279
2280 if (err)
2281 return -EFAULT;
2282
2283 err = hpsa_ioctl(dev, CCISS_PASSTHRU, (void *)p);
2284 if (err)
2285 return err;
2286 err |= copy_in_user(&arg32->error_info, &p->error_info,
2287 sizeof(arg32->error_info));
2288 if (err)
2289 return -EFAULT;
2290 return err;
2291}
2292
2293static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
2294 int cmd, void *arg)
2295{
2296 BIG_IOCTL32_Command_struct __user *arg32 =
2297 (BIG_IOCTL32_Command_struct __user *) arg;
2298 BIG_IOCTL_Command_struct arg64;
2299 BIG_IOCTL_Command_struct __user *p =
2300 compat_alloc_user_space(sizeof(arg64));
2301 int err;
2302 u32 cp;
2303
2304 err = 0;
2305 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
2306 sizeof(arg64.LUN_info));
2307 err |= copy_from_user(&arg64.Request, &arg32->Request,
2308 sizeof(arg64.Request));
2309 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
2310 sizeof(arg64.error_info));
2311 err |= get_user(arg64.buf_size, &arg32->buf_size);
2312 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
2313 err |= get_user(cp, &arg32->buf);
2314 arg64.buf = compat_ptr(cp);
2315 err |= copy_to_user(p, &arg64, sizeof(arg64));
2316
2317 if (err)
2318 return -EFAULT;
2319
2320 err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p);
2321 if (err)
2322 return err;
2323 err |= copy_in_user(&arg32->error_info, &p->error_info,
2324 sizeof(arg32->error_info));
2325 if (err)
2326 return -EFAULT;
2327 return err;
2328}
2329
2330static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg)
2331{
2332 switch (cmd) {
2333 case CCISS_GETPCIINFO:
2334 case CCISS_GETINTINFO:
2335 case CCISS_SETINTINFO:
2336 case CCISS_GETNODENAME:
2337 case CCISS_SETNODENAME:
2338 case CCISS_GETHEARTBEAT:
2339 case CCISS_GETBUSTYPES:
2340 case CCISS_GETFIRMVER:
2341 case CCISS_GETDRIVVER:
2342 case CCISS_REVALIDVOLS:
2343 case CCISS_DEREGDISK:
2344 case CCISS_REGNEWDISK:
2345 case CCISS_REGNEWD:
2346 case CCISS_RESCANDISK:
2347 case CCISS_GETLUNINFO:
2348 return hpsa_ioctl(dev, cmd, arg);
2349
2350 case CCISS_PASSTHRU32:
2351 return hpsa_ioctl32_passthru(dev, cmd, arg);
2352 case CCISS_BIG_PASSTHRU32:
2353 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
2354
2355 default:
2356 return -ENOIOCTLCMD;
2357 }
2358}
2359#endif
2360
2361static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
2362{
2363 struct hpsa_pci_info pciinfo;
2364
2365 if (!argp)
2366 return -EINVAL;
2367 pciinfo.domain = pci_domain_nr(h->pdev->bus);
2368 pciinfo.bus = h->pdev->bus->number;
2369 pciinfo.dev_fn = h->pdev->devfn;
2370 pciinfo.board_id = h->board_id;
2371 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
2372 return -EFAULT;
2373 return 0;
2374}
2375
2376static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
2377{
2378 DriverVer_type DriverVer;
2379 unsigned char vmaj, vmin, vsubmin;
2380 int rc;
2381
2382 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
2383 &vmaj, &vmin, &vsubmin);
2384 if (rc != 3) {
2385 dev_info(&h->pdev->dev, "driver version string '%s' "
2386 "unrecognized.", HPSA_DRIVER_VERSION);
2387 vmaj = 0;
2388 vmin = 0;
2389 vsubmin = 0;
2390 }
2391 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
2392 if (!argp)
2393 return -EINVAL;
2394 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
2395 return -EFAULT;
2396 return 0;
2397}
2398
2399static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
2400{
2401 IOCTL_Command_struct iocommand;
2402 struct CommandList *c;
2403 char *buff = NULL;
2404 union u64bit temp64;
2405
2406 if (!argp)
2407 return -EINVAL;
2408 if (!capable(CAP_SYS_RAWIO))
2409 return -EPERM;
2410 if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
2411 return -EFAULT;
2412 if ((iocommand.buf_size < 1) &&
2413 (iocommand.Request.Type.Direction != XFER_NONE)) {
2414 return -EINVAL;
2415 }
2416 if (iocommand.buf_size > 0) {
2417 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
2418 if (buff == NULL)
2419 			return -ENOMEM;
2420 }
2421 if (iocommand.Request.Type.Direction == XFER_WRITE) {
2422 /* Copy the data into the buffer we created */
2423 if (copy_from_user(buff, iocommand.buf, iocommand.buf_size)) {
2424 kfree(buff);
2425 return -EFAULT;
2426 }
2427 } else
2428 memset(buff, 0, iocommand.buf_size);
2429 c = cmd_special_alloc(h);
2430 if (c == NULL) {
2431 kfree(buff);
2432 return -ENOMEM;
2433 }
2434 /* Fill in the command type */
2435 c->cmd_type = CMD_IOCTL_PEND;
2436 /* Fill in Command Header */
2437 c->Header.ReplyQueue = 0; /* unused in simple mode */
2438 if (iocommand.buf_size > 0) { /* buffer to fill */
2439 c->Header.SGList = 1;
2440 c->Header.SGTotal = 1;
2441 } else { /* no buffers to fill */
2442 c->Header.SGList = 0;
2443 c->Header.SGTotal = 0;
2444 }
2445 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
2446 	/* use the kernel address of the cmd block for the tag */
2447 c->Header.Tag.lower = c->busaddr;
2448
2449 /* Fill in Request block */
2450 memcpy(&c->Request, &iocommand.Request,
2451 sizeof(c->Request));
2452
2453 /* Fill in the scatter gather information */
2454 if (iocommand.buf_size > 0) {
2455 temp64.val = pci_map_single(h->pdev, buff,
2456 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
2457 c->SG[0].Addr.lower = temp64.val32.lower;
2458 c->SG[0].Addr.upper = temp64.val32.upper;
2459 c->SG[0].Len = iocommand.buf_size;
2460 c->SG[0].Ext = 0; /* we are not chaining*/
2461 }
2462 hpsa_scsi_do_simple_cmd_core(h, c);
2463 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
2464 check_ioctl_unit_attention(h, c);
2465
2466 /* Copy the error information out */
2467 memcpy(&iocommand.error_info, c->err_info,
2468 sizeof(iocommand.error_info));
2469 if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
2470 kfree(buff);
2471 cmd_special_free(h, c);
2472 return -EFAULT;
2473 }
2474
2475 if (iocommand.Request.Type.Direction == XFER_READ) {
2476 /* Copy the data out of the buffer we created */
2477 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
2478 kfree(buff);
2479 cmd_special_free(h, c);
2480 return -EFAULT;
2481 }
2482 }
2483 kfree(buff);
2484 cmd_special_free(h, c);
2485 return 0;
2486}
2487
2488static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
2489{
2490 BIG_IOCTL_Command_struct *ioc;
2491 struct CommandList *c;
2492 unsigned char **buff = NULL;
2493 int *buff_size = NULL;
2494 union u64bit temp64;
2495 BYTE sg_used = 0;
2496 int status = 0;
2497 int i;
2498 u32 left;
2499 u32 sz;
2500 BYTE __user *data_ptr;
2501
2502 if (!argp)
2503 return -EINVAL;
2504 if (!capable(CAP_SYS_RAWIO))
2505 return -EPERM;
2506 ioc = (BIG_IOCTL_Command_struct *)
2507 kmalloc(sizeof(*ioc), GFP_KERNEL);
2508 if (!ioc) {
2509 status = -ENOMEM;
2510 goto cleanup1;
2511 }
2512 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
2513 status = -EFAULT;
2514 goto cleanup1;
2515 }
2516 if ((ioc->buf_size < 1) &&
2517 (ioc->Request.Type.Direction != XFER_NONE)) {
2518 status = -EINVAL;
2519 goto cleanup1;
2520 }
2521 /* Check kmalloc limits using all SGs */
2522 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
2523 status = -EINVAL;
2524 goto cleanup1;
2525 }
2526 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
2527 status = -EINVAL;
2528 goto cleanup1;
2529 }
2530 buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
2531 if (!buff) {
2532 status = -ENOMEM;
2533 goto cleanup1;
2534 }
2535 buff_size = kmalloc(MAXSGENTRIES * sizeof(int), GFP_KERNEL);
2536 if (!buff_size) {
2537 status = -ENOMEM;
2538 goto cleanup1;
2539 }
2540 left = ioc->buf_size;
2541 data_ptr = ioc->buf;
2542 while (left) {
2543 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
2544 buff_size[sg_used] = sz;
2545 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
2546 if (buff[sg_used] == NULL) {
2547 status = -ENOMEM;
2548 goto cleanup1;
2549 }
2550 if (ioc->Request.Type.Direction == XFER_WRITE) {
2551 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
2552 				status = -EFAULT;
2553 goto cleanup1;
2554 }
2555 } else
2556 memset(buff[sg_used], 0, sz);
2557 left -= sz;
2558 data_ptr += sz;
2559 sg_used++;
2560 }
2561 c = cmd_special_alloc(h);
2562 if (c == NULL) {
2563 status = -ENOMEM;
2564 goto cleanup1;
2565 }
2566 c->cmd_type = CMD_IOCTL_PEND;
2567 c->Header.ReplyQueue = 0;
2568
2569 if (ioc->buf_size > 0) {
2570 c->Header.SGList = sg_used;
2571 c->Header.SGTotal = sg_used;
2572 } else {
2573 c->Header.SGList = 0;
2574 c->Header.SGTotal = 0;
2575 }
2576 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
2577 c->Header.Tag.lower = c->busaddr;
2578 memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
2579 if (ioc->buf_size > 0) {
2580 int i;
2581 for (i = 0; i < sg_used; i++) {
2582 temp64.val = pci_map_single(h->pdev, buff[i],
2583 buff_size[i], PCI_DMA_BIDIRECTIONAL);
2584 c->SG[i].Addr.lower = temp64.val32.lower;
2585 c->SG[i].Addr.upper = temp64.val32.upper;
2586 c->SG[i].Len = buff_size[i];
2587 /* we are not chaining */
2588 c->SG[i].Ext = 0;
2589 }
2590 }
2591 hpsa_scsi_do_simple_cmd_core(h, c);
2592 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
2593 check_ioctl_unit_attention(h, c);
2594 /* Copy the error information out */
2595 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
2596 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
2597 cmd_special_free(h, c);
2598 status = -EFAULT;
2599 goto cleanup1;
2600 }
2601 if (ioc->Request.Type.Direction == XFER_READ) {
2602 /* Copy the data out of the buffer we created */
2603 BYTE __user *ptr = ioc->buf;
2604 for (i = 0; i < sg_used; i++) {
2605 if (copy_to_user(ptr, buff[i], buff_size[i])) {
2606 cmd_special_free(h, c);
2607 status = -EFAULT;
2608 goto cleanup1;
2609 }
2610 ptr += buff_size[i];
2611 }
2612 }
2613 cmd_special_free(h, c);
2614 status = 0;
2615cleanup1:
2616 if (buff) {
2617 for (i = 0; i < sg_used; i++)
2618 kfree(buff[i]);
2619 kfree(buff);
2620 }
2621 kfree(buff_size);
2622 kfree(ioc);
2623 return status;
2624}
2625
2626static void check_ioctl_unit_attention(struct ctlr_info *h,
2627 struct CommandList *c)
2628{
2629 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
2630 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
2631 (void) check_for_unit_attention(h, c);
2632}
2633/*
2634  * ioctl dispatch: route the CCISS ioctls to their handlers
2635 */
2636static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
2637{
2638 struct ctlr_info *h;
2639 void __user *argp = (void __user *)arg;
2640
2641 h = sdev_to_hba(dev);
2642
2643 switch (cmd) {
2644 case CCISS_DEREGDISK:
2645 case CCISS_REGNEWDISK:
2646 case CCISS_REGNEWD:
2647 hpsa_scan_start(h->scsi_host);
2648 return 0;
2649 case CCISS_GETPCIINFO:
2650 return hpsa_getpciinfo_ioctl(h, argp);
2651 case CCISS_GETDRIVVER:
2652 return hpsa_getdrivver_ioctl(h, argp);
2653 case CCISS_PASSTHRU:
2654 return hpsa_passthru_ioctl(h, argp);
2655 case CCISS_BIG_PASSTHRU:
2656 return hpsa_big_passthru_ioctl(h, argp);
2657 default:
2658 return -ENOTTY;
2659 }
2660}
2661
2662static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
2663 void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
2664 int cmd_type)
2665{
2666 int pci_dir = XFER_NONE;
2667
2668 c->cmd_type = CMD_IOCTL_PEND;
2669 c->Header.ReplyQueue = 0;
2670 if (buff != NULL && size > 0) {
2671 c->Header.SGList = 1;
2672 c->Header.SGTotal = 1;
2673 } else {
2674 c->Header.SGList = 0;
2675 c->Header.SGTotal = 0;
2676 }
2677 c->Header.Tag.lower = c->busaddr;
2678 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
2679
2680 c->Request.Type.Type = cmd_type;
2681 if (cmd_type == TYPE_CMD) {
2682 switch (cmd) {
2683 case HPSA_INQUIRY:
2684 /* are we trying to read a vital product page */
2685 if (page_code != 0) {
2686 c->Request.CDB[1] = 0x01;
2687 c->Request.CDB[2] = page_code;
2688 }
2689 c->Request.CDBLen = 6;
2690 c->Request.Type.Attribute = ATTR_SIMPLE;
2691 c->Request.Type.Direction = XFER_READ;
2692 c->Request.Timeout = 0;
2693 c->Request.CDB[0] = HPSA_INQUIRY;
2694 c->Request.CDB[4] = size & 0xFF;
2695 break;
2696 case HPSA_REPORT_LOG:
2697 case HPSA_REPORT_PHYS:
2698 		/* Talking to the controller, so it's a physical command:
2699 		   mode = 00, target = 0.  Nothing to write.
2700 */
2701 c->Request.CDBLen = 12;
2702 c->Request.Type.Attribute = ATTR_SIMPLE;
2703 c->Request.Type.Direction = XFER_READ;
2704 c->Request.Timeout = 0;
2705 c->Request.CDB[0] = cmd;
2706 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
2707 c->Request.CDB[7] = (size >> 16) & 0xFF;
2708 c->Request.CDB[8] = (size >> 8) & 0xFF;
2709 c->Request.CDB[9] = size & 0xFF;
2710 break;
2711
2712 case HPSA_READ_CAPACITY:
2713 c->Request.CDBLen = 10;
2714 c->Request.Type.Attribute = ATTR_SIMPLE;
2715 c->Request.Type.Direction = XFER_READ;
2716 c->Request.Timeout = 0;
2717 c->Request.CDB[0] = cmd;
2718 break;
2719 case HPSA_CACHE_FLUSH:
2720 c->Request.CDBLen = 12;
2721 c->Request.Type.Attribute = ATTR_SIMPLE;
2722 c->Request.Type.Direction = XFER_WRITE;
2723 c->Request.Timeout = 0;
2724 c->Request.CDB[0] = BMIC_WRITE;
2725 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
2726 break;
2727 case TEST_UNIT_READY:
2728 c->Request.CDBLen = 6;
2729 c->Request.Type.Attribute = ATTR_SIMPLE;
2730 c->Request.Type.Direction = XFER_NONE;
2731 c->Request.Timeout = 0;
2732 break;
2733 default:
2734 			dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
2735 BUG();
2736 return;
2737 }
2738 } else if (cmd_type == TYPE_MSG) {
2739 switch (cmd) {
2740
2741 case HPSA_DEVICE_RESET_MSG:
2742 c->Request.CDBLen = 16;
2743 c->Request.Type.Type = 1; /* It is a MSG not a CMD */
2744 c->Request.Type.Attribute = ATTR_SIMPLE;
2745 c->Request.Type.Direction = XFER_NONE;
2746 c->Request.Timeout = 0; /* Don't time out */
2747 c->Request.CDB[0] = 0x01; /* RESET_MSG is 0x01 */
2748 c->Request.CDB[1] = 0x03; /* Reset target above */
2749 /* If bytes 4-7 are zero, it means reset the */
2750 /* LunID device */
2751 c->Request.CDB[4] = 0x00;
2752 c->Request.CDB[5] = 0x00;
2753 c->Request.CDB[6] = 0x00;
2754 c->Request.CDB[7] = 0x00;
2755 break;
2756
2757 default:
2758 dev_warn(&h->pdev->dev, "unknown message type %d\n",
2759 cmd);
2760 BUG();
2761 }
2762 } else {
2763 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
2764 BUG();
2765 }
2766
2767 switch (c->Request.Type.Direction) {
2768 case XFER_READ:
2769 pci_dir = PCI_DMA_FROMDEVICE;
2770 break;
2771 case XFER_WRITE:
2772 pci_dir = PCI_DMA_TODEVICE;
2773 break;
2774 case XFER_NONE:
2775 pci_dir = PCI_DMA_NONE;
2776 break;
2777 default:
2778 pci_dir = PCI_DMA_BIDIRECTIONAL;
2779 }
2780
2781 hpsa_map_one(h->pdev, c, buff, size, pci_dir);
2782
2783 return;
2784}
2785
2786/*
2787 * Map (physical) PCI mem into (virtual) kernel space
2788 */
2789static void __iomem *remap_pci_mem(ulong base, ulong size)
2790{
2791 ulong page_base = ((ulong) base) & PAGE_MASK;
2792 ulong page_offs = ((ulong) base) - page_base;
2793 void __iomem *page_remapped = ioremap(page_base, page_offs + size);
2794
2795 return page_remapped ? (page_remapped + page_offs) : NULL;
2796}
2797
2798/* Takes cmds off the submission queue and sends them to the hardware,
2799 * then puts them on the queue of cmds waiting for completion.
2800 */
2801static void start_io(struct ctlr_info *h)
2802{
2803 struct CommandList *c;
2804
2805 while (!hlist_empty(&h->reqQ)) {
2806 c = hlist_entry(h->reqQ.first, struct CommandList, list);
2807 /* can't do anything if fifo is full */
2808 if ((h->access.fifo_full(h))) {
2809 dev_warn(&h->pdev->dev, "fifo full\n");
2810 break;
2811 }
2812
2813 /* Get the first entry from the Request Q */
2814 removeQ(c);
2815 h->Qdepth--;
2816
2817 /* Tell the controller execute command */
2818 h->access.submit_command(h, c);
2819
2820 /* Put job onto the completed Q */
2821 addQ(&h->cmpQ, c);
2822 }
2823}
2824
2825static inline unsigned long get_next_completion(struct ctlr_info *h)
2826{
2827 return h->access.command_completed(h);
2828}
2829
2830static inline bool interrupt_pending(struct ctlr_info *h)
2831{
2832 return h->access.intr_pending(h);
2833}
2834
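/* With legacy INTx the interrupt line may be shared with other devices,
 * so only claim it when this controller really has a completion pending;
 * MSI and MSI-X interrupts are never shared.
 */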
2835static inline long interrupt_not_for_us(struct ctlr_info *h)
2836{
2837 return !(h->msi_vector || h->msix_vector) &&
2838 ((h->access.intr_pending(h) == 0) ||
2839 (h->interrupts_enabled == 0));
2840}
2841
2842static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
2843 u32 raw_tag)
2844{
2845 if (unlikely(tag_index >= h->nr_cmds)) {
2846 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
2847 return 1;
2848 }
2849 return 0;
2850}
2851
2852static inline void finish_cmd(struct CommandList *c, u32 raw_tag)
2853{
2854 removeQ(c);
2855 if (likely(c->cmd_type == CMD_SCSI))
2856 complete_scsi_command(c, 0, raw_tag);
2857 else if (c->cmd_type == CMD_IOCTL_PEND)
2858 complete(c->waiting);
2859}
2860
2861static inline u32 hpsa_tag_contains_index(u32 tag)
2862{
2863#define DIRECT_LOOKUP_BIT 0x10
2864 return tag & DIRECT_LOOKUP_BIT;
2865}
2866
2867static inline u32 hpsa_tag_to_index(u32 tag)
2868{
2869#define DIRECT_LOOKUP_SHIFT 5
2870 return tag >> DIRECT_LOOKUP_SHIFT;
2871}
2872
2873static inline u32 hpsa_tag_discard_error_bits(u32 tag)
2874{
2875#define HPSA_ERROR_BITS 0x03
2876 return tag & ~HPSA_ERROR_BITS;
2877}
2878
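/* Tag layout, as used by the helpers above (Header.Tag.lower):
 *   bits 0-1: error bits set by the controller on completion
 *   bit 4:    direct-lookup flag (tag encodes a command pool index)
 *   bits 5+:  the command pool index, when the flag is set
 */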
2879/* process completion of an indexed ("direct lookup") command */
2880static inline u32 process_indexed_cmd(struct ctlr_info *h,
2881 u32 raw_tag)
2882{
2883 u32 tag_index;
2884 struct CommandList *c;
2885
2886 tag_index = hpsa_tag_to_index(raw_tag);
2887 if (bad_tag(h, tag_index, raw_tag))
2888 return next_command(h);
2889 c = h->cmd_pool + tag_index;
2890 finish_cmd(c, raw_tag);
2891 return next_command(h);
2892}
2893
2894/* process completion of a non-indexed command */
2895static inline u32 process_nonindexed_cmd(struct ctlr_info *h,
2896 u32 raw_tag)
2897{
2898 u32 tag;
2899 struct CommandList *c = NULL;
2900 struct hlist_node *tmp;
2901
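	/* Compare only the upper bits of the bus address; the low bits
	 * of a completed tag carry error/status flags, not identity.
	 */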
2902 tag = hpsa_tag_discard_error_bits(raw_tag);
2903 hlist_for_each_entry(c, tmp, &h->cmpQ, list) {
2904 if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) {
2905 finish_cmd(c, raw_tag);
2906 return next_command(h);
2907 }
2908 }
2909 bad_tag(h, h->nr_cmds + 1, raw_tag);
2910 return next_command(h);
2911}
2912
2913static irqreturn_t do_hpsa_intr(int irq, void *dev_id)
2914{
2915 struct ctlr_info *h = dev_id;
2916 unsigned long flags;
2917 u32 raw_tag;
2918
2919 if (interrupt_not_for_us(h))
2920 return IRQ_NONE;
2921 spin_lock_irqsave(&h->lock, flags);
2922 raw_tag = get_next_completion(h);
2923 while (raw_tag != FIFO_EMPTY) {
2924 if (hpsa_tag_contains_index(raw_tag))
2925 raw_tag = process_indexed_cmd(h, raw_tag);
2926 else
2927 raw_tag = process_nonindexed_cmd(h, raw_tag);
2928 }
2929 spin_unlock_irqrestore(&h->lock, flags);
2930 return IRQ_HANDLED;
2931}
2932
2933/* Send a message CDB to the firmware. */
2934static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
2935 unsigned char type)
2936{
2937 struct Command {
2938 struct CommandListHeader CommandHeader;
2939 struct RequestBlock Request;
2940 struct ErrDescriptor ErrorDescriptor;
2941 };
2942 struct Command *cmd;
2943 	static const size_t cmd_sz = sizeof(*cmd) +
2944 		sizeof(struct ErrorInfo); /* error info buffer follows the cmd */
2945 dma_addr_t paddr64;
2946 uint32_t paddr32, tag;
2947 void __iomem *vaddr;
2948 int i, err;
2949
2950 vaddr = pci_ioremap_bar(pdev, 0);
2951 if (vaddr == NULL)
2952 return -ENOMEM;
2953
2954 /* The Inbound Post Queue only accepts 32-bit physical addresses for the
2955 * CCISS commands, so they must be allocated from the lower 4GiB of
2956 * memory.
2957 */
2958 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2959 if (err) {
2960 iounmap(vaddr);
2961 return -ENOMEM;
2962 }
2963
2964 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
2965 if (cmd == NULL) {
2966 iounmap(vaddr);
2967 return -ENOMEM;
2968 }
2969
2970 /* This must fit, because of the 32-bit consistent DMA mask. Also,
2971 * although there's no guarantee, we assume that the address is at
2972 * least 4-byte aligned (most likely, it's page-aligned).
2973 */
2974 paddr32 = paddr64;
2975
2976 cmd->CommandHeader.ReplyQueue = 0;
2977 cmd->CommandHeader.SGList = 0;
2978 cmd->CommandHeader.SGTotal = 0;
2979 cmd->CommandHeader.Tag.lower = paddr32;
2980 cmd->CommandHeader.Tag.upper = 0;
2981 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
2982
2983 cmd->Request.CDBLen = 16;
2984 cmd->Request.Type.Type = TYPE_MSG;
2985 cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE;
2986 cmd->Request.Type.Direction = XFER_NONE;
2987 cmd->Request.Timeout = 0; /* Don't time out */
2988 cmd->Request.CDB[0] = opcode;
2989 cmd->Request.CDB[1] = type;
2990 memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
2991 cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd);
2992 cmd->ErrorDescriptor.Addr.upper = 0;
2993 cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo);
2994
2995 writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);
2996
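	/* Poll the reply port: the controller echoes back the command's
	 * physical address (plus error bits) once it has completed.
	 */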
2997 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
2998 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
2999 if (hpsa_tag_discard_error_bits(tag) == paddr32)
3000 break;
3001 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
3002 }
3003
3004 iounmap(vaddr);
3005
3006 /* we leak the DMA buffer here ... no choice since the controller could
3007 * still complete the command.
3008 */
3009 if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
3010 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
3011 opcode, type);
3012 return -ETIMEDOUT;
3013 }
3014
3015 pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
3016
3017 if (tag & HPSA_ERROR_BIT) {
3018 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
3019 opcode, type);
3020 return -EIO;
3021 }
3022
3023 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
3024 opcode, type);
3025 return 0;
3026}
3027
3028#define hpsa_soft_reset_controller(p) hpsa_message(p, 1, 0)
3029#define hpsa_noop(p) hpsa_message(p, 3, 0)
3030
3031static __devinit int hpsa_reset_msi(struct pci_dev *pdev)
3032{
3033/* the #defines are stolen from drivers/pci/msi.h. */
3034#define msi_control_reg(base) (base + PCI_MSI_FLAGS)
3035#define PCI_MSIX_FLAGS_ENABLE (1 << 15)
3036
3037 int pos;
3038 u16 control = 0;
3039
3040 pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
3041 if (pos) {
3042 pci_read_config_word(pdev, msi_control_reg(pos), &control);
3043 if (control & PCI_MSI_FLAGS_ENABLE) {
3044 dev_info(&pdev->dev, "resetting MSI\n");
3045 pci_write_config_word(pdev, msi_control_reg(pos),
3046 control & ~PCI_MSI_FLAGS_ENABLE);
3047 }
3048 }
3049
3050 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
3051 if (pos) {
3052 pci_read_config_word(pdev, msi_control_reg(pos), &control);
3053 if (control & PCI_MSIX_FLAGS_ENABLE) {
3054 dev_info(&pdev->dev, "resetting MSI-X\n");
3055 pci_write_config_word(pdev, msi_control_reg(pos),
3056 control & ~PCI_MSIX_FLAGS_ENABLE);
3057 }
3058 }
3059
3060 return 0;
3061}
3062
3063/* This does a hard reset of the controller using PCI power management
3064 * states.
3065 */
3066static __devinit int hpsa_hard_reset_controller(struct pci_dev *pdev)
3067{
3068 u16 pmcsr, saved_config_space[32];
3069 int i, pos;
3070
3071 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
3072
3073 /* This is very nearly the same thing as
3074 *
3075 * pci_save_state(pci_dev);
3076 * pci_set_power_state(pci_dev, PCI_D3hot);
3077 * pci_set_power_state(pci_dev, PCI_D0);
3078 * pci_restore_state(pci_dev);
3079 *
3080 * but we can't use these nice canned kernel routines on
3081 * kexec, because they also check the MSI/MSI-X state in PCI
3082 * configuration space and do the wrong thing when it is
3083 * set/cleared. Also, the pci_save/restore_state functions
3084 * violate the ordering requirements for restoring the
3085 * configuration space from the CCISS document (see the
3086 * comment below). So we roll our own ....
3087 */
3088
3089 for (i = 0; i < 32; i++)
3090 pci_read_config_word(pdev, 2*i, &saved_config_space[i]);
3091
3092 pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
3093 if (pos == 0) {
3094 dev_err(&pdev->dev,
3095 "hpsa_reset_controller: PCI PM not supported\n");
3096 return -ENODEV;
3097 }
3098
3099 /* Quoting from the Open CISS Specification: "The Power
3100 * Management Control/Status Register (CSR) controls the power
3101 * state of the device. The normal operating state is D0,
3102 * CSR=00h. The software off state is D3, CSR=03h. To reset
3103 * the controller, place the interface device in D3 then to
3104 * D0, this causes a secondary PCI reset which will reset the
3105 * controller."
3106 */
3107
3108 /* enter the D3hot power management state */
3109 pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
3110 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3111 pmcsr |= PCI_D3hot;
3112 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
3113
3114 msleep(500);
3115
3116 /* enter the D0 power management state */
3117 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3118 pmcsr |= PCI_D0;
3119 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
3120
3121 msleep(500);
3122
3123 /* Restore the PCI configuration space. The Open CISS
3124 * Specification says, "Restore the PCI Configuration
3125 * Registers, offsets 00h through 60h. It is important to
3126 * restore the command register, 16-bits at offset 04h,
3127 * last. Do not restore the configuration status register,
3128 * 16-bits at offset 06h." Note that the offset is 2*i.
3129 */
3130 for (i = 0; i < 32; i++) {
3131 if (i == 2 || i == 3)
3132 continue;
3133 pci_write_config_word(pdev, 2*i, saved_config_space[i]);
3134 }
3135 wmb();
3136 pci_write_config_word(pdev, 4, saved_config_space[2]);
3137
3138 return 0;
3139}
3140
3141/*
3142 * We cannot read the structure directly, for portability we must use
3143 * the io functions.
3144 * This is for debug only.
3145 */
3146#ifdef HPSA_DEBUG
3147static void print_cfg_table(struct device *dev, struct CfgTable *tb)
3148{
3149 int i;
3150 char temp_name[17];
3151
3152 dev_info(dev, "Controller Configuration information\n");
3153 dev_info(dev, "------------------------------------\n");
3154 for (i = 0; i < 4; i++)
3155 temp_name[i] = readb(&(tb->Signature[i]));
3156 temp_name[4] = '\0';
3157 dev_info(dev, " Signature = %s\n", temp_name);
3158 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
3159 dev_info(dev, " Transport methods supported = 0x%x\n",
3160 readl(&(tb->TransportSupport)));
3161 dev_info(dev, " Transport methods active = 0x%x\n",
3162 readl(&(tb->TransportActive)));
3163 dev_info(dev, " Requested transport Method = 0x%x\n",
3164 readl(&(tb->HostWrite.TransportRequest)));
3165 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
3166 readl(&(tb->HostWrite.CoalIntDelay)));
3167 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
3168 readl(&(tb->HostWrite.CoalIntCount)));
3169 dev_info(dev, " Max outstanding commands = 0x%d\n",
3170 readl(&(tb->CmdsOutMax)));
3171 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
3172 for (i = 0; i < 16; i++)
3173 temp_name[i] = readb(&(tb->ServerName[i]));
3174 temp_name[16] = '\0';
3175 dev_info(dev, " Server Name = %s\n", temp_name);
3176 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
3177 readl(&(tb->HeartBeat)));
3178}
3179#endif /* HPSA_DEBUG */
3180
3181static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
3182{
3183 int i, offset, mem_type, bar_type;
3184
3185 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
3186 return 0;
3187 offset = 0;
3188 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
3189 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
3190 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
3191 offset += 4;
3192 else {
3193 mem_type = pci_resource_flags(pdev, i) &
3194 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
3195 switch (mem_type) {
3196 case PCI_BASE_ADDRESS_MEM_TYPE_32:
3197 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
3198 offset += 4; /* 32 bit */
3199 break;
3200 case PCI_BASE_ADDRESS_MEM_TYPE_64:
3201 offset += 8;
3202 break;
3203 default: /* reserved in PCI 2.2 */
3204 dev_warn(&pdev->dev,
3205 "base address is invalid\n");
 3206				return -1;
3208 }
3209 }
3210 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
3211 return i + 1;
3212 }
3213 return -1;
3214}
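/* Editor's note: a worked example of the offset arithmetic above (an
 * illustrative sketch, not from the CISS spec). Suppose BAR0 is a plain
 * 32-bit memory BAR and we are asked for the BAR at config offset 0x14
 * (PCI_BASE_ADDRESS_1):
 *
 *	i = 0: 32-bit memory BAR, offset 0 -> 4
 *	       offset (4) == pci_bar_addr - PCI_BASE_ADDRESS_0 (0x14 - 0x10)
 *	       -> return i + 1 = 1, the matching resource index.
 *
 * A 64-bit BAR advances offset by 8 instead, since it occupies two
 * 32-bit config slots.
 */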
3215
3216/* If MSI/MSI-X is supported by the kernel we will try to enable it on
3217 * controllers that are capable. If not, we use IO-APIC mode.
3218 */
3219
3220static void __devinit hpsa_interrupt_mode(struct ctlr_info *h,
3221 struct pci_dev *pdev, u32 board_id)
3222{
3223#ifdef CONFIG_PCI_MSI
3224 int err;
3225 struct msix_entry hpsa_msix_entries[4] = { {0, 0}, {0, 1},
3226 {0, 2}, {0, 3}
3227 };
3228
3229 /* Some boards advertise MSI but don't really support it */
3230 if ((board_id == 0x40700E11) ||
3231 (board_id == 0x40800E11) ||
3232 (board_id == 0x40820E11) || (board_id == 0x40830E11))
3233 goto default_int_mode;
3234 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
3235 dev_info(&pdev->dev, "MSIX\n");
3236 err = pci_enable_msix(pdev, hpsa_msix_entries, 4);
3237 if (!err) {
3238 h->intr[0] = hpsa_msix_entries[0].vector;
3239 h->intr[1] = hpsa_msix_entries[1].vector;
3240 h->intr[2] = hpsa_msix_entries[2].vector;
3241 h->intr[3] = hpsa_msix_entries[3].vector;
3242 h->msix_vector = 1;
3243 return;
3244 }
3245 if (err > 0) {
3246 dev_warn(&pdev->dev, "only %d MSI-X vectors "
3247 "available\n", err);
3248 goto default_int_mode;
3249 } else {
3250 dev_warn(&pdev->dev, "MSI-X init failed %d\n",
3251 err);
3252 goto default_int_mode;
3253 }
3254 }
3255 if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
3256 dev_info(&pdev->dev, "MSI\n");
3257 if (!pci_enable_msi(pdev))
3258 h->msi_vector = 1;
3259 else
3260 dev_warn(&pdev->dev, "MSI init failed\n");
3261 }
3262default_int_mode:
3263#endif /* CONFIG_PCI_MSI */
3264 /* if we get here we're going to use the default interrupt mode */
3265 h->intr[PERF_MODE_INT] = pdev->irq;
3266}
3267
3268static int __devinit hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
3269{
3270 ushort subsystem_vendor_id, subsystem_device_id, command;
3271 u32 board_id, scratchpad = 0;
3272 u64 cfg_offset;
3273 u32 cfg_base_addr;
3274 u64 cfg_base_addr_index;
3275 u32 trans_offset;
3276 int i, prod_index, err;
3277
3278 subsystem_vendor_id = pdev->subsystem_vendor;
3279 subsystem_device_id = pdev->subsystem_device;
3280 board_id = (((u32) (subsystem_device_id << 16) & 0xffff0000) |
3281 subsystem_vendor_id);
3282
3283 for (i = 0; i < ARRAY_SIZE(products); i++)
3284 if (board_id == products[i].board_id)
3285 break;
3286
3287 prod_index = i;
3288
3289 if (prod_index == ARRAY_SIZE(products)) {
3290 prod_index--;
3291 if (subsystem_vendor_id != PCI_VENDOR_ID_HP ||
3292 !hpsa_allow_any) {
3293 dev_warn(&pdev->dev, "unrecognized board ID:"
3294 " 0x%08lx, ignoring.\n",
3295 (unsigned long) board_id);
3296 return -ENODEV;
3297 }
3298 }
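	/* Editor's note: an illustrative example of the board_id layout
	 * computed above (hypothetical values): subsystem vendor 0x103C
	 * (HP) and subsystem device 0x3225 combine to
	 * board_id = (0x3225 << 16) | 0x103C = 0x3225103C, the same form
	 * as the IDs in the products[] table and the P600 check below.
	 */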
3299 /* check to see if controller has been disabled
3300 * BEFORE trying to enable it
3301 */
3302 (void)pci_read_config_word(pdev, PCI_COMMAND, &command);
3303 if (!(command & 0x02)) {
3304 dev_warn(&pdev->dev, "controller appears to be disabled\n");
3305 return -ENODEV;
3306 }
3307
3308 err = pci_enable_device(pdev);
3309 if (err) {
3310 dev_warn(&pdev->dev, "unable to enable PCI device\n");
3311 return err;
3312 }
3313
3314 err = pci_request_regions(pdev, "hpsa");
3315 if (err) {
3316 dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
3317 return err;
3318 }
3319
3320 /* If the kernel supports MSI/MSI-X we will try to enable that,
3321 * else we use the IO-APIC interrupt assigned to us by system ROM.
3322 */
3323 hpsa_interrupt_mode(h, pdev, board_id);
3324
3325 /* find the memory BAR */
3326 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
3327 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM)
3328 break;
3329 }
3330 if (i == DEVICE_COUNT_RESOURCE) {
3331 dev_warn(&pdev->dev, "no memory BAR found\n");
3332 err = -ENODEV;
3333 goto err_out_free_res;
3334 }
3335
3336 h->paddr = pci_resource_start(pdev, i); /* addressing mode bits
3337 * already removed
3338 */
3339
3340 h->vaddr = remap_pci_mem(h->paddr, 0x250);
3341
3342 /* Wait for the board to become ready. */
3343 for (i = 0; i < HPSA_BOARD_READY_ITERATIONS; i++) {
3344 scratchpad = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
3345 if (scratchpad == HPSA_FIRMWARE_READY)
3346 break;
3347 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
3348 }
3349 if (scratchpad != HPSA_FIRMWARE_READY) {
3350 dev_warn(&pdev->dev, "board not ready, timed out.\n");
3351 err = -ENODEV;
3352 goto err_out_free_res;
3353 }
3354
3355 /* get the address index number */
3356 cfg_base_addr = readl(h->vaddr + SA5_CTCFG_OFFSET);
3357 cfg_base_addr &= (u32) 0x0000ffff;
3358 cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
3359 if (cfg_base_addr_index == -1) {
3360 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
3361 err = -ENODEV;
3362 goto err_out_free_res;
3363 }
3364
3365 cfg_offset = readl(h->vaddr + SA5_CTMEM_OFFSET);
3366 h->cfgtable = remap_pci_mem(pci_resource_start(pdev,
3367 cfg_base_addr_index) + cfg_offset,
3368 sizeof(h->cfgtable));
3369 /* Find performant mode table. */
3370 trans_offset = readl(&(h->cfgtable->TransMethodOffset));
3371 h->transtable = remap_pci_mem(pci_resource_start(pdev,
3372 cfg_base_addr_index)+cfg_offset+trans_offset,
3373 sizeof(*h->transtable));
3374
3375 h->board_id = board_id;
3376 h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
3377 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
3378
3379 /*
 3380	 * Limit in-command s/g elements to 32 to save DMA-able memory.
 3381	 * However, the spec says if the value is 0, use 31.
3382 */
3383
3384 h->max_cmd_sg_entries = 31;
3385 if (h->maxsgentries > 512) {
3386 h->max_cmd_sg_entries = 32;
3387 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1;
3388 h->maxsgentries--; /* save one for chain pointer */
3389 } else {
3390 h->maxsgentries = 31; /* default to traditional values */
3391 h->chainsize = 0;
3392 }
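	/* Editor's note: a hypothetical worked example of the sizing above.
	 * If the board reported maxsgentries = 544, then
	 * max_cmd_sg_entries = 32, chainsize = 544 - 32 + 1 = 513, and
	 * maxsgentries becomes 543 (one entry reserved for the chain
	 * pointer). A board reporting 512 or fewer falls back to the
	 * traditional 31 in-command entries with no chaining.
	 */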
3393
3394 h->product_name = products[prod_index].product_name;
3395 h->access = *(products[prod_index].access);
3396 /* Allow room for some ioctls */
3397 h->nr_cmds = h->max_commands - 4;
3398
3399 if ((readb(&h->cfgtable->Signature[0]) != 'C') ||
3400 (readb(&h->cfgtable->Signature[1]) != 'I') ||
3401 (readb(&h->cfgtable->Signature[2]) != 'S') ||
3402 (readb(&h->cfgtable->Signature[3]) != 'S')) {
3403 dev_warn(&pdev->dev, "not a valid CISS config table\n");
3404 err = -ENODEV;
3405 goto err_out_free_res;
3406 }
3407#ifdef CONFIG_X86
3408 {
3409 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
3410 u32 prefetch;
3411 prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
3412 prefetch |= 0x100;
3413 writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
3414 }
3415#endif
3416
3417 /* Disabling DMA prefetch for the P600
3418 * An ASIC bug may result in a prefetch beyond
3419 * physical memory.
3420 */
3421 if (board_id == 0x3225103C) {
3422 u32 dma_prefetch;
3423 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
3424 dma_prefetch |= 0x8000;
3425 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
3426 }
3427
3428 h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
3429 /* Update the field, and then ring the doorbell */
3430 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
3431 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
3432
 3433	/* under certain very rare conditions, this can take a while.
3434 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
3435 * as we enter this code.)
3436 */
3437 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
3438 if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
3439 break;
3440 /* delay and try again */
3441 msleep(10);
3442 }
3443
3444#ifdef HPSA_DEBUG
3445 print_cfg_table(&pdev->dev, h->cfgtable);
3446#endif /* HPSA_DEBUG */
3447
3448 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
3449 dev_warn(&pdev->dev, "unable to get board into simple mode\n");
3450 err = -ENODEV;
3451 goto err_out_free_res;
3452 }
3453 return 0;
3454
3455err_out_free_res:
3456 /*
3457 * Deliberately omit pci_disable_device(): it does something nasty to
3458 * Smart Array controllers that pci_enable_device does not undo
3459 */
3460 pci_release_regions(pdev);
3461 return err;
3462}
3463
3464static void __devinit hpsa_hba_inquiry(struct ctlr_info *h)
3465{
3466 int rc;
3467
3468#define HBA_INQUIRY_BYTE_COUNT 64
3469 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
3470 if (!h->hba_inquiry_data)
3471 return;
3472 rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
3473 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
3474 if (rc != 0) {
3475 kfree(h->hba_inquiry_data);
3476 h->hba_inquiry_data = NULL;
3477 }
3478}
3479
3480static int __devinit hpsa_init_one(struct pci_dev *pdev,
3481 const struct pci_device_id *ent)
3482{
3483 int i, rc;
3484 int dac;
3485 struct ctlr_info *h;
3486
3487 if (number_of_controllers == 0)
3488 printk(KERN_INFO DRIVER_NAME "\n");
3489 if (reset_devices) {
3490 /* Reset the controller with a PCI power-cycle */
3491 if (hpsa_hard_reset_controller(pdev) || hpsa_reset_msi(pdev))
3492 return -ENODEV;
3493
3494 /* Some devices (notably the HP Smart Array 5i Controller)
3495 need a little pause here */
3496 msleep(HPSA_POST_RESET_PAUSE_MSECS);
3497
3498 /* Now try to get the controller to respond to a no-op */
3499 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
3500 if (hpsa_noop(pdev) == 0)
3501 break;
3502 else
3503 dev_warn(&pdev->dev, "no-op failed%s\n",
3504 (i < 11 ? "; re-trying" : ""));
3505 }
3506 }
3507
3508 /* Command structures must be aligned on a 32-byte boundary because
 3509	 * the 5 lower bits of the address are used by the hardware and by
 3510	 * the driver.  See comments in hpsa.h for more info.
3511 */
3512#define COMMANDLIST_ALIGNMENT 32
3513 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
3514 h = kzalloc(sizeof(*h), GFP_KERNEL);
3515 if (!h)
3516 return -ENOMEM;
3517
3518 h->busy_initializing = 1;
3519 INIT_HLIST_HEAD(&h->cmpQ);
3520 INIT_HLIST_HEAD(&h->reqQ);
3521 rc = hpsa_pci_init(h, pdev);
3522 if (rc != 0)
3523 goto clean1;
3524
3525 sprintf(h->devname, "hpsa%d", number_of_controllers);
3526 h->ctlr = number_of_controllers;
3527 number_of_controllers++;
3528 h->pdev = pdev;
3529
3530 /* configure PCI DMA stuff */
3531 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
3532 if (rc == 0) {
3533 dac = 1;
3534 } else {
3535 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3536 if (rc == 0) {
3537 dac = 0;
3538 } else {
3539 dev_err(&pdev->dev, "no suitable DMA available\n");
3540 goto clean1;
3541 }
3542 }
3543
3544 /* make sure the board interrupts are off */
3545 h->access.set_intr_mask(h, HPSA_INTR_OFF);
3546 rc = request_irq(h->intr[PERF_MODE_INT], do_hpsa_intr,
3547 IRQF_DISABLED, h->devname, h);
3548 if (rc) {
3549 dev_err(&pdev->dev, "unable to get irq %d for %s\n",
3550 h->intr[PERF_MODE_INT], h->devname);
3551 goto clean2;
3552 }
3553
3554 dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
3555 h->devname, pdev->device,
3556 h->intr[PERF_MODE_INT], dac ? "" : " not");
3557
3558 h->cmd_pool_bits =
3559 kmalloc(((h->nr_cmds + BITS_PER_LONG -
3560 1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL);
3561 h->cmd_pool = pci_alloc_consistent(h->pdev,
3562 h->nr_cmds * sizeof(*h->cmd_pool),
3563 &(h->cmd_pool_dhandle));
3564 h->errinfo_pool = pci_alloc_consistent(h->pdev,
3565 h->nr_cmds * sizeof(*h->errinfo_pool),
3566 &(h->errinfo_pool_dhandle));
3567 if ((h->cmd_pool_bits == NULL)
3568 || (h->cmd_pool == NULL)
3569 || (h->errinfo_pool == NULL)) {
3570 dev_err(&pdev->dev, "out of memory");
3571 rc = -ENOMEM;
3572 goto clean4;
3573 }
3574 if (hpsa_allocate_sg_chain_blocks(h))
3575 goto clean4;
3576 spin_lock_init(&h->lock);
3577 spin_lock_init(&h->scan_lock);
3578 init_waitqueue_head(&h->scan_wait_queue);
3579 h->scan_finished = 1; /* no scan currently in progress */
3580
3581 pci_set_drvdata(pdev, h);
3582 memset(h->cmd_pool_bits, 0,
3583 ((h->nr_cmds + BITS_PER_LONG -
3584 1) / BITS_PER_LONG) * sizeof(unsigned long));
3585
3586 hpsa_scsi_setup(h);
3587
3588 /* Turn the interrupts on so we can service requests */
3589 h->access.set_intr_mask(h, HPSA_INTR_ON);
3590
3591 hpsa_put_ctlr_into_performant_mode(h);
3592 hpsa_hba_inquiry(h);
3593 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
3594 h->busy_initializing = 0;
 3595	return 0; /* PCI probe functions must return 0 on success, not 1 */
3596
3597clean4:
3598 hpsa_free_sg_chain_blocks(h);
3599 kfree(h->cmd_pool_bits);
3600 if (h->cmd_pool)
3601 pci_free_consistent(h->pdev,
3602 h->nr_cmds * sizeof(struct CommandList),
3603 h->cmd_pool, h->cmd_pool_dhandle);
3604 if (h->errinfo_pool)
3605 pci_free_consistent(h->pdev,
3606 h->nr_cmds * sizeof(struct ErrorInfo),
3607 h->errinfo_pool,
3608 h->errinfo_pool_dhandle);
3609 free_irq(h->intr[PERF_MODE_INT], h);
3610clean2:
3611clean1:
3612 h->busy_initializing = 0;
3613 kfree(h);
3614 return rc;
3615}
3616
3617static void hpsa_flush_cache(struct ctlr_info *h)
3618{
3619 char *flush_buf;
3620 struct CommandList *c;
3621
3622 flush_buf = kzalloc(4, GFP_KERNEL);
3623 if (!flush_buf)
3624 return;
3625
3626 c = cmd_special_alloc(h);
3627 if (!c) {
3628 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
3629 goto out_of_memory;
3630 }
3631 fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
3632 RAID_CTLR_LUNID, TYPE_CMD);
3633 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
3634 if (c->err_info->CommandStatus != 0)
3635 dev_warn(&h->pdev->dev,
3636 "error flushing cache on controller\n");
3637 cmd_special_free(h, c);
3638out_of_memory:
3639 kfree(flush_buf);
3640}
3641
3642static void hpsa_shutdown(struct pci_dev *pdev)
3643{
3644 struct ctlr_info *h;
3645
3646 h = pci_get_drvdata(pdev);
 3647	/* Turn board interrupts off and send the flush-cache command:
 3648	 * this writes all data in the battery-backed cache out to disk
 3649	 * before the controller is powered down.
3650 */
3651 hpsa_flush_cache(h);
3652 h->access.set_intr_mask(h, HPSA_INTR_OFF);
3653 free_irq(h->intr[PERF_MODE_INT], h);
3654#ifdef CONFIG_PCI_MSI
3655 if (h->msix_vector)
3656 pci_disable_msix(h->pdev);
3657 else if (h->msi_vector)
3658 pci_disable_msi(h->pdev);
3659#endif /* CONFIG_PCI_MSI */
3660}
3661
3662static void __devexit hpsa_remove_one(struct pci_dev *pdev)
3663{
3664 struct ctlr_info *h;
3665
3666 if (pci_get_drvdata(pdev) == NULL) {
3667 dev_err(&pdev->dev, "unable to remove device \n");
3668 return;
3669 }
3670 h = pci_get_drvdata(pdev);
3671 hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */
3672 hpsa_shutdown(pdev);
3673 iounmap(h->vaddr);
3674 hpsa_free_sg_chain_blocks(h);
3675 pci_free_consistent(h->pdev,
3676 h->nr_cmds * sizeof(struct CommandList),
3677 h->cmd_pool, h->cmd_pool_dhandle);
3678 pci_free_consistent(h->pdev,
3679 h->nr_cmds * sizeof(struct ErrorInfo),
3680 h->errinfo_pool, h->errinfo_pool_dhandle);
3681 pci_free_consistent(h->pdev, h->reply_pool_size,
3682 h->reply_pool, h->reply_pool_dhandle);
3683 kfree(h->cmd_pool_bits);
3684 kfree(h->blockFetchTable);
3685 kfree(h->hba_inquiry_data);
3686 /*
3687 * Deliberately omit pci_disable_device(): it does something nasty to
3688 * Smart Array controllers that pci_enable_device does not undo
3689 */
3690 pci_release_regions(pdev);
3691 pci_set_drvdata(pdev, NULL);
3692 kfree(h);
3693}
3694
3695static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
3696 __attribute__((unused)) pm_message_t state)
3697{
3698 return -ENOSYS;
3699}
3700
3701static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
3702{
3703 return -ENOSYS;
3704}
3705
3706static struct pci_driver hpsa_pci_driver = {
3707 .name = "hpsa",
3708 .probe = hpsa_init_one,
3709 .remove = __devexit_p(hpsa_remove_one),
3710 .id_table = hpsa_pci_device_id, /* id_table */
3711 .shutdown = hpsa_shutdown,
3712 .suspend = hpsa_suspend,
3713 .resume = hpsa_resume,
3714};
3715
3716/* Fill in bucket_map[], given nsgs (the max number of
3717 * scatter gather elements supported) and bucket[],
3718 * which is an array of 8 integers. The bucket[] array
3719 * contains 8 different DMA transfer sizes (in 16
3720 * byte increments) which the controller uses to fetch
3721 * commands. This function fills in bucket_map[], which
3722 * maps a given number of scatter gather elements to one of
3723 * the 8 DMA transfer sizes. The point of it is to allow the
3724 * controller to only do as much DMA as needed to fetch the
3725 * command, with the DMA transfer size encoded in the lower
3726 * bits of the command address.
3727 */
3728static void calc_bucket_map(int bucket[], int num_buckets,
3729 int nsgs, int *bucket_map)
3730{
3731 int i, j, b, size;
3732
3733 /* even a command with 0 SGs requires 4 blocks */
3734#define MINIMUM_TRANSFER_BLOCKS 4
3735#define NUM_BUCKETS 8
3736 /* Note, bucket_map must have nsgs+1 entries. */
3737 for (i = 0; i <= nsgs; i++) {
3738 /* Compute size of a command with i SG entries */
3739 size = i + MINIMUM_TRANSFER_BLOCKS;
3740 b = num_buckets; /* Assume the biggest bucket */
3741 /* Find the bucket that is just big enough */
3742 for (j = 0; j < 8; j++) {
3743 if (bucket[j] >= size) {
3744 b = j;
3745 break;
3746 }
3747 }
3748 /* for a command with i SG entries, use bucket b. */
3749 bucket_map[i] = b;
3750 }
3751}
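/* Editor's note: a worked example of the mapping above (illustrative,
 * using the bft[] table defined below). With
 * bucket[] = {5, 6, 8, 10, 12, 20, 28, 35}, a command with 3 SG entries
 * has size = 3 + MINIMUM_TRANSFER_BLOCKS = 7 blocks; the first bucket
 * >= 7 is bucket[2] = 8, so bucket_map[3] = 2 and the controller
 * fetches 8 * 16 = 128 bytes for that command.
 */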
3752
3753static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
3754{
3755 u32 trans_support;
3756 u64 trans_offset;
3757 /* 5 = 1 s/g entry or 4k
3758 * 6 = 2 s/g entry or 8k
3759 * 8 = 4 s/g entry or 16k
3760 * 10 = 6 s/g entry or 24k
3761 */
3762 int bft[8] = {5, 6, 8, 10, 12, 20, 28, 35}; /* for scatter/gathers */
3763 int i = 0;
3764 int l = 0;
3765 unsigned long register_value;
3766
3767 trans_support = readl(&(h->cfgtable->TransportSupport));
3768 if (!(trans_support & PERFORMANT_MODE))
3769 return;
3770
3771 h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
3772 h->max_sg_entries = 32;
3773 /* Performant mode ring buffer and supporting data structures */
3774 h->reply_pool_size = h->max_commands * sizeof(u64);
3775 h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
3776 &(h->reply_pool_dhandle));
3777
3778 /* Need a block fetch table for performant mode */
3779 h->blockFetchTable = kmalloc(((h->max_sg_entries+1) *
3780 sizeof(u32)), GFP_KERNEL);
3781
3782 if ((h->reply_pool == NULL)
3783 || (h->blockFetchTable == NULL))
3784 goto clean_up;
3785
3786 h->reply_pool_wraparound = 1; /* spec: init to 1 */
3787
3788 /* Controller spec: zero out this buffer. */
3789 memset(h->reply_pool, 0, h->reply_pool_size);
3790 h->reply_pool_head = h->reply_pool;
3791
3792 trans_offset = readl(&(h->cfgtable->TransMethodOffset));
3793 bft[7] = h->max_sg_entries + 4;
3794 calc_bucket_map(bft, ARRAY_SIZE(bft), 32, h->blockFetchTable);
3795 for (i = 0; i < 8; i++)
3796 writel(bft[i], &h->transtable->BlockFetch[i]);
3797
3798 /* size of controller ring buffer */
3799 writel(h->max_commands, &h->transtable->RepQSize);
3800 writel(1, &h->transtable->RepQCount);
3801 writel(0, &h->transtable->RepQCtrAddrLow32);
3802 writel(0, &h->transtable->RepQCtrAddrHigh32);
3803 writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
3804 writel(0, &h->transtable->RepQAddr0High32);
3805 writel(CFGTBL_Trans_Performant,
3806 &(h->cfgtable->HostWrite.TransportRequest));
3807 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
 3808	/* under certain very rare conditions, this can take a while.
3809 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
3810 * as we enter this code.) */
3811 for (l = 0; l < MAX_CONFIG_WAIT; l++) {
3812 register_value = readl(h->vaddr + SA5_DOORBELL);
3813 if (!(register_value & CFGTBL_ChangeReq))
3814 break;
3815 /* delay and try again */
3816 set_current_state(TASK_INTERRUPTIBLE);
3817 schedule_timeout(10);
3818 }
3819 register_value = readl(&(h->cfgtable->TransportActive));
3820 if (!(register_value & CFGTBL_Trans_Performant)) {
3821 dev_warn(&h->pdev->dev, "unable to get board into"
3822 " performant mode\n");
3823 return;
3824 }
3825
3826 /* Change the access methods to the performant access methods */
3827 h->access = SA5_performant_access;
3828 h->transMethod = CFGTBL_Trans_Performant;
3829
3830 return;
3831
3832clean_up:
3833 if (h->reply_pool)
3834 pci_free_consistent(h->pdev, h->reply_pool_size,
3835 h->reply_pool, h->reply_pool_dhandle);
3836 kfree(h->blockFetchTable);
3837}
3838
3839/*
 3840 * This is it.  Register the PCI driver information for the cards we control;
 3841 * the OS will call our registered routines when it finds one of our cards.
3842 */
3843static int __init hpsa_init(void)
3844{
3845 return pci_register_driver(&hpsa_pci_driver);
3846}
3847
3848static void __exit hpsa_cleanup(void)
3849{
3850 pci_unregister_driver(&hpsa_pci_driver);
3851}
3852
3853module_init(hpsa_init);
3854module_exit(hpsa_cleanup);
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
new file mode 100644
index 000000000000..1bb5233b09a0
--- /dev/null
+++ b/drivers/scsi/hpsa.h
@@ -0,0 +1,362 @@
1/*
2 * Disk Array driver for HP Smart Array SAS controllers
3 * Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
12 * NON INFRINGEMENT. See the GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 *
18 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
19 *
20 */
21#ifndef HPSA_H
22#define HPSA_H
23
24#include <scsi/scsicam.h>
25
26#define IO_OK 0
27#define IO_ERROR 1
28
29struct ctlr_info;
30
31struct access_method {
32 void (*submit_command)(struct ctlr_info *h,
33 struct CommandList *c);
34 void (*set_intr_mask)(struct ctlr_info *h, unsigned long val);
35 unsigned long (*fifo_full)(struct ctlr_info *h);
36 bool (*intr_pending)(struct ctlr_info *h);
37 unsigned long (*command_completed)(struct ctlr_info *h);
38};
39
40struct hpsa_scsi_dev_t {
41 int devtype;
42 int bus, target, lun; /* as presented to the OS */
43 unsigned char scsi3addr[8]; /* as presented to the HW */
44#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
45 unsigned char device_id[16]; /* from inquiry pg. 0x83 */
46 unsigned char vendor[8]; /* bytes 8-15 of inquiry data */
47 unsigned char model[16]; /* bytes 16-31 of inquiry data */
48 unsigned char revision[4]; /* bytes 32-35 of inquiry data */
49 unsigned char raid_level; /* from inquiry page 0xC1 */
50};
51
52struct ctlr_info {
53 int ctlr;
54 char devname[8];
55 char *product_name;
56 char firm_ver[4]; /* Firmware version */
57 struct pci_dev *pdev;
58 u32 board_id;
59 void __iomem *vaddr;
60 unsigned long paddr;
61 int nr_cmds; /* Number of commands allowed on this controller */
62 struct CfgTable __iomem *cfgtable;
63 int max_sg_entries;
64 int interrupts_enabled;
65 int major;
66 int max_commands;
67 int commands_outstanding;
68 int max_outstanding; /* Debug */
  69	int	usage_count;  /* number of opens on all minor devices */
70# define PERF_MODE_INT 0
71# define DOORBELL_INT 1
72# define SIMPLE_MODE_INT 2
73# define MEMQ_MODE_INT 3
74 unsigned int intr[4];
75 unsigned int msix_vector;
76 unsigned int msi_vector;
77 struct access_method access;
78
79 /* queue and queue Info */
80 struct hlist_head reqQ;
81 struct hlist_head cmpQ;
82 unsigned int Qdepth;
83 unsigned int maxQsinceinit;
84 unsigned int maxSG;
85 spinlock_t lock;
86 int maxsgentries;
87 u8 max_cmd_sg_entries;
88 int chainsize;
89 struct SGDescriptor **cmd_sg_list;
90
91 /* pointers to command and error info pool */
92 struct CommandList *cmd_pool;
93 dma_addr_t cmd_pool_dhandle;
94 struct ErrorInfo *errinfo_pool;
95 dma_addr_t errinfo_pool_dhandle;
96 unsigned long *cmd_pool_bits;
97 int nr_allocs;
98 int nr_frees;
99 int busy_initializing;
100 int busy_scanning;
101 int scan_finished;
102 spinlock_t scan_lock;
103 wait_queue_head_t scan_wait_queue;
104
105 struct Scsi_Host *scsi_host;
106 spinlock_t devlock; /* to protect hba[ctlr]->dev[]; */
107 int ndevices; /* number of used elements in .dev[] array. */
108#define HPSA_MAX_SCSI_DEVS_PER_HBA 256
109 struct hpsa_scsi_dev_t *dev[HPSA_MAX_SCSI_DEVS_PER_HBA];
110 /*
111 * Performant mode tables.
112 */
113 u32 trans_support;
114 u32 trans_offset;
115 struct TransTable_struct *transtable;
116 unsigned long transMethod;
117
118 /*
119 * Performant mode completion buffer
120 */
121 u64 *reply_pool;
122 dma_addr_t reply_pool_dhandle;
123 u64 *reply_pool_head;
124 size_t reply_pool_size;
125 unsigned char reply_pool_wraparound;
126 u32 *blockFetchTable;
127 unsigned char *hba_inquiry_data;
128};
129#define HPSA_ABORT_MSG 0
130#define HPSA_DEVICE_RESET_MSG 1
131#define HPSA_BUS_RESET_MSG 2
132#define HPSA_HOST_RESET_MSG 3
133#define HPSA_MSG_SEND_RETRY_LIMIT 10
134#define HPSA_MSG_SEND_RETRY_INTERVAL_MSECS 1000
135
 136/* Maximum time in seconds the driver will wait for command completions
137 * when polling before giving up.
138 */
139#define HPSA_MAX_POLL_TIME_SECS (20)
140
141/* During SCSI error recovery, HPSA_TUR_RETRY_LIMIT defines
142 * how many times to retry TEST UNIT READY on a device
143 * while waiting for it to become ready before giving up.
144 * HPSA_MAX_WAIT_INTERVAL_SECS is the max wait interval
145 * between sending TURs while waiting for a device
146 * to become ready.
147 */
148#define HPSA_TUR_RETRY_LIMIT (20)
149#define HPSA_MAX_WAIT_INTERVAL_SECS (30)
150
151/* HPSA_BOARD_READY_WAIT_SECS is how long to wait for a board
152 * to become ready, in seconds, before giving up on it.
 153 * HPSA_BOARD_READY_POLL_INTERVAL_MSECS is how long to wait
154 * between polling the board to see if it is ready, in
155 * milliseconds. HPSA_BOARD_READY_POLL_INTERVAL and
156 * HPSA_BOARD_READY_ITERATIONS are derived from those.
157 */
158#define HPSA_BOARD_READY_WAIT_SECS (120)
159#define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100)
160#define HPSA_BOARD_READY_POLL_INTERVAL \
161 ((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000)
162#define HPSA_BOARD_READY_ITERATIONS \
163 ((HPSA_BOARD_READY_WAIT_SECS * 1000) / \
164 HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
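/* Editor's note: with the values above the derivation works out to
 * HPSA_BOARD_READY_ITERATIONS = (120 * 1000) / 100 = 1200 polls and
 * HPSA_BOARD_READY_POLL_INTERVAL = (100 * HZ) / 1000 = HZ / 10 jiffies
 * (e.g. 25 jiffies when HZ = 250), i.e. poll every 100 ms for up to
 * two minutes.
 */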
165#define HPSA_POST_RESET_PAUSE_MSECS (3000)
166#define HPSA_POST_RESET_NOOP_RETRIES (12)
167
 168/* Defining the different access methods */
169/*
170 * Memory mapped FIFO interface (SMART 53xx cards)
171 */
172#define SA5_DOORBELL 0x20
173#define SA5_REQUEST_PORT_OFFSET 0x40
174#define SA5_REPLY_INTR_MASK_OFFSET 0x34
175#define SA5_REPLY_PORT_OFFSET 0x44
176#define SA5_INTR_STATUS 0x30
177#define SA5_SCRATCHPAD_OFFSET 0xB0
178
179#define SA5_CTCFG_OFFSET 0xB4
180#define SA5_CTMEM_OFFSET 0xB8
181
182#define SA5_INTR_OFF 0x08
183#define SA5B_INTR_OFF 0x04
184#define SA5_INTR_PENDING 0x08
185#define SA5B_INTR_PENDING 0x04
186#define FIFO_EMPTY 0xffffffff
187#define HPSA_FIRMWARE_READY 0xffff0000 /* value in scratchpad register */
188
189#define HPSA_ERROR_BIT 0x02
190
191/* Performant mode flags */
192#define SA5_PERF_INTR_PENDING 0x04
193#define SA5_PERF_INTR_OFF 0x05
194#define SA5_OUTDB_STATUS_PERF_BIT 0x01
195#define SA5_OUTDB_CLEAR_PERF_BIT 0x01
196#define SA5_OUTDB_CLEAR 0xA0
198#define SA5_OUTDB_STATUS 0x9C
199
200
201#define HPSA_INTR_ON 1
202#define HPSA_INTR_OFF 0
 203/*
 204 * Send the command to the hardware.
 205 */
206static void SA5_submit_command(struct ctlr_info *h,
207 struct CommandList *c)
208{
209 dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr,
210 c->Header.Tag.lower);
211 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
212 h->commands_outstanding++;
213 if (h->commands_outstanding > h->max_outstanding)
214 h->max_outstanding = h->commands_outstanding;
215}
216
217/*
218 * This card is the opposite of the other cards.
219 * 0 turns interrupts on...
220 * 0x08 turns them off...
221 */
222static void SA5_intr_mask(struct ctlr_info *h, unsigned long val)
223{
224 if (val) { /* Turn interrupts on */
225 h->interrupts_enabled = 1;
226 writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
227 } else { /* Turn them off */
228 h->interrupts_enabled = 0;
229 writel(SA5_INTR_OFF,
230 h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
231 }
232}
233
234static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val)
235{
236 if (val) { /* turn on interrupts */
237 h->interrupts_enabled = 1;
238 writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
239 } else {
240 h->interrupts_enabled = 0;
241 writel(SA5_PERF_INTR_OFF,
242 h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
243 }
244}
245
246static unsigned long SA5_performant_completed(struct ctlr_info *h)
247{
248 unsigned long register_value = FIFO_EMPTY;
249
250 /* flush the controller write of the reply queue by reading
251 * outbound doorbell status register.
252 */
253 register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
254 /* msi auto clears the interrupt pending bit. */
255 if (!(h->msi_vector || h->msix_vector)) {
256 writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
257 /* Do a read in order to flush the write to the controller
258 * (as per spec.)
259 */
260 register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
261 }
262
263 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
264 register_value = *(h->reply_pool_head);
265 (h->reply_pool_head)++;
266 h->commands_outstanding--;
267 } else {
268 register_value = FIFO_EMPTY;
269 }
270 /* Check for wraparound */
271 if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
272 h->reply_pool_head = h->reply_pool;
273 h->reply_pool_wraparound ^= 1;
274 }
275
276 return register_value;
277}
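/* Editor's note: a sketch of the ring discipline implemented above
 * (assuming the reply ring works as the code suggests). The controller
 * posts 8-byte tags whose low bit carries a "cycle" flag; the host
 * consumes entries while that flag matches reply_pool_wraparound:
 *
 *	pass 1: controller writes tags with bit 0 = 1, host consumes
 *	        while (*reply_pool_head & 1) == 1;
 *	wrap:   head returns to reply_pool, reply_pool_wraparound ^= 1;
 *	pass 2: controller overwrites with bit 0 = 0, and so on.
 *
 * Stale entries from the previous pass fail the parity test, so the
 * host never reads a slot the controller has not rewritten.
 */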
278
279/*
 280 * Returns true if the fifo is full.
 281 */
283static unsigned long SA5_fifo_full(struct ctlr_info *h)
284{
285 if (h->commands_outstanding >= h->max_commands)
286 return 1;
287 else
288 return 0;
289
290}
291/*
292 * returns value read from hardware.
293 * returns FIFO_EMPTY if there is nothing to read
294 */
295static unsigned long SA5_completed(struct ctlr_info *h)
296{
297 unsigned long register_value
298 = readl(h->vaddr + SA5_REPLY_PORT_OFFSET);
299
300 if (register_value != FIFO_EMPTY)
301 h->commands_outstanding--;
302
303#ifdef HPSA_DEBUG
304 if (register_value != FIFO_EMPTY)
305 dev_dbg(&h->pdev->dev, "Read %lx back from board\n",
306 register_value);
307 else
308 dev_dbg(&h->pdev->dev, "hpsa: FIFO Empty read\n");
309#endif
310
311 return register_value;
312}
313/*
 314 * Returns true if an interrupt is pending.
315 */
316static bool SA5_intr_pending(struct ctlr_info *h)
317{
318 unsigned long register_value =
319 readl(h->vaddr + SA5_INTR_STATUS);
320 dev_dbg(&h->pdev->dev, "intr_pending %lx\n", register_value);
321 return register_value & SA5_INTR_PENDING;
322}
323
324static bool SA5_performant_intr_pending(struct ctlr_info *h)
325{
326 unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);
327
328 if (!register_value)
329 return false;
330
331 if (h->msi_vector || h->msix_vector)
332 return true;
333
334 /* Read outbound doorbell to flush */
335 register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
336 return register_value & SA5_OUTDB_STATUS_PERF_BIT;
337}
338
339static struct access_method SA5_access = {
340 SA5_submit_command,
341 SA5_intr_mask,
342 SA5_fifo_full,
343 SA5_intr_pending,
344 SA5_completed,
345};
346
347static struct access_method SA5_performant_access = {
348 SA5_submit_command,
349 SA5_performant_intr_mask,
350 SA5_fifo_full,
351 SA5_performant_intr_pending,
352 SA5_performant_completed,
353};
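/* Editor's note: these tables are plain C "vtables". hpsa.c copies one
 * into the per-controller struct and dispatches through it, e.g.:
 *
 *	h->access = *(products[prod_index].access);   // simple mode, or
 *	h->access = SA5_performant_access;            // performant mode
 *	h->access.set_intr_mask(h, HPSA_INTR_ON);
 *	if (h->access.intr_pending(h))
 *		tag = h->access.command_completed(h);
 *
 * so the same driver paths work for both the simple FIFO and the
 * performant-mode hardware interfaces.
 */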
354
355struct board_type {
356 u32 board_id;
357 char *product_name;
358 struct access_method *access;
359};
360
361#endif /* HPSA_H */
362
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
new file mode 100644
index 000000000000..56fb9827681e
--- /dev/null
+++ b/drivers/scsi/hpsa_cmd.h
@@ -0,0 +1,378 @@
1/*
2 * Disk Array driver for HP Smart Array SAS controllers
3 * Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
12 * NON INFRINGEMENT. See the GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 *
18 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
19 *
20 */
21#ifndef HPSA_CMD_H
22#define HPSA_CMD_H
23
  24/* general boundary definitions */
25#define SENSEINFOBYTES 32 /* may vary between hbas */
26#define MAXSGENTRIES 32
27#define HPSA_SG_CHAIN 0x80000000
28#define MAXREPLYQS 256
29
30/* Command Status value */
31#define CMD_SUCCESS 0x0000
32#define CMD_TARGET_STATUS 0x0001
33#define CMD_DATA_UNDERRUN 0x0002
34#define CMD_DATA_OVERRUN 0x0003
35#define CMD_INVALID 0x0004
36#define CMD_PROTOCOL_ERR 0x0005
37#define CMD_HARDWARE_ERR 0x0006
38#define CMD_CONNECTION_LOST 0x0007
39#define CMD_ABORTED 0x0008
40#define CMD_ABORT_FAILED 0x0009
41#define CMD_UNSOLICITED_ABORT 0x000A
42#define CMD_TIMEOUT 0x000B
43#define CMD_UNABORTABLE 0x000C
44
45/* Unit Attentions ASC's as defined for the MSA2012sa */
46#define POWER_OR_RESET 0x29
47#define STATE_CHANGED 0x2a
48#define UNIT_ATTENTION_CLEARED 0x2f
49#define LUN_FAILED 0x3e
50#define REPORT_LUNS_CHANGED 0x3f
51
52/* Unit Attentions ASCQ's as defined for the MSA2012sa */
53
54 /* These ASCQ's defined for ASC = POWER_OR_RESET */
55#define POWER_ON_RESET 0x00
56#define POWER_ON_REBOOT 0x01
57#define SCSI_BUS_RESET 0x02
58#define MSA_TARGET_RESET 0x03
59#define CONTROLLER_FAILOVER 0x04
60#define TRANSCEIVER_SE 0x05
61#define TRANSCEIVER_LVD 0x06
62
63 /* These ASCQ's defined for ASC = STATE_CHANGED */
64#define RESERVATION_PREEMPTED 0x03
65#define ASYM_ACCESS_CHANGED 0x06
66#define LUN_CAPACITY_CHANGED 0x09
67
68/* transfer direction */
69#define XFER_NONE 0x00
70#define XFER_WRITE 0x01
71#define XFER_READ 0x02
72#define XFER_RSVD 0x03
73
74/* task attribute */
75#define ATTR_UNTAGGED 0x00
76#define ATTR_SIMPLE 0x04
77#define ATTR_HEADOFQUEUE 0x05
78#define ATTR_ORDERED 0x06
79#define ATTR_ACA 0x07
80
81/* cdb type */
82#define TYPE_CMD 0x00
83#define TYPE_MSG 0x01
84
85/* config space register offsets */
86#define CFG_VENDORID 0x00
87#define CFG_DEVICEID 0x02
88#define CFG_I2OBAR 0x10
89#define CFG_MEM1BAR 0x14
90
91/* i2o space register offsets */
92#define I2O_IBDB_SET 0x20
93#define I2O_IBDB_CLEAR 0x70
94#define I2O_INT_STATUS 0x30
95#define I2O_INT_MASK 0x34
96#define I2O_IBPOST_Q 0x40
97#define I2O_OBPOST_Q 0x44
98#define I2O_DMA1_CFG 0x214
99
100/* Configuration Table */
101#define CFGTBL_ChangeReq 0x00000001l
102#define CFGTBL_AccCmds 0x00000001l
103
104#define CFGTBL_Trans_Simple 0x00000002l
105#define CFGTBL_Trans_Performant 0x00000004l
106
107#define CFGTBL_BusType_Ultra2 0x00000001l
108#define CFGTBL_BusType_Ultra3 0x00000002l
109#define CFGTBL_BusType_Fibre1G 0x00000100l
110#define CFGTBL_BusType_Fibre2G 0x00000200l
111struct vals32 {
112 u32 lower;
113 u32 upper;
114};
115
116union u64bit {
117 struct vals32 val32;
118 u64 val;
119};
120
121/* FIXME this is a per controller value (barf!) */
122#define HPSA_MAX_TARGETS_PER_CTLR 16
123#define HPSA_MAX_LUN 256
124#define HPSA_MAX_PHYS_LUN 1024
125
126/* SCSI-3 Commands */
127#pragma pack(1)
128
129#define HPSA_INQUIRY 0x12
130struct InquiryData {
131 u8 data_byte[36];
132};
133
134#define HPSA_REPORT_LOG 0xc2 /* Report Logical LUNs */
135#define HPSA_REPORT_PHYS 0xc3 /* Report Physical LUNs */
136struct ReportLUNdata {
137 u8 LUNListLength[4];
138 u32 reserved;
139 u8 LUN[HPSA_MAX_LUN][8];
140};
141
142struct ReportExtendedLUNdata {
143 u8 LUNListLength[4];
144 u8 extended_response_flag;
145 u8 reserved[3];
146 u8 LUN[HPSA_MAX_LUN][24];
147};
148
149struct SenseSubsystem_info {
150 u8 reserved[36];
151 u8 portname[8];
152 u8 reserved1[1108];
153};
154
155#define HPSA_READ_CAPACITY 0x25 /* Read Capacity */
156struct ReadCapdata {
157 u8 total_size[4]; /* Total size in blocks */
158 u8 block_size[4]; /* Size of blocks in bytes */
159};
160
161#if 0
162/* 12 byte commands not implemented in firmware yet. */
163#define HPSA_READ 0xa8
164#define HPSA_WRITE 0xaa
165#endif
166
167#define HPSA_READ 0x28 /* Read(10) */
168#define HPSA_WRITE 0x2a /* Write(10) */
169
170/* BMIC commands */
171#define BMIC_READ 0x26
172#define BMIC_WRITE 0x27
173#define BMIC_CACHE_FLUSH 0xc2
174#define HPSA_CACHE_FLUSH 0x01 /* C2 was already being used by HPSA */
175
176/* Command List Structure */
177union SCSI3Addr {
178 struct {
179 u8 Dev;
180 u8 Bus:6;
181 u8 Mode:2; /* b00 */
182 } PeripDev;
183 struct {
184 u8 DevLSB;
185 u8 DevMSB:6;
186 u8 Mode:2; /* b01 */
187 } LogDev;
188 struct {
189 u8 Dev:5;
190 u8 Bus:3;
191 u8 Targ:6;
192 u8 Mode:2; /* b10 */
193 } LogUnit;
194};
195
196struct PhysDevAddr {
197 u32 TargetId:24;
198 u32 Bus:6;
199 u32 Mode:2;
200 /* 2 level target device addr */
201 union SCSI3Addr Target[2];
202};
203
204struct LogDevAddr {
205 u32 VolId:30;
206 u32 Mode:2;
207 u8 reserved[4];
208};
209
210union LUNAddr {
211 u8 LunAddrBytes[8];
212 union SCSI3Addr SCSI3Lun[4];
213 struct PhysDevAddr PhysDev;
214 struct LogDevAddr LogDev;
215};
216
217struct CommandListHeader {
218 u8 ReplyQueue;
219 u8 SGList;
220 u16 SGTotal;
221 struct vals32 Tag;
222 union LUNAddr LUN;
223};
224
225struct RequestBlock {
226 u8 CDBLen;
227 struct {
228 u8 Type:3;
229 u8 Attribute:3;
230 u8 Direction:2;
231 } Type;
232 u16 Timeout;
233 u8 CDB[16];
234};
235
236struct ErrDescriptor {
237 struct vals32 Addr;
238 u32 Len;
239};
240
241struct SGDescriptor {
242 struct vals32 Addr;
243 u32 Len;
244 u32 Ext;
245};
246
247union MoreErrInfo {
248 struct {
249 u8 Reserved[3];
250 u8 Type;
251 u32 ErrorInfo;
252 } Common_Info;
253 struct {
254 u8 Reserved[2];
255 u8 offense_size; /* size of offending entry */
256 u8 offense_num; /* byte # of offense 0-base */
257 u32 offense_value;
258 } Invalid_Cmd;
259};
260struct ErrorInfo {
261 u8 ScsiStatus;
262 u8 SenseLen;
263 u16 CommandStatus;
264 u32 ResidualCnt;
265 union MoreErrInfo MoreErrInfo;
266 u8 SenseInfo[SENSEINFOBYTES];
267};
268/* Command types */
269#define CMD_IOCTL_PEND 0x01
270#define CMD_SCSI 0x03
271
272/* This structure needs to be divisible by 32 for new
273 * indexing method and performant mode.
274 */
275#define PAD32 32
276#define PAD64DIFF 0
277#define USEEXTRA ((sizeof(void *) - 4)/4)
278#define PADSIZE (PAD32 + PAD64DIFF * USEEXTRA)
279
280#define DIRECT_LOOKUP_SHIFT 5
281#define DIRECT_LOOKUP_BIT 0x10
282
283#define HPSA_ERROR_BIT 0x02
284struct ctlr_info; /* defined in hpsa.h */
285/* The size of this structure needs to be divisible by 32
 286 * on all architectures because the low 5 bits of the addresses
287 * are used as follows:
288 *
289 * bit 0: to device, used to indicate "performant mode" command
 290 *        from device, indicates error status.
291 * bit 1-3: to device, indicates block fetch table entry for
292 * reducing DMA in fetching commands from host memory.
293 * bit 4: used to indicate whether tag is "direct lookup" (index),
294 * or a bus address.
295 */
296
297struct CommandList {
298 struct CommandListHeader Header;
299 struct RequestBlock Request;
300 struct ErrDescriptor ErrDesc;
301 struct SGDescriptor SG[MAXSGENTRIES];
302 /* information associated with the command */
303 u32 busaddr; /* physical addr of this record */
304 struct ErrorInfo *err_info; /* pointer to the allocated mem */
305 struct ctlr_info *h;
306 int cmd_type;
307 long cmdindex;
308 struct hlist_node list;
309 struct request *rq;
310 struct completion *waiting;
311 void *scsi_cmd;
312
 313/* On 64-bit architectures, to get this 32-byte-aligned it so
 314 * happens we need PAD_64 bytes of padding; on 32-bit systems,
 315 * we need PAD_32 bytes of padding (see below). This does that.
 316 * If it happens that 64-bit and 32-bit systems need different
 317 * padding, PAD_32 and PAD_64 can be set independently, and
 318 * the code below will do the right thing.
319 */
320#define IS_32_BIT ((8 - sizeof(long))/4)
321#define IS_64_BIT (!IS_32_BIT)
322#define PAD_32 (4)
323#define PAD_64 (4)
324#define COMMANDLIST_PAD (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64)
325 u8 pad[COMMANDLIST_PAD];
326};
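/* Editor's note: a hedged sketch of the low-bit encoding described
 * above (the actual submission path lives in hpsa.c and may differ in
 * detail). A performant-mode tag could be composed as:
 *
 *	busaddr = (c->cmdindex << DIRECT_LOOKUP_SHIFT)
 *			| DIRECT_LOOKUP_BIT	// bit 4: index, not address
 *			| (fetch_entry << 1)	// bits 1-3: block fetch entry
 *			| 0x01;			// bit 0: performant mode
 *
 * where fetch_entry would come from h->blockFetchTable[] (see
 * calc_bucket_map() in hpsa.c); cmdindex and fetch_entry here are
 * assumptions for illustration only.
 */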
327
328/* Configuration Table Structure */
329struct HostWrite {
330 u32 TransportRequest;
331 u32 Reserved;
332 u32 CoalIntDelay;
333 u32 CoalIntCount;
334};
335
336#define SIMPLE_MODE 0x02
337#define PERFORMANT_MODE 0x04
338#define MEMQ_MODE 0x08
339
340struct CfgTable {
341 u8 Signature[4];
342 u32 SpecValence;
343 u32 TransportSupport;
344 u32 TransportActive;
345 struct HostWrite HostWrite;
346 u32 CmdsOutMax;
347 u32 BusTypes;
348 u32 TransMethodOffset;
349 u8 ServerName[16];
350 u32 HeartBeat;
351 u32 SCSI_Prefetch;
352 u32 MaxScatterGatherElements;
353 u32 MaxLogicalUnits;
354 u32 MaxPhysicalDevices;
355 u32 MaxPhysicalDrivesPerLogicalUnit;
356 u32 MaxPerformantModeCommands;
357};
358
359#define NUM_BLOCKFETCH_ENTRIES 8
360struct TransTable_struct {
361 u32 BlockFetch[NUM_BLOCKFETCH_ENTRIES];
362 u32 RepQSize;
363 u32 RepQCount;
364 u32 RepQCtrAddrLow32;
365 u32 RepQCtrAddrHigh32;
366 u32 RepQAddr0Low32;
367 u32 RepQAddr0High32;
368};
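/* Editor's note: hpsa_put_ctlr_into_performant_mode() in hpsa.c
 * programs this table roughly as follows (see that function for the
 * authoritative sequence):
 *
 *	for (i = 0; i < NUM_BLOCKFETCH_ENTRIES; i++)
 *		writel(bft[i], &h->transtable->BlockFetch[i]);
 *	writel(h->max_commands, &h->transtable->RepQSize);
 *	writel(1, &h->transtable->RepQCount);
 *	writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
 *
 * followed by a CFGTBL_ChangeReq write to the doorbell to make the
 * controller adopt the new transport method.
 */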
369
370struct hpsa_pci_info {
371 unsigned char bus;
372 unsigned char dev_fn;
373 unsigned short domain;
374 u32 board_id;
375};
376
377#pragma pack()
378#endif /* HPSA_CMD_H */
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index a0e7e711ff9d..645f7cdf21ab 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -25,6 +25,7 @@
25#include <linux/delay.h> 25#include <linux/delay.h>
26#include <linux/timer.h> 26#include <linux/timer.h>
27#include <linux/spinlock.h> 27#include <linux/spinlock.h>
28#include <linux/gfp.h>
28#include <asm/uaccess.h> 29#include <asm/uaccess.h>
29#include <asm/io.h> 30#include <asm/io.h>
30#include <asm/div64.h> 31#include <asm/div64.h>
@@ -834,7 +835,7 @@ static int hptiop_reset_hba(struct hptiop_hba *hba)
834 atomic_read(&hba->resetting) == 0, 60 * HZ); 835 atomic_read(&hba->resetting) == 0, 60 * HZ);
835 836
836 if (atomic_read(&hba->resetting)) { 837 if (atomic_read(&hba->resetting)) {
837 /* IOP is in unkown state, abort reset */ 838 /* IOP is in unknown state, abort reset */
838 printk(KERN_ERR "scsi%d: reset failed\n", hba->host->host_no); 839 printk(KERN_ERR "scsi%d: reset failed\n", hba->host->host_no);
839 return -1; 840 return -1;
840 } 841 }
@@ -861,10 +862,13 @@ static int hptiop_reset(struct scsi_cmnd *scp)
861} 862}
862 863
863static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev, 864static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev,
864 int queue_depth) 865 int queue_depth, int reason)
865{ 866{
866 struct hptiop_hba *hba = (struct hptiop_hba *)sdev->host->hostdata; 867 struct hptiop_hba *hba = (struct hptiop_hba *)sdev->host->hostdata;
867 868
869 if (reason != SCSI_QDEPTH_DEFAULT)
870 return -EOPNOTSUPP;
871
868 if (queue_depth > hba->max_requests) 872 if (queue_depth > hba->max_requests)
869 queue_depth = hba->max_requests; 873 queue_depth = hba->max_requests;
870 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth); 874 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
diff --git a/drivers/scsi/ibmmca.c b/drivers/scsi/ibmmca.c
index 9c1e6a5b5af0..9a4b69d4f4eb 100644
--- a/drivers/scsi/ibmmca.c
+++ b/drivers/scsi/ibmmca.c
@@ -2336,7 +2336,7 @@ static int option_setup(char *str)
2336 char *cur = str; 2336 char *cur = str;
2337 int i = 1; 2337 int i = 1;
2338 2338
2339 while (cur && isdigit(*cur) && i <= IM_MAX_HOSTS) { 2339 while (cur && isdigit(*cur) && i < IM_MAX_HOSTS) {
2340 ints[i++] = simple_strtoul(cur, NULL, 0); 2340 ints[i++] = simple_strtoul(cur, NULL, 0);
2341 if ((cur = strchr(cur, ',')) != NULL) 2341 if ((cur = strchr(cur, ',')) != NULL)
2342 cur++; 2342 cur++;
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index bb2c696c006a..c2eea711a5ce 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -28,7 +28,9 @@
28#include <linux/delay.h> 28#include <linux/delay.h>
29#include <linux/interrupt.h> 29#include <linux/interrupt.h>
30#include <linux/kthread.h> 30#include <linux/kthread.h>
31#include <linux/slab.h>
31#include <linux/of.h> 32#include <linux/of.h>
33#include <linux/pm.h>
32#include <linux/stringify.h> 34#include <linux/stringify.h>
33#include <asm/firmware.h> 35#include <asm/firmware.h>
34#include <asm/irq.h> 36#include <asm/irq.h>
@@ -39,6 +41,7 @@
39#include <scsi/scsi_device.h> 41#include <scsi/scsi_device.h>
40#include <scsi/scsi_tcq.h> 42#include <scsi/scsi_tcq.h>
41#include <scsi/scsi_transport_fc.h> 43#include <scsi/scsi_transport_fc.h>
44#include <scsi/scsi_bsg_fc.h>
42#include "ibmvfc.h" 45#include "ibmvfc.h"
43 46
44static unsigned int init_timeout = IBMVFC_INIT_TIMEOUT; 47static unsigned int init_timeout = IBMVFC_INIT_TIMEOUT;
@@ -558,12 +561,11 @@ static void ibmvfc_link_down(struct ibmvfc_host *vhost,
558/** 561/**
559 * ibmvfc_init_host - Start host initialization 562 * ibmvfc_init_host - Start host initialization
560 * @vhost: ibmvfc host struct 563 * @vhost: ibmvfc host struct
561 * @relogin: is this a re-login?
562 * 564 *
563 * Return value: 565 * Return value:
564 * nothing 566 * nothing
565 **/ 567 **/
566static void ibmvfc_init_host(struct ibmvfc_host *vhost, int relogin) 568static void ibmvfc_init_host(struct ibmvfc_host *vhost)
567{ 569{
568 struct ibmvfc_target *tgt; 570 struct ibmvfc_target *tgt;
569 571
@@ -577,10 +579,8 @@ static void ibmvfc_init_host(struct ibmvfc_host *vhost, int relogin)
577 } 579 }
578 580
579 if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) { 581 if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
580 if (!relogin) { 582 memset(vhost->async_crq.msgs, 0, PAGE_SIZE);
581 memset(vhost->async_crq.msgs, 0, PAGE_SIZE); 583 vhost->async_crq.cur = 0;
582 vhost->async_crq.cur = 0;
583 }
584 584
585 list_for_each_entry(tgt, &vhost->targets, queue) 585 list_for_each_entry(tgt, &vhost->targets, queue)
586 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); 586 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
@@ -1678,6 +1678,276 @@ static void ibmvfc_sync_completion(struct ibmvfc_event *evt)
1678} 1678}
1679 1679
1680/** 1680/**
1681 * ibmvfc_bsg_timeout_done - Completion handler for cancelling BSG commands
1682 * @evt: struct ibmvfc_event
1683 *
1684 **/
1685static void ibmvfc_bsg_timeout_done(struct ibmvfc_event *evt)
1686{
1687 struct ibmvfc_host *vhost = evt->vhost;
1688
1689 ibmvfc_free_event(evt);
1690 vhost->aborting_passthru = 0;
1691 dev_info(vhost->dev, "Passthru command cancelled\n");
1692}
1693
1694/**
1695 * ibmvfc_bsg_timeout - Handle a BSG timeout
1696 * @job: struct fc_bsg_job that timed out
1697 *
1698 * Returns:
1699 * 0 on success / other on failure
1700 **/
1701static int ibmvfc_bsg_timeout(struct fc_bsg_job *job)
1702{
1703 struct ibmvfc_host *vhost = shost_priv(job->shost);
1704 unsigned long port_id = (unsigned long)job->dd_data;
1705 struct ibmvfc_event *evt;
1706 struct ibmvfc_tmf *tmf;
1707 unsigned long flags;
1708 int rc;
1709
1710 ENTER;
1711 spin_lock_irqsave(vhost->host->host_lock, flags);
1712 if (vhost->aborting_passthru || vhost->state != IBMVFC_ACTIVE) {
1713 __ibmvfc_reset_host(vhost);
1714 spin_unlock_irqrestore(vhost->host->host_lock, flags);
1715 return 0;
1716 }
1717
1718 vhost->aborting_passthru = 1;
1719 evt = ibmvfc_get_event(vhost);
1720 ibmvfc_init_event(evt, ibmvfc_bsg_timeout_done, IBMVFC_MAD_FORMAT);
1721
1722 tmf = &evt->iu.tmf;
1723 memset(tmf, 0, sizeof(*tmf));
1724 tmf->common.version = 1;
1725 tmf->common.opcode = IBMVFC_TMF_MAD;
1726 tmf->common.length = sizeof(*tmf);
1727 tmf->scsi_id = port_id;
1728 tmf->cancel_key = IBMVFC_PASSTHRU_CANCEL_KEY;
1729 tmf->my_cancel_key = IBMVFC_INTERNAL_CANCEL_KEY;
1730 rc = ibmvfc_send_event(evt, vhost, default_timeout);
1731
1732 if (rc != 0) {
1733 vhost->aborting_passthru = 0;
1734 dev_err(vhost->dev, "Failed to send cancel event. rc=%d\n", rc);
1735 rc = -EIO;
1736 } else
1737 dev_info(vhost->dev, "Cancelling passthru command to port id 0x%lx\n",
1738 port_id);
1739
1740 spin_unlock_irqrestore(vhost->host->host_lock, flags);
1741
1742 LEAVE;
1743 return rc;
1744}
1745
1746/**
1747 * ibmvfc_bsg_plogi - PLOGI into a target to handle a BSG command
1748 * @vhost: struct ibmvfc_host to send command
1749 * @port_id: port ID to send command
1750 *
1751 * Returns:
1752 * 0 on success / other on failure
1753 **/
1754static int ibmvfc_bsg_plogi(struct ibmvfc_host *vhost, unsigned int port_id)
1755{
1756 struct ibmvfc_port_login *plogi;
1757 struct ibmvfc_target *tgt;
1758 struct ibmvfc_event *evt;
1759 union ibmvfc_iu rsp_iu;
1760 unsigned long flags;
1761 int rc = 0, issue_login = 1;
1762
1763 ENTER;
1764 spin_lock_irqsave(vhost->host->host_lock, flags);
1765 list_for_each_entry(tgt, &vhost->targets, queue) {
1766 if (tgt->scsi_id == port_id) {
1767 issue_login = 0;
1768 break;
1769 }
1770 }
1771
1772 if (!issue_login)
1773 goto unlock_out;
1774 if (unlikely((rc = ibmvfc_host_chkready(vhost))))
1775 goto unlock_out;
1776
1777 evt = ibmvfc_get_event(vhost);
1778 ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
1779 plogi = &evt->iu.plogi;
1780 memset(plogi, 0, sizeof(*plogi));
1781 plogi->common.version = 1;
1782 plogi->common.opcode = IBMVFC_PORT_LOGIN;
1783 plogi->common.length = sizeof(*plogi);
1784 plogi->scsi_id = port_id;
1785 evt->sync_iu = &rsp_iu;
1786 init_completion(&evt->comp);
1787
1788 rc = ibmvfc_send_event(evt, vhost, default_timeout);
1789 spin_unlock_irqrestore(vhost->host->host_lock, flags);
1790
1791 if (rc)
1792 return -EIO;
1793
1794 wait_for_completion(&evt->comp);
1795
1796 if (rsp_iu.plogi.common.status)
1797 rc = -EIO;
1798
1799 spin_lock_irqsave(vhost->host->host_lock, flags);
1800 ibmvfc_free_event(evt);
1801unlock_out:
1802 spin_unlock_irqrestore(vhost->host->host_lock, flags);
1803 LEAVE;
1804 return rc;
1805}
1806
1807/**
1808 * ibmvfc_bsg_request - Handle a BSG request
1809 * @job: struct fc_bsg_job to be executed
1810 *
1811 * Returns:
1812 * 0 on success / other on failure
1813 **/
1814static int ibmvfc_bsg_request(struct fc_bsg_job *job)
1815{
1816 struct ibmvfc_host *vhost = shost_priv(job->shost);
1817 struct fc_rport *rport = job->rport;
1818 struct ibmvfc_passthru_mad *mad;
1819 struct ibmvfc_event *evt;
1820 union ibmvfc_iu rsp_iu;
1821 unsigned long flags, port_id = -1;
1822 unsigned int code = job->request->msgcode;
1823 int rc = 0, req_seg, rsp_seg, issue_login = 0;
1824 u32 fc_flags, rsp_len;
1825
1826 ENTER;
1827 job->reply->reply_payload_rcv_len = 0;
1828 if (rport)
1829 port_id = rport->port_id;
1830
1831 switch (code) {
1832 case FC_BSG_HST_ELS_NOLOGIN:
1833 port_id = (job->request->rqst_data.h_els.port_id[0] << 16) |
1834 (job->request->rqst_data.h_els.port_id[1] << 8) |
1835 job->request->rqst_data.h_els.port_id[2];
1836 case FC_BSG_RPT_ELS:
1837 fc_flags = IBMVFC_FC_ELS;
1838 break;
1839 case FC_BSG_HST_CT:
1840 issue_login = 1;
1841 port_id = (job->request->rqst_data.h_ct.port_id[0] << 16) |
1842 (job->request->rqst_data.h_ct.port_id[1] << 8) |
1843 job->request->rqst_data.h_ct.port_id[2];
1844 case FC_BSG_RPT_CT:
1845 fc_flags = IBMVFC_FC_CT_IU;
1846 break;
1847 default:
1848 return -ENOTSUPP;
1849 };
1850
1851 if (port_id == -1)
1852 return -EINVAL;
1853 if (!mutex_trylock(&vhost->passthru_mutex))
1854 return -EBUSY;
1855
1856 job->dd_data = (void *)port_id;
1857 req_seg = dma_map_sg(vhost->dev, job->request_payload.sg_list,
1858 job->request_payload.sg_cnt, DMA_TO_DEVICE);
1859
1860 if (!req_seg) {
1861 mutex_unlock(&vhost->passthru_mutex);
1862 return -ENOMEM;
1863 }
1864
1865 rsp_seg = dma_map_sg(vhost->dev, job->reply_payload.sg_list,
1866 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1867
1868 if (!rsp_seg) {
1869 dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
1870 job->request_payload.sg_cnt, DMA_TO_DEVICE);
1871 mutex_unlock(&vhost->passthru_mutex);
1872 return -ENOMEM;
1873 }
1874
1875 if (req_seg > 1 || rsp_seg > 1) {
1876 rc = -EINVAL;
1877 goto out;
1878 }
1879
1880 if (issue_login)
1881 rc = ibmvfc_bsg_plogi(vhost, port_id);
1882
1883 spin_lock_irqsave(vhost->host->host_lock, flags);
1884
1885 if (unlikely(rc || (rport && (rc = fc_remote_port_chkready(rport)))) ||
1886 unlikely((rc = ibmvfc_host_chkready(vhost)))) {
1887 spin_unlock_irqrestore(vhost->host->host_lock, flags);
1888 goto out;
1889 }
1890
1891 evt = ibmvfc_get_event(vhost);
1892 ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
1893 mad = &evt->iu.passthru;
1894
1895 memset(mad, 0, sizeof(*mad));
1896 mad->common.version = 1;
1897 mad->common.opcode = IBMVFC_PASSTHRU;
1898 mad->common.length = sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu);
1899
1900 mad->cmd_ioba.va = (u64)evt->crq.ioba +
1901 offsetof(struct ibmvfc_passthru_mad, iu);
1902 mad->cmd_ioba.len = sizeof(mad->iu);
1903
1904 mad->iu.cmd_len = job->request_payload.payload_len;
1905 mad->iu.rsp_len = job->reply_payload.payload_len;
1906 mad->iu.flags = fc_flags;
1907 mad->iu.cancel_key = IBMVFC_PASSTHRU_CANCEL_KEY;
1908
1909 mad->iu.cmd.va = sg_dma_address(job->request_payload.sg_list);
1910 mad->iu.cmd.len = sg_dma_len(job->request_payload.sg_list);
1911 mad->iu.rsp.va = sg_dma_address(job->reply_payload.sg_list);
1912 mad->iu.rsp.len = sg_dma_len(job->reply_payload.sg_list);
1913 mad->iu.scsi_id = port_id;
1914 mad->iu.tag = (u64)evt;
1915 rsp_len = mad->iu.rsp.len;
1916
1917 evt->sync_iu = &rsp_iu;
1918 init_completion(&evt->comp);
1919 rc = ibmvfc_send_event(evt, vhost, 0);
1920 spin_unlock_irqrestore(vhost->host->host_lock, flags);
1921
1922 if (rc) {
1923 rc = -EIO;
1924 goto out;
1925 }
1926
1927 wait_for_completion(&evt->comp);
1928
1929 if (rsp_iu.passthru.common.status)
1930 rc = -EIO;
1931 else
1932 job->reply->reply_payload_rcv_len = rsp_len;
1933
1934 spin_lock_irqsave(vhost->host->host_lock, flags);
1935 ibmvfc_free_event(evt);
1936 spin_unlock_irqrestore(vhost->host->host_lock, flags);
1937 job->reply->result = rc;
1938 job->job_done(job);
1939 rc = 0;
1940out:
1941 dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
1942 job->request_payload.sg_cnt, DMA_TO_DEVICE);
1943 dma_unmap_sg(vhost->dev, job->reply_payload.sg_list,
1944 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1945 mutex_unlock(&vhost->passthru_mutex);
1946 LEAVE;
1947 return rc;
1948}
1949
1950/**
1681 * ibmvfc_reset_device - Reset the device with the specified reset type 1951 * ibmvfc_reset_device - Reset the device with the specified reset type
1682 * @sdev: scsi device to reset 1952 * @sdev: scsi device to reset
1683 * @type: reset type 1953 * @type: reset type
@@ -1731,7 +2001,10 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
1731 2001 sdev_printk(KERN_INFO, sdev, "Resetting %s\n", desc);
1732 2002 wait_for_completion(&evt->comp);
1733 2003
1734 if (rsp_iu.cmd.status) {
2004 if (rsp_iu.cmd.status)
2005 rsp_code = ibmvfc_get_err_result(&rsp_iu.cmd);
2006
2007 if (rsp_code) {
1735 2008 if (fc_rsp->flags & FCP_RSP_LEN_VALID)
1736 2009 rsp_code = fc_rsp->data.info.rsp_code;
1737 2010
@@ -1820,7 +2093,10 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
1820 2093 sdev_printk(KERN_INFO, sdev, "Aborting outstanding commands\n");
1821 2094 wait_for_completion(&evt->comp);
1822 2095
1823 if (rsp_iu.cmd.status) {
2096 if (rsp_iu.cmd.status)
2097 rsp_code = ibmvfc_get_err_result(&rsp_iu.cmd);
2098
2099 if (rsp_code) {
1824 2100 if (fc_rsp->flags & FCP_RSP_LEN_VALID)
1825 2101 rsp_code = fc_rsp->data.info.rsp_code;
1826 2102
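Both hunks make the same two-step change: the raw IU status is first folded through ibmvfc_get_err_result(), and only a non-zero result is treated as a failure, instead of branching on any non-zero status word. In outline (names as used in this driver; rsp_code starts at 0 in these functions):

    if (rsp_iu.cmd.status)                  /* something was reported... */
            rsp_code = ibmvfc_get_err_result(&rsp_iu.cmd);

    if (rsp_code) {                         /* ...and it is a real error */
            if (fc_rsp->flags & FCP_RSP_LEN_VALID)
                    rsp_code = fc_rsp->data.info.rsp_code;
    }

Statuses that ibmvfc_get_err_result() maps to success now fall through without being logged as reset or abort failures.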
@@ -2061,12 +2337,24 @@ static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
2061 2337 }
2062 2338
2063 2339 /**
2064 * ibmvfc_dev_cancel_all - Device iterated cancel all function
2340 * ibmvfc_dev_cancel_all_abts - Device iterated cancel all function
2341 * @sdev: scsi device struct
2342 * @data: return code
2343 *
2344 **/
2345static void ibmvfc_dev_cancel_all_abts(struct scsi_device *sdev, void *data)
2346{
2347 unsigned long *rc = data;
2348 *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
2349}
2350
2351/**
2352 * ibmvfc_dev_cancel_all_reset - Device iterated cancel all function
2065 2353 * @sdev: scsi device struct
2066 2354 * @data: return code
2067 2355 *
2068 2356 **/
2069 static void ibmvfc_dev_cancel_all(struct scsi_device *sdev, void *data)
2357 static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data)
2070 2358 {
2071 2359 unsigned long *rc = data;
2072 2360 *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_TGT_RESET);
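Splitting the old ibmvfc_dev_cancel_all() into _abts and _reset variants lets each error-handling path pick its task-management function while reusing the midlayer iterator. starget_for_each_device() only hands an opaque cookie to the callback, which is why the accumulated return code travels through *data; the idiom in miniature, with a hypothetical callback:

    static void example_count(struct scsi_device *sdev, void *data)
    {
            unsigned long *count = data;    /* caller-owned accumulator */

            (*count)++;
    }

    /* caller */
    unsigned long count = 0;
    starget_for_each_device(starget, &count, example_count);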
@@ -2102,7 +2390,7 @@ static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
2102 2390
2103 2391 ENTER;
2104 2392 ibmvfc_wait_while_resetting(vhost);
2105 starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all);
2393 starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset);
2106 2394 reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target");
2107 2395
2108 2396 if (!cancel_rc && !reset_rc)
@@ -2144,7 +2432,7 @@ static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
2144 2432 int rc = FAILED;
2145 2433
2146 2434 ENTER;
2147 starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all);
2435 starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_abts);
2148 2436 starget_for_each_device(starget, &abort_rc, ibmvfc_dev_abort_all);
2149 2437
2150 2438 if (!cancel_rc && !abort_rc)
@@ -2297,13 +2585,13 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
2297 2585 /* Send back a response */
2298 2586 rc = ibmvfc_send_crq_init_complete(vhost);
2299 2587 if (rc == 0)
2300 ibmvfc_init_host(vhost, 0);
2588 ibmvfc_init_host(vhost);
2301 2589 else
2302 2590 dev_err(vhost->dev, "Unable to send init rsp. rc=%ld\n", rc);
2303 2591 break;
2304 2592 case IBMVFC_CRQ_INIT_COMPLETE:
2305 2593 dev_info(vhost->dev, "Partner initialization complete\n");
2306 ibmvfc_init_host(vhost, 0);
2594 ibmvfc_init_host(vhost);
2307 2595 break;
2308 2596 default:
2309 2597 dev_err(vhost->dev, "Unknown crq message type: %d\n", crq->format);
@@ -2478,12 +2766,17 @@ static int ibmvfc_slave_configure(struct scsi_device *sdev)
2478 2766 * ibmvfc_change_queue_depth - Change the device's queue depth
2479 2767 * @sdev: scsi device struct
2480 2768 * @qdepth: depth to set
2769 * @reason: calling context
2481 2770 *
2482 2771 * Return value:
2483 2772 * actual depth set
2484 2773 **/
2485 static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth)
2774 static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth,
2775 int reason)
2486 2776 {
2777 if (reason != SCSI_QDEPTH_DEFAULT)
2778 return -EOPNOTSUPP;
2779
2487 2780 if (qdepth > IBMVFC_MAX_CMDS_PER_LUN)
2488 2781 qdepth = IBMVFC_MAX_CMDS_PER_LUN;
2489 2782
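The extra reason argument follows the midlayer's queue-depth rework of this kernel generation: SCSI_QDEPTH_DEFAULT marks an ordinary (e.g. sysfs-initiated) change, and a driver with no QFULL ramp-up/ramp-down tracking refuses anything else. A sketch of the common shape for such a driver (EXAMPLE_MAX is a hypothetical limit; scsi_adjust_queue_depth() was the era's API for applying the new depth):

    static int example_change_queue_depth(struct scsi_device *sdev,
                                          int qdepth, int reason)
    {
            if (reason != SCSI_QDEPTH_DEFAULT)      /* no ramp handling here */
                    return -EOPNOTSUPP;

            if (qdepth > EXAMPLE_MAX)
                    qdepth = EXAMPLE_MAX;

            scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
            return sdev->queue_depth;
    }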
@@ -3725,7 +4018,7 @@ static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt)
3725 4018 case IBMVFC_MAD_SUCCESS:
3726 4019 if (list_empty(&vhost->sent) &&
3727 4020 vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT) {
3728 ibmvfc_init_host(vhost, 0);
4021 ibmvfc_init_host(vhost);
3729 4022 return;
3730 4023 }
3731 4024 break;
@@ -3903,6 +4196,8 @@ static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
3903 4196 rport->supported_classes |= FC_COS_CLASS2;
3904 4197 if (tgt->service_parms.class3_parms[0] & 0x80000000)
3905 4198 rport->supported_classes |= FC_COS_CLASS3;
4199 if (rport->rqst_q)
4200 blk_queue_max_segments(rport->rqst_q, 1);
3906 4201 } else
3907 4202 tgt_dbg(tgt, "rport add failed\n");
3908 4203 spin_unlock_irqrestore(vhost->host->host_lock, flags);
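The single-segment clamp matches the passthru layout earlier in this patch: the MAD carries exactly one direct buffer per direction (mad->iu.cmd/rsp take sg_dma_address()/sg_dma_len() of the first sg entry only), and the handler rejects req_seg > 1 || rsp_seg > 1 with -EINVAL. Capping the rport BSG queue makes the block layer coalesce or bounce payloads so well-formed jobs never hit that rejection:

    /* Queue side: never hand the LLD more than one mapped segment. */
    if (rport->rqst_q)
            blk_queue_max_segments(rport->rqst_q, 1);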
@@ -4342,6 +4637,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
4342 4637 init_waitqueue_head(&vhost->work_wait_q);
4343 4638 init_waitqueue_head(&vhost->init_wait_q);
4344 4639 INIT_WORK(&vhost->rport_add_work_q, ibmvfc_rport_add_thread);
4640 mutex_init(&vhost->passthru_mutex);
4345 4641
4346 4642 if ((rc = ibmvfc_alloc_mem(vhost)))
4347 4643 goto free_scsi_host;
@@ -4374,6 +4670,8 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
4374 4670 goto remove_shost;
4375 4671 }
4376 4672
4673 if (shost_to_fc_host(shost)->rqst_q)
4674 blk_queue_max_segments(shost_to_fc_host(shost)->rqst_q, 1);
4377 4675 dev_set_drvdata(dev, vhost);
4378 4676 spin_lock(&ibmvfc_driver_lock);
4379 4677 list_add_tail(&vhost->queue, &ibmvfc_head);
@@ -4414,7 +4712,11 @@ static int ibmvfc_remove(struct vio_dev *vdev)
4414 4712
4415 4713 ENTER;
4416 4714 ibmvfc_remove_trace_file(&vhost->host->shost_dev.kobj, &ibmvfc_trace_attr);
4715
4716 spin_lock_irqsave(vhost->host->host_lock, flags);
4417 4717 ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
4718 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4719
4418 4720 ibmvfc_wait_while_resetting(vhost);
4419 4721 ibmvfc_release_crq_queue(vhost);
4420 4722 kthread_stop(vhost->work_thread);
@@ -4436,6 +4738,27 @@ static int ibmvfc_remove(struct vio_dev *vdev)
4436 4738 }
4437 4739
4438 4740 /**
4741 * ibmvfc_resume - Resume from suspend
4742 * @dev: device struct
4743 *
4744 * We may have lost an interrupt across suspend/resume, so kick the
4745 * interrupt handler
4746 *
4747 */
4748static int ibmvfc_resume(struct device *dev)
4749{
4750 unsigned long flags;
4751 struct ibmvfc_host *vhost = dev_get_drvdata(dev);
4752 struct vio_dev *vdev = to_vio_dev(dev);
4753
4754 spin_lock_irqsave(vhost->host->host_lock, flags);
4755 vio_disable_interrupts(vdev);
4756 tasklet_schedule(&vhost->tasklet);
4757 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4758 return 0;
4759}
4760
4761/**
4439 4762 * ibmvfc_get_desired_dma - Calculate DMA resources needed by the driver
4440 4763 * @vdev: vio device struct
4441 4764 *
@@ -4455,6 +4778,10 @@ static struct vio_device_id ibmvfc_device_table[] __devinitdata = {
4455 4778 };
4456 4779 MODULE_DEVICE_TABLE(vio, ibmvfc_device_table);
4457 4780
4781static struct dev_pm_ops ibmvfc_pm_ops = {
4782 .resume = ibmvfc_resume
4783};
4784
4458 4785 static struct vio_driver ibmvfc_driver = {
4459 4786 .id_table = ibmvfc_device_table,
4460 4787 .probe = ibmvfc_probe,
@@ -4463,6 +4790,7 @@ static struct vio_driver ibmvfc_driver = {
4463 4790 .driver = {
4464 4791 .name = IBMVFC_NAME,
4465 4792 .owner = THIS_MODULE,
4793 .pm = &ibmvfc_pm_ops,
4466 4794 }
4467 4795 };
4468 4796
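vio devices dispatch power management through the generic dev_pm_ops table hung off driver.pm, so a single .resume callback suffices here; suspend needs nothing because the CRQ survives and only a wakeup interrupt can be lost. The minimal shape, as a sketch for any bus using dev_pm_ops (kernel convention would also allow the table to be const):

    #include <linux/pm.h>

    static int example_resume(struct device *dev)
    {
            /* re-kick anything that may have been lost across suspend */
            return 0;
    }

    static struct dev_pm_ops example_pm_ops = {
            .resume = example_resume,
    };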
@@ -4498,6 +4826,9 @@ static struct fc_function_template ibmvfc_transport_functions = {
4498 4826
4499 4827 .get_starget_port_id = ibmvfc_get_starget_port_id,
4500 4828 .show_starget_port_id = 1,
4829
4830 .bsg_request = ibmvfc_bsg_request,
4831 .bsg_timeout = ibmvfc_bsg_timeout,
4501 4832 };
4502 4833
4503 4834 /**
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 007fa1c9ef14..d25106a958d7 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -29,8 +29,8 @@
29 29 #include "viosrp.h"
30 30
31 31 #define IBMVFC_NAME "ibmvfc"
32 #define IBMVFC_DRIVER_VERSION "1.0.6"
32 #define IBMVFC_DRIVER_VERSION "1.0.7"
33 #define IBMVFC_DRIVER_DATE "(May 28, 2009)"
33 #define IBMVFC_DRIVER_DATE "(October 16, 2009)"
34 34
35 35 #define IBMVFC_DEFAULT_TIMEOUT 60
36 36 #define IBMVFC_ADISC_CANCEL_TIMEOUT 45
@@ -58,9 +58,10 @@
58 58 * 1 for ERP
59 59 * 1 for initialization
60 60 * 1 for NPIV Logout
61 * 2 for BSG passthru
61 62 * 2 for each discovery thread
62 63 */
63 #define IBMVFC_NUM_INTERNAL_REQ (1 + 1 + 1 + (disc_threads * 2))
64 #define IBMVFC_NUM_INTERNAL_REQ (1 + 1 + 1 + 2 + (disc_threads * 2))
64 65
65 66 #define IBMVFC_MAD_SUCCESS 0x00
66 67 #define IBMVFC_MAD_NOT_SUPPORTED 0xF1
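Worked out with the driver's default of four discovery threads, the reservation is now 1 + 1 + 1 + 2 + (4 * 2) = 13 internal event slots, two more than before; the new "+ 2" is the BSG passthru pair named in the comment above (the passthru command itself plus the cancel/abort MAD that its timeout handler may issue).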
@@ -466,7 +467,10 @@ struct ibmvfc_passthru_iu {
466 467 u16 error;
467 468 u32 flags;
468 469 #define IBMVFC_FC_ELS 0x01
470#define IBMVFC_FC_CT_IU 0x02
469 471 u32 cancel_key;
472#define IBMVFC_PASSTHRU_CANCEL_KEY 0x80000000
473#define IBMVFC_INTERNAL_CANCEL_KEY 0x80000001
470 474 u32 reserved;
471 475 struct srp_direct_buf cmd;
472 476 struct srp_direct_buf rsp;
@@ -693,6 +697,7 @@ struct ibmvfc_host {
693 697 int disc_buf_sz;
694 698 int log_level;
695 699 struct ibmvfc_discover_targets_buf *disc_buf;
700 struct mutex passthru_mutex;
696 701 int task_set;
697 702 int init_retries;
698 703 int discovery_threads;
@@ -702,6 +707,7 @@ struct ibmvfc_host {
702 707 int delay_init;
703 708 int scan_complete;
704 709 int logged_in;
710 int aborting_passthru;
705 711 int events_to_log;
706 712 #define IBMVFC_AE_LINKUP 0x0001
707 713 #define IBMVFC_AE_LINKDOWN 0x0002
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index d9b0e9d31983..88bad0e81bdd 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -40,7 +40,7 @@
40 40 * (CRQ), which is just a buffer of 16 byte entries in the receiver's
41 41 * Senders cannot access the buffer directly, but send messages by
42 42 * making a hypervisor call and passing in the 16 bytes. The hypervisor
43 * puts the message in the next 16 byte space in round-robbin fashion,
43 * puts the message in the next 16 byte space in round-robin fashion,
44 44 * turns on the high order bit of the message (the valid bit), and
45 45 * generates an interrupt to the receiver (if interrupts are turned on.)
46 46 * The receiver just turns off the valid bit when they have copied out
@@ -70,7 +70,9 @@
70 70 #include <linux/moduleparam.h>
71 71 #include <linux/dma-mapping.h>
72 72 #include <linux/delay.h>
73#include <linux/slab.h>
73 74 #include <linux/of.h>
75#include <linux/pm.h>
74 76 #include <asm/firmware.h>
75 77 #include <asm/vio.h>
76 78 #include <scsi/scsi.h>
@@ -321,16 +323,6 @@ static void set_srp_direction(struct scsi_cmnd *cmd,
321 323 srp_cmd->buf_fmt = fmt;
322 324 }
323 325
324static void unmap_sg_list(int num_entries,
325 struct device *dev,
326 struct srp_direct_buf *md)
327{
328 int i;
329
330 for (i = 0; i < num_entries; ++i)
331 dma_unmap_single(dev, md[i].va, md[i].len, DMA_BIDIRECTIONAL);
332}
333
334 326 /**
335 327 * unmap_cmd_data: - Unmap data pointed in srp_cmd based on the format
336 328 * @cmd: srp_cmd whose additional_data member will be unmapped
@@ -348,24 +340,9 @@ static void unmap_cmd_data(struct srp_cmd *cmd,
348 340
349 341 if (out_fmt == SRP_NO_DATA_DESC && in_fmt == SRP_NO_DATA_DESC)
350 342 return;
351 else if (out_fmt == SRP_DATA_DESC_DIRECT ||
352 in_fmt == SRP_DATA_DESC_DIRECT) {
353 struct srp_direct_buf *data =
354 (struct srp_direct_buf *) cmd->add_data;
355 dma_unmap_single(dev, data->va, data->len, DMA_BIDIRECTIONAL);
356 } else {
357 struct srp_indirect_buf *indirect =
358 (struct srp_indirect_buf *) cmd->add_data;
359 int num_mapped = indirect->table_desc.len /
360 sizeof(struct srp_direct_buf);
361 343
362 if (num_mapped <= MAX_INDIRECT_BUFS) {
363 unmap_sg_list(num_mapped, dev, &indirect->desc_list[0]);
344 if (evt_struct->cmnd)
345 scsi_dma_unmap(evt_struct->cmnd);
364 return;
365 }
366
367 unmap_sg_list(num_mapped, dev, evt_struct->ext_list);
368 }
369 346 }
370 347
371 348 static int map_sg_list(struct scsi_cmnd *cmd, int nseg,
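The removed direct/indirect walk is exactly what the midlayer does for a command it mapped itself, so the driver keeps no private unmap bookkeeping. The pairing, sketched for a generic LLD:

    static int example_queuecommand(struct scsi_cmnd *cmd)
    {
            int nseg = scsi_dma_map(cmd);   /* 0 no data, >0 segments, <0 error */

            if (nseg < 0)
                    return SCSI_MLQUEUE_HOST_BUSY;
            /* build descriptors from scsi_sglist(cmd) over nseg entries */
            return 0;
    }

    /* completion side */
    scsi_dma_unmap(cmd);                    /* always pairs with scsi_dma_map() */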
@@ -1637,12 +1614,17 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev)
1637 1614 * ibmvscsi_change_queue_depth - Change the device's queue depth
1638 1615 * @sdev: scsi device struct
1639 1616 * @qdepth: depth to set
1617 * @reason: calling context
1640 1618 *
1641 1619 * Return value:
1642 1620 * actual depth set
1643 1621 **/
1644 static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
1622 static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth,
1623 int reason)
1645 1624 {
1625 if (reason != SCSI_QDEPTH_DEFAULT)
1626 return -EOPNOTSUPP;
1627
1646 1628 if (qdepth > IBMVSCSI_MAX_CMDS_PER_LUN)
1647 1629 qdepth = IBMVSCSI_MAX_CMDS_PER_LUN;
1648 1630
@@ -1986,6 +1968,19 @@ static int ibmvscsi_remove(struct vio_dev *vdev)
1986 1968 }
1987 1969
1988 1970 /**
1971 * ibmvscsi_resume: Resume from suspend
1972 * @dev: device struct
1973 *
1974 * We may have lost an interrupt across suspend/resume, so kick the
1975 * interrupt handler
1976 */
1977static int ibmvscsi_resume(struct device *dev)
1978{
1979 struct ibmvscsi_host_data *hostdata = dev_get_drvdata(dev);
1980 return ibmvscsi_ops->resume(hostdata);
1981}
1982
1983/**
1989 1984 * ibmvscsi_device_table: Used by vio.c to match devices in the device tree we
1990 1985 * support.
1991 1986 */
@@ -1995,6 +1990,10 @@ static struct vio_device_id ibmvscsi_device_table[] __devinitdata = {
1995 1990 };
1996 1991 MODULE_DEVICE_TABLE(vio, ibmvscsi_device_table);
1997 1992
1993static struct dev_pm_ops ibmvscsi_pm_ops = {
1994 .resume = ibmvscsi_resume
1995};
1996
1998 1997 static struct vio_driver ibmvscsi_driver = {
1999 1998 .id_table = ibmvscsi_device_table,
2000 1999 .probe = ibmvscsi_probe,
@@ -2003,6 +2002,7 @@ static struct vio_driver ibmvscsi_driver = {
2003 2002 .driver = {
2004 2003 .name = "ibmvscsi",
2005 2004 .owner = THIS_MODULE,
2005 .pm = &ibmvscsi_pm_ops,
2006 2006 }
2007 2007 };
2008 2008
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h
index 76425303def0..9cb7c6a773e1 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.h
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.h
@@ -120,6 +120,7 @@ struct ibmvscsi_ops {
120 120 struct ibmvscsi_host_data *hostdata);
121 121 int (*send_crq)(struct ibmvscsi_host_data *hostdata,
122 122 u64 word1, u64 word2);
123 int (*resume) (struct ibmvscsi_host_data *hostdata);
123 124 };
124 125
125 126 extern struct ibmvscsi_ops iseriesvscsi_ops;
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c
index d5eaf9727109..e2056d517e99 100644
--- a/drivers/scsi/ibmvscsi/ibmvstgt.c
+++ b/drivers/scsi/ibmvscsi/ibmvstgt.c
@@ -23,6 +23,7 @@
23 23 */
24 24 #include <linux/interrupt.h>
25 25 #include <linux/module.h>
26#include <linux/slab.h>
26 27 #include <scsi/scsi.h>
27 28 #include <scsi/scsi_host.h>
28 29 #include <scsi/scsi_transport_srp.h>
diff --git a/drivers/scsi/ibmvscsi/iseries_vscsi.c b/drivers/scsi/ibmvscsi/iseries_vscsi.c
index 0775fdee5fa8..f4776451a754 100644
--- a/drivers/scsi/ibmvscsi/iseries_vscsi.c
+++ b/drivers/scsi/ibmvscsi/iseries_vscsi.c
@@ -158,10 +158,16 @@ static int iseriesvscsi_send_crq(struct ibmvscsi_host_data *hostdata,
158 158 0);
159 159 }
160 160
161static int iseriesvscsi_resume(struct ibmvscsi_host_data *hostdata)
162{
163 return 0;
164}
165
161 166 struct ibmvscsi_ops iseriesvscsi_ops = {
162 167 .init_crq_queue = iseriesvscsi_init_crq_queue,
163 168 .release_crq_queue = iseriesvscsi_release_crq_queue,
164 169 .reset_crq_queue = iseriesvscsi_reset_crq_queue,
165 170 .reenable_crq_queue = iseriesvscsi_reenable_crq_queue,
166 171 .send_crq = iseriesvscsi_send_crq,
172 .resume = iseriesvscsi_resume,
167 173 };
diff --git a/drivers/scsi/ibmvscsi/rpa_vscsi.c b/drivers/scsi/ibmvscsi/rpa_vscsi.c
index 462a8574dad9..a864ccc0a342 100644
--- a/drivers/scsi/ibmvscsi/rpa_vscsi.c
+++ b/drivers/scsi/ibmvscsi/rpa_vscsi.c
@@ -32,6 +32,7 @@
32 32 #include <asm/iommu.h>
33 33 #include <asm/hvcall.h>
34 34 #include <linux/dma-mapping.h>
35#include <linux/gfp.h>
35 36 #include <linux/interrupt.h>
36 37 #include "ibmvscsi.h"
37 38
@@ -334,10 +335,23 @@ static int rpavscsi_reenable_crq_queue(struct crq_queue *queue,
334 335 return rc;
335 336 }
336 337
338/**
339 * rpavscsi_resume: - resume after suspend
340 * @hostdata: ibmvscsi_host_data of host
341 *
342 */
343static int rpavscsi_resume(struct ibmvscsi_host_data *hostdata)
344{
345 vio_disable_interrupts(to_vio_dev(hostdata->dev));
346 tasklet_schedule(&hostdata->srp_task);
347 return 0;
348}
349
337 350 struct ibmvscsi_ops rpavscsi_ops = {
338 351 .init_crq_queue = rpavscsi_init_crq_queue,
339 352 .release_crq_queue = rpavscsi_release_crq_queue,
340 353 .reset_crq_queue = rpavscsi_reset_crq_queue,
341 354 .reenable_crq_queue = rpavscsi_reenable_crq_queue,
342 355 .send_crq = rpavscsi_send_crq,
356 .resume = rpavscsi_resume,
343 357 };
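Adding .resume to ibmvscsi_ops keeps the platform split intact: the generic driver calls one hook, iSeries implements it as a no-op, and the RPA backend disables VIO interrupts and schedules its tasklet so an interrupt lost across suspend is replayed from the CRQ. A sketch of the indirection (types are hypothetical stand-ins for this driver's):

    struct backend_ops {
            int (*resume)(struct host_data *h);     /* per-platform hook */
    };

    static int generic_resume(struct device *dev)
    {
            struct host_data *h = dev_get_drvdata(dev);

            return h->ops->resume(h);       /* platform decides the work */
    }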
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index c2a9a13d788f..4734ab0b3ff6 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -15,6 +15,7 @@
15 15 #include <linux/parport.h>
16 16 #include <linux/workqueue.h>
17 17 #include <linux/delay.h>
18#include <linux/slab.h>
18 19 #include <asm/io.h>
19 20
20 21 #include <scsi/scsi.h>
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index 89a59484be02..a7714160fbc3 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -531,7 +531,7 @@ static void initio_read_eeprom(unsigned long base)
531 531 * initio_stop_bm - stop bus master
532 532 * @host: InitIO we are stopping
533 533 *
534 * Stop any pending DMA operation, aborting the DMA if neccessary
534 * Stop any pending DMA operation, aborting the DMA if necessary
535 535 */
536 536
537 537 static void initio_stop_bm(struct initio_host * host)
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 76d294fc7846..520461b9bc09 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -59,6 +59,7 @@
59 59 #include <linux/types.h>
60 60 #include <linux/errno.h>
61 61 #include <linux/kernel.h>
62#include <linux/slab.h>
62 63 #include <linux/ioport.h>
63 64 #include <linux/delay.h>
64 65 #include <linux/pci.h>
@@ -72,6 +73,8 @@
72 73 #include <linux/moduleparam.h>
73 74 #include <linux/libata.h>
74 75 #include <linux/hdreg.h>
76#include <linux/reboot.h>
77#include <linux/stringify.h>
75 78 #include <asm/io.h>
76 79 #include <asm/irq.h>
77 80 #include <asm/processor.h>
@@ -91,8 +94,8 @@ static unsigned int ipr_max_speed = 1;
91 94 static int ipr_testmode = 0;
92 95 static unsigned int ipr_fastfail = 0;
93 96 static unsigned int ipr_transop_timeout = 0;
94static unsigned int ipr_enable_cache = 1;
95 97 static unsigned int ipr_debug = 0;
98static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
96 99 static unsigned int ipr_dual_ioa_raid = 1;
97 100 static DEFINE_SPINLOCK(ipr_driver_lock);
98 101
@@ -104,13 +107,20 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
104 107 {
105 108 .set_interrupt_mask_reg = 0x0022C,
106 109 .clr_interrupt_mask_reg = 0x00230,
110 .clr_interrupt_mask_reg32 = 0x00230,
107 111 .sense_interrupt_mask_reg = 0x0022C,
112 .sense_interrupt_mask_reg32 = 0x0022C,
108 113 .clr_interrupt_reg = 0x00228,
114 .clr_interrupt_reg32 = 0x00228,
109 115 .sense_interrupt_reg = 0x00224,
116 .sense_interrupt_reg32 = 0x00224,
110 117 .ioarrin_reg = 0x00404,
111 118 .sense_uproc_interrupt_reg = 0x00214,
119 .sense_uproc_interrupt_reg32 = 0x00214,
112 120 .set_uproc_interrupt_reg = 0x00214,
113 .clr_uproc_interrupt_reg = 0x00218
121 .set_uproc_interrupt_reg32 = 0x00214,
122 .clr_uproc_interrupt_reg = 0x00218,
123 .clr_uproc_interrupt_reg32 = 0x00218
114 124 }
115 125 },
116 126 { /* Snipe and Scamp */
@@ -119,25 +129,59 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
119 129 {
120 130 .set_interrupt_mask_reg = 0x00288,
121 131 .clr_interrupt_mask_reg = 0x0028C,
132 .clr_interrupt_mask_reg32 = 0x0028C,
122 133 .sense_interrupt_mask_reg = 0x00288,
134 .sense_interrupt_mask_reg32 = 0x00288,
123 135 .clr_interrupt_reg = 0x00284,
136 .clr_interrupt_reg32 = 0x00284,
124 137 .sense_interrupt_reg = 0x00280,
138 .sense_interrupt_reg32 = 0x00280,
125 139 .ioarrin_reg = 0x00504,
126 140 .sense_uproc_interrupt_reg = 0x00290,
141 .sense_uproc_interrupt_reg32 = 0x00290,
127 142 .set_uproc_interrupt_reg = 0x00290,
128 .clr_uproc_interrupt_reg = 0x00294
143 .set_uproc_interrupt_reg32 = 0x00290,
144 .clr_uproc_interrupt_reg = 0x00294,
145 .clr_uproc_interrupt_reg32 = 0x00294
146 }
147 },
148 { /* CRoC */
149 .mailbox = 0x00040,
150 .cache_line_size = 0x20,
151 {
152 .set_interrupt_mask_reg = 0x00010,
153 .clr_interrupt_mask_reg = 0x00018,
154 .clr_interrupt_mask_reg32 = 0x0001C,
155 .sense_interrupt_mask_reg = 0x00010,
156 .sense_interrupt_mask_reg32 = 0x00014,
157 .clr_interrupt_reg = 0x00008,
158 .clr_interrupt_reg32 = 0x0000C,
159 .sense_interrupt_reg = 0x00000,
160 .sense_interrupt_reg32 = 0x00004,
161 .ioarrin_reg = 0x00070,
162 .sense_uproc_interrupt_reg = 0x00020,
163 .sense_uproc_interrupt_reg32 = 0x00024,
164 .set_uproc_interrupt_reg = 0x00020,
165 .set_uproc_interrupt_reg32 = 0x00024,
166 .clr_uproc_interrupt_reg = 0x00028,
167 .clr_uproc_interrupt_reg32 = 0x0002C,
168 .init_feedback_reg = 0x0005C,
169 .dump_addr_reg = 0x00064,
170 .dump_data_reg = 0x00068
129 171 }
130 172 },
131 173 };
132 174
133 175 static const struct ipr_chip_t ipr_chip[] = {
134 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, &ipr_chip_cfg[0] },
135 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, &ipr_chip_cfg[0] },
136 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, &ipr_chip_cfg[0] },
137 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, &ipr_chip_cfg[0] },
138 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, &ipr_chip_cfg[0] },
139 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, &ipr_chip_cfg[1] },
140 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, &ipr_chip_cfg[1] }
176 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
177 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
178 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
179 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
180 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, &ipr_chip_cfg[0] },
181 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] },
182 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] },
183 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, &ipr_chip_cfg[2] },
184 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2, IPR_USE_MSI, IPR_SIS64, &ipr_chip_cfg[2] }
141 185 };
142 186
143 187 static int ipr_max_bus_speeds [] = {
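Each ipr_chip_cfg entry is pure data — a table of register offsets — so supporting the new CRoC generation (and the split 32-bit windows onto its 64-bit interrupt registers) means adding a third table plus an IPR_SIS32/IPR_SIS64 tag in ipr_chip rather than new code paths. A sketch of the usual consumption pattern (names hypothetical):

    /* Resolve offsets against the ioremapped BAR once, at probe time. */
    void __iomem *sense = base + chip->cfg->regs.sense_interrupt_reg;

    u32 pending = readl(sense);             /* later, in the hot path */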
@@ -156,12 +200,13 @@ module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
156 200 MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
157 201 module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
158 202 MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
159module_param_named(enable_cache, ipr_enable_cache, int, 0);
160MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
161 203 module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
162 204 MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
163 205 module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
164 206 MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
207module_param_named(max_devs, ipr_max_devs, int, 0);
208MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
209 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
165 210 MODULE_LICENSE("GPL");
166 211 MODULE_VERSION(IPR_DRIVER_VERSION);
167 212
@@ -180,6 +225,20 @@ struct ipr_error_table_t ipr_error_table[] = {
180 "FFFE: Soft device bus error recovered by the IOA"}, 225 "FFFE: Soft device bus error recovered by the IOA"},
181 {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL, 226 {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
182 "4101: Soft device bus fabric error"}, 227 "4101: Soft device bus fabric error"},
228 {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
229 "FFFC: Logical block guard error recovered by the device"},
230 {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
231 "FFFC: Logical block reference tag error recovered by the device"},
232 {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
233 "4171: Recovered scatter list tag / sequence number error"},
234 {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
235 "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
236 {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
237 "4171: Recovered logical block sequence number error on IOA to Host transfer"},
238 {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
239 "FFFD: Recovered logical block reference tag error detected by the IOA"},
240 {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
241 "FFFD: Logical block guard error recovered by the IOA"},
183 242 {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
184 243 "FFF9: Device sector reassign successful"},
185 244 {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
@@ -236,12 +295,28 @@ struct ipr_error_table_t ipr_error_table[] = {
236 "3120: SCSI bus is not operational"}, 295 "3120: SCSI bus is not operational"},
237 {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL, 296 {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
238 "4100: Hard device bus fabric error"}, 297 "4100: Hard device bus fabric error"},
298 {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
299 "310C: Logical block guard error detected by the device"},
300 {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
301 "310C: Logical block reference tag error detected by the device"},
302 {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
303 "4170: Scatter list tag / sequence number error"},
304 {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
305 "8150: Logical block CRC error on IOA to Host transfer"},
306 {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
307 "4170: Logical block sequence number error on IOA to Host transfer"},
308 {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
309 "310D: Logical block reference tag error detected by the IOA"},
310 {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
311 "310D: Logical block guard error detected by the IOA"},
239 312 {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
240 313 "9000: IOA reserved area data check"},
241 314 {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
242 315 "9001: IOA reserved area invalid data pattern"},
243 316 {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
244 317 "9002: IOA reserved area LRC error"},
318 {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
319 "Hardware Error, IOA metadata access error"},
245 320 {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
246 321 "102E: Out of alternate sectors for disk storage"},
247 322 {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
@@ -306,6 +381,8 @@ struct ipr_error_table_t ipr_error_table[] = {
306 "Illegal request, commands not allowed to this device"}, 381 "Illegal request, commands not allowed to this device"},
307 {0x05258100, 0, 0, 382 {0x05258100, 0, 0,
308 "Illegal request, command not allowed to a secondary adapter"}, 383 "Illegal request, command not allowed to a secondary adapter"},
384 {0x05258200, 0, 0,
385 "Illegal request, command not allowed to a non-optimized resource"},
309 386 {0x05260000, 0, 0,
310 387 "Illegal request, invalid field in parameter list"},
311 388 {0x05260100, 0, 0,
@@ -468,7 +545,10 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
468 545 trace_entry->time = jiffies;
469 546 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
470 547 trace_entry->type = type;
471 trace_entry->ata_op_code = ipr_cmd->ioarcb.add_data.u.regs.command;
548 if (ipr_cmd->ioa_cfg->sis64)
549 trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
550 else
551 trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
472 552 trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
473 553 trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
474 554 trace_entry->u.add_data = add_data;
@@ -488,16 +568,23 @@ static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
488 568 {
489 569 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
490 570 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
491 dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr);
571 dma_addr_t dma_addr = ipr_cmd->dma_addr;
492 572
493 573 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
494 ioarcb->write_data_transfer_length = 0;
574 ioarcb->data_transfer_length = 0;
495 575 ioarcb->read_data_transfer_length = 0;
496 ioarcb->write_ioadl_len = 0;
576 ioarcb->ioadl_len = 0;
497 577 ioarcb->read_ioadl_len = 0;
498 ioarcb->write_ioadl_addr =
499 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
500 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
578
579 if (ipr_cmd->ioa_cfg->sis64)
580 ioarcb->u.sis64_addr_data.data_ioadl_addr =
581 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
582 else {
583 ioarcb->write_ioadl_addr =
584 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
585 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
586 }
587
501 588 ioasa->ioasc = 0;
502 589 ioasa->residual_data_len = 0;
503 590 ioasa->u.gata.status = 0;
@@ -562,10 +649,15 @@ static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
562 649 ioa_cfg->allow_interrupts = 0;
563 650
564 651 /* Set interrupt mask to stop all new interrupts */
565 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
652 if (ioa_cfg->sis64)
653 writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
654 else
655 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
566 656
567 657 /* Clear any pending interrupts */
568 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
658 if (ioa_cfg->sis64)
659 writel(~0, ioa_cfg->regs.clr_interrupt_reg);
660 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
569 661 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
570 662 }
571 663
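On SIS-64 adapters the interrupt mask register is 64 bits wide, hence the writeq(); clearing pending bits then goes through the 32-bit window register (clr_interrupt_reg32) added to the offset tables above, after first clearing the full 64-bit view. The guard in miniature (writeq() is available on 64-bit platforms; 32-bit builds traditionally split it into two writel()s):

    if (ioa_cfg->sis64)
            writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);   /* one 64-bit store */
    else
            writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);   /* legacy 32-bit */

The trailing readl() of sense_interrupt_reg then flushes the posted MMIO writes before the caller proceeds.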
@@ -693,6 +785,35 @@ static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
693 785 }
694 786
695 787 /**
788 * ipr_send_command - Send driver initiated requests.
789 * @ipr_cmd: ipr command struct
790 *
791 * This function sends a command to the adapter using the correct write call.
792 * In the case of sis64, calculate the ioarcb size required. Then or in the
793 * appropriate bits.
794 *
795 * Return value:
796 * none
797 **/
798static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
799{
800 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
801 dma_addr_t send_dma_addr = ipr_cmd->dma_addr;
802
803 if (ioa_cfg->sis64) {
804 /* The default size is 256 bytes */
805 send_dma_addr |= 0x1;
806
807 /* If the number of ioadls * size of ioadl > 128 bytes,
808 then use a 512 byte ioarcb */
809 if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 )
810 send_dma_addr |= 0x4;
811 writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
812 } else
813 writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
814}
815
816/**
696 817 * ipr_do_req - Send driver initiated requests.
697 818 * @ipr_cmd: ipr command struct
698 819 * @done: done function
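A worked example of the SIS-64 IOARCB size encoding in ipr_send_command() above: each struct ipr_ioadl64_desc is 16 bytes in this layout (two __be32 plus a __be64), so eight descriptors give 8 * 16 = 128 bytes, the "> 128" test fails, and only bit 0 is set — the adapter fetches the default 256-byte IOARCB. Nine descriptors give 144 bytes, bit 2 is also set, and a 512-byte IOARCB is fetched instead. Packing these flags into the low address bits works because the IOARCB is aligned well past 8 bytes, leaving the bottom bits free.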
@@ -724,8 +845,8 @@ static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
724 845 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
725 846
726 847 mb();
727 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
728 ioa_cfg->regs.ioarrin_reg);
848
849 ipr_send_command(ipr_cmd);
729 850 }
730 851
731 852 /**
@@ -747,6 +868,51 @@ static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
747 868 }
748 869
749 870 /**
871 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
872 * @ipr_cmd: ipr command struct
873 * @dma_addr: dma address
874 * @len: transfer length
875 * @flags: ioadl flag value
876 *
877 * This function initializes an ioadl in the case where there is only a single
878 * descriptor.
879 *
880 * Return value:
881 * nothing
882 **/
883static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
884 u32 len, int flags)
885{
886 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
887 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
888
889 ipr_cmd->dma_use_sg = 1;
890
891 if (ipr_cmd->ioa_cfg->sis64) {
892 ioadl64->flags = cpu_to_be32(flags);
893 ioadl64->data_len = cpu_to_be32(len);
894 ioadl64->address = cpu_to_be64(dma_addr);
895
896 ipr_cmd->ioarcb.ioadl_len =
897 cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
898 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
899 } else {
900 ioadl->flags_and_data_len = cpu_to_be32(flags | len);
901 ioadl->address = cpu_to_be32(dma_addr);
902
903 if (flags == IPR_IOADL_FLAGS_READ_LAST) {
904 ipr_cmd->ioarcb.read_ioadl_len =
905 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
906 ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
907 } else {
908 ipr_cmd->ioarcb.ioadl_len =
909 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
910 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
911 }
912 }
913}
914
915/**
750 916 * ipr_send_blocking_cmd - Send command and sleep on its completion.
751 917 * @ipr_cmd: ipr command struct
752 918 * @timeout_func: function to invoke if command times out
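ipr_init_ioadl() centralizes what used to be open-coded at each submission site: pick the 64- or 32-bit descriptor, fill in flags, length and address, and record the transfer length in the matching IOARCB field (the 32-bit read path still uses the separate read_* fields). The next hunk shows the first caller collapsing four lines of manual setup into:

    ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
                   sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);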
@@ -803,11 +969,8 @@ static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
803 969 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
804 970 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
805 971
806 ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
807 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
972 ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
973 sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
808 ipr_cmd->ioadl[0].flags_and_data_len =
809 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
810 ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);
811 974
812 975 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
813 976 ipr_cmd->done = ipr_process_ccn;
@@ -817,22 +980,54 @@ static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
817 980 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
818 981
819 982 mb();
820 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
821 ioa_cfg->regs.ioarrin_reg);
983
984 ipr_send_command(ipr_cmd);
822 985 } else {
823 986 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
824 987 }
825 988 }
826 989
827 990 /**
991 * ipr_update_ata_class - Update the ata class in the resource entry
992 * @res: resource entry struct
993 * @proto: cfgte device bus protocol value
994 *
995 * Return value:
996 * none
997 **/
998static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
999{
1000 switch(proto) {
1001 case IPR_PROTO_SATA:
1002 case IPR_PROTO_SAS_STP:
1003 res->ata_class = ATA_DEV_ATA;
1004 break;
1005 case IPR_PROTO_SATA_ATAPI:
1006 case IPR_PROTO_SAS_STP_ATAPI:
1007 res->ata_class = ATA_DEV_ATAPI;
1008 break;
1009 default:
1010 res->ata_class = ATA_DEV_UNKNOWN;
1011 break;
1012 };
1013}
1014
1015/**
828 1016 * ipr_init_res_entry - Initialize a resource entry struct.
829 1017 * @res: resource entry struct
1018 * @cfgtew: config table entry wrapper struct
830 1019 *
831 1020 * Return value:
832 1021 * none
833 1022 **/
834 static void ipr_init_res_entry(struct ipr_resource_entry *res)
1023 static void ipr_init_res_entry(struct ipr_resource_entry *res,
1024 struct ipr_config_table_entry_wrapper *cfgtew)
835 1025 {
1026 int found = 0;
1027 unsigned int proto;
1028 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1029 struct ipr_resource_entry *gscsi_res = NULL;
1030
836 1031 res->needs_sync_complete = 0;
837 1032 res->in_erp = 0;
838 1033 res->add_to_ml = 0;
@@ -840,6 +1035,205 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res)
840 1035 res->resetting_device = 0;
841 1036 res->sdev = NULL;
842 1037 res->sata_port = NULL;
1038
1039 if (ioa_cfg->sis64) {
1040 proto = cfgtew->u.cfgte64->proto;
1041 res->res_flags = cfgtew->u.cfgte64->res_flags;
1042 res->qmodel = IPR_QUEUEING_MODEL64(res);
1043 res->type = cfgtew->u.cfgte64->res_type & 0x0f;
1044
1045 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1046 sizeof(res->res_path));
1047
1048 res->bus = 0;
1049 res->lun = scsilun_to_int(&res->dev_lun);
1050
1051 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1052 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1053 if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1054 found = 1;
1055 res->target = gscsi_res->target;
1056 break;
1057 }
1058 }
1059 if (!found) {
1060 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1061 ioa_cfg->max_devs_supported);
1062 set_bit(res->target, ioa_cfg->target_ids);
1063 }
1064
1065 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1066 sizeof(res->dev_lun.scsi_lun));
1067 } else if (res->type == IPR_RES_TYPE_IOAFP) {
1068 res->bus = IPR_IOAFP_VIRTUAL_BUS;
1069 res->target = 0;
1070 } else if (res->type == IPR_RES_TYPE_ARRAY) {
1071 res->bus = IPR_ARRAY_VIRTUAL_BUS;
1072 res->target = find_first_zero_bit(ioa_cfg->array_ids,
1073 ioa_cfg->max_devs_supported);
1074 set_bit(res->target, ioa_cfg->array_ids);
1075 } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1076 res->bus = IPR_VSET_VIRTUAL_BUS;
1077 res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1078 ioa_cfg->max_devs_supported);
1079 set_bit(res->target, ioa_cfg->vset_ids);
1080 } else {
1081 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1082 ioa_cfg->max_devs_supported);
1083 set_bit(res->target, ioa_cfg->target_ids);
1084 }
1085 } else {
1086 proto = cfgtew->u.cfgte->proto;
1087 res->qmodel = IPR_QUEUEING_MODEL(res);
1088 res->flags = cfgtew->u.cfgte->flags;
1089 if (res->flags & IPR_IS_IOA_RESOURCE)
1090 res->type = IPR_RES_TYPE_IOAFP;
1091 else
1092 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1093
1094 res->bus = cfgtew->u.cfgte->res_addr.bus;
1095 res->target = cfgtew->u.cfgte->res_addr.target;
1096 res->lun = cfgtew->u.cfgte->res_addr.lun;
1097 }
1098
1099 ipr_update_ata_class(res, proto);
1100}
1101
1102/**
1103 * ipr_is_same_device - Determine if two devices are the same.
1104 * @res: resource entry struct
1105 * @cfgtew: config table entry wrapper struct
1106 *
1107 * Return value:
1108 * 1 if the devices are the same / 0 otherwise
1109 **/
1110static int ipr_is_same_device(struct ipr_resource_entry *res,
1111 struct ipr_config_table_entry_wrapper *cfgtew)
1112{
1113 if (res->ioa_cfg->sis64) {
1114 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1115 sizeof(cfgtew->u.cfgte64->dev_id)) &&
1116 !memcmp(&res->lun, &cfgtew->u.cfgte64->lun,
1117 sizeof(cfgtew->u.cfgte64->lun))) {
1118 return 1;
1119 }
1120 } else {
1121 if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1122 res->target == cfgtew->u.cfgte->res_addr.target &&
1123 res->lun == cfgtew->u.cfgte->res_addr.lun)
1124 return 1;
1125 }
1126
1127 return 0;
1128}
1129
1130/**
1131 * ipr_format_resource_path - Format the resource path for printing.
1132 * @res_path: resource path
1133 * @buf: buffer
1134 *
1135 * Return value:
1136 * pointer to buffer
1137 **/
1138static char *ipr_format_resource_path(u8 *res_path, char *buffer)
1139{
1140 int i;
1141
1142 sprintf(buffer, "%02X", res_path[0]);
1143 for (i=1; res_path[i] != 0xff; i++)
1144 sprintf(buffer, "%s-%02X", buffer, res_path[i]);
1145
1146 return buffer;
1147}
1148
1149/**
1150 * ipr_update_res_entry - Update the resource entry.
1151 * @res: resource entry struct
1152 * @cfgtew: config table entry wrapper struct
1153 *
1154 * Return value:
1155 * none
1156 **/
1157static void ipr_update_res_entry(struct ipr_resource_entry *res,
1158 struct ipr_config_table_entry_wrapper *cfgtew)
1159{
1160 char buffer[IPR_MAX_RES_PATH_LENGTH];
1161 unsigned int proto;
1162 int new_path = 0;
1163
1164 if (res->ioa_cfg->sis64) {
1165 res->flags = cfgtew->u.cfgte64->flags;
1166 res->res_flags = cfgtew->u.cfgte64->res_flags;
1167 res->type = cfgtew->u.cfgte64->res_type & 0x0f;
1168
1169 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1170 sizeof(struct ipr_std_inq_data));
1171
1172 res->qmodel = IPR_QUEUEING_MODEL64(res);
1173 proto = cfgtew->u.cfgte64->proto;
1174 res->res_handle = cfgtew->u.cfgte64->res_handle;
1175 res->dev_id = cfgtew->u.cfgte64->dev_id;
1176
1177 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1178 sizeof(res->dev_lun.scsi_lun));
1179
1180 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1181 sizeof(res->res_path))) {
1182 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1183 sizeof(res->res_path));
1184 new_path = 1;
1185 }
1186
1187 if (res->sdev && new_path)
1188 sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1189 ipr_format_resource_path(&res->res_path[0], &buffer[0]));
1190 } else {
1191 res->flags = cfgtew->u.cfgte->flags;
1192 if (res->flags & IPR_IS_IOA_RESOURCE)
1193 res->type = IPR_RES_TYPE_IOAFP;
1194 else
1195 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1196
1197 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1198 sizeof(struct ipr_std_inq_data));
1199
1200 res->qmodel = IPR_QUEUEING_MODEL(res);
1201 proto = cfgtew->u.cfgte->proto;
1202 res->res_handle = cfgtew->u.cfgte->res_handle;
1203 }
1204
1205 ipr_update_ata_class(res, proto);
1206}
1207
1208/**
1209 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1210 * for the resource.
1211 * @res: resource entry struct
1212 * @cfgtew: config table entry wrapper struct
1213 *
1214 * Return value:
1215 * none
1216 **/
1217static void ipr_clear_res_target(struct ipr_resource_entry *res)
1218{
1219 struct ipr_resource_entry *gscsi_res = NULL;
1220 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1221
1222 if (!ioa_cfg->sis64)
1223 return;
1224
1225 if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1226 clear_bit(res->target, ioa_cfg->array_ids);
1227 else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1228 clear_bit(res->target, ioa_cfg->vset_ids);
1229 else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1230 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1231 if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1232 return;
1233 clear_bit(res->target, ioa_cfg->target_ids);
1234
1235 } else if (res->bus == 0)
1236 clear_bit(res->target, ioa_cfg->target_ids);
843 1237 }
844 1238
845 1239 /**
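One caution on ipr_format_resource_path() as added above: sprintf(buffer, "%s-%02X", buffer, ...) passes its destination as a source argument, which the C standard leaves undefined even though the kernel's vsnprintf happens to tolerate it. A safer sketch tracks the write offset explicitly (hypothetical helper, bounds-checked via scnprintf()):

    static char *format_res_path(const u8 *res_path, char *buf, size_t size)
    {
            size_t len;
            int i;

            len = scnprintf(buf, size, "%02X", res_path[0]);
            for (i = 1; res_path[i] != 0xff; i++)
                    len += scnprintf(buf + len, size - len, "-%02X", res_path[i]);
            return buf;
    }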
@@ -851,17 +1245,24 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res)
851 1245 * none
852 1246 **/
853 1247 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
854 1248 struct ipr_hostrcb *hostrcb)
855 1249 {
856 1250 struct ipr_resource_entry *res = NULL;
857 struct ipr_config_table_entry *cfgte;
1251 struct ipr_config_table_entry_wrapper cfgtew;
1252 __be32 cc_res_handle;
1253
858 1254 u32 is_ndn = 1;
859 1255
860 cfgte = &hostrcb->hcam.u.ccn.cfgte;
1256 if (ioa_cfg->sis64) {
1257 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1258 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1259 } else {
1260 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1261 cc_res_handle = cfgtew.u.cfgte->res_handle;
1262 }
861 1263
862 1264 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
863 if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
1265 if (res->res_handle == cc_res_handle) {
864 sizeof(cfgte->res_addr))) {
865 1266 is_ndn = 0;
866 1267 break;
867 1268 }
@@ -879,20 +1280,22 @@ static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
879 1280 struct ipr_resource_entry, queue);
880 1281
881 1282 list_del(&res->queue);
882 ipr_init_res_entry(res);
1283 ipr_init_res_entry(res, &cfgtew);
883 1284 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
884 1285 }
885 1286
886 memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
1287 ipr_update_res_entry(res, &cfgtew);
887 1288
888 1289 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
889 1290 if (res->sdev) {
890 1291 res->del_from_ml = 1;
891 res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
1292 res->res_handle = IPR_INVALID_RES_HANDLE;
892 1293 if (ioa_cfg->allow_ml_add_del)
893 1294 schedule_work(&ioa_cfg->work_q);
894 } else
1295 } else {
1296 ipr_clear_res_target(res);
895 1297 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1298 }
896 1299 } else if (!res->sdev) {
897 1300 res->add_to_ml = 1;
898 1301 if (ioa_cfg->allow_ml_add_del)
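The remove path above frees a SIS-64 target number only when no other resource shares the device ID, mirroring how ipr_init_res_entry() reuses the target of an existing resource with the same dev_id. The underlying idiom is a plain bitmap ID allocator (sketch; find_first_zero_bit() returns 'max' when the map is full):

    #include <linux/bitmap.h>

    static int alloc_target_id(unsigned long *map, unsigned int max)
    {
            unsigned int id = find_first_zero_bit(map, max);

            if (id >= max)
                    return -ENOSPC;
            set_bit(id, map);               /* claim it */
            return id;
    }

    /* release: clear_bit(id, map); */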
@@ -1044,8 +1447,12 @@ static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1044 1447 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1045 1448 struct ipr_hostrcb *hostrcb)
1046 1449 {
1047 struct ipr_hostrcb_type_12_error *error =
1048 &hostrcb->hcam.u.error.u.type_12_error;
1450 struct ipr_hostrcb_type_12_error *error;
1451
1452 if (ioa_cfg->sis64)
1453 error = &hostrcb->hcam.u.error64.u.type_12_error;
1454 else
1455 error = &hostrcb->hcam.u.error.u.type_12_error;
1049 1456
1050 1457 ipr_err("-----Current Configuration-----\n");
1051 1458 ipr_err("Cache Directory Card Information:\n");
@@ -1138,6 +1545,48 @@ static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1138 1545 }
1139 1546
1140 1547 /**
1548 * ipr_log_sis64_config_error - Log a device error.
1549 * @ioa_cfg: ioa config struct
1550 * @hostrcb: hostrcb struct
1551 *
1552 * Return value:
1553 * none
1554 **/
1555static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1556 struct ipr_hostrcb *hostrcb)
1557{
1558 int errors_logged, i;
1559 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1560 struct ipr_hostrcb_type_23_error *error;
1561 char buffer[IPR_MAX_RES_PATH_LENGTH];
1562
1563 error = &hostrcb->hcam.u.error64.u.type_23_error;
1564 errors_logged = be32_to_cpu(error->errors_logged);
1565
1566 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1567 be32_to_cpu(error->errors_detected), errors_logged);
1568
1569 dev_entry = error->dev;
1570
1571 for (i = 0; i < errors_logged; i++, dev_entry++) {
1572 ipr_err_separator;
1573
1574 ipr_err("Device %d : %s", i + 1,
1575 ipr_format_resource_path(&dev_entry->res_path[0], &buffer[0]));
1576 ipr_log_ext_vpd(&dev_entry->vpd);
1577
1578 ipr_err("-----New Device Information-----\n");
1579 ipr_log_ext_vpd(&dev_entry->new_vpd);
1580
1581 ipr_err("Cache Directory Card Information:\n");
1582 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1583
1584 ipr_err("Adapter Card Information:\n");
1585 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1586 }
1587}
1588
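Note that every count and ID in the HCAM buffer arrives big-endian from the adapter, so errors_logged must pass through be32_to_cpu() before it can drive the loop. A runnable userspace analogue of that conversion (ntohl() behaves like be32_to_cpu() on any host):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>

    int main(void)
    {
            /* Four raw bytes as the adapter would store a count of 3. */
            uint8_t wire[4] = { 0x00, 0x00, 0x00, 0x03 };
            uint32_t raw;

            memcpy(&raw, wire, sizeof(raw));
            printf("loop bound = %u\n", (unsigned)ntohl(raw)); /* 3 everywhere */
            return 0;
    }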
1589/**
1141 * ipr_log_config_error - Log a configuration error. 1590 * ipr_log_config_error - Log a configuration error.
1142 * @ioa_cfg: ioa config struct 1591 * @ioa_cfg: ioa config struct
1143 * @hostrcb: hostrcb struct 1592 * @hostrcb: hostrcb struct
@@ -1331,9 +1780,13 @@ static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1331{ 1780{
1332 struct ipr_hostrcb_type_17_error *error; 1781 struct ipr_hostrcb_type_17_error *error;
1333 1782
1334 error = &hostrcb->hcam.u.error.u.type_17_error; 1783 if (ioa_cfg->sis64)
1784 error = &hostrcb->hcam.u.error64.u.type_17_error;
1785 else
1786 error = &hostrcb->hcam.u.error.u.type_17_error;
1787
1335 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; 1788 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1336 strstrip(error->failure_reason); 1789 strim(error->failure_reason);
1337 1790
1338 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason, 1791 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1339 be32_to_cpu(hostrcb->hcam.u.error.prc)); 1792 be32_to_cpu(hostrcb->hcam.u.error.prc));
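The strstrip() to strim() change follows the rename of that helper in the core string library; the surrounding pattern is the usual one for device-supplied strings: force NUL termination at the buffer's last byte, then trim trailing whitespace in place. (strim() also returns a pointer past any leading whitespace, but the driver ignores the return value, so only the trailing trim takes effect.) A userspace sketch of the same two steps:

    #include <ctype.h>
    #include <string.h>

    static void terminate_and_trim(char *s, size_t size)
    {
            size_t len;

            s[size - 1] = '\0';                     /* cap the buffer first */
            len = strlen(s);
            while (len && isspace((unsigned char)s[len - 1]))
                    s[--len] = '\0';                /* drop trailing blanks */
    }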
@@ -1359,7 +1812,7 @@ static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1359 1812
1360 error = &hostrcb->hcam.u.error.u.type_07_error; 1813 error = &hostrcb->hcam.u.error.u.type_07_error;
1361 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; 1814 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1362 strstrip(error->failure_reason); 1815 strim(error->failure_reason);
1363 1816
1364 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason, 1817 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1365 be32_to_cpu(hostrcb->hcam.u.error.prc)); 1818 be32_to_cpu(hostrcb->hcam.u.error.prc));
@@ -1438,6 +1891,42 @@ static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1438 fabric->ioa_port, fabric->cascaded_expander, fabric->phy); 1891 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1439} 1892}
1440 1893
1894/**
1895 * ipr_log64_fabric_path - Log a fabric path error
1896 * @hostrcb: hostrcb struct
1897 * @fabric: fabric descriptor
1898 *
1899 * Return value:
1900 * none
1901 **/
1902static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
1903 struct ipr_hostrcb64_fabric_desc *fabric)
1904{
1905 int i, j;
1906 u8 path_state = fabric->path_state;
1907 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1908 u8 state = path_state & IPR_PATH_STATE_MASK;
1909 char buffer[IPR_MAX_RES_PATH_LENGTH];
1910
1911 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1912 if (path_active_desc[i].active != active)
1913 continue;
1914
1915 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1916 if (path_state_desc[j].state != state)
1917 continue;
1918
1919 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
1920 path_active_desc[i].desc, path_state_desc[j].desc,
1921 ipr_format_resource_path(&fabric->res_path[0], &buffer[0]));
1922 return;
1923 }
1924 }
1925
1926 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
1927 ipr_format_resource_path(&fabric->res_path[0], &buffer[0]));
1928}
1929
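Both fabric-path loggers drive their messages from the same pair of lookup tables and fall through to a raw hex print when nothing matches; that fallback is what keeps unknown states loggable on newer firmware. A self-contained sketch of table-driven lookup with a fallback (keys and strings are made up):

    #include <stdio.h>
    #include <stddef.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const struct { unsigned char state; const char *desc; } state_desc[] = {
            { 0x00, "functional" },
            { 0x20, "degraded"   },
            { 0x40, "failed"     },
    };

    static void log_state(unsigned char state)
    {
            size_t i;

            for (i = 0; i < ARRAY_SIZE(state_desc); i++) {
                    if (state_desc[i].state == state) {
                            printf("%s\n", state_desc[i].desc);
                            return;
                    }
            }
            printf("Path state=%02X\n", state);  /* fallback, as the driver does */
    }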
1441static const struct { 1930static const struct {
1442 u8 type; 1931 u8 type;
1443 char *desc; 1932 char *desc;
@@ -1547,6 +2036,49 @@ static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
1547} 2036}
1548 2037
1549/** 2038/**
2039 * ipr_log64_path_elem - Log a fabric path element.
2040 * @hostrcb: hostrcb struct
2041 * @cfg: fabric path element struct
2042 *
2043 * Return value:
2044 * none
2045 **/
2046static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2047 struct ipr_hostrcb64_config_element *cfg)
2048{
2049 int i, j;
2050 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2051 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2052 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2053 char buffer[IPR_MAX_RES_PATH_LENGTH];
2054
2055 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2056 return;
2057
2058 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2059 if (path_type_desc[i].type != type)
2060 continue;
2061
2062 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2063 if (path_status_desc[j].status != status)
2064 continue;
2065
2066 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2067 path_status_desc[j].desc, path_type_desc[i].desc,
2068 ipr_format_resource_path(&cfg->res_path[0], &buffer[0]),
2069 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2070 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2071 return;
2072 }
2073 }
2074 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2075 "WWN=%08X%08X\n", cfg->type_status,
2076 ipr_format_resource_path(&cfg->res_path[0], &buffer[0]),
2077 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2078 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2079}
2080
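The WWN here is carried as two big-endian 32-bit words and printed as one 16-digit hex string. A runnable illustration with a fabricated WWN value:

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    int main(void)
    {
            /* Two be32 halves of a made-up 64-bit WWN. */
            uint32_t wwid[2] = { htonl(0x5000C500u), htonl(0x12345678u) };

            printf("WWN=%08X%08X\n",
                   (unsigned)ntohl(wwid[0]), (unsigned)ntohl(wwid[1]));
            return 0;   /* prints WWN=5000C50012345678 */
    }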
2081/**
1550 * ipr_log_fabric_error - Log a fabric error. 2082 * ipr_log_fabric_error - Log a fabric error.
1551 * @ioa_cfg: ioa config struct 2083 * @ioa_cfg: ioa config struct
1552 * @hostrcb: hostrcb struct 2084 * @hostrcb: hostrcb struct
@@ -1584,6 +2116,96 @@ static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
1584} 2116}
1585 2117
1586/** 2118/**
2119 * ipr_log_sis64_array_error - Log a sis64 array error.
2120 * @ioa_cfg: ioa config struct
2121 * @hostrcb: hostrcb struct
2122 *
2123 * Return value:
2124 * none
2125 **/
2126static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2127 struct ipr_hostrcb *hostrcb)
2128{
2129 int i, num_entries;
2130 struct ipr_hostrcb_type_24_error *error;
2131 struct ipr_hostrcb64_array_data_entry *array_entry;
2132 char buffer[IPR_MAX_RES_PATH_LENGTH];
2133 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2134
2135 error = &hostrcb->hcam.u.error64.u.type_24_error;
2136
2137 ipr_err_separator;
2138
2139 ipr_err("RAID %s Array Configuration: %s\n",
2140 error->protection_level,
2141 ipr_format_resource_path(&error->last_res_path[0], &buffer[0]));
2142
2143 ipr_err_separator;
2144
2145 array_entry = error->array_member;
2146 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
2147 ARRAY_SIZE(error->array_member));
2148
2149 for (i = 0; i < num_entries; i++, array_entry++) {
2150
2151 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2152 continue;
2153
2154 if (error->exposed_mode_adn == i)
2155 ipr_err("Exposed Array Member %d:\n", i);
2156 else
2157 ipr_err("Array Member %d:\n", i);
2158
2160 ipr_log_ext_vpd(&array_entry->vpd);
2161 ipr_err("Current Location: %s",
2162 ipr_format_resource_path(&array_entry->res_path[0], &buffer[0]));
2163 ipr_err("Expected Location: %s",
2164 ipr_format_resource_path(&array_entry->expected_res_path[0], &buffer[0]));
2165
2166 ipr_err_separator;
2167 }
2168}
2169
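One subtlety in the loop above: num_entries comes from the adapter, so it must be clamped to the capacity of the array_member[] array before indexing, and the correct bound is an element count (ARRAY_SIZE), not a byte count (sizeof). A compact sketch of the clamp, the userspace twin of the min_t() call:

    #include <stdint.h>
    #include <stddef.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    struct entry { char sn[8]; };

    static struct entry members[32];

    static uint32_t clamp_count(uint32_t reported)
    {
            /* sizeof(members) would be 256 here; ARRAY_SIZE is the real bound. */
            return reported < ARRAY_SIZE(members)
                    ? reported : (uint32_t)ARRAY_SIZE(members);
    }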
2170/**
2171 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2172 * @ioa_cfg: ioa config struct
2173 * @hostrcb: hostrcb struct
2174 *
2175 * Return value:
2176 * none
2177 **/
2178static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2179 struct ipr_hostrcb *hostrcb)
2180{
2181 struct ipr_hostrcb_type_30_error *error;
2182 struct ipr_hostrcb64_fabric_desc *fabric;
2183 struct ipr_hostrcb64_config_element *cfg;
2184 int i, add_len;
2185
2186 error = &hostrcb->hcam.u.error64.u.type_30_error;
2187
2188 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2189 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2190
2191 add_len = be32_to_cpu(hostrcb->hcam.length) -
2192 (offsetof(struct ipr_hostrcb64_error, u) +
2193 offsetof(struct ipr_hostrcb_type_30_error, desc));
2194
2195 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2196 ipr_log64_fabric_path(hostrcb, fabric);
2197 for_each_fabric_cfg(fabric, cfg)
2198 ipr_log64_path_elem(hostrcb, cfg);
2199
2200 add_len -= be16_to_cpu(fabric->length);
2201 fabric = (struct ipr_hostrcb64_fabric_desc *)
2202 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2203 }
2204
2205 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2206}
2207
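The fabric descriptors are variable-length records laid end to end, so the walk advances by each record's own be16 length field, and whatever remains past the last record is dumped as hex. A minimal sketch of that advance (simplified to host-order lengths):

    #include <stdint.h>

    struct vdesc {
            uint16_t length;        /* total size of this record in bytes */
            /* record body follows immediately */
    };

    static const struct vdesc *next_record(const struct vdesc *d)
    {
            return (const struct vdesc *)((const uint8_t *)d + d->length);
    }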
2208/**
1587 * ipr_log_generic_error - Log an adapter error. 2209 * ipr_log_generic_error - Log an adapter error.
1588 * @ioa_cfg: ioa config struct 2210 * @ioa_cfg: ioa config struct
1589 * @hostrcb: hostrcb struct 2211 * @hostrcb: hostrcb struct
@@ -1642,13 +2264,16 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
1642 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST) 2264 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
1643 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n"); 2265 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
1644 2266
1645 ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc); 2267 if (ioa_cfg->sis64)
2268 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2269 else
2270 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
1646 2271
1647 if (ioasc == IPR_IOASC_BUS_WAS_RESET || 2272 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
1648 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) { 2273 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
1649 /* Tell the midlayer we had a bus reset so it will handle the UA properly */ 2274 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
1650 scsi_report_bus_reset(ioa_cfg->host, 2275 scsi_report_bus_reset(ioa_cfg->host,
1651 hostrcb->hcam.u.error.failing_dev_res_addr.bus); 2276 hostrcb->hcam.u.error.fd_res_addr.bus);
1652 } 2277 }
1653 2278
1654 error_index = ipr_get_error(ioasc); 2279 error_index = ipr_get_error(ioasc);
@@ -1696,6 +2321,16 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
1696 case IPR_HOST_RCB_OVERLAY_ID_20: 2321 case IPR_HOST_RCB_OVERLAY_ID_20:
1697 ipr_log_fabric_error(ioa_cfg, hostrcb); 2322 ipr_log_fabric_error(ioa_cfg, hostrcb);
1698 break; 2323 break;
2324 case IPR_HOST_RCB_OVERLAY_ID_23:
2325 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2326 break;
2327 case IPR_HOST_RCB_OVERLAY_ID_24:
2328 case IPR_HOST_RCB_OVERLAY_ID_26:
2329 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2330 break;
2331 case IPR_HOST_RCB_OVERLAY_ID_30:
2332 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2333 break;
1699 case IPR_HOST_RCB_OVERLAY_ID_1: 2334 case IPR_HOST_RCB_OVERLAY_ID_1:
1700 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT: 2335 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
1701 default: 2336 default:
@@ -1720,7 +2355,12 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
1720 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 2355 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1721 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; 2356 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1722 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); 2357 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
1723 u32 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc); 2358 u32 fd_ioasc;
2359
2360 if (ioa_cfg->sis64)
2361 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2362 else
2363 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
1724 2364
1725 list_del(&hostrcb->queue); 2365 list_del(&hostrcb->queue);
1726 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); 2366 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
@@ -1845,12 +2485,14 @@ static const struct ipr_ses_table_entry *
1845ipr_find_ses_entry(struct ipr_resource_entry *res) 2485ipr_find_ses_entry(struct ipr_resource_entry *res)
1846{ 2486{
1847 int i, j, matches; 2487 int i, j, matches;
2488 struct ipr_std_inq_vpids *vpids;
1848 const struct ipr_ses_table_entry *ste = ipr_ses_table; 2489 const struct ipr_ses_table_entry *ste = ipr_ses_table;
1849 2490
1850 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) { 2491 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
1851 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) { 2492 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
1852 if (ste->compare_product_id_byte[j] == 'X') { 2493 if (ste->compare_product_id_byte[j] == 'X') {
1853 if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j]) 2494 vpids = &res->std_inq_data.vpids;
2495 if (vpids->product_id[j] == ste->product_id[j])
1854 matches++; 2496 matches++;
1855 else 2497 else
1856 break; 2498 break;
@@ -1885,10 +2527,10 @@ static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_wi
1885 2527
1886 /* Loop through each config table entry in the config table buffer */ 2528 /* Loop through each config table entry in the config table buffer */
1887 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 2529 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1888 if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data))) 2530 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
1889 continue; 2531 continue;
1890 2532
1891 if (bus != res->cfgte.res_addr.bus) 2533 if (bus != res->bus)
1892 continue; 2534 continue;
1893 2535
1894 if (!(ste = ipr_find_ses_entry(res))) 2536 if (!(ste = ipr_find_ses_entry(res)))
@@ -1934,6 +2576,31 @@ static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
1934} 2576}
1935 2577
1936/** 2578/**
2579 * ipr_get_sis64_dump_data_section - Dump IOA memory
2580 * @ioa_cfg: ioa config struct
2581 * @start_addr: adapter address to dump
2582 * @dest: destination kernel buffer
2583 * @length_in_words: length to dump in 4 byte words
2584 *
2585 * Return value:
2586 * 0 on success
2587 **/
2588static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2589 u32 start_addr,
2590 __be32 *dest, u32 length_in_words)
2591{
2592 int i;
2593
2594 for (i = 0; i < length_in_words; i++) {
2595 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2596 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2597 dest++;
2598 }
2599
2600 return 0;
2601}
2602
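The sis64 dump path replaces the LDUMP handshake entirely with an indirect register pair: the IOA-side address goes into dump_addr_reg and the word comes back through dump_data_reg, byte-swapped with cpu_to_be32() so the dump buffer is big-endian on any host. A runnable demonstration of that swap (htonl() is the userspace twin of cpu_to_be32()):

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    int main(void)
    {
            uint32_t raw = 0x11223344u;             /* pretend readl() result */
            uint32_t be  = htonl(raw);              /* cpu_to_be32 equivalent */
            const uint8_t *p = (const uint8_t *)&be;

            /* Stored bytes are 11 22 33 44 regardless of host endianness. */
            printf("%02x %02x %02x %02x\n", p[0], p[1], p[2], p[3]);
            return 0;
    }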
2603/**
1937 * ipr_get_ldump_data_section - Dump IOA memory 2604 * ipr_get_ldump_data_section - Dump IOA memory
1938 * @ioa_cfg: ioa config struct 2605 * @ioa_cfg: ioa config struct
1939 * @start_addr: adapter address to dump 2606 * @start_addr: adapter address to dump
@@ -1950,9 +2617,13 @@ static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
1950 volatile u32 temp_pcii_reg; 2617 volatile u32 temp_pcii_reg;
1951 int i, delay = 0; 2618 int i, delay = 0;
1952 2619
2620 if (ioa_cfg->sis64)
2621 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2622 dest, length_in_words);
2623
1953 /* Write IOA interrupt reg starting LDUMP state */ 2624 /* Write IOA interrupt reg starting LDUMP state */
1954 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT), 2625 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
1955 ioa_cfg->regs.set_uproc_interrupt_reg); 2626 ioa_cfg->regs.set_uproc_interrupt_reg32);
1956 2627
1957 /* Wait for IO debug acknowledge */ 2628 /* Wait for IO debug acknowledge */
1958 if (ipr_wait_iodbg_ack(ioa_cfg, 2629 if (ipr_wait_iodbg_ack(ioa_cfg,
@@ -1971,7 +2642,7 @@ static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
1971 2642
1972 /* Signal address valid - clear IOA Reset alert */ 2643 /* Signal address valid - clear IOA Reset alert */
1973 writel(IPR_UPROCI_RESET_ALERT, 2644 writel(IPR_UPROCI_RESET_ALERT,
1974 ioa_cfg->regs.clr_uproc_interrupt_reg); 2645 ioa_cfg->regs.clr_uproc_interrupt_reg32);
1975 2646
1976 for (i = 0; i < length_in_words; i++) { 2647 for (i = 0; i < length_in_words; i++) {
1977 /* Wait for IO debug acknowledge */ 2648 /* Wait for IO debug acknowledge */
@@ -1996,10 +2667,10 @@ static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
1996 2667
1997 /* Signal end of block transfer. Set reset alert then clear IO debug ack */ 2668 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
1998 writel(IPR_UPROCI_RESET_ALERT, 2669 writel(IPR_UPROCI_RESET_ALERT,
1999 ioa_cfg->regs.set_uproc_interrupt_reg); 2670 ioa_cfg->regs.set_uproc_interrupt_reg32);
2000 2671
2001 writel(IPR_UPROCI_IO_DEBUG_ALERT, 2672 writel(IPR_UPROCI_IO_DEBUG_ALERT,
2002 ioa_cfg->regs.clr_uproc_interrupt_reg); 2673 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2003 2674
2004 /* Signal dump data received - Clear IO debug Ack */ 2675 /* Signal dump data received - Clear IO debug Ack */
2005 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, 2676 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
@@ -2008,7 +2679,7 @@ static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2008 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */ 2679 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2009 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) { 2680 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2010 temp_pcii_reg = 2681 temp_pcii_reg =
2011 readl(ioa_cfg->regs.sense_uproc_interrupt_reg); 2682 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2012 2683
2013 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT)) 2684 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2014 return 0; 2685 return 0;
@@ -2207,6 +2878,7 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2207 u32 num_entries, start_off, end_off; 2878 u32 num_entries, start_off, end_off;
2208 u32 bytes_to_copy, bytes_copied, rc; 2879 u32 bytes_to_copy, bytes_copied, rc;
2209 struct ipr_sdt *sdt; 2880 struct ipr_sdt *sdt;
2881 int valid = 1;
2210 int i; 2882 int i;
2211 2883
2212 ENTER; 2884 ENTER;
@@ -2220,7 +2892,7 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2220 2892
2221 start_addr = readl(ioa_cfg->ioa_mailbox); 2893 start_addr = readl(ioa_cfg->ioa_mailbox);
2222 2894
2223 if (!ipr_sdt_is_fmt2(start_addr)) { 2895 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
2224 dev_err(&ioa_cfg->pdev->dev, 2896 dev_err(&ioa_cfg->pdev->dev,
2225 "Invalid dump table format: %lx\n", start_addr); 2897 "Invalid dump table format: %lx\n", start_addr);
2226 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 2898 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
@@ -2249,7 +2921,6 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2249 2921
2250 /* IOA Dump entry */ 2922 /* IOA Dump entry */
2251 ipr_init_dump_entry_hdr(&ioa_dump->hdr); 2923 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
2252 ioa_dump->format = IPR_SDT_FMT2;
2253 ioa_dump->hdr.len = 0; 2924 ioa_dump->hdr.len = 0;
2254 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY; 2925 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2255 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID; 2926 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
@@ -2264,7 +2935,8 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2264 sizeof(struct ipr_sdt) / sizeof(__be32)); 2935 sizeof(struct ipr_sdt) / sizeof(__be32));
2265 2936
2266 /* Smart Dump table is ready to use and the first entry is valid */ 2937 /* Smart Dump table is ready to use and the first entry is valid */
2267 if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) { 2938 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
2939 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
2268 dev_err(&ioa_cfg->pdev->dev, 2940 dev_err(&ioa_cfg->pdev->dev,
2269 "Dump of IOA failed. Dump table not valid: %d, %X.\n", 2941 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
2270 rc, be32_to_cpu(sdt->hdr.state)); 2942 rc, be32_to_cpu(sdt->hdr.state));
@@ -2288,12 +2960,19 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2288 } 2960 }
2289 2961
2290 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) { 2962 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
2291 sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset); 2963 sdt_word = be32_to_cpu(sdt->entry[i].start_token);
2292 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK; 2964 if (ioa_cfg->sis64)
2293 end_off = be32_to_cpu(sdt->entry[i].end_offset); 2965 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
2294 2966 else {
2295 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) { 2967 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
2296 bytes_to_copy = end_off - start_off; 2968 end_off = be32_to_cpu(sdt->entry[i].end_token);
2969
2970 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
2971 bytes_to_copy = end_off - start_off;
2972 else
2973 valid = 0;
2974 }
2975 if (valid) {
2297 if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) { 2976 if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
2298 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY; 2977 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
2299 continue; 2978 continue;
@@ -2422,9 +3101,9 @@ restart:
2422 3101
2423 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 3102 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2424 if (res->add_to_ml) { 3103 if (res->add_to_ml) {
2425 bus = res->cfgte.res_addr.bus; 3104 bus = res->bus;
2426 target = res->cfgte.res_addr.target; 3105 target = res->target;
2427 lun = res->cfgte.res_addr.lun; 3106 lun = res->lun;
2428 res->add_to_ml = 0; 3107 res->add_to_ml = 0;
2429 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3108 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2430 scsi_add_device(ioa_cfg->host, bus, target, lun); 3109 scsi_add_device(ioa_cfg->host, bus, target, lun);
@@ -2478,105 +3157,6 @@ static struct bin_attribute ipr_trace_attr = {
2478}; 3157};
2479#endif 3158#endif
2480 3159
2481static const struct {
2482 enum ipr_cache_state state;
2483 char *name;
2484} cache_state [] = {
2485 { CACHE_NONE, "none" },
2486 { CACHE_DISABLED, "disabled" },
2487 { CACHE_ENABLED, "enabled" }
2488};
2489
2490/**
2491 * ipr_show_write_caching - Show the write caching attribute
2492 * @dev: device struct
2493 * @buf: buffer
2494 *
2495 * Return value:
2496 * number of bytes printed to buffer
2497 **/
2498static ssize_t ipr_show_write_caching(struct device *dev,
2499 struct device_attribute *attr, char *buf)
2500{
2501 struct Scsi_Host *shost = class_to_shost(dev);
2502 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2503 unsigned long lock_flags = 0;
2504 int i, len = 0;
2505
2506 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2507 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2508 if (cache_state[i].state == ioa_cfg->cache_state) {
2509 len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
2510 break;
2511 }
2512 }
2513 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2514 return len;
2515}
2516
2517
2518/**
2519 * ipr_store_write_caching - Enable/disable adapter write cache
2520 * @dev: device struct
2521 * @buf: buffer
2522 * @count: buffer size
2523 *
2524 * This function will enable/disable adapter write cache.
2525 *
2526 * Return value:
2527 * count on success / other on failure
2528 **/
2529static ssize_t ipr_store_write_caching(struct device *dev,
2530 struct device_attribute *attr,
2531 const char *buf, size_t count)
2532{
2533 struct Scsi_Host *shost = class_to_shost(dev);
2534 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2535 unsigned long lock_flags = 0;
2536 enum ipr_cache_state new_state = CACHE_INVALID;
2537 int i;
2538
2539 if (!capable(CAP_SYS_ADMIN))
2540 return -EACCES;
2541 if (ioa_cfg->cache_state == CACHE_NONE)
2542 return -EINVAL;
2543
2544 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2545 if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
2546 new_state = cache_state[i].state;
2547 break;
2548 }
2549 }
2550
2551 if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
2552 return -EINVAL;
2553
2554 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2555 if (ioa_cfg->cache_state == new_state) {
2556 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2557 return count;
2558 }
2559
2560 ioa_cfg->cache_state = new_state;
2561 dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
2562 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
2563 if (!ioa_cfg->in_reset_reload)
2564 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2565 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2566 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2567
2568 return count;
2569}
2570
2571static struct device_attribute ipr_ioa_cache_attr = {
2572 .attr = {
2573 .name = "write_cache",
2574 .mode = S_IRUGO | S_IWUSR,
2575 },
2576 .show = ipr_show_write_caching,
2577 .store = ipr_store_write_caching
2578};
2579
2580/** 3160/**
2581 * ipr_show_fw_version - Show the firmware version 3161 * ipr_show_fw_version - Show the firmware version
2582 * @dev: class device struct 3162 * @dev: class device struct
@@ -2976,6 +3556,37 @@ static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
2976} 3556}
2977 3557
2978/** 3558/**
3559 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3560 * @ipr_cmd: ipr command struct
3561 * @sglist: scatter/gather list
3562 *
3563 * Builds a microcode download IOA data list (IOADL).
3564 *
3565 **/
3566static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3567 struct ipr_sglist *sglist)
3568{
3569 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3570 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3571 struct scatterlist *scatterlist = sglist->scatterlist;
3572 int i;
3573
3574 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3575 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3576 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3577
3578 ioarcb->ioadl_len =
3579 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3580 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3581 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3582 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3583 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3584 }
3585
3586 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3587}
3588
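The 64-bit descriptor this builder fills differs from the legacy one in carrying a separate be32 flags word, a be32 length, and a full be64 DMA address, with the LAST flag OR'd into the final element so the adapter knows where the list ends. A sketch of the layout as it reads from the hunk (field names mirror the driver; treat the exact packing and alignment as illustrative):

    #include <stdint.h>

    struct ioadl64_desc {
            uint32_t flags;         /* cpu_to_be32(IPR_IOADL_FLAGS_*)   */
            uint32_t data_len;      /* cpu_to_be32(sg_dma_len(sg))      */
            uint64_t address;       /* cpu_to_be64(sg_dma_address(sg))  */
    };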
3589/**
2979 * ipr_build_ucode_ioadl - Build a microcode download IOADL 3590 * ipr_build_ucode_ioadl - Build a microcode download IOADL
2980 * @ipr_cmd: ipr command struct 3591 * @ipr_cmd: ipr command struct
2981 * @sglist: scatter/gather list 3592 * @sglist: scatter/gather list
@@ -2987,14 +3598,15 @@ static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
2987 struct ipr_sglist *sglist) 3598 struct ipr_sglist *sglist)
2988{ 3599{
2989 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 3600 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
2990 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; 3601 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
2991 struct scatterlist *scatterlist = sglist->scatterlist; 3602 struct scatterlist *scatterlist = sglist->scatterlist;
2992 int i; 3603 int i;
2993 3604
2994 ipr_cmd->dma_use_sg = sglist->num_dma_sg; 3605 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
2995 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 3606 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
2996 ioarcb->write_data_transfer_length = cpu_to_be32(sglist->buffer_len); 3607 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
2997 ioarcb->write_ioadl_len = 3608
3609 ioarcb->ioadl_len =
2998 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); 3610 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
2999 3611
3000 for (i = 0; i < ipr_cmd->dma_use_sg; i++) { 3612 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
@@ -3146,7 +3758,6 @@ static struct device_attribute *ipr_ioa_attrs[] = {
3146 &ipr_ioa_state_attr, 3758 &ipr_ioa_state_attr,
3147 &ipr_ioa_reset_attr, 3759 &ipr_ioa_reset_attr,
3148 &ipr_update_fw_attr, 3760 &ipr_update_fw_attr,
3149 &ipr_ioa_cache_attr,
3150 NULL, 3761 NULL,
3151}; 3762};
3152 3763
@@ -3367,16 +3978,21 @@ static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
3367 * ipr_change_queue_depth - Change the device's queue depth 3978 * ipr_change_queue_depth - Change the device's queue depth
3368 * @sdev: scsi device struct 3979 * @sdev: scsi device struct
3369 * @qdepth: depth to set 3980 * @qdepth: depth to set
3981 * @reason: calling context
3370 * 3982 *
3371 * Return value: 3983 * Return value:
3372 * actual depth set 3984 * actual depth set
3373 **/ 3985 **/
3374static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth) 3986static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
3987 int reason)
3375{ 3988{
3376 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; 3989 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3377 struct ipr_resource_entry *res; 3990 struct ipr_resource_entry *res;
3378 unsigned long lock_flags = 0; 3991 unsigned long lock_flags = 0;
3379 3992
3993 if (reason != SCSI_QDEPTH_DEFAULT)
3994 return -EOPNOTSUPP;
3995
3380 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3996 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3381 res = (struct ipr_resource_entry *)sdev->hostdata; 3997 res = (struct ipr_resource_entry *)sdev->hostdata;
3382 3998
@@ -3445,7 +4061,7 @@ static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribu
3445 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4061 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3446 res = (struct ipr_resource_entry *)sdev->hostdata; 4062 res = (struct ipr_resource_entry *)sdev->hostdata;
3447 if (res) 4063 if (res)
3448 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle); 4064 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
3449 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4065 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3450 return len; 4066 return len;
3451} 4067}
@@ -3458,8 +4074,43 @@ static struct device_attribute ipr_adapter_handle_attr = {
3458 .show = ipr_show_adapter_handle 4074 .show = ipr_show_adapter_handle
3459}; 4075};
3460 4076
4077/**
4078 * ipr_show_resource_path - Show the resource path for this device.
4079 * @dev: device struct
4080 * @buf: buffer
4081 *
4082 * Return value:
4083 * number of bytes printed to buffer
4084 **/
4085static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4086{
4087 struct scsi_device *sdev = to_scsi_device(dev);
4088 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4089 struct ipr_resource_entry *res;
4090 unsigned long lock_flags = 0;
4091 ssize_t len = -ENXIO;
4092 char buffer[IPR_MAX_RES_PATH_LENGTH];
4093
4094 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4095 res = (struct ipr_resource_entry *)sdev->hostdata;
4096 if (res)
4097 len = snprintf(buf, PAGE_SIZE, "%s\n",
4098 ipr_format_resource_path(&res->res_path[0], &buffer[0]));
4099 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4100 return len;
4101}
4102
4103static struct device_attribute ipr_resource_path_attr = {
4104 .attr = {
4105 .name = "resource_path",
4106 .mode = S_IRUSR,
4107 },
4108 .show = ipr_show_resource_path
4109};
4110
3461static struct device_attribute *ipr_dev_attrs[] = { 4111static struct device_attribute *ipr_dev_attrs[] = {
3462 &ipr_adapter_handle_attr, 4112 &ipr_adapter_handle_attr,
4113 &ipr_resource_path_attr,
3463 NULL, 4114 NULL,
3464}; 4115};
3465 4116
@@ -3512,9 +4163,9 @@ static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
3512 struct ipr_resource_entry *res; 4163 struct ipr_resource_entry *res;
3513 4164
3514 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 4165 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3515 if ((res->cfgte.res_addr.bus == starget->channel) && 4166 if ((res->bus == starget->channel) &&
3516 (res->cfgte.res_addr.target == starget->id) && 4167 (res->target == starget->id) &&
3517 (res->cfgte.res_addr.lun == 0)) { 4168 (res->lun == 0)) {
3518 return res; 4169 return res;
3519 } 4170 }
3520 } 4171 }
@@ -3584,6 +4235,17 @@ static int ipr_target_alloc(struct scsi_target *starget)
3584static void ipr_target_destroy(struct scsi_target *starget) 4235static void ipr_target_destroy(struct scsi_target *starget)
3585{ 4236{
3586 struct ipr_sata_port *sata_port = starget->hostdata; 4237 struct ipr_sata_port *sata_port = starget->hostdata;
4238 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4239 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4240
4241 if (ioa_cfg->sis64) {
4242 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4243 clear_bit(starget->id, ioa_cfg->array_ids);
4244 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4245 clear_bit(starget->id, ioa_cfg->vset_ids);
4246 else if (starget->channel == 0)
4247 clear_bit(starget->id, ioa_cfg->target_ids);
4248 }
3587 4249
3588 if (sata_port) { 4250 if (sata_port) {
3589 starget->hostdata = NULL; 4251 starget->hostdata = NULL;
@@ -3605,9 +4267,9 @@ static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
3605 struct ipr_resource_entry *res; 4267 struct ipr_resource_entry *res;
3606 4268
3607 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 4269 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3608 if ((res->cfgte.res_addr.bus == sdev->channel) && 4270 if ((res->bus == sdev->channel) &&
3609 (res->cfgte.res_addr.target == sdev->id) && 4271 (res->target == sdev->id) &&
3610 (res->cfgte.res_addr.lun == sdev->lun)) 4272 (res->lun == sdev->lun))
3611 return res; 4273 return res;
3612 } 4274 }
3613 4275
@@ -3656,6 +4318,7 @@ static int ipr_slave_configure(struct scsi_device *sdev)
3656 struct ipr_resource_entry *res; 4318 struct ipr_resource_entry *res;
3657 struct ata_port *ap = NULL; 4319 struct ata_port *ap = NULL;
3658 unsigned long lock_flags = 0; 4320 unsigned long lock_flags = 0;
4321 char buffer[IPR_MAX_RES_PATH_LENGTH];
3659 4322
3660 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4323 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3661 res = sdev->hostdata; 4324 res = sdev->hostdata;
@@ -3669,7 +4332,7 @@ static int ipr_slave_configure(struct scsi_device *sdev)
3669 if (ipr_is_vset_device(res)) { 4332 if (ipr_is_vset_device(res)) {
3670 blk_queue_rq_timeout(sdev->request_queue, 4333 blk_queue_rq_timeout(sdev->request_queue,
3671 IPR_VSET_RW_TIMEOUT); 4334 IPR_VSET_RW_TIMEOUT);
3672 blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS); 4335 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
3673 } 4336 }
3674 if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)) 4337 if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
3675 sdev->allow_restart = 1; 4338 sdev->allow_restart = 1;
@@ -3682,6 +4345,9 @@ static int ipr_slave_configure(struct scsi_device *sdev)
3682 ata_sas_slave_configure(sdev, ap); 4345 ata_sas_slave_configure(sdev, ap);
3683 } else 4346 } else
3684 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun); 4347 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
4348 if (ioa_cfg->sis64)
4349 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4350 ipr_format_resource_path(&res->res_path[0], &buffer[0]));
3685 return 0; 4351 return 0;
3686 } 4352 }
3687 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4353 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
@@ -3823,14 +4489,19 @@ static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
3823 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); 4489 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3824 ioarcb = &ipr_cmd->ioarcb; 4490 ioarcb = &ipr_cmd->ioarcb;
3825 cmd_pkt = &ioarcb->cmd_pkt; 4491 cmd_pkt = &ioarcb->cmd_pkt;
3826 regs = &ioarcb->add_data.u.regs;
3827 4492
3828 ioarcb->res_handle = res->cfgte.res_handle; 4493 if (ipr_cmd->ioa_cfg->sis64) {
4494 regs = &ipr_cmd->i.ata_ioadl.regs;
4495 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
4496 } else
4497 regs = &ioarcb->u.add_data.u.regs;
4498
4499 ioarcb->res_handle = res->res_handle;
3829 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; 4500 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3830 cmd_pkt->cdb[0] = IPR_RESET_DEVICE; 4501 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3831 if (ipr_is_gata(res)) { 4502 if (ipr_is_gata(res)) {
3832 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET; 4503 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
3833 ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(regs->flags)); 4504 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
3834 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION; 4505 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
3835 } 4506 }
3836 4507
@@ -3875,19 +4546,7 @@ static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
3875 res = sata_port->res; 4546 res = sata_port->res;
3876 if (res) { 4547 if (res) {
3877 rc = ipr_device_reset(ioa_cfg, res); 4548 rc = ipr_device_reset(ioa_cfg, res);
3878 switch(res->cfgte.proto) { 4549 *classes = res->ata_class;
3879 case IPR_PROTO_SATA:
3880 case IPR_PROTO_SAS_STP:
3881 *classes = ATA_DEV_ATA;
3882 break;
3883 case IPR_PROTO_SATA_ATAPI:
3884 case IPR_PROTO_SAS_STP_ATAPI:
3885 *classes = ATA_DEV_ATAPI;
3886 break;
3887 default:
3888 *classes = ATA_DEV_UNKNOWN;
3889 break;
3890 };
3891 } 4550 }
3892 4551
3893 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4552 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
@@ -3932,7 +4591,7 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
3932 return FAILED; 4591 return FAILED;
3933 4592
3934 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) { 4593 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3935 if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) { 4594 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
3936 if (ipr_cmd->scsi_cmd) 4595 if (ipr_cmd->scsi_cmd)
3937 ipr_cmd->done = ipr_scsi_eh_done; 4596 ipr_cmd->done = ipr_scsi_eh_done;
3938 if (ipr_cmd->qc) 4597 if (ipr_cmd->qc)
@@ -3954,7 +4613,7 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
3954 spin_lock_irq(scsi_cmd->device->host->host_lock); 4613 spin_lock_irq(scsi_cmd->device->host->host_lock);
3955 4614
3956 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) { 4615 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3957 if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) { 4616 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
3958 rc = -EIO; 4617 rc = -EIO;
3959 break; 4618 break;
3960 } 4619 }
@@ -3993,13 +4652,13 @@ static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
3993 struct ipr_resource_entry *res; 4652 struct ipr_resource_entry *res;
3994 4653
3995 ENTER; 4654 ENTER;
3996 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 4655 if (!ioa_cfg->sis64)
3997 if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle, 4656 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3998 sizeof(res->cfgte.res_handle))) { 4657 if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
3999 scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus); 4658 scsi_report_bus_reset(ioa_cfg->host, res->bus);
4000 break; 4659 break;
4660 }
4001 } 4661 }
4002 }
4003 4662
4004 /* 4663 /*
4005 * If abort has not completed, indicate the reset has, else call the 4664 * If abort has not completed, indicate the reset has, else call the
@@ -4097,7 +4756,7 @@ static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
4097 return SUCCESS; 4756 return SUCCESS;
4098 4757
4099 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); 4758 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4100 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle; 4759 ipr_cmd->ioarcb.res_handle = res->res_handle;
4101 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; 4760 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4102 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; 4761 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4103 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS; 4762 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
@@ -4234,11 +4893,29 @@ static irqreturn_t ipr_isr(int irq, void *devp)
4234 return IRQ_NONE; 4893 return IRQ_NONE;
4235 } 4894 }
4236 4895
4237 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 4896 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
4238 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; 4897 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
4239 4898
4240 /* If an interrupt on the adapter did not occur, ignore it */ 4899 /* If an interrupt on the adapter did not occur, ignore it.
4900 * Or in the case of SIS 64, check for a stage change interrupt.
4901 */
4241 if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) { 4902 if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
4903 if (ioa_cfg->sis64) {
4904 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4905 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4906 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
4907
4908 /* clear stage change */
4909 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
4910 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4911 list_del(&ioa_cfg->reset_cmd->queue);
4912 del_timer(&ioa_cfg->reset_cmd->timer);
4913 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
4914 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4915 return IRQ_HANDLED;
4916 }
4917 }
4918
4242 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4919 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4243 return IRQ_NONE; 4920 return IRQ_NONE;
4244 } 4921 }
@@ -4281,8 +4958,8 @@ static irqreturn_t ipr_isr(int irq, void *devp)
4281 if (ipr_cmd != NULL) { 4958 if (ipr_cmd != NULL) {
4282 /* Clear the PCI interrupt */ 4959 /* Clear the PCI interrupt */
4283 do { 4960 do {
4284 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg); 4961 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
4285 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; 4962 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
4286 } while (int_reg & IPR_PCII_HRRQ_UPDATED && 4963 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
4287 num_hrrq++ < IPR_MAX_HRRQ_RETRIES); 4964 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
4288 4965
@@ -4304,6 +4981,53 @@ static irqreturn_t ipr_isr(int irq, void *devp)
4304} 4981}
4305 4982
4306/** 4983/**
4984 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
4985 * @ioa_cfg: ioa config struct
4986 * @ipr_cmd: ipr command struct
4987 *
4988 * Return value:
4989 * 0 on success / -1 on failure
4990 **/
4991static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
4992 struct ipr_cmnd *ipr_cmd)
4993{
4994 int i, nseg;
4995 struct scatterlist *sg;
4996 u32 length;
4997 u32 ioadl_flags = 0;
4998 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4999 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5000 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5001
5002 length = scsi_bufflen(scsi_cmd);
5003 if (!length)
5004 return 0;
5005
5006 nseg = scsi_dma_map(scsi_cmd);
5007 if (nseg < 0) {
5008 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5009 return -1;
5010 }
5011
5012 ipr_cmd->dma_use_sg = nseg;
5013
5014 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5015 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5016 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5017 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5018 ioadl_flags = IPR_IOADL_FLAGS_READ;
5019
5020 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5021 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5022 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5023 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5024 }
5025
5026 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5027 return 0;
5028}
5029
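Both the 32- and 64-bit builders repeat the same direction-to-flags mapping before filling descriptors. Factored out, it is just the following (a sketch using the driver's flag names; the helper itself is not in the patch):

    static u32 dir_to_ioadl_flags(struct scsi_cmnd *scsi_cmd)
    {
            if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE)
                    return IPR_IOADL_FLAGS_WRITE;
            if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
                    return IPR_IOADL_FLAGS_READ;
            return 0;
    }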
5030/**
4307 * ipr_build_ioadl - Build a scatter/gather list and map the buffer 5031 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
4308 * @ioa_cfg: ioa config struct 5032 * @ioa_cfg: ioa config struct
4309 * @ipr_cmd: ipr command struct 5033 * @ipr_cmd: ipr command struct
@@ -4320,7 +5044,7 @@ static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
4320 u32 ioadl_flags = 0; 5044 u32 ioadl_flags = 0;
4321 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 5045 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4322 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 5046 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4323 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; 5047 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
4324 5048
4325 length = scsi_bufflen(scsi_cmd); 5049 length = scsi_bufflen(scsi_cmd);
4326 if (!length) 5050 if (!length)
@@ -4337,8 +5061,8 @@ static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
4337 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) { 5061 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
4338 ioadl_flags = IPR_IOADL_FLAGS_WRITE; 5062 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
4339 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 5063 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4340 ioarcb->write_data_transfer_length = cpu_to_be32(length); 5064 ioarcb->data_transfer_length = cpu_to_be32(length);
4341 ioarcb->write_ioadl_len = 5065 ioarcb->ioadl_len =
4342 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); 5066 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4343 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) { 5067 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
4344 ioadl_flags = IPR_IOADL_FLAGS_READ; 5068 ioadl_flags = IPR_IOADL_FLAGS_READ;
@@ -4347,11 +5071,10 @@ static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
4347 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); 5071 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4348 } 5072 }
4349 5073
4350 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->add_data.u.ioadl)) { 5074 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
4351 ioadl = ioarcb->add_data.u.ioadl; 5075 ioadl = ioarcb->u.add_data.u.ioadl;
4352 ioarcb->write_ioadl_addr = 5076 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
4353 cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) + 5077 offsetof(struct ipr_ioarcb, u.add_data));
4354 offsetof(struct ipr_ioarcb, add_data));
4355 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; 5078 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
4356 } 5079 }
4357 5080
@@ -4441,18 +5164,24 @@ static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
4441{ 5164{
4442 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 5165 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4443 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; 5166 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4444 dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr); 5167 dma_addr_t dma_addr = ipr_cmd->dma_addr;
4445 5168
4446 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); 5169 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
4447 ioarcb->write_data_transfer_length = 0; 5170 ioarcb->data_transfer_length = 0;
4448 ioarcb->read_data_transfer_length = 0; 5171 ioarcb->read_data_transfer_length = 0;
4449 ioarcb->write_ioadl_len = 0; 5172 ioarcb->ioadl_len = 0;
4450 ioarcb->read_ioadl_len = 0; 5173 ioarcb->read_ioadl_len = 0;
4451 ioasa->ioasc = 0; 5174 ioasa->ioasc = 0;
4452 ioasa->residual_data_len = 0; 5175 ioasa->residual_data_len = 0;
4453 ioarcb->write_ioadl_addr = 5176
4454 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl)); 5177 if (ipr_cmd->ioa_cfg->sis64)
4455 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; 5178 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5179 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5180 else {
5181 ioarcb->write_ioadl_addr =
5182 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5183 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5184 }
4456} 5185}
4457 5186
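The ERP reinit now derives the IOADL bus address from the command's own DMA handle plus offsetof() into the embedded union, which is why the cached dma_addr replaces the byte-swapped ioarcb_host_pci_addr. The arithmetic itself is plain C; a runnable illustration with a fake base address and a stand-in struct:

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    struct fake_cmnd {
            char ioarcb[128];
            uint32_t ioadl[16];
    };

    int main(void)
    {
            uint64_t dma_base = 0x10000000ull;      /* pretend dma_addr_t */

            printf("ioadl lives at %#llx\n",
                   (unsigned long long)(dma_base +
                                        offsetof(struct fake_cmnd, ioadl)));
            return 0;
    }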
4458/** 5187/**
@@ -4484,15 +5213,8 @@ static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
4484 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; 5213 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
4485 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ); 5214 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
4486 5215
4487 ipr_cmd->ioadl[0].flags_and_data_len = 5216 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
4488 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE); 5217 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
4489 ipr_cmd->ioadl[0].address =
4490 cpu_to_be32(ipr_cmd->sense_buffer_dma);
4491
4492 ipr_cmd->ioarcb.read_ioadl_len =
4493 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4494 ipr_cmd->ioarcb.read_data_transfer_length =
4495 cpu_to_be32(SCSI_SENSE_BUFFERSIZE);
4496 5218
4497 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout, 5219 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
4498 IPR_REQUEST_SENSE_TIMEOUT * 2); 5220 IPR_REQUEST_SENSE_TIMEOUT * 2);
@@ -4888,9 +5610,9 @@ static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
4888 5610
4889 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len); 5611 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
4890 ipr_cmd->scsi_cmd = scsi_cmd; 5612 ipr_cmd->scsi_cmd = scsi_cmd;
4891 ioarcb->res_handle = res->cfgte.res_handle; 5613 ioarcb->res_handle = res->res_handle;
4892 ipr_cmd->done = ipr_scsi_done; 5614 ipr_cmd->done = ipr_scsi_done;
4893 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr)); 5615 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
4894 5616
4895 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) { 5617 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
4896 if (scsi_cmd->underflow == 0) 5618 if (scsi_cmd->underflow == 0)
@@ -4911,13 +5633,16 @@ static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
4911 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) 5633 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
4912 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 5634 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4913 5635
4914 if (likely(rc == 0)) 5636 if (likely(rc == 0)) {
4915 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd); 5637 if (ioa_cfg->sis64)
5638 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
5639 else
5640 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
5641 }
4916 5642
4917 if (likely(rc == 0)) { 5643 if (likely(rc == 0)) {
4918 mb(); 5644 mb();
4919 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr), 5645 ipr_send_command(ipr_cmd);
4920 ioa_cfg->regs.ioarrin_reg);
4921 } else { 5646 } else {
4922 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q); 5647 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4923 return SCSI_MLQUEUE_HOST_BUSY; 5648 return SCSI_MLQUEUE_HOST_BUSY;
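The mb() retained before ipr_send_command() is the classic publish-then-doorbell barrier: every store that builds the IOARCB and IOADL must be globally visible before the MMIO write that lets the adapter fetch them. In outline (register name taken from the pre-sis64 path above; sketch only, not a drop-in):

    build_ioarcb(ipr_cmd);                  /* fill command block and IOADL   */
    mb();                                   /* order fills before the doorbell */
    writel(ioarcb_dma, ioa_cfg->regs.ioarrin_reg);  /* adapter may now DMA it */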
@@ -5030,20 +5755,9 @@ static void ipr_ata_phy_reset(struct ata_port *ap)
5030 goto out_unlock; 5755 goto out_unlock;
5031 } 5756 }
5032 5757
5033 switch(res->cfgte.proto) { 5758 ap->link.device[0].class = res->ata_class;
5034 case IPR_PROTO_SATA: 5759 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
5035 case IPR_PROTO_SAS_STP:
5036 ap->link.device[0].class = ATA_DEV_ATA;
5037 break;
5038 case IPR_PROTO_SATA_ATAPI:
5039 case IPR_PROTO_SAS_STP_ATAPI:
5040 ap->link.device[0].class = ATA_DEV_ATAPI;
5041 break;
5042 default:
5043 ap->link.device[0].class = ATA_DEV_UNKNOWN;
5044 ata_port_disable(ap); 5760 ata_port_disable(ap);
5045 break;
5046 };
5047 5761
5048out_unlock: 5762out_unlock:
5049 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 5763 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
@@ -5129,8 +5843,7 @@ static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
5129 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res); 5843 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5130 5844
5131 if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET) 5845 if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
5132 scsi_report_device_reset(ioa_cfg->host, res->cfgte.res_addr.bus, 5846 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
5133 res->cfgte.res_addr.target);
5134 5847
5135 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR) 5848 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5136 qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status); 5849 qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status);
@@ -5141,6 +5854,52 @@ static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
5141} 5854}
5142 5855
5143/** 5856/**
5857 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
5858 * @ipr_cmd: ipr command struct
5859 * @qc: ATA queued command
5860 *
5861 **/
5862static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
5863 struct ata_queued_cmd *qc)
5864{
5865 u32 ioadl_flags = 0;
5866 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5867 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5868 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
5869 int len = qc->nbytes;
5870 struct scatterlist *sg;
5871 unsigned int si;
5872 dma_addr_t dma_addr = ipr_cmd->dma_addr;
5873
5874 if (len == 0)
5875 return;
5876
5877 if (qc->dma_dir == DMA_TO_DEVICE) {
5878 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5879 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5880 } else if (qc->dma_dir == DMA_FROM_DEVICE)
5881 ioadl_flags = IPR_IOADL_FLAGS_READ;
5882
5883 ioarcb->data_transfer_length = cpu_to_be32(len);
5884 ioarcb->ioadl_len =
5885 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5886 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5887 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl));
5888
5889 for_each_sg(qc->sg, sg, qc->n_elem, si) {
5890 ioadl64->flags = cpu_to_be32(ioadl_flags);
5891 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
5892 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
5893
5894 last_ioadl64 = ioadl64;
5895 ioadl64++;
5896 }
5897
5898 if (likely(last_ioadl64))
5899 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5900}
5901
5902/**
5144 * ipr_build_ata_ioadl - Build an ATA scatter/gather list 5903 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
5145 * @ipr_cmd: ipr command struct 5904 * @ipr_cmd: ipr command struct
5146 * @qc: ATA queued command 5905 * @qc: ATA queued command
@@ -5151,7 +5910,7 @@ static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
5151{ 5910{
5152 u32 ioadl_flags = 0; 5911 u32 ioadl_flags = 0;
5153 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 5912 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5154 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; 5913 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5155 struct ipr_ioadl_desc *last_ioadl = NULL; 5914 struct ipr_ioadl_desc *last_ioadl = NULL;
5156 int len = qc->nbytes; 5915 int len = qc->nbytes;
5157 struct scatterlist *sg; 5916 struct scatterlist *sg;
@@ -5163,8 +5922,8 @@ static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
5163 if (qc->dma_dir == DMA_TO_DEVICE) { 5922 if (qc->dma_dir == DMA_TO_DEVICE) {
5164 ioadl_flags = IPR_IOADL_FLAGS_WRITE; 5923 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5165 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 5924 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5166 ioarcb->write_data_transfer_length = cpu_to_be32(len); 5925 ioarcb->data_transfer_length = cpu_to_be32(len);
5167 ioarcb->write_ioadl_len = 5926 ioarcb->ioadl_len =
5168 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); 5927 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5169 } else if (qc->dma_dir == DMA_FROM_DEVICE) { 5928 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
5170 ioadl_flags = IPR_IOADL_FLAGS_READ; 5929 ioadl_flags = IPR_IOADL_FLAGS_READ;
@@ -5207,25 +5966,34 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
5207 5966
5208 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); 5967 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5209 ioarcb = &ipr_cmd->ioarcb; 5968 ioarcb = &ipr_cmd->ioarcb;
5210 regs = &ioarcb->add_data.u.regs;
5211 5969
5212 memset(&ioarcb->add_data, 0, sizeof(ioarcb->add_data)); 5970 if (ioa_cfg->sis64) {
5213 ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(ioarcb->add_data.u.regs)); 5971 regs = &ipr_cmd->i.ata_ioadl.regs;
5972 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5973 } else
5974 regs = &ioarcb->u.add_data.u.regs;
5975
5976 memset(regs, 0, sizeof(*regs));
5977 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
5214 5978
5215 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q); 5979 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5216 ipr_cmd->qc = qc; 5980 ipr_cmd->qc = qc;
5217 ipr_cmd->done = ipr_sata_done; 5981 ipr_cmd->done = ipr_sata_done;
5218 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle; 5982 ipr_cmd->ioarcb.res_handle = res->res_handle;
5219 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU; 5983 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
5220 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC; 5984 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5221 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; 5985 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5222 ipr_cmd->dma_use_sg = qc->n_elem; 5986 ipr_cmd->dma_use_sg = qc->n_elem;
5223 5987
5224 ipr_build_ata_ioadl(ipr_cmd, qc); 5988 if (ioa_cfg->sis64)
5989 ipr_build_ata_ioadl64(ipr_cmd, qc);
5990 else
5991 ipr_build_ata_ioadl(ipr_cmd, qc);
5992
5225 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION; 5993 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5226 ipr_copy_sata_tf(regs, &qc->tf); 5994 ipr_copy_sata_tf(regs, &qc->tf);
5227 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN); 5995 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
5228 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr)); 5996 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
5229 5997
5230 switch (qc->tf.protocol) { 5998 switch (qc->tf.protocol) {
5231 case ATA_PROT_NODATA: 5999 case ATA_PROT_NODATA:
@@ -5252,8 +6020,9 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
5252 } 6020 }
5253 6021
5254 mb(); 6022 mb();
5255 writel(be32_to_cpu(ioarcb->ioarcb_host_pci_addr), 6023
5256 ioa_cfg->regs.ioarrin_reg); 6024 ipr_send_command(ipr_cmd);
6025
5257 return 0; 6026 return 0;
5258} 6027}
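
ipr_send_command replaces the raw writel of the IOARCB address here but is defined outside this hunk. Given the new 64-bit ioarcb_host_pci_addr64 field and the 32-bit register aliases added elsewhere in this diff, it presumably issues a writeq of the 64-bit address on SIS-64 hardware and keeps the old 32-bit writel otherwise. A sketch under that assumption, not the committed implementation:

    /* Sketch only -- assumes a writeq-capable IOARRIN register on SIS-64. */
    static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
    {
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        if (ioa_cfg->sis64)
            writeq(be64_to_cpu(ipr_cmd->ioarcb.a.ioarcb_host_pci_addr64),
                   ioa_cfg->regs.ioarrin_reg);
        else
            writel(be32_to_cpu(ipr_cmd->ioarcb.a.ioarcb_host_pci_addr),
                   ioa_cfg->regs.ioarrin_reg);
    }
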
5259 6028
@@ -5454,7 +6223,7 @@ static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
5454 * ipr_set_supported_devs - Send Set Supported Devices for a device 6223 * ipr_set_supported_devs - Send Set Supported Devices for a device
5455 * @ipr_cmd: ipr command struct 6224 * @ipr_cmd: ipr command struct
5456 * 6225 *
5457 * This function send a Set Supported Devices to the adapter 6226 * This function sends a Set Supported Devices to the adapter
5458 * 6227 *
5459 * Return value: 6228 * Return value:
5460 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 6229 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
@@ -5463,7 +6232,6 @@ static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
5463{ 6232{
5464 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6233 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5465 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev; 6234 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
5466 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5467 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 6235 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5468 struct ipr_resource_entry *res = ipr_cmd->u.res; 6236 struct ipr_resource_entry *res = ipr_cmd->u.res;
5469 6237
@@ -5474,28 +6242,28 @@ static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
5474 continue; 6242 continue;
5475 6243
5476 ipr_cmd->u.res = res; 6244 ipr_cmd->u.res = res;
5477 ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids); 6245 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
5478 6246
5479 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 6247 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5480 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 6248 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5481 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 6249 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5482 6250
5483 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES; 6251 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
6252 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
5484 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff; 6253 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
5485 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff; 6254 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
5486 6255
5487 ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | 6256 ipr_init_ioadl(ipr_cmd,
5488 sizeof(struct ipr_supported_device)); 6257 ioa_cfg->vpd_cbs_dma +
5489 ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma + 6258 offsetof(struct ipr_misc_cbs, supp_dev),
5490 offsetof(struct ipr_misc_cbs, supp_dev)); 6259 sizeof(struct ipr_supported_device),
5491 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); 6260 IPR_IOADL_FLAGS_WRITE_LAST);
5492 ioarcb->write_data_transfer_length =
5493 cpu_to_be32(sizeof(struct ipr_supported_device));
5494 6261
5495 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, 6262 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
5496 IPR_SET_SUP_DEVICE_TIMEOUT); 6263 IPR_SET_SUP_DEVICE_TIMEOUT);
5497 6264
5498 ipr_cmd->job_step = ipr_set_supported_devs; 6265 if (!ioa_cfg->sis64)
6266 ipr_cmd->job_step = ipr_set_supported_devs;
5499 return IPR_RC_JOB_RETURN; 6267 return IPR_RC_JOB_RETURN;
5500 } 6268 }
5501 6269
@@ -5503,36 +6271,6 @@ static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
5503} 6271}
5504 6272
5505/** 6273/**
5506 * ipr_setup_write_cache - Disable write cache if needed
5507 * @ipr_cmd: ipr command struct
5508 *
5509 * This function sets up adapters write cache to desired setting
5510 *
5511 * Return value:
5512 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5513 **/
5514static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
5515{
5516 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5517
5518 ipr_cmd->job_step = ipr_set_supported_devs;
5519 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
5520 struct ipr_resource_entry, queue);
5521
5522 if (ioa_cfg->cache_state != CACHE_DISABLED)
5523 return IPR_RC_JOB_CONTINUE;
5524
5525 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5526 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5527 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
5528 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
5529
5530 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5531
5532 return IPR_RC_JOB_RETURN;
5533}
5534
5535/**
5536 * ipr_get_mode_page - Locate specified mode page 6274 * ipr_get_mode_page - Locate specified mode page
5537 * @mode_pages: mode page buffer 6275 * @mode_pages: mode page buffer
5538 * @page_code: page code to find 6276 * @page_code: page code to find
@@ -5690,10 +6428,9 @@ static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
5690 * none 6428 * none
5691 **/ 6429 **/
5692static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd, 6430static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
5693 __be32 res_handle, u8 parm, u32 dma_addr, 6431 __be32 res_handle, u8 parm,
5694 u8 xfer_len) 6432 dma_addr_t dma_addr, u8 xfer_len)
5695{ 6433{
5696 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5697 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 6434 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5698 6435
5699 ioarcb->res_handle = res_handle; 6436 ioarcb->res_handle = res_handle;
@@ -5703,11 +6440,7 @@ static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
5703 ioarcb->cmd_pkt.cdb[1] = parm; 6440 ioarcb->cmd_pkt.cdb[1] = parm;
5704 ioarcb->cmd_pkt.cdb[4] = xfer_len; 6441 ioarcb->cmd_pkt.cdb[4] = xfer_len;
5705 6442
5706 ioadl->flags_and_data_len = 6443 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
5707 cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
5708 ioadl->address = cpu_to_be32(dma_addr);
5709 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5710 ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
5711} 6444}
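
ipr_init_ioadl, which this hunk and several below substitute for the open-coded descriptor setup, is defined outside the excerpt. Presumably it centralizes the single-buffer case for both generations: one IOADL (or IOADL64) entry plus the matching IOARCB length fields, chosen by ioa_cfg->sis64 and by read versus write. A sketch under that assumption; the committed helper may differ in detail:

    /* Sketch only -- mirrors the removed open-coded setup. */
    static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
                               u32 len, int flags)
    {
        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

        ipr_cmd->dma_use_sg = 1;

        if (ipr_cmd->ioa_cfg->sis64) {
            struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

            ioadl64->flags = cpu_to_be32(flags);
            ioadl64->data_len = cpu_to_be32(len);
            ioadl64->address = cpu_to_be64(dma_addr);
            ioarcb->ioadl_len = cpu_to_be32(sizeof(*ioadl64));
            ioarcb->data_transfer_length = cpu_to_be32(len);
        } else {
            struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;

            ioadl->flags_and_data_len = cpu_to_be32(flags | len);
            ioadl->address = cpu_to_be32(dma_addr);

            if (flags == IPR_IOADL_FLAGS_READ_LAST) {
                ioarcb->read_ioadl_len = cpu_to_be32(sizeof(*ioadl));
                ioarcb->read_data_transfer_length = cpu_to_be32(len);
            } else {
                ioarcb->ioadl_len = cpu_to_be32(sizeof(*ioadl));
                ioarcb->data_transfer_length = cpu_to_be32(len);
            }
        }
    }
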
5712 6445
5713/** 6446/**
@@ -5737,7 +6470,9 @@ static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
5737 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), 6470 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
5738 length); 6471 length);
5739 6472
5740 ipr_cmd->job_step = ipr_setup_write_cache; 6473 ipr_cmd->job_step = ipr_set_supported_devs;
6474 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6475 struct ipr_resource_entry, queue);
5741 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); 6476 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5742 6477
5743 LEAVE; 6478 LEAVE;
@@ -5757,9 +6492,8 @@ static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
5757 **/ 6492 **/
5758static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd, 6493static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
5759 __be32 res_handle, 6494 __be32 res_handle,
5760 u8 parm, u32 dma_addr, u8 xfer_len) 6495 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
5761{ 6496{
5762 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5763 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 6497 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5764 6498
5765 ioarcb->res_handle = res_handle; 6499 ioarcb->res_handle = res_handle;
@@ -5768,11 +6502,7 @@ static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
5768 ioarcb->cmd_pkt.cdb[4] = xfer_len; 6502 ioarcb->cmd_pkt.cdb[4] = xfer_len;
5769 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; 6503 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5770 6504
5771 ioadl->flags_and_data_len = 6505 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
5772 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
5773 ioadl->address = cpu_to_be32(dma_addr);
5774 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5775 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
5776} 6506}
5777 6507
5778/** 6508/**
@@ -5810,10 +6540,13 @@ static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
5810 **/ 6540 **/
5811static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd) 6541static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
5812{ 6542{
6543 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5813 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); 6544 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5814 6545
5815 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) { 6546 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
5816 ipr_cmd->job_step = ipr_setup_write_cache; 6547 ipr_cmd->job_step = ipr_set_supported_devs;
6548 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6549 struct ipr_resource_entry, queue);
5817 return IPR_RC_JOB_CONTINUE; 6550 return IPR_RC_JOB_CONTINUE;
5818 } 6551 }
5819 6552
@@ -5953,24 +6686,36 @@ static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
5953{ 6686{
5954 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6687 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5955 struct ipr_resource_entry *res, *temp; 6688 struct ipr_resource_entry *res, *temp;
5956 struct ipr_config_table_entry *cfgte; 6689 struct ipr_config_table_entry_wrapper cfgtew;
5957 int found, i; 6690 int entries, found, flag, i;
5958 LIST_HEAD(old_res); 6691 LIST_HEAD(old_res);
5959 6692
5960 ENTER; 6693 ENTER;
5961 if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ) 6694 if (ioa_cfg->sis64)
6695 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
6696 else
6697 flag = ioa_cfg->u.cfg_table->hdr.flags;
6698
6699 if (flag & IPR_UCODE_DOWNLOAD_REQ)
5962 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n"); 6700 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
5963 6701
5964 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue) 6702 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
5965 list_move_tail(&res->queue, &old_res); 6703 list_move_tail(&res->queue, &old_res);
5966 6704
5967 for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) { 6705 if (ioa_cfg->sis64)
5968 cfgte = &ioa_cfg->cfg_table->dev[i]; 6706 entries = ioa_cfg->u.cfg_table64->hdr64.num_entries;
6707 else
6708 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
6709
6710 for (i = 0; i < entries; i++) {
6711 if (ioa_cfg->sis64)
6712 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
6713 else
6714 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
5969 found = 0; 6715 found = 0;
5970 6716
5971 list_for_each_entry_safe(res, temp, &old_res, queue) { 6717 list_for_each_entry_safe(res, temp, &old_res, queue) {
5972 if (!memcmp(&res->cfgte.res_addr, 6718 if (ipr_is_same_device(res, &cfgtew)) {
5973 &cfgte->res_addr, sizeof(cfgte->res_addr))) {
5974 list_move_tail(&res->queue, &ioa_cfg->used_res_q); 6719 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
5975 found = 1; 6720 found = 1;
5976 break; 6721 break;
@@ -5987,24 +6732,27 @@ static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
5987 res = list_entry(ioa_cfg->free_res_q.next, 6732 res = list_entry(ioa_cfg->free_res_q.next,
5988 struct ipr_resource_entry, queue); 6733 struct ipr_resource_entry, queue);
5989 list_move_tail(&res->queue, &ioa_cfg->used_res_q); 6734 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
5990 ipr_init_res_entry(res); 6735 ipr_init_res_entry(res, &cfgtew);
5991 res->add_to_ml = 1; 6736 res->add_to_ml = 1;
5992 } 6737 }
5993 6738
5994 if (found) 6739 if (found)
5995 memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry)); 6740 ipr_update_res_entry(res, &cfgtew);
5996 } 6741 }
5997 6742
5998 list_for_each_entry_safe(res, temp, &old_res, queue) { 6743 list_for_each_entry_safe(res, temp, &old_res, queue) {
5999 if (res->sdev) { 6744 if (res->sdev) {
6000 res->del_from_ml = 1; 6745 res->del_from_ml = 1;
6001 res->cfgte.res_handle = IPR_INVALID_RES_HANDLE; 6746 res->res_handle = IPR_INVALID_RES_HANDLE;
6002 list_move_tail(&res->queue, &ioa_cfg->used_res_q); 6747 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6003 } else {
6004 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
6005 } 6748 }
6006 } 6749 }
6007 6750
6751 list_for_each_entry_safe(res, temp, &old_res, queue) {
6752 ipr_clear_res_target(res);
6753 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
6754 }
6755
6008 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid) 6756 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
6009 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24; 6757 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
6010 else 6758 else
@@ -6028,7 +6776,6 @@ static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
6028{ 6776{
6029 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6777 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6030 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 6778 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6031 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
6032 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; 6779 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
6033 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap; 6780 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
6034 6781
@@ -6042,16 +6789,11 @@ static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
6042 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 6789 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6043 6790
6044 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG; 6791 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
6045 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff; 6792 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
6046 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff; 6793 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
6047 6794
6048 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); 6795 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
6049 ioarcb->read_data_transfer_length = 6796 IPR_IOADL_FLAGS_READ_LAST);
6050 cpu_to_be32(sizeof(struct ipr_config_table));
6051
6052 ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
6053 ioadl->flags_and_data_len =
6054 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));
6055 6797
6056 ipr_cmd->job_step = ipr_init_res_table; 6798 ipr_cmd->job_step = ipr_init_res_table;
6057 6799
@@ -6071,10 +6813,9 @@ static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
6071 * none 6813 * none
6072 **/ 6814 **/
6073static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page, 6815static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
6074 u32 dma_addr, u8 xfer_len) 6816 dma_addr_t dma_addr, u8 xfer_len)
6075{ 6817{
6076 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 6818 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6077 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
6078 6819
6079 ENTER; 6820 ENTER;
6080 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; 6821 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
@@ -6085,12 +6826,7 @@ static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
6085 ioarcb->cmd_pkt.cdb[2] = page; 6826 ioarcb->cmd_pkt.cdb[2] = page;
6086 ioarcb->cmd_pkt.cdb[4] = xfer_len; 6827 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6087 6828
6088 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); 6829 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
6089 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
6090
6091 ioadl->address = cpu_to_be32(dma_addr);
6092 ioadl->flags_and_data_len =
6093 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
6094 6830
6095 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); 6831 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6096 LEAVE; 6832 LEAVE;
@@ -6161,13 +6897,9 @@ static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
6161static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd) 6897static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
6162{ 6898{
6163 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6899 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6164 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
6165 6900
6166 ENTER; 6901 ENTER;
6167 6902
6168 if (!ipr_inquiry_page_supported(page0, 1))
6169 ioa_cfg->cache_state = CACHE_NONE;
6170
6171 ipr_cmd->job_step = ipr_ioafp_cap_inquiry; 6903 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
6172 6904
6173 ipr_ioafp_inquiry(ipr_cmd, 1, 3, 6905 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
@@ -6235,7 +6967,7 @@ static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
6235} 6967}
6236 6968
6237/** 6969/**
6238 * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ. 6970 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
6239 * @ipr_cmd: ipr command struct 6971 * @ipr_cmd: ipr command struct
6240 * 6972 *
6241 * This function sends an Identify Host Request Response Queue 6973 * This function sends an Identify Host Request Response Queue
@@ -6244,7 +6976,7 @@ static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
6244 * Return value: 6976 * Return value:
6245 * IPR_RC_JOB_RETURN 6977 * IPR_RC_JOB_RETURN
6246 **/ 6978 **/
6247static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd) 6979static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
6248{ 6980{
6249 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6981 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6250 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 6982 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
@@ -6256,19 +6988,32 @@ static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd)
6256 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 6988 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6257 6989
6258 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 6990 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6991 if (ioa_cfg->sis64)
6992 ioarcb->cmd_pkt.cdb[1] = 0x1;
6259 ioarcb->cmd_pkt.cdb[2] = 6993 ioarcb->cmd_pkt.cdb[2] =
6260 ((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff; 6994 ((u64) ioa_cfg->host_rrq_dma >> 24) & 0xff;
6261 ioarcb->cmd_pkt.cdb[3] = 6995 ioarcb->cmd_pkt.cdb[3] =
6262 ((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff; 6996 ((u64) ioa_cfg->host_rrq_dma >> 16) & 0xff;
6263 ioarcb->cmd_pkt.cdb[4] = 6997 ioarcb->cmd_pkt.cdb[4] =
6264 ((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff; 6998 ((u64) ioa_cfg->host_rrq_dma >> 8) & 0xff;
6265 ioarcb->cmd_pkt.cdb[5] = 6999 ioarcb->cmd_pkt.cdb[5] =
6266 ((u32) ioa_cfg->host_rrq_dma) & 0xff; 7000 ((u64) ioa_cfg->host_rrq_dma) & 0xff;
6267 ioarcb->cmd_pkt.cdb[7] = 7001 ioarcb->cmd_pkt.cdb[7] =
6268 ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff; 7002 ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
6269 ioarcb->cmd_pkt.cdb[8] = 7003 ioarcb->cmd_pkt.cdb[8] =
6270 (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff; 7004 (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
6271 7005
7006 if (ioa_cfg->sis64) {
7007 ioarcb->cmd_pkt.cdb[10] =
7008 ((u64) ioa_cfg->host_rrq_dma >> 56) & 0xff;
7009 ioarcb->cmd_pkt.cdb[11] =
7010 ((u64) ioa_cfg->host_rrq_dma >> 48) & 0xff;
7011 ioarcb->cmd_pkt.cdb[12] =
7012 ((u64) ioa_cfg->host_rrq_dma >> 40) & 0xff;
7013 ioarcb->cmd_pkt.cdb[13] =
7014 ((u64) ioa_cfg->host_rrq_dma >> 32) & 0xff;
7015 }
7016
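
The CDB carries the host RRQ bus address as individual big-endian bytes: bits 31..0 in bytes 2–5 as before, and on SIS-64 the upper word in bytes 10–13, most-significant byte first within each group. The same packing in isolation:

    #include <stdint.h>

    /* Pack a 64-bit DMA address into CDB bytes as the hunk above does. */
    static void pack_rrq_addr(uint8_t *cdb, uint64_t addr)
    {
        cdb[2]  = (addr >> 24) & 0xff;   /* bits 31..0, MSB first */
        cdb[3]  = (addr >> 16) & 0xff;
        cdb[4]  = (addr >> 8) & 0xff;
        cdb[5]  = addr & 0xff;

        cdb[10] = (addr >> 56) & 0xff;   /* bits 63..32, SIS-64 only */
        cdb[11] = (addr >> 48) & 0xff;
        cdb[12] = (addr >> 40) & 0xff;
        cdb[13] = (addr >> 32) & 0xff;
    }
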
6272 ipr_cmd->job_step = ipr_ioafp_std_inquiry; 7017 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
6273 7018
6274 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); 7019 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
@@ -6349,7 +7094,58 @@ static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
6349 ioa_cfg->toggle_bit = 1; 7094 ioa_cfg->toggle_bit = 1;
6350 7095
6351 /* Zero out config table */ 7096 /* Zero out config table */
6352 memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table)); 7097 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
7098}
7099
7100/**
7101 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7102 * @ipr_cmd: ipr command struct
7103 *
7104 * Return value:
7105 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7106 **/
7107static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7108{
7109 unsigned long stage, stage_time;
7110 u32 feedback;
7111 volatile u32 int_reg;
7112 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7113 u64 maskval = 0;
7114
7115 feedback = readl(ioa_cfg->regs.init_feedback_reg);
7116 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7117 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7118
7119 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7120
7121 /* sanity check the stage_time value */
7122 if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
7123 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7124 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7125 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7126
7127 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7128 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7129 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7130 stage_time = ioa_cfg->transop_timeout;
7131 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7132 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
7133 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7134 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7135 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7136 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7137 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7138 return IPR_RC_JOB_CONTINUE;
7139 }
7140
7141 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7142 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7143 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7144 ipr_cmd->done = ipr_reset_ioa_job;
7145 add_timer(&ipr_cmd->timer);
7146 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
7147
7148 return IPR_RC_JOB_RETURN;
6353} 7149}
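
Two small idioms in ipr_reset_next_stage are worth isolating: the init feedback register packs the IPL stage into its top byte and a stage time in seconds into its low half-word, and the TRANSOP branch sets a sis64-only mask bit and a classic 32-bit bit with a single writeq by shifting the former into the upper word. In standalone form:

    #include <stdint.h>

    #define STAGE_MASK      0xff000000u   /* IPR_IPL_INIT_STAGE_MASK */
    #define STAGE_TIME_MASK 0x0000ffffu   /* IPR_IPL_INIT_STAGE_TIME_MASK */

    static uint32_t ipl_stage(uint32_t feedback)
    {
        return feedback & STAGE_MASK;
    }

    static uint32_t ipl_stage_secs(uint32_t feedback)
    {
        return feedback & STAGE_TIME_MASK;
    }

    /* One 64-bit store covers a sis64-only bit (upper word) and a classic
     * 32-bit bit (lower word), mirroring the maskval computation above. */
    static uint64_t combine_mask(uint32_t upper_bit, uint32_t lower_bit)
    {
        return ((uint64_t)upper_bit << 32) | lower_bit;
    }
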
6354 7150
6355/** 7151/**
@@ -6368,7 +7164,7 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
6368 volatile u32 int_reg; 7164 volatile u32 int_reg;
6369 7165
6370 ENTER; 7166 ENTER;
6371 ipr_cmd->job_step = ipr_ioafp_indentify_hrrq; 7167 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
6372 ipr_init_ioa_mem(ioa_cfg); 7168 ipr_init_ioa_mem(ioa_cfg);
6373 7169
6374 ioa_cfg->allow_interrupts = 1; 7170 ioa_cfg->allow_interrupts = 1;
@@ -6376,19 +7172,27 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
6376 7172
6377 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { 7173 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
6378 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED), 7174 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
6379 ioa_cfg->regs.clr_interrupt_mask_reg); 7175 ioa_cfg->regs.clr_interrupt_mask_reg32);
6380 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 7176 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
6381 return IPR_RC_JOB_CONTINUE; 7177 return IPR_RC_JOB_CONTINUE;
6382 } 7178 }
6383 7179
6384 /* Enable destructive diagnostics on IOA */ 7180 /* Enable destructive diagnostics on IOA */
6385 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg); 7181 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7182
7183 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
7184 if (ioa_cfg->sis64)
7185 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_mask_reg);
6386 7186
6387 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
6388 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 7187 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
6389 7188
6390 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n"); 7189 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
6391 7190
7191 if (ioa_cfg->sis64) {
7192 ipr_cmd->job_step = ipr_reset_next_stage;
7193 return IPR_RC_JOB_CONTINUE;
7194 }
7195
6392 ipr_cmd->timer.data = (unsigned long) ipr_cmd; 7196 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
6393 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ); 7197 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
6394 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout; 7198 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
@@ -6458,7 +7262,7 @@ static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
6458 7262
6459 mailbox = readl(ioa_cfg->ioa_mailbox); 7263 mailbox = readl(ioa_cfg->ioa_mailbox);
6460 7264
6461 if (!ipr_sdt_is_fmt2(mailbox)) { 7265 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
6462 ipr_unit_check_no_data(ioa_cfg); 7266 ipr_unit_check_no_data(ioa_cfg);
6463 return; 7267 return;
6464 } 7268 }
@@ -6467,15 +7271,20 @@ static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
6467 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt, 7271 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
6468 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32)); 7272 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
6469 7273
6470 if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) || 7274 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
6471 !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) { 7275 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
7276 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
6472 ipr_unit_check_no_data(ioa_cfg); 7277 ipr_unit_check_no_data(ioa_cfg);
6473 return; 7278 return;
6474 } 7279 }
6475 7280
6476 /* Find length of the first sdt entry (UC buffer) */ 7281 /* Find length of the first sdt entry (UC buffer) */
6477 length = (be32_to_cpu(sdt.entry[0].end_offset) - 7282 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
6478 be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK; 7283 length = be32_to_cpu(sdt.entry[0].end_token);
7284 else
7285 length = (be32_to_cpu(sdt.entry[0].end_token) -
7286 be32_to_cpu(sdt.entry[0].start_token)) &
7287 IPR_FMT2_MBX_ADDR_MASK;
6479 7288
6480 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next, 7289 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
6481 struct ipr_hostrcb, queue); 7290 struct ipr_hostrcb, queue);
@@ -6483,13 +7292,13 @@ static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
6483 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam)); 7292 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
6484 7293
6485 rc = ipr_get_ldump_data_section(ioa_cfg, 7294 rc = ipr_get_ldump_data_section(ioa_cfg,
6486 be32_to_cpu(sdt.entry[0].bar_str_offset), 7295 be32_to_cpu(sdt.entry[0].start_token),
6487 (__be32 *)&hostrcb->hcam, 7296 (__be32 *)&hostrcb->hcam,
6488 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32)); 7297 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
6489 7298
6490 if (!rc) { 7299 if (!rc) {
6491 ipr_handle_log_data(ioa_cfg, hostrcb); 7300 ipr_handle_log_data(ioa_cfg, hostrcb);
6492 ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc); 7301 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
6493 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED && 7302 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
6494 ioa_cfg->sdt_state == GET_DUMP) 7303 ioa_cfg->sdt_state == GET_DUMP)
6495 ioa_cfg->sdt_state = WAIT_FOR_DUMP; 7304 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
@@ -6516,6 +7325,7 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
6516 int rc; 7325 int rc;
6517 7326
6518 ENTER; 7327 ENTER;
7328 ioa_cfg->pdev->state_saved = true;
6519 rc = pci_restore_state(ioa_cfg->pdev); 7329 rc = pci_restore_state(ioa_cfg->pdev);
6520 7330
6521 if (rc != PCIBIOS_SUCCESSFUL) { 7331 if (rc != PCIBIOS_SUCCESSFUL) {
@@ -6716,7 +7526,7 @@ static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
6716 7526
6717 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) { 7527 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
6718 ipr_mask_and_clear_interrupts(ioa_cfg, ~0); 7528 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
6719 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg); 7529 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
6720 ipr_cmd->job_step = ipr_reset_wait_to_start_bist; 7530 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
6721 } else { 7531 } else {
6722 ipr_cmd->job_step = ioa_cfg->reset; 7532 ipr_cmd->job_step = ioa_cfg->reset;
@@ -6779,7 +7589,10 @@ static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
6779 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8; 7589 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
6780 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff; 7590 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
6781 7591
6782 ipr_build_ucode_ioadl(ipr_cmd, sglist); 7592 if (ioa_cfg->sis64)
7593 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
7594 else
7595 ipr_build_ucode_ioadl(ipr_cmd, sglist);
6783 ipr_cmd->job_step = ipr_reset_ucode_download_done; 7596 ipr_cmd->job_step = ipr_reset_ucode_download_done;
6784 7597
6785 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, 7598 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
@@ -7148,8 +7961,8 @@ static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
7148 ipr_free_cmd_blks(ioa_cfg); 7961 ipr_free_cmd_blks(ioa_cfg);
7149 pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS, 7962 pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
7150 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma); 7963 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
7151 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table), 7964 pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
7152 ioa_cfg->cfg_table, 7965 ioa_cfg->u.cfg_table,
7153 ioa_cfg->cfg_table_dma); 7966 ioa_cfg->cfg_table_dma);
7154 7967
7155 for (i = 0; i < IPR_NUM_HCAMS; i++) { 7968 for (i = 0; i < IPR_NUM_HCAMS; i++) {
@@ -7203,7 +8016,7 @@ static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
7203 int i; 8016 int i;
7204 8017
7205 ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev, 8018 ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
7206 sizeof(struct ipr_cmnd), 8, 0); 8019 sizeof(struct ipr_cmnd), 16, 0);
7207 8020
7208 if (!ioa_cfg->ipr_cmd_pool) 8021 if (!ioa_cfg->ipr_cmd_pool)
7209 return -ENOMEM; 8022 return -ENOMEM;
@@ -7221,13 +8034,25 @@ static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
7221 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr; 8034 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
7222 8035
7223 ioarcb = &ipr_cmd->ioarcb; 8036 ioarcb = &ipr_cmd->ioarcb;
7224 ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr); 8037 ipr_cmd->dma_addr = dma_addr;
8038 if (ioa_cfg->sis64)
8039 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
8040 else
8041 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
8042
7225 ioarcb->host_response_handle = cpu_to_be32(i << 2); 8043 ioarcb->host_response_handle = cpu_to_be32(i << 2);
7226 ioarcb->write_ioadl_addr = 8044 if (ioa_cfg->sis64) {
7227 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl)); 8045 ioarcb->u.sis64_addr_data.data_ioadl_addr =
7228 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; 8046 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
7229 ioarcb->ioasa_host_pci_addr = 8047 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
7230 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa)); 8048 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, ioasa));
8049 } else {
8050 ioarcb->write_ioadl_addr =
8051 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
8052 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
8053 ioarcb->ioasa_host_pci_addr =
8054 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
8055 }
7231 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa)); 8056 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
7232 ipr_cmd->cmd_index = i; 8057 ipr_cmd->cmd_index = i;
7233 ipr_cmd->ioa_cfg = ioa_cfg; 8058 ipr_cmd->ioa_cfg = ioa_cfg;
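
Each command block lives inside one DMA-mapped allocation, so the device-visible address of any embedded member is just the block's bus address plus offsetof(), which is exactly how the hunk above derives the i.ioadl64 and ioasa addresses from dma_addr. The pattern in isolation, with illustrative types:

    #include <stddef.h>
    #include <stdint.h>

    struct cmd_blk {                 /* illustrative layout */
        uint32_t hdr;
        uint64_t ioadl64[8];
        uint8_t  ioasa[64];
    };

    static uint64_t member_bus_addr(uint64_t blk_bus_addr, size_t member_off)
    {
        return blk_bus_addr + member_off;
    }

    /* e.g. member_bus_addr(dma_addr, offsetof(struct cmd_blk, ioasa)) */
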
@@ -7254,13 +8079,24 @@ static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
7254 8079
7255 ENTER; 8080 ENTER;
7256 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) * 8081 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
7257 IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL); 8082 ioa_cfg->max_devs_supported, GFP_KERNEL);
7258 8083
7259 if (!ioa_cfg->res_entries) 8084 if (!ioa_cfg->res_entries)
7260 goto out; 8085 goto out;
7261 8086
7262 for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++) 8087 if (ioa_cfg->sis64) {
8088 ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
8089 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8090 ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
8091 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8092 ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
8093 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8094 }
8095
8096 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
7263 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q); 8097 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
8098 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
8099 }
7264 8100
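
The three kzalloc calls above size one-bit-per-device ID maps: BITS_TO_LONGS(n) rounds n bits up to whole longs, so the allocation is exactly large enough for max_devs_supported bits. A user-space equivalent of the sizing:

    #include <limits.h>
    #include <stdlib.h>

    #define BITS_PER_LONG (CHAR_BIT * sizeof(long))
    #define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    /* Zeroed bitmap with one bit per possible device ID. */
    static unsigned long *alloc_id_map(unsigned int max_devs)
    {
        return calloc(BITS_TO_LONGS(max_devs), sizeof(unsigned long));
    }
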
7265 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev, 8101 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
7266 sizeof(struct ipr_misc_cbs), 8102 sizeof(struct ipr_misc_cbs),
@@ -7279,11 +8115,11 @@ static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
7279 if (!ioa_cfg->host_rrq) 8115 if (!ioa_cfg->host_rrq)
7280 goto out_ipr_free_cmd_blocks; 8116 goto out_ipr_free_cmd_blocks;
7281 8117
7282 ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev, 8118 ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
7283 sizeof(struct ipr_config_table), 8119 ioa_cfg->cfg_table_size,
7284 &ioa_cfg->cfg_table_dma); 8120 &ioa_cfg->cfg_table_dma);
7285 8121
7286 if (!ioa_cfg->cfg_table) 8122 if (!ioa_cfg->u.cfg_table)
7287 goto out_free_host_rrq; 8123 goto out_free_host_rrq;
7288 8124
7289 for (i = 0; i < IPR_NUM_HCAMS; i++) { 8125 for (i = 0; i < IPR_NUM_HCAMS; i++) {
@@ -7317,8 +8153,9 @@ out_free_hostrcb_dma:
7317 ioa_cfg->hostrcb[i], 8153 ioa_cfg->hostrcb[i],
7318 ioa_cfg->hostrcb_dma[i]); 8154 ioa_cfg->hostrcb_dma[i]);
7319 } 8155 }
7320 pci_free_consistent(pdev, sizeof(struct ipr_config_table), 8156 pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
7321 ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma); 8157 ioa_cfg->u.cfg_table,
8158 ioa_cfg->cfg_table_dma);
7322out_free_host_rrq: 8159out_free_host_rrq:
7323 pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS, 8160 pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
7324 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma); 8161 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
@@ -7393,15 +8230,21 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
7393 init_waitqueue_head(&ioa_cfg->reset_wait_q); 8230 init_waitqueue_head(&ioa_cfg->reset_wait_q);
7394 init_waitqueue_head(&ioa_cfg->msi_wait_q); 8231 init_waitqueue_head(&ioa_cfg->msi_wait_q);
7395 ioa_cfg->sdt_state = INACTIVE; 8232 ioa_cfg->sdt_state = INACTIVE;
7396 if (ipr_enable_cache)
7397 ioa_cfg->cache_state = CACHE_ENABLED;
7398 else
7399 ioa_cfg->cache_state = CACHE_DISABLED;
7400 8233
7401 ipr_initialize_bus_attr(ioa_cfg); 8234 ipr_initialize_bus_attr(ioa_cfg);
8235 ioa_cfg->max_devs_supported = ipr_max_devs;
7402 8236
7403 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS; 8237 if (ioa_cfg->sis64) {
7404 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET; 8238 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
8239 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
8240 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
8241 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
8242 } else {
8243 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
8244 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
8245 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
8246 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
8247 }
7405 host->max_channel = IPR_MAX_BUS_TO_SCAN; 8248 host->max_channel = IPR_MAX_BUS_TO_SCAN;
7406 host->unique_id = host->host_no; 8249 host->unique_id = host->host_no;
7407 host->max_cmd_len = IPR_MAX_CDB_LEN; 8250 host->max_cmd_len = IPR_MAX_CDB_LEN;
@@ -7413,13 +8256,26 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
7413 8256
7414 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg; 8257 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
7415 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg; 8258 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
8259 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
7416 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg; 8260 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
8261 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
7417 t->clr_interrupt_reg = base + p->clr_interrupt_reg; 8262 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
8263 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
7418 t->sense_interrupt_reg = base + p->sense_interrupt_reg; 8264 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
8265 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
7419 t->ioarrin_reg = base + p->ioarrin_reg; 8266 t->ioarrin_reg = base + p->ioarrin_reg;
7420 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg; 8267 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
8268 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
7421 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg; 8269 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
8270 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
7422 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg; 8271 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
8272 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
8273
8274 if (ioa_cfg->sis64) {
8275 t->init_feedback_reg = base + p->init_feedback_reg;
8276 t->dump_addr_reg = base + p->dump_addr_reg;
8277 t->dump_data_reg = base + p->dump_data_reg;
8278 }
7423} 8279}
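
The block above is a table-driven register map: per-chip offsets from a static table are added to the single ioremapped BAR base, and the sis64-only registers are just three extra slots filled conditionally. Minimal shape of the pattern, with names and fields that are illustrative rather than the driver's:

    struct chip_regs_offsets { unsigned long ioarrin, init_feedback; };
    struct chip_regs_mapped  { void __iomem *ioarrin, *init_feedback; };

    static void map_chip_regs(struct chip_regs_mapped *t,
                              const struct chip_regs_offsets *p,
                              void __iomem *base)
    {
        t->ioarrin = base + p->ioarrin;           /* offset + mapped base */
        t->init_feedback = base + p->init_feedback;
    }
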
7424 8280
7425/** 8281/**
@@ -7491,7 +8347,7 @@ static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg,
7491 init_waitqueue_head(&ioa_cfg->msi_wait_q); 8347 init_waitqueue_head(&ioa_cfg->msi_wait_q);
7492 ioa_cfg->msi_received = 0; 8348 ioa_cfg->msi_received = 0;
7493 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); 8349 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
7494 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg); 8350 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
7495 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 8351 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7496 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 8352 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7497 8353
@@ -7502,7 +8358,7 @@ static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg,
7502 } else if (ipr_debug) 8358 } else if (ipr_debug)
7503 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq); 8359 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
7504 8360
7505 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg); 8361 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
7506 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); 8362 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
7507 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ); 8363 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
7508 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); 8364 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
@@ -7572,6 +8428,8 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
7572 goto out_scsi_host_put; 8428 goto out_scsi_host_put;
7573 } 8429 }
7574 8430
8431 /* set SIS 32 or SIS 64 */
8432 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
7575 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg; 8433 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
7576 8434
7577 if (ipr_transop_timeout) 8435 if (ipr_transop_timeout)
@@ -7609,7 +8467,16 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
7609 8467
7610 pci_set_master(pdev); 8468 pci_set_master(pdev);
7611 8469
7612 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 8470 if (ioa_cfg->sis64) {
8471 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
8472 if (rc < 0) {
8473 dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
8474 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8475 }
8476
8477 } else
8478 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8479
7613 if (rc < 0) { 8480 if (rc < 0) {
7614 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n"); 8481 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
7615 goto cleanup_nomem; 8482 goto cleanup_nomem;
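
The 64-then-32-bit fallback above is the standard pattern for hardware whose 64-bit addressing is optional. On later kernels the same logic is usually written with dma_set_mask_and_coherent(); a modern equivalent for comparison, not the committed code (availability depends on kernel version):

    /* Try 64-bit DMA, fall back to 32-bit; 0 means success. */
    if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
        rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
    else
        rc = 0;
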
@@ -7651,6 +8518,15 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
7651 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg))) 8518 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
7652 goto cleanup_nomem; 8519 goto cleanup_nomem;
7653 8520
8521 if (ioa_cfg->sis64)
8522 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
8523 + ((sizeof(struct ipr_config_table_entry64)
8524 * ioa_cfg->max_devs_supported)));
8525 else
8526 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
8527 + ((sizeof(struct ipr_config_table_entry)
8528 * ioa_cfg->max_devs_supported)));
8529
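
Both branches compute header-plus-entries because the config tables become flexible-array structures (dev[0]) in the ipr.h hunk below, so the buffer size must be derived at probe time from max_devs_supported rather than from sizeof the struct. The sizing rule in isolation, with illustrative types:

    #include <stddef.h>

    struct hdr   { unsigned char num_entries, flags; };
    struct entry { unsigned char bytes[32]; };   /* illustrative size */
    struct table { struct hdr hdr; struct entry dev[]; };

    static size_t table_size(size_t nentries)
    {
        return sizeof(struct hdr) + nentries * sizeof(struct entry);
    }
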
7654 rc = ipr_alloc_mem(ioa_cfg); 8530 rc = ipr_alloc_mem(ioa_cfg);
7655 if (rc < 0) { 8531 if (rc < 0) {
7656 dev_err(&pdev->dev, 8532 dev_err(&pdev->dev,
@@ -7662,9 +8538,9 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
7662 * If HRRQ updated interrupt is not masked, or reset alert is set, 8538 * If HRRQ updated interrupt is not masked, or reset alert is set,
7663 * the card is in an unknown state and needs a hard reset 8539 * the card is in an unknown state and needs a hard reset
7664 */ 8540 */
7665 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 8541 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
7666 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg); 8542 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
7667 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg); 8543 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
7668 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT)) 8544 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
7669 ioa_cfg->needs_hard_reset = 1; 8545 ioa_cfg->needs_hard_reset = 1;
7670 if (interrupts & IPR_PCII_ERROR_INTERRUPTS) 8546 if (interrupts & IPR_PCII_ERROR_INTERRUPTS)
@@ -7952,9 +8828,6 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
7952 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0, 8828 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
7953 IPR_USE_LONG_TRANSOP_TIMEOUT }, 8829 IPR_USE_LONG_TRANSOP_TIMEOUT },
7954 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, 8830 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7955 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0,
7956 IPR_USE_LONG_TRANSOP_TIMEOUT },
7957 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7958 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 }, 8831 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
7959 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, 8832 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7960 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0, 8833 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
@@ -7969,9 +8842,22 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
7969 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, 8842 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
7970 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0, 8843 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
7971 IPR_USE_LONG_TRANSOP_TIMEOUT }, 8844 IPR_USE_LONG_TRANSOP_TIMEOUT },
7972 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SCAMP_E, 8845 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
7973 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 8846 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
7974 IPR_USE_LONG_TRANSOP_TIMEOUT }, 8847 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
8848 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
8849 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
8850 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
8851 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8852 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
8853 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8854 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
8855 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8856 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
8857 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8858 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0, 0 },
8859 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8860 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
7975 { } 8861 { }
7976}; 8862};
7977MODULE_DEVICE_TABLE(pci, ipr_pci_table); 8863MODULE_DEVICE_TABLE(pci, ipr_pci_table);
@@ -7991,6 +8877,61 @@ static struct pci_driver ipr_driver = {
7991}; 8877};
7992 8878
7993/** 8879/**
8880 * ipr_halt_done - Shutdown prepare completion
8881 *
8882 * Return value:
8883 * none
8884 **/
8885static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
8886{
8887 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8888
8889 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
8890}
8891
8892/**
8893 * ipr_halt - Issue shutdown prepare to all adapters
8894 *
8895 * Return value:
8896 * NOTIFY_OK on success / NOTIFY_DONE on failure
8897 **/
8898static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
8899{
8900 struct ipr_cmnd *ipr_cmd;
8901 struct ipr_ioa_cfg *ioa_cfg;
8902 unsigned long flags = 0;
8903
8904 if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
8905 return NOTIFY_DONE;
8906
8907 spin_lock(&ipr_driver_lock);
8908
8909 list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
8910 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8911 if (!ioa_cfg->allow_cmds) {
8912 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8913 continue;
8914 }
8915
8916 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8917 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8918 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8919 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8920 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
8921
8922 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
8923 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8924 }
8925 spin_unlock(&ipr_driver_lock);
8926
8927 return NOTIFY_OK;
8928}
8929
8930static struct notifier_block ipr_notifier = {
8931 ipr_halt, NULL, 0
8932};
8933
8934/**
7994 * ipr_init - Module entry point 8935 * ipr_init - Module entry point
7995 * 8936 *
7996 * Return value: 8937 * Return value:
@@ -8001,6 +8942,7 @@ static int __init ipr_init(void)
8001 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n", 8942 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
8002 IPR_DRIVER_VERSION, IPR_DRIVER_DATE); 8943 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
8003 8944
8945 register_reboot_notifier(&ipr_notifier);
8004 return pci_register_driver(&ipr_driver); 8946 return pci_register_driver(&ipr_driver);
8005} 8947}
8006 8948
@@ -8014,6 +8956,7 @@ static int __init ipr_init(void)
8014 **/ 8956 **/
8015static void __exit ipr_exit(void) 8957static void __exit ipr_exit(void)
8016{ 8958{
8959 unregister_reboot_notifier(&ipr_notifier);
8017 pci_unregister_driver(&ipr_driver); 8960 pci_unregister_driver(&ipr_driver);
8018} 8961}
8019 8962
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 19bbcf39f0c9..4c267b5e0b96 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -37,8 +37,8 @@
37/* 37/*
38 * Literals 38 * Literals
39 */ 39 */
40#define IPR_DRIVER_VERSION "2.4.3" 40#define IPR_DRIVER_VERSION "2.5.0"
41#define IPR_DRIVER_DATE "(June 10, 2009)" 41#define IPR_DRIVER_DATE "(February 11, 2010)"
42 42
43/* 43/*
44 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding 44 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@@ -55,7 +55,9 @@
55#define IPR_NUM_BASE_CMD_BLKS 100 55#define IPR_NUM_BASE_CMD_BLKS 100
56 56
57#define PCI_DEVICE_ID_IBM_OBSIDIAN_E 0x0339 57#define PCI_DEVICE_ID_IBM_OBSIDIAN_E 0x0339
58#define PCI_DEVICE_ID_IBM_SCAMP_E 0x034A 58
59#define PCI_DEVICE_ID_IBM_CROC_FPGA_E2 0x033D
60#define PCI_DEVICE_ID_IBM_CROC_ASIC_E2 0x034A
59 61
60#define IPR_SUBS_DEV_ID_2780 0x0264 62#define IPR_SUBS_DEV_ID_2780 0x0264
61#define IPR_SUBS_DEV_ID_5702 0x0266 63#define IPR_SUBS_DEV_ID_5702 0x0266
@@ -70,15 +72,24 @@
70#define IPR_SUBS_DEV_ID_572A 0x02C1 72#define IPR_SUBS_DEV_ID_572A 0x02C1
71#define IPR_SUBS_DEV_ID_572B 0x02C2 73#define IPR_SUBS_DEV_ID_572B 0x02C2
72#define IPR_SUBS_DEV_ID_572F 0x02C3 74#define IPR_SUBS_DEV_ID_572F 0x02C3
73#define IPR_SUBS_DEV_ID_574D 0x030B
74#define IPR_SUBS_DEV_ID_574E 0x030A 75#define IPR_SUBS_DEV_ID_574E 0x030A
75#define IPR_SUBS_DEV_ID_575B 0x030D 76#define IPR_SUBS_DEV_ID_575B 0x030D
76#define IPR_SUBS_DEV_ID_575C 0x0338 77#define IPR_SUBS_DEV_ID_575C 0x0338
77#define IPR_SUBS_DEV_ID_575D 0x033E
78#define IPR_SUBS_DEV_ID_57B3 0x033A 78#define IPR_SUBS_DEV_ID_57B3 0x033A
79#define IPR_SUBS_DEV_ID_57B7 0x0360 79#define IPR_SUBS_DEV_ID_57B7 0x0360
80#define IPR_SUBS_DEV_ID_57B8 0x02C2 80#define IPR_SUBS_DEV_ID_57B8 0x02C2
81 81
82#define IPR_SUBS_DEV_ID_57B4 0x033B
83#define IPR_SUBS_DEV_ID_57B2 0x035F
84#define IPR_SUBS_DEV_ID_57C6 0x0357
85
86#define IPR_SUBS_DEV_ID_57B5 0x033C
87#define IPR_SUBS_DEV_ID_57CE 0x035E
88#define IPR_SUBS_DEV_ID_57B1 0x0355
89
90#define IPR_SUBS_DEV_ID_574D 0x0356
91#define IPR_SUBS_DEV_ID_575D 0x035D
92
82#define IPR_NAME "ipr" 93#define IPR_NAME "ipr"
83 94
84/* 95/*
@@ -118,6 +129,10 @@
118#define IPR_NUM_LOG_HCAMS 2 129#define IPR_NUM_LOG_HCAMS 2
119#define IPR_NUM_CFG_CHG_HCAMS 2 130#define IPR_NUM_CFG_CHG_HCAMS 2
120#define IPR_NUM_HCAMS (IPR_NUM_LOG_HCAMS + IPR_NUM_CFG_CHG_HCAMS) 131#define IPR_NUM_HCAMS (IPR_NUM_LOG_HCAMS + IPR_NUM_CFG_CHG_HCAMS)
132
133#define IPR_MAX_SIS64_TARGETS_PER_BUS 1024
134#define IPR_MAX_SIS64_LUNS_PER_TARGET 0xffffffff
135
121#define IPR_MAX_NUM_TARGETS_PER_BUS 256 136#define IPR_MAX_NUM_TARGETS_PER_BUS 256
122#define IPR_MAX_NUM_LUNS_PER_TARGET 256 137#define IPR_MAX_NUM_LUNS_PER_TARGET 256
123#define IPR_MAX_NUM_VSET_LUNS_PER_TARGET 8 138#define IPR_MAX_NUM_VSET_LUNS_PER_TARGET 8
@@ -132,13 +147,15 @@
132 147
133/* We need resources for HCAMS, IOA reset, IOA bringdown, and ERP */ 148/* We need resources for HCAMS, IOA reset, IOA bringdown, and ERP */
134#define IPR_NUM_INTERNAL_CMD_BLKS (IPR_NUM_HCAMS + \ 149#define IPR_NUM_INTERNAL_CMD_BLKS (IPR_NUM_HCAMS + \
135 ((IPR_NUM_RESET_RELOAD_RETRIES + 1) * 2) + 3) 150 ((IPR_NUM_RESET_RELOAD_RETRIES + 1) * 2) + 4)
136 151
137#define IPR_MAX_COMMANDS IPR_NUM_BASE_CMD_BLKS 152#define IPR_MAX_COMMANDS IPR_NUM_BASE_CMD_BLKS
138#define IPR_NUM_CMD_BLKS (IPR_NUM_BASE_CMD_BLKS + \ 153#define IPR_NUM_CMD_BLKS (IPR_NUM_BASE_CMD_BLKS + \
139 IPR_NUM_INTERNAL_CMD_BLKS) 154 IPR_NUM_INTERNAL_CMD_BLKS)
140 155
141#define IPR_MAX_PHYSICAL_DEVS 192 156#define IPR_MAX_PHYSICAL_DEVS 192
157#define IPR_DEFAULT_SIS64_DEVS 1024
158#define IPR_MAX_SIS64_DEVS 4096
142 159
143#define IPR_MAX_SGLIST 64 160#define IPR_MAX_SGLIST 64
144#define IPR_IOA_MAX_SECTORS 32767 161#define IPR_IOA_MAX_SECTORS 32767
@@ -173,6 +190,7 @@
173#define IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE 0x01 190#define IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE 0x01
174#define IPR_HCAM_CDB_OP_CODE_LOG_DATA 0x02 191#define IPR_HCAM_CDB_OP_CODE_LOG_DATA 0x02
175#define IPR_SET_SUPPORTED_DEVICES 0xFB 192#define IPR_SET_SUPPORTED_DEVICES 0xFB
193#define IPR_SET_ALL_SUPPORTED_DEVICES 0x80
176#define IPR_IOA_SHUTDOWN 0xF7 194#define IPR_IOA_SHUTDOWN 0xF7
177#define IPR_WR_BUF_DOWNLOAD_AND_SAVE 0x05 195#define IPR_WR_BUF_DOWNLOAD_AND_SAVE 0x05
178 196
@@ -221,9 +239,17 @@
221#define IPR_SDT_FMT2_BAR5_SEL 0x5 239#define IPR_SDT_FMT2_BAR5_SEL 0x5
222#define IPR_SDT_FMT2_EXP_ROM_SEL 0x8 240#define IPR_SDT_FMT2_EXP_ROM_SEL 0x8
223#define IPR_FMT2_SDT_READY_TO_USE 0xC4D4E3F2 241#define IPR_FMT2_SDT_READY_TO_USE 0xC4D4E3F2
242#define IPR_FMT3_SDT_READY_TO_USE 0xC4D4E3F3
224#define IPR_DOORBELL 0x82800000 243#define IPR_DOORBELL 0x82800000
225#define IPR_RUNTIME_RESET 0x40000000 244#define IPR_RUNTIME_RESET 0x40000000
226 245
246#define IPR_IPL_INIT_MIN_STAGE_TIME 5
247#define IPR_IPL_INIT_STAGE_UNKNOWN 0x0
248#define IPR_IPL_INIT_STAGE_TRANSOP 0xB0000000
249#define IPR_IPL_INIT_STAGE_MASK 0xff000000
250#define IPR_IPL_INIT_STAGE_TIME_MASK 0x0000ffff
251#define IPR_PCII_IPL_STAGE_CHANGE (0x80000000 >> 0)
252
227#define IPR_PCII_IOA_TRANS_TO_OPER (0x80000000 >> 0) 253#define IPR_PCII_IOA_TRANS_TO_OPER (0x80000000 >> 0)
228#define IPR_PCII_IOARCB_XFER_FAILED (0x80000000 >> 3) 254#define IPR_PCII_IOARCB_XFER_FAILED (0x80000000 >> 3)
229#define IPR_PCII_IOA_UNIT_CHECKED (0x80000000 >> 4) 255#define IPR_PCII_IOA_UNIT_CHECKED (0x80000000 >> 4)
@@ -318,27 +344,27 @@ struct ipr_std_inq_data {
318 u8 serial_num[IPR_SERIAL_NUM_LEN]; 344 u8 serial_num[IPR_SERIAL_NUM_LEN];
319}__attribute__ ((packed)); 345}__attribute__ ((packed));
320 346
347#define IPR_RES_TYPE_AF_DASD 0x00
348#define IPR_RES_TYPE_GENERIC_SCSI 0x01
349#define IPR_RES_TYPE_VOLUME_SET 0x02
350#define IPR_RES_TYPE_REMOTE_AF_DASD 0x03
351#define IPR_RES_TYPE_GENERIC_ATA 0x04
352#define IPR_RES_TYPE_ARRAY 0x05
353#define IPR_RES_TYPE_IOAFP 0xff
354
321struct ipr_config_table_entry { 355struct ipr_config_table_entry {
322 u8 proto; 356 u8 proto;
323#define IPR_PROTO_SATA 0x02 357#define IPR_PROTO_SATA 0x02
324#define IPR_PROTO_SATA_ATAPI 0x03 358#define IPR_PROTO_SATA_ATAPI 0x03
325#define IPR_PROTO_SAS_STP 0x06 359#define IPR_PROTO_SAS_STP 0x06
326#define IPR_PROTO_SAS_STP_ATAPI 0x07 360#define IPR_PROTO_SAS_STP_ATAPI 0x07
327 u8 array_id; 361 u8 array_id;
328 u8 flags; 362 u8 flags;
329#define IPR_IS_IOA_RESOURCE 0x80 363#define IPR_IS_IOA_RESOURCE 0x80
330#define IPR_IS_ARRAY_MEMBER 0x20
331#define IPR_IS_HOT_SPARE 0x10
332
333 u8 rsvd_subtype; 364 u8 rsvd_subtype;
334#define IPR_RES_SUBTYPE(res) (((res)->cfgte.rsvd_subtype) & 0x0f) 365
335#define IPR_SUBTYPE_AF_DASD 0 366#define IPR_QUEUEING_MODEL(res) ((((res)->flags) & 0x70) >> 4)
336#define IPR_SUBTYPE_GENERIC_SCSI 1 367#define IPR_QUEUE_FROZEN_MODEL 0
337#define IPR_SUBTYPE_VOLUME_SET 2
338#define IPR_SUBTYPE_GENERIC_ATA 4
339
340#define IPR_QUEUEING_MODEL(res) ((((res)->cfgte.flags) & 0x70) >> 4)
341#define IPR_QUEUE_FROZEN_MODEL 0
342#define IPR_QUEUE_NACA_MODEL 1 368#define IPR_QUEUE_NACA_MODEL 1
343 369
344 struct ipr_res_addr res_addr; 370 struct ipr_res_addr res_addr;
@@ -347,6 +373,28 @@ struct ipr_config_table_entry {
347 struct ipr_std_inq_data std_inq_data; 373 struct ipr_std_inq_data std_inq_data;
348}__attribute__ ((packed, aligned (4))); 374}__attribute__ ((packed, aligned (4)));
349 375
376struct ipr_config_table_entry64 {
377 u8 res_type;
378 u8 proto;
379 u8 vset_num;
380 u8 array_id;
381 __be16 flags;
382 __be16 res_flags;
383#define IPR_QUEUEING_MODEL64(res) ((((res)->res_flags) & 0x7000) >> 12)
384 __be32 res_handle;
385 u8 dev_id_type;
386 u8 reserved[3];
387 __be64 dev_id;
388 __be64 lun;
389 __be64 lun_wwn[2];
390#define IPR_MAX_RES_PATH_LENGTH 24
391 __be64 res_path;
392 struct ipr_std_inq_data std_inq_data;
393 u8 reserved2[4];
394 __be64 reserved3[2]; // description text
395 u8 reserved4[8];
396}__attribute__ ((packed, aligned (8)));
397
350struct ipr_config_table_hdr { 398struct ipr_config_table_hdr {
351 u8 num_entries; 399 u8 num_entries;
352 u8 flags; 400 u8 flags;
@@ -354,13 +402,35 @@ struct ipr_config_table_hdr {
354 __be16 reserved; 402 __be16 reserved;
355}__attribute__((packed, aligned (4))); 403}__attribute__((packed, aligned (4)));
356 404
405struct ipr_config_table_hdr64 {
406 __be16 num_entries;
407 __be16 reserved;
408 u8 flags;
409 u8 reserved2[11];
410}__attribute__((packed, aligned (4)));
411
357struct ipr_config_table { 412struct ipr_config_table {
358 struct ipr_config_table_hdr hdr; 413 struct ipr_config_table_hdr hdr;
359 struct ipr_config_table_entry dev[IPR_MAX_PHYSICAL_DEVS]; 414 struct ipr_config_table_entry dev[0];
360}__attribute__((packed, aligned (4))); 415}__attribute__((packed, aligned (4)));
361 416
417struct ipr_config_table64 {
418 struct ipr_config_table_hdr64 hdr64;
419 struct ipr_config_table_entry64 dev[0];
420}__attribute__((packed, aligned (8)));
421
422struct ipr_config_table_entry_wrapper {
423 union {
424 struct ipr_config_table_entry *cfgte;
425 struct ipr_config_table_entry64 *cfgte64;
426 } u;
427};
428
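The wrapper above is what lets common code walk either table layout without duplicating the traversal. A minimal sketch of such a walk, assuming the sis64 flag and the cfg_table union that this patch adds to struct ipr_ioa_cfg further down; ipr_walk_cfg_table() itself is illustrative, not part of the patch:

static void ipr_walk_cfg_table(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_config_table_entry_wrapper cfgtew;
	int i, entries;

	if (ioa_cfg->sis64)
		entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
	else
		entries = ioa_cfg->u.cfg_table->hdr.num_entries;

	for (i = 0; i < entries; i++) {
		/* Point the wrapper at whichever entry layout this IOA uses. */
		if (ioa_cfg->sis64)
			cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
		else
			cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
		/* ... hand cfgtew to layout-agnostic resource update code ... */
	}
}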
362struct ipr_hostrcb_cfg_ch_not { 429struct ipr_hostrcb_cfg_ch_not {
363 struct ipr_config_table_entry cfgte; 430 union {
431 struct ipr_config_table_entry cfgte;
432 struct ipr_config_table_entry64 cfgte64;
433 } u;
364 u8 reserved[936]; 434 u8 reserved[936];
365}__attribute__((packed, aligned (4))); 435}__attribute__((packed, aligned (4)));
366 436
@@ -381,7 +451,7 @@ struct ipr_cmd_pkt {
381#define IPR_RQTYPE_HCAM 0x02 451#define IPR_RQTYPE_HCAM 0x02
382#define IPR_RQTYPE_ATA_PASSTHRU 0x04 452#define IPR_RQTYPE_ATA_PASSTHRU 0x04
383 453
384 u8 luntar_luntrn; 454 u8 reserved2;
385 455
386 u8 flags_hi; 456 u8 flags_hi;
387#define IPR_FLAGS_HI_WRITE_NOT_READ 0x80 457#define IPR_FLAGS_HI_WRITE_NOT_READ 0x80
@@ -403,7 +473,7 @@ struct ipr_cmd_pkt {
403 __be16 timeout; 473 __be16 timeout;
404}__attribute__ ((packed, aligned(4))); 474}__attribute__ ((packed, aligned(4)));
405 475
406struct ipr_ioarcb_ata_regs { 476struct ipr_ioarcb_ata_regs { /* 22 bytes */
407 u8 flags; 477 u8 flags;
408#define IPR_ATA_FLAG_PACKET_CMD 0x80 478#define IPR_ATA_FLAG_PACKET_CMD 0x80
409#define IPR_ATA_FLAG_XFER_TYPE_DMA 0x40 479#define IPR_ATA_FLAG_XFER_TYPE_DMA 0x40
@@ -442,28 +512,49 @@ struct ipr_ioadl_desc {
442 __be32 address; 512 __be32 address;
443}__attribute__((packed, aligned (8))); 513}__attribute__((packed, aligned (8)));
444 514
515struct ipr_ioadl64_desc {
516 __be32 flags;
517 __be32 data_len;
518 __be64 address;
519}__attribute__((packed, aligned (16)));
520
521struct ipr_ata64_ioadl {
522 struct ipr_ioarcb_ata_regs regs;
523 u16 reserved[5];
524 struct ipr_ioadl64_desc ioadl64[IPR_NUM_IOADL_ENTRIES];
525}__attribute__((packed, aligned (16)));
526
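For comparison with the 32-bit struct ipr_ioadl_desc above, a 64-bit IOADL element would be filled from a DMA-mapped scatterlist roughly as follows. This is a sketch that borrows the driver's existing IOADL flag macros (e.g. IPR_IOADL_FLAG_LAST) and is not part of the patch:

static void ipr_fill_ioadl64(struct ipr_ioadl64_desc *ioadl64,
			     struct scatterlist *sglist, int nseg, u32 flags)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sglist, sg, nseg, i) {
		ioadl64[i].flags = cpu_to_be32(flags);
		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
		/* The point of SIS64: a full 64-bit bus address per element. */
		ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
	}
	/* Mark the final descriptor so the IOA knows the list ends here. */
	ioadl64[nseg - 1].flags |= cpu_to_be32(IPR_IOADL_FLAG_LAST);
}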
445struct ipr_ioarcb_add_data { 527struct ipr_ioarcb_add_data {
446 union { 528 union {
447 struct ipr_ioarcb_ata_regs regs; 529 struct ipr_ioarcb_ata_regs regs;
448 struct ipr_ioadl_desc ioadl[5]; 530 struct ipr_ioadl_desc ioadl[5];
449 __be32 add_cmd_parms[10]; 531 __be32 add_cmd_parms[10];
450 }u; 532 } u;
451}__attribute__ ((packed, aligned(4))); 533}__attribute__ ((packed, aligned (4)));
534
535struct ipr_ioarcb_sis64_add_addr_ecb {
536 __be64 ioasa_host_pci_addr;
537 __be64 data_ioadl_addr;
538 __be64 reserved;
539 __be32 ext_control_buf[4];
540}__attribute__((packed, aligned (8)));
452 541
453/* IOA Request Control Block 128 bytes */ 542/* IOA Request Control Block 128 bytes */
454struct ipr_ioarcb { 543struct ipr_ioarcb {
455 __be32 ioarcb_host_pci_addr; 544 union {
456 __be32 reserved; 545 __be32 ioarcb_host_pci_addr;
546 __be64 ioarcb_host_pci_addr64;
547 } a;
457 __be32 res_handle; 548 __be32 res_handle;
458 __be32 host_response_handle; 549 __be32 host_response_handle;
459 __be32 reserved1; 550 __be32 reserved1;
460 __be32 reserved2; 551 __be32 reserved2;
461 __be32 reserved3; 552 __be32 reserved3;
462 553
463 __be32 write_data_transfer_length; 554 __be32 data_transfer_length;
464 __be32 read_data_transfer_length; 555 __be32 read_data_transfer_length;
465 __be32 write_ioadl_addr; 556 __be32 write_ioadl_addr;
466 __be32 write_ioadl_len; 557 __be32 ioadl_len;
467 __be32 read_ioadl_addr; 558 __be32 read_ioadl_addr;
468 __be32 read_ioadl_len; 559 __be32 read_ioadl_len;
469 560
@@ -473,8 +564,14 @@ struct ipr_ioarcb {
473 564
474 struct ipr_cmd_pkt cmd_pkt; 565 struct ipr_cmd_pkt cmd_pkt;
475 566
476 __be32 add_cmd_parms_len; 567 __be16 add_cmd_parms_offset;
477 struct ipr_ioarcb_add_data add_data; 568 __be16 add_cmd_parms_len;
569
570 union {
571 struct ipr_ioarcb_add_data add_data;
572 struct ipr_ioarcb_sis64_add_addr_ecb sis64_addr_data;
573 } u;
574
478}__attribute__((packed, aligned (4))); 575}__attribute__((packed, aligned (4)));
479 576
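Since the host PCI address at the top of the IOARCB is now a union, code that programs it has to pick the member matching the adapter's SIS level. A two-line sketch, where ioarcb_dma stands in for the command block's DMA handle:

if (ioa_cfg->sis64)
	ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(ioarcb_dma);
else
	ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(ioarcb_dma);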
480struct ipr_ioasa_vset { 577struct ipr_ioasa_vset {
@@ -676,12 +773,29 @@ struct ipr_hostrcb_device_data_entry_enhanced {
676 struct ipr_ext_vpd cfc_last_with_dev_vpd; 773 struct ipr_ext_vpd cfc_last_with_dev_vpd;
677}__attribute__((packed, aligned (4))); 774}__attribute__((packed, aligned (4)));
678 775
776struct ipr_hostrcb64_device_data_entry_enhanced {
777 struct ipr_ext_vpd vpd;
778 u8 ccin[4];
779 u8 res_path[8];
780 struct ipr_ext_vpd new_vpd;
781 u8 new_ccin[4];
782 struct ipr_ext_vpd ioa_last_with_dev_vpd;
783 struct ipr_ext_vpd cfc_last_with_dev_vpd;
784}__attribute__((packed, aligned (4)));
785
679struct ipr_hostrcb_array_data_entry { 786struct ipr_hostrcb_array_data_entry {
680 struct ipr_vpd vpd; 787 struct ipr_vpd vpd;
681 struct ipr_res_addr expected_dev_res_addr; 788 struct ipr_res_addr expected_dev_res_addr;
682 struct ipr_res_addr dev_res_addr; 789 struct ipr_res_addr dev_res_addr;
683}__attribute__((packed, aligned (4))); 790}__attribute__((packed, aligned (4)));
684 791
792struct ipr_hostrcb64_array_data_entry {
793 struct ipr_ext_vpd vpd;
794 u8 ccin[4];
795 u8 expected_res_path[8];
796 u8 res_path[8];
797}__attribute__((packed, aligned (4)));
798
685struct ipr_hostrcb_array_data_entry_enhanced { 799struct ipr_hostrcb_array_data_entry_enhanced {
686 struct ipr_ext_vpd vpd; 800 struct ipr_ext_vpd vpd;
687 u8 ccin[4]; 801 u8 ccin[4];
@@ -733,6 +847,14 @@ struct ipr_hostrcb_type_13_error {
733 struct ipr_hostrcb_device_data_entry_enhanced dev[3]; 847 struct ipr_hostrcb_device_data_entry_enhanced dev[3];
734}__attribute__((packed, aligned (4))); 848}__attribute__((packed, aligned (4)));
735 849
850struct ipr_hostrcb_type_23_error {
851 struct ipr_ext_vpd ioa_vpd;
852 struct ipr_ext_vpd cfc_vpd;
853 __be32 errors_detected;
854 __be32 errors_logged;
855 struct ipr_hostrcb64_device_data_entry_enhanced dev[3];
856}__attribute__((packed, aligned (4)));
857
736struct ipr_hostrcb_type_04_error { 858struct ipr_hostrcb_type_04_error {
737 struct ipr_vpd ioa_vpd; 859 struct ipr_vpd ioa_vpd;
738 struct ipr_vpd cfc_vpd; 860 struct ipr_vpd cfc_vpd;
@@ -760,6 +882,22 @@ struct ipr_hostrcb_type_14_error {
760 struct ipr_hostrcb_array_data_entry_enhanced array_member[18]; 882 struct ipr_hostrcb_array_data_entry_enhanced array_member[18];
761}__attribute__((packed, aligned (4))); 883}__attribute__((packed, aligned (4)));
762 884
885struct ipr_hostrcb_type_24_error {
886 struct ipr_ext_vpd ioa_vpd;
887 struct ipr_ext_vpd cfc_vpd;
888 u8 reserved[2];
889 u8 exposed_mode_adn;
890#define IPR_INVALID_ARRAY_DEV_NUM 0xff
891 u8 array_id;
892 u8 last_res_path[8];
893 u8 protection_level[8];
894 struct ipr_ext_vpd array_vpd;
895 u8 description[16];
896 u8 reserved2[3];
897 u8 num_entries;
898 struct ipr_hostrcb64_array_data_entry array_member[32];
899}__attribute__((packed, aligned (4)));
900
763struct ipr_hostrcb_type_07_error { 901struct ipr_hostrcb_type_07_error {
764 u8 failure_reason[64]; 902 u8 failure_reason[64];
765 struct ipr_vpd vpd; 903 struct ipr_vpd vpd;
@@ -797,6 +935,22 @@ struct ipr_hostrcb_config_element {
797 __be32 wwid[2]; 935 __be32 wwid[2];
798}__attribute__((packed, aligned (4))); 936}__attribute__((packed, aligned (4)));
799 937
938struct ipr_hostrcb64_config_element {
939 __be16 length;
940 u8 descriptor_id;
941#define IPR_DESCRIPTOR_MASK 0xC0
942#define IPR_DESCRIPTOR_SIS64 0x00
943
944 u8 reserved;
945 u8 type_status;
946
947 u8 reserved2[2];
948 u8 link_rate;
949
950 u8 res_path[8];
951 __be32 wwid[2];
952}__attribute__((packed, aligned (8)));
953
800struct ipr_hostrcb_fabric_desc { 954struct ipr_hostrcb_fabric_desc {
801 __be16 length; 955 __be16 length;
802 u8 ioa_port; 956 u8 ioa_port;
@@ -818,6 +972,20 @@ struct ipr_hostrcb_fabric_desc {
818 struct ipr_hostrcb_config_element elem[1]; 972 struct ipr_hostrcb_config_element elem[1];
819}__attribute__((packed, aligned (4))); 973}__attribute__((packed, aligned (4)));
820 974
975struct ipr_hostrcb64_fabric_desc {
976 __be16 length;
977 u8 descriptor_id;
978
979 u8 reserved;
980 u8 path_state;
981
982 u8 reserved2[2];
983 u8 res_path[8];
984 u8 reserved3[6];
985 __be16 num_entries;
986 struct ipr_hostrcb64_config_element elem[1];
987}__attribute__((packed, aligned (8)));
988
821#define for_each_fabric_cfg(fabric, cfg) \ 989#define for_each_fabric_cfg(fabric, cfg) \
822 for (cfg = (fabric)->elem; \ 990 for (cfg = (fabric)->elem; \
823 cfg < ((fabric)->elem + be16_to_cpu((fabric)->num_entries)); \ 991 cfg < ((fabric)->elem + be16_to_cpu((fabric)->num_entries)); \
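The increment clause of the macro falls outside the diff context, but it presumably just steps cfg to the next element, so the loop visits num_entries fixed-size config elements under one fabric descriptor. Typical use, with ipr_log_path_elem() as an illustrative per-element handler:

struct ipr_hostrcb_config_element *cfg;

for_each_fabric_cfg(fabric, cfg)
	ipr_log_path_elem(hostrcb, cfg);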
@@ -830,10 +998,17 @@ struct ipr_hostrcb_type_20_error {
830 struct ipr_hostrcb_fabric_desc desc[1]; 998 struct ipr_hostrcb_fabric_desc desc[1];
831}__attribute__((packed, aligned (4))); 999}__attribute__((packed, aligned (4)));
832 1000
1001struct ipr_hostrcb_type_30_error {
1002 u8 failure_reason[64];
1003 u8 reserved[3];
1004 u8 num_entries;
1005 struct ipr_hostrcb64_fabric_desc desc[1];
1006}__attribute__((packed, aligned (4)));
1007
833struct ipr_hostrcb_error { 1008struct ipr_hostrcb_error {
834 __be32 failing_dev_ioasc; 1009 __be32 fd_ioasc;
835 struct ipr_res_addr failing_dev_res_addr; 1010 struct ipr_res_addr fd_res_addr;
836 __be32 failing_dev_res_handle; 1011 __be32 fd_res_handle;
837 __be32 prc; 1012 __be32 prc;
838 union { 1013 union {
839 struct ipr_hostrcb_type_ff_error type_ff_error; 1014 struct ipr_hostrcb_type_ff_error type_ff_error;
@@ -850,6 +1025,26 @@ struct ipr_hostrcb_error {
850 } u; 1025 } u;
851}__attribute__((packed, aligned (4))); 1026}__attribute__((packed, aligned (4)));
852 1027
1028struct ipr_hostrcb64_error {
1029 __be32 fd_ioasc;
1030 __be32 ioa_fw_level;
1031 __be32 fd_res_handle;
1032 __be32 prc;
1033 __be64 fd_dev_id;
1034 __be64 fd_lun;
1035 u8 fd_res_path[8];
1036 __be64 time_stamp;
1037 u8 reserved[2];
1038 union {
1039 struct ipr_hostrcb_type_ff_error type_ff_error;
1040 struct ipr_hostrcb_type_12_error type_12_error;
1041 struct ipr_hostrcb_type_17_error type_17_error;
1042 struct ipr_hostrcb_type_23_error type_23_error;
1043 struct ipr_hostrcb_type_24_error type_24_error;
1044 struct ipr_hostrcb_type_30_error type_30_error;
1045 } u;
1046}__attribute__((packed, aligned (8)));
1047
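With two error overlays in the HCAM, anything that reads the failing-device IOASC has to dispatch on the adapter generation first. A minimal sketch using the sis64 flag this patch adds to struct ipr_ioa_cfg below:

static u32 ipr_get_fd_ioasc(struct ipr_hostrcb *hostrcb)
{
	/* Both layouts keep fd_ioasc first, but behind different overlays. */
	if (hostrcb->ioa_cfg->sis64)
		return be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
	return be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
}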
853struct ipr_hostrcb_raw { 1048struct ipr_hostrcb_raw {
854 __be32 data[sizeof(struct ipr_hostrcb_error)/sizeof(__be32)]; 1049 __be32 data[sizeof(struct ipr_hostrcb_error)/sizeof(__be32)];
855}__attribute__((packed, aligned (4))); 1050}__attribute__((packed, aligned (4)));
@@ -887,7 +1082,11 @@ struct ipr_hcam {
887#define IPR_HOST_RCB_OVERLAY_ID_16 0x16 1082#define IPR_HOST_RCB_OVERLAY_ID_16 0x16
888#define IPR_HOST_RCB_OVERLAY_ID_17 0x17 1083#define IPR_HOST_RCB_OVERLAY_ID_17 0x17
889#define IPR_HOST_RCB_OVERLAY_ID_20 0x20 1084#define IPR_HOST_RCB_OVERLAY_ID_20 0x20
890#define IPR_HOST_RCB_OVERLAY_ID_DEFAULT 0xFF 1085#define IPR_HOST_RCB_OVERLAY_ID_23 0x23
1086#define IPR_HOST_RCB_OVERLAY_ID_24 0x24
1087#define IPR_HOST_RCB_OVERLAY_ID_26 0x26
1088#define IPR_HOST_RCB_OVERLAY_ID_30 0x30
1089#define IPR_HOST_RCB_OVERLAY_ID_DEFAULT 0xFF
891 1090
892 u8 reserved1[3]; 1091 u8 reserved1[3];
893 __be32 ilid; 1092 __be32 ilid;
@@ -897,6 +1096,7 @@ struct ipr_hcam {
897 1096
898 union { 1097 union {
899 struct ipr_hostrcb_error error; 1098 struct ipr_hostrcb_error error;
1099 struct ipr_hostrcb64_error error64;
900 struct ipr_hostrcb_cfg_ch_not ccn; 1100 struct ipr_hostrcb_cfg_ch_not ccn;
901 struct ipr_hostrcb_raw raw; 1101 struct ipr_hostrcb_raw raw;
902 } u; 1102 } u;
@@ -907,14 +1107,14 @@ struct ipr_hostrcb {
907 dma_addr_t hostrcb_dma; 1107 dma_addr_t hostrcb_dma;
908 struct list_head queue; 1108 struct list_head queue;
909 struct ipr_ioa_cfg *ioa_cfg; 1109 struct ipr_ioa_cfg *ioa_cfg;
1110 char rp_buffer[IPR_MAX_RES_PATH_LENGTH];
910}; 1111};
911 1112
912/* IPR smart dump table structures */ 1113/* IPR smart dump table structures */
913struct ipr_sdt_entry { 1114struct ipr_sdt_entry {
914 __be32 bar_str_offset; 1115 __be32 start_token;
915 __be32 end_offset; 1116 __be32 end_token;
916 u8 entry_byte; 1117 u8 reserved[4];
917 u8 reserved[3];
918 1118
919 u8 flags; 1119 u8 flags;
920#define IPR_SDT_ENDIAN 0x80 1120#define IPR_SDT_ENDIAN 0x80
@@ -960,28 +1160,48 @@ struct ipr_sata_port {
960}; 1160};
961 1161
962struct ipr_resource_entry { 1162struct ipr_resource_entry {
963 struct ipr_config_table_entry cfgte;
964 u8 needs_sync_complete:1; 1163 u8 needs_sync_complete:1;
965 u8 in_erp:1; 1164 u8 in_erp:1;
966 u8 add_to_ml:1; 1165 u8 add_to_ml:1;
967 u8 del_from_ml:1; 1166 u8 del_from_ml:1;
968 u8 resetting_device:1; 1167 u8 resetting_device:1;
969 1168
1169 u32 bus; /* AKA channel */
1170 u32 target; /* AKA id */
1171 u32 lun;
1172#define IPR_ARRAY_VIRTUAL_BUS 0x1
1173#define IPR_VSET_VIRTUAL_BUS 0x2
1174#define IPR_IOAFP_VIRTUAL_BUS 0x3
1175
1176#define IPR_GET_RES_PHYS_LOC(res) \
1177 (((res)->bus << 24) | ((res)->target << 8) | (res)->lun)
1178
1179 u8 ata_class;
1180
1181 u8 flags;
1182 __be16 res_flags;
1183
1184 __be32 type;
1185
1186 u8 qmodel;
1187 struct ipr_std_inq_data std_inq_data;
1188
1189 __be32 res_handle;
1190 __be64 dev_id;
1191 struct scsi_lun dev_lun;
1192 u8 res_path[8];
1193
1194 struct ipr_ioa_cfg *ioa_cfg;
970 struct scsi_device *sdev; 1195 struct scsi_device *sdev;
971 struct ipr_sata_port *sata_port; 1196 struct ipr_sata_port *sata_port;
972 struct list_head queue; 1197 struct list_head queue;
973}; 1198}; /* struct ipr_resource_entry */
974 1199
975struct ipr_resource_hdr { 1200struct ipr_resource_hdr {
976 u16 num_entries; 1201 u16 num_entries;
977 u16 reserved; 1202 u16 reserved;
978}; 1203};
979 1204
980struct ipr_resource_table {
981 struct ipr_resource_hdr hdr;
982 struct ipr_resource_entry dev[IPR_MAX_PHYSICAL_DEVS];
983};
984
985struct ipr_misc_cbs { 1205struct ipr_misc_cbs {
986 struct ipr_ioa_vpd ioa_vpd; 1206 struct ipr_ioa_vpd ioa_vpd;
987 struct ipr_inquiry_page0 page0_data; 1207 struct ipr_inquiry_page0 page0_data;
@@ -994,27 +1214,51 @@ struct ipr_misc_cbs {
994struct ipr_interrupt_offsets { 1214struct ipr_interrupt_offsets {
995 unsigned long set_interrupt_mask_reg; 1215 unsigned long set_interrupt_mask_reg;
996 unsigned long clr_interrupt_mask_reg; 1216 unsigned long clr_interrupt_mask_reg;
1217 unsigned long clr_interrupt_mask_reg32;
997 unsigned long sense_interrupt_mask_reg; 1218 unsigned long sense_interrupt_mask_reg;
1219 unsigned long sense_interrupt_mask_reg32;
998 unsigned long clr_interrupt_reg; 1220 unsigned long clr_interrupt_reg;
1221 unsigned long clr_interrupt_reg32;
999 1222
1000 unsigned long sense_interrupt_reg; 1223 unsigned long sense_interrupt_reg;
1224 unsigned long sense_interrupt_reg32;
1001 unsigned long ioarrin_reg; 1225 unsigned long ioarrin_reg;
1002 unsigned long sense_uproc_interrupt_reg; 1226 unsigned long sense_uproc_interrupt_reg;
1227 unsigned long sense_uproc_interrupt_reg32;
1003 unsigned long set_uproc_interrupt_reg; 1228 unsigned long set_uproc_interrupt_reg;
1229 unsigned long set_uproc_interrupt_reg32;
1004 unsigned long clr_uproc_interrupt_reg; 1230 unsigned long clr_uproc_interrupt_reg;
1231 unsigned long clr_uproc_interrupt_reg32;
1232
1233 unsigned long init_feedback_reg;
1234
1235 unsigned long dump_addr_reg;
1236 unsigned long dump_data_reg;
1005}; 1237};
1006 1238
1007struct ipr_interrupts { 1239struct ipr_interrupts {
1008 void __iomem *set_interrupt_mask_reg; 1240 void __iomem *set_interrupt_mask_reg;
1009 void __iomem *clr_interrupt_mask_reg; 1241 void __iomem *clr_interrupt_mask_reg;
1242 void __iomem *clr_interrupt_mask_reg32;
1010 void __iomem *sense_interrupt_mask_reg; 1243 void __iomem *sense_interrupt_mask_reg;
1244 void __iomem *sense_interrupt_mask_reg32;
1011 void __iomem *clr_interrupt_reg; 1245 void __iomem *clr_interrupt_reg;
1246 void __iomem *clr_interrupt_reg32;
1012 1247
1013 void __iomem *sense_interrupt_reg; 1248 void __iomem *sense_interrupt_reg;
1249 void __iomem *sense_interrupt_reg32;
1014 void __iomem *ioarrin_reg; 1250 void __iomem *ioarrin_reg;
1015 void __iomem *sense_uproc_interrupt_reg; 1251 void __iomem *sense_uproc_interrupt_reg;
1252 void __iomem *sense_uproc_interrupt_reg32;
1016 void __iomem *set_uproc_interrupt_reg; 1253 void __iomem *set_uproc_interrupt_reg;
1254 void __iomem *set_uproc_interrupt_reg32;
1017 void __iomem *clr_uproc_interrupt_reg; 1255 void __iomem *clr_uproc_interrupt_reg;
1256 void __iomem *clr_uproc_interrupt_reg32;
1257
1258 void __iomem *init_feedback_reg;
1259
1260 void __iomem *dump_addr_reg;
1261 void __iomem *dump_data_reg;
1018}; 1262};
1019 1263
1020struct ipr_chip_cfg_t { 1264struct ipr_chip_cfg_t {
@@ -1029,6 +1273,9 @@ struct ipr_chip_t {
1029 u16 intr_type; 1273 u16 intr_type;
1030#define IPR_USE_LSI 0x00 1274#define IPR_USE_LSI 0x00
1031#define IPR_USE_MSI 0x01 1275#define IPR_USE_MSI 0x01
1276 u16 sis_type;
1277#define IPR_SIS32 0x00
1278#define IPR_SIS64 0x01
1032 const struct ipr_chip_cfg_t *cfg; 1279 const struct ipr_chip_cfg_t *cfg;
1033}; 1280};
1034 1281
@@ -1073,13 +1320,6 @@ enum ipr_sdt_state {
1073 DUMP_OBTAINED 1320 DUMP_OBTAINED
1074}; 1321};
1075 1322
1076enum ipr_cache_state {
1077 CACHE_NONE,
1078 CACHE_DISABLED,
1079 CACHE_ENABLED,
1080 CACHE_INVALID
1081};
1082
1083/* Per-controller data */ 1323/* Per-controller data */
1084struct ipr_ioa_cfg { 1324struct ipr_ioa_cfg {
1085 char eye_catcher[8]; 1325 char eye_catcher[8];
@@ -1099,10 +1339,17 @@ struct ipr_ioa_cfg {
1099 u8 dual_raid:1; 1339 u8 dual_raid:1;
1100 u8 needs_warm_reset:1; 1340 u8 needs_warm_reset:1;
1101 u8 msi_received:1; 1341 u8 msi_received:1;
1342 u8 sis64:1;
1102 1343
1103 u8 revid; 1344 u8 revid;
1104 1345
1105 enum ipr_cache_state cache_state; 1346 /*
1347 * Bitmaps for SIS64 generated target values
1348 */
1349 unsigned long *target_ids;
1350 unsigned long *array_ids;
1351 unsigned long *vset_ids;
1352
1106 u16 type; /* CCIN of the card */ 1353 u16 type; /* CCIN of the card */
1107 1354
1108 u8 log_level; 1355 u8 log_level;
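The three bitmaps above track which SIS64-generated target numbers are in use on the virtual buses. Handing out a fresh id is then a standard bitmap operation; a sketch of the presumed usage pattern (the helper name is illustrative):

static int ipr_alloc_target_id(unsigned long *ids, u32 max_devs)
{
	int id = find_first_zero_bit(ids, max_devs);

	if (id >= max_devs)
		return -ENOSPC;	/* every generated id is taken */
	set_bit(id, ids);
	return id;
}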
@@ -1133,8 +1380,13 @@ struct ipr_ioa_cfg {
1133 1380
1134 char cfg_table_start[8]; 1381 char cfg_table_start[8];
1135#define IPR_CFG_TBL_START "cfg" 1382#define IPR_CFG_TBL_START "cfg"
1136 struct ipr_config_table *cfg_table; 1383 union {
1384 struct ipr_config_table *cfg_table;
1385 struct ipr_config_table64 *cfg_table64;
1386 } u;
1137 dma_addr_t cfg_table_dma; 1387 dma_addr_t cfg_table_dma;
1388 u32 cfg_table_size;
1389 u32 max_devs_supported;
1138 1390
1139 char resource_table_label[8]; 1391 char resource_table_label[8];
1140#define IPR_RES_TABLE_LABEL "res_tbl" 1392#define IPR_RES_TABLE_LABEL "res_tbl"
@@ -1202,13 +1454,17 @@ struct ipr_ioa_cfg {
1202 char ipr_cmd_label[8]; 1454 char ipr_cmd_label[8];
1203#define IPR_CMD_LABEL "ipr_cmd" 1455#define IPR_CMD_LABEL "ipr_cmd"
1204 struct ipr_cmnd *ipr_cmnd_list[IPR_NUM_CMD_BLKS]; 1456 struct ipr_cmnd *ipr_cmnd_list[IPR_NUM_CMD_BLKS];
1205 u32 ipr_cmnd_list_dma[IPR_NUM_CMD_BLKS]; 1457 dma_addr_t ipr_cmnd_list_dma[IPR_NUM_CMD_BLKS];
1206}; 1458}; /* struct ipr_ioa_cfg */
1207 1459
1208struct ipr_cmnd { 1460struct ipr_cmnd {
1209 struct ipr_ioarcb ioarcb; 1461 struct ipr_ioarcb ioarcb;
1462 union {
1463 struct ipr_ioadl_desc ioadl[IPR_NUM_IOADL_ENTRIES];
1464 struct ipr_ioadl64_desc ioadl64[IPR_NUM_IOADL_ENTRIES];
1465 struct ipr_ata64_ioadl ata_ioadl;
1466 } i;
1210 struct ipr_ioasa ioasa; 1467 struct ipr_ioasa ioasa;
1211 struct ipr_ioadl_desc ioadl[IPR_NUM_IOADL_ENTRIES];
1212 struct list_head queue; 1468 struct list_head queue;
1213 struct scsi_cmnd *scsi_cmd; 1469 struct scsi_cmnd *scsi_cmd;
1214 struct ata_queued_cmd *qc; 1470 struct ata_queued_cmd *qc;
@@ -1221,7 +1477,7 @@ struct ipr_cmnd {
1221 u8 sense_buffer[SCSI_SENSE_BUFFERSIZE]; 1477 u8 sense_buffer[SCSI_SENSE_BUFFERSIZE];
1222 dma_addr_t sense_buffer_dma; 1478 dma_addr_t sense_buffer_dma;
1223 unsigned short dma_use_sg; 1479 unsigned short dma_use_sg;
1224 dma_addr_t dma_handle; 1480 dma_addr_t dma_addr;
1225 struct ipr_cmnd *sibling; 1481 struct ipr_cmnd *sibling;
1226 union { 1482 union {
1227 enum ipr_shutdown_type shutdown_type; 1483 enum ipr_shutdown_type shutdown_type;
@@ -1314,8 +1570,6 @@ struct ipr_ioa_dump {
1314 u32 next_page_index; 1570 u32 next_page_index;
1315 u32 page_offset; 1571 u32 page_offset;
1316 u32 format; 1572 u32 format;
1317#define IPR_SDT_FMT2 2
1318#define IPR_SDT_UNKNOWN 3
1319}__attribute__((packed, aligned (4))); 1573}__attribute__((packed, aligned (4)));
1320 1574
1321struct ipr_dump { 1575struct ipr_dump {
@@ -1377,6 +1631,13 @@ struct ipr_ucode_image_header {
1377#define ipr_info(...) printk(KERN_INFO IPR_NAME ": "__VA_ARGS__) 1631#define ipr_info(...) printk(KERN_INFO IPR_NAME ": "__VA_ARGS__)
1378#define ipr_dbg(...) IPR_DBG_CMD(printk(KERN_INFO IPR_NAME ": "__VA_ARGS__)) 1632#define ipr_dbg(...) IPR_DBG_CMD(printk(KERN_INFO IPR_NAME ": "__VA_ARGS__))
1379 1633
1634#define ipr_res_printk(level, ioa_cfg, bus, target, lun, fmt, ...) \
1635 printk(level IPR_NAME ": %d:%d:%d:%d: " fmt, (ioa_cfg)->host->host_no, \
1636 bus, target, lun, ##__VA_ARGS__)
1637
1638#define ipr_res_err(ioa_cfg, res, fmt, ...) \
1639 ipr_res_printk(KERN_ERR, ioa_cfg, (res)->bus, (res)->target, (res)->lun, fmt, ##__VA_ARGS__)
1640
1380#define ipr_ra_printk(level, ioa_cfg, ra, fmt, ...) \ 1641#define ipr_ra_printk(level, ioa_cfg, ra, fmt, ...) \
1381 printk(level IPR_NAME ": %d:%d:%d:%d: " fmt, (ioa_cfg)->host->host_no, \ 1642 printk(level IPR_NAME ": %d:%d:%d:%d: " fmt, (ioa_cfg)->host->host_no, \
1382 (ra).bus, (ra).target, (ra).lun, ##__VA_ARGS__) 1643 (ra).bus, (ra).target, (ra).lun, ##__VA_ARGS__)
@@ -1384,9 +1645,6 @@ struct ipr_ucode_image_header {
1384#define ipr_ra_err(ioa_cfg, ra, fmt, ...) \ 1645#define ipr_ra_err(ioa_cfg, ra, fmt, ...) \
1385 ipr_ra_printk(KERN_ERR, ioa_cfg, ra, fmt, ##__VA_ARGS__) 1646 ipr_ra_printk(KERN_ERR, ioa_cfg, ra, fmt, ##__VA_ARGS__)
1386 1647
1387#define ipr_res_err(ioa_cfg, res, fmt, ...) \
1388 ipr_ra_err(ioa_cfg, (res)->cfgte.res_addr, fmt, ##__VA_ARGS__)
1389
1390#define ipr_phys_res_err(ioa_cfg, res, fmt, ...) \ 1648#define ipr_phys_res_err(ioa_cfg, res, fmt, ...) \
1391{ \ 1649{ \
1392 if ((res).bus >= IPR_MAX_NUM_BUSES) { \ 1650 if ((res).bus >= IPR_MAX_NUM_BUSES) { \
@@ -1399,14 +1657,21 @@ struct ipr_ucode_image_header {
1399} 1657}
1400 1658
1401#define ipr_hcam_err(hostrcb, fmt, ...) \ 1659#define ipr_hcam_err(hostrcb, fmt, ...) \
1402{ \ 1660{ \
1403 if (ipr_is_device(&(hostrcb)->hcam.u.error.failing_dev_res_addr)) { \ 1661 if (ipr_is_device(hostrcb)) { \
1404 ipr_ra_err((hostrcb)->ioa_cfg, \ 1662 if ((hostrcb)->ioa_cfg->sis64) { \
1405 (hostrcb)->hcam.u.error.failing_dev_res_addr, \ 1663 printk(KERN_ERR IPR_NAME ": %s: " fmt, \
1406 fmt, ##__VA_ARGS__); \ 1664 ipr_format_resource_path(&hostrcb->hcam.u.error64.fd_res_path[0], \
1407 } else { \ 1665 &hostrcb->rp_buffer[0]), \
1408 dev_err(&(hostrcb)->ioa_cfg->pdev->dev, fmt, ##__VA_ARGS__); \ 1666 ##__VA_ARGS__); \
1409 } \ 1667 } else { \
1668 ipr_ra_err((hostrcb)->ioa_cfg, \
1669 (hostrcb)->hcam.u.error.fd_res_addr, \
1670 fmt, ##__VA_ARGS__); \
1671 } \
1672 } else { \
1673 dev_err(&(hostrcb)->ioa_cfg->pdev->dev, fmt, ##__VA_ARGS__); \
1674 } \
1410} 1675}
1411 1676
1412#define ipr_trace ipr_dbg("%s: %s: Line: %d\n",\ 1677#define ipr_trace ipr_dbg("%s: %s: Line: %d\n",\
@@ -1432,7 +1697,7 @@ ipr_err("----------------------------------------------------------\n")
1432 **/ 1697 **/
1433static inline int ipr_is_ioa_resource(struct ipr_resource_entry *res) 1698static inline int ipr_is_ioa_resource(struct ipr_resource_entry *res)
1434{ 1699{
1435 return (res->cfgte.flags & IPR_IS_IOA_RESOURCE) ? 1 : 0; 1700 return res->type == IPR_RES_TYPE_IOAFP;
1436} 1701}
1437 1702
1438/** 1703/**
@@ -1444,12 +1709,8 @@ static inline int ipr_is_ioa_resource(struct ipr_resource_entry *res)
1444 **/ 1709 **/
1445static inline int ipr_is_af_dasd_device(struct ipr_resource_entry *res) 1710static inline int ipr_is_af_dasd_device(struct ipr_resource_entry *res)
1446{ 1711{
1447 if (IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data) && 1712 return res->type == IPR_RES_TYPE_AF_DASD ||
1448 !ipr_is_ioa_resource(res) && 1713 res->type == IPR_RES_TYPE_REMOTE_AF_DASD;
1449 IPR_RES_SUBTYPE(res) == IPR_SUBTYPE_AF_DASD)
1450 return 1;
1451 else
1452 return 0;
1453} 1714}
1454 1715
1455/** 1716/**
@@ -1461,12 +1722,7 @@ static inline int ipr_is_af_dasd_device(struct ipr_resource_entry *res)
1461 **/ 1722 **/
1462static inline int ipr_is_vset_device(struct ipr_resource_entry *res) 1723static inline int ipr_is_vset_device(struct ipr_resource_entry *res)
1463{ 1724{
1464 if (IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data) && 1725 return res->type == IPR_RES_TYPE_VOLUME_SET;
1465 !ipr_is_ioa_resource(res) &&
1466 IPR_RES_SUBTYPE(res) == IPR_SUBTYPE_VOLUME_SET)
1467 return 1;
1468 else
1469 return 0;
1470} 1726}
1471 1727
1472/** 1728/**
@@ -1478,11 +1734,7 @@ static inline int ipr_is_vset_device(struct ipr_resource_entry *res)
1478 **/ 1734 **/
1479static inline int ipr_is_gscsi(struct ipr_resource_entry *res) 1735static inline int ipr_is_gscsi(struct ipr_resource_entry *res)
1480{ 1736{
1481 if (!ipr_is_ioa_resource(res) && 1737 return res->type == IPR_RES_TYPE_GENERIC_SCSI;
1482 IPR_RES_SUBTYPE(res) == IPR_SUBTYPE_GENERIC_SCSI)
1483 return 1;
1484 else
1485 return 0;
1486} 1738}
1487 1739
1488/** 1740/**
@@ -1495,7 +1747,7 @@ static inline int ipr_is_gscsi(struct ipr_resource_entry *res)
1495static inline int ipr_is_scsi_disk(struct ipr_resource_entry *res) 1747static inline int ipr_is_scsi_disk(struct ipr_resource_entry *res)
1496{ 1748{
1497 if (ipr_is_af_dasd_device(res) || 1749 if (ipr_is_af_dasd_device(res) ||
1498 (ipr_is_gscsi(res) && IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data))) 1750 (ipr_is_gscsi(res) && IPR_IS_DASD_DEVICE(res->std_inq_data)))
1499 return 1; 1751 return 1;
1500 else 1752 else
1501 return 0; 1753 return 0;
@@ -1510,11 +1762,7 @@ static inline int ipr_is_scsi_disk(struct ipr_resource_entry *res)
1510 **/ 1762 **/
1511static inline int ipr_is_gata(struct ipr_resource_entry *res) 1763static inline int ipr_is_gata(struct ipr_resource_entry *res)
1512{ 1764{
1513 if (!ipr_is_ioa_resource(res) && 1765 return res->type == IPR_RES_TYPE_GENERIC_ATA;
1514 IPR_RES_SUBTYPE(res) == IPR_SUBTYPE_GENERIC_ATA)
1515 return 1;
1516 else
1517 return 0;
1518} 1766}
1519 1767
1520/** 1768/**
@@ -1526,24 +1774,35 @@ static inline int ipr_is_gata(struct ipr_resource_entry *res)
1526 **/ 1774 **/
1527static inline int ipr_is_naca_model(struct ipr_resource_entry *res) 1775static inline int ipr_is_naca_model(struct ipr_resource_entry *res)
1528{ 1776{
1529 if (ipr_is_gscsi(res) && IPR_QUEUEING_MODEL(res) == IPR_QUEUE_NACA_MODEL) 1777 if (ipr_is_gscsi(res) && res->qmodel == IPR_QUEUE_NACA_MODEL)
1530 return 1; 1778 return 1;
1531 return 0; 1779 return 0;
1532} 1780}
1533 1781
1534/** 1782/**
1535 * ipr_is_device - Determine if resource address is that of a device 1783 * ipr_is_device - Determine if the hostrcb structure is related to a device
1536 * @res_addr: resource address struct 1784 * @hostrcb: host resource control blocks struct
1537 * 1785 *
1538 * Return value: 1786 * Return value:
1539 * 1 if AF / 0 if not AF 1787 * 1 if AF / 0 if not AF
1540 **/ 1788 **/
1541static inline int ipr_is_device(struct ipr_res_addr *res_addr) 1789static inline int ipr_is_device(struct ipr_hostrcb *hostrcb)
1542{ 1790{
1543 if ((res_addr->bus < IPR_MAX_NUM_BUSES) && 1791 struct ipr_res_addr *res_addr;
1544 (res_addr->target < (IPR_MAX_NUM_TARGETS_PER_BUS - 1))) 1792 u8 *res_path;
1545 return 1; 1793
1546 1794 if (hostrcb->ioa_cfg->sis64) {
1795 res_path = &hostrcb->hcam.u.error64.fd_res_path[0];
1796 if ((res_path[0] == 0x00 || res_path[0] == 0x80 ||
1797 res_path[0] == 0x81) && res_path[2] != 0xFF)
1798 return 1;
1799 } else {
1800 res_addr = &hostrcb->hcam.u.error.fd_res_addr;
1801
1802 if ((res_addr->bus < IPR_MAX_NUM_BUSES) &&
1803 (res_addr->target < (IPR_MAX_NUM_TARGETS_PER_BUS - 1)))
1804 return 1;
1805 }
1547 return 0; 1806 return 0;
1548} 1807}
1549 1808
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index edc49ca49cea..02143af7c1af 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -28,6 +28,7 @@
28 28
29#include <linux/types.h> 29#include <linux/types.h>
30#include <linux/inet.h> 30#include <linux/inet.h>
31#include <linux/slab.h>
31#include <linux/file.h> 32#include <linux/file.h>
32#include <linux/blkdev.h> 33#include <linux/blkdev.h>
33#include <linux/crypto.h> 34#include <linux/crypto.h>
@@ -584,9 +585,10 @@ static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
584 struct iscsi_conn *conn = cls_conn->dd_data; 585 struct iscsi_conn *conn = cls_conn->dd_data;
585 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 586 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
586 struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; 587 struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
588 struct socket *sock = tcp_sw_conn->sock;
587 589
588 /* userspace may have goofed up and not bound us */ 590 /* userspace may have goofed up and not bound us */
589 if (!tcp_sw_conn->sock) 591 if (!sock)
590 return; 592 return;
591 /* 593 /*
592 * Make sure our recv side is stopped. 594 * Make sure our recv side is stopped.
@@ -597,6 +599,11 @@ static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
597 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); 599 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
598 write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock); 600 write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock);
599 601
602 if (sock->sk->sk_sleep) {
603 sock->sk->sk_err = EIO;
604 wake_up_interruptible(sock->sk->sk_sleep);
605 }
606
600 iscsi_conn_stop(cls_conn, flag); 607 iscsi_conn_stop(cls_conn, flag);
601 iscsi_sw_tcp_release_conn(conn); 608 iscsi_sw_tcp_release_conn(conn);
602} 609}
@@ -868,7 +875,7 @@ static struct scsi_host_template iscsi_sw_tcp_sht = {
868 .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN, 875 .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
869 .eh_abort_handler = iscsi_eh_abort, 876 .eh_abort_handler = iscsi_eh_abort,
870 .eh_device_reset_handler= iscsi_eh_device_reset, 877 .eh_device_reset_handler= iscsi_eh_device_reset,
871 .eh_target_reset_handler= iscsi_eh_target_reset, 878 .eh_target_reset_handler = iscsi_eh_recover_target,
872 .use_clustering = DISABLE_CLUSTERING, 879 .use_clustering = DISABLE_CLUSTERING,
873 .slave_alloc = iscsi_sw_tcp_slave_alloc, 880 .slave_alloc = iscsi_sw_tcp_slave_alloc,
874 .slave_configure = iscsi_sw_tcp_slave_configure, 881 .slave_configure = iscsi_sw_tcp_slave_configure,
@@ -903,7 +910,7 @@ static struct iscsi_transport iscsi_sw_tcp_transport = {
903 ISCSI_USERNAME | ISCSI_PASSWORD | 910 ISCSI_USERNAME | ISCSI_PASSWORD |
904 ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN | 911 ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
905 ISCSI_FAST_ABORT | ISCSI_ABORT_TMO | 912 ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
906 ISCSI_LU_RESET_TMO | 913 ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO |
907 ISCSI_PING_TMO | ISCSI_RECV_TMO | 914 ISCSI_PING_TMO | ISCSI_RECV_TMO |
908 ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME, 915 ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
909 .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS | 916 .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c
index b2d481dd3750..08e26d4e3731 100644
--- a/drivers/scsi/jazz_esp.c
+++ b/drivers/scsi/jazz_esp.c
@@ -4,6 +4,7 @@
4 */ 4 */
5 5
6#include <linux/kernel.h> 6#include <linux/kernel.h>
7#include <linux/gfp.h>
7#include <linux/types.h> 8#include <linux/types.h>
8#include <linux/module.h> 9#include <linux/module.h>
9#include <linux/init.h> 10#include <linux/init.h>
diff --git a/drivers/scsi/lasi700.c b/drivers/scsi/lasi700.c
index b3d31315ac32..23880f8fe7e4 100644
--- a/drivers/scsi/lasi700.c
+++ b/drivers/scsi/lasi700.c
@@ -40,6 +40,7 @@
40#include <linux/blkdev.h> 40#include <linux/blkdev.h>
41#include <linux/ioport.h> 41#include <linux/ioport.h>
42#include <linux/dma-mapping.h> 42#include <linux/dma-mapping.h>
43#include <linux/slab.h>
43 44
44#include <asm/page.h> 45#include <asm/page.h>
45#include <asm/pgtable.h> 46#include <asm/pgtable.h>
diff --git a/drivers/scsi/libfc/Makefile b/drivers/scsi/libfc/Makefile
index 55f982de3a9a..4bb23ac86a5c 100644
--- a/drivers/scsi/libfc/Makefile
+++ b/drivers/scsi/libfc/Makefile
@@ -3,10 +3,12 @@
3obj-$(CONFIG_LIBFC) += libfc.o 3obj-$(CONFIG_LIBFC) += libfc.o
4 4
5libfc-objs := \ 5libfc-objs := \
6 fc_libfc.o \
6 fc_disc.o \ 7 fc_disc.o \
7 fc_exch.o \ 8 fc_exch.o \
8 fc_elsct.o \ 9 fc_elsct.o \
9 fc_frame.o \ 10 fc_frame.o \
10 fc_lport.o \ 11 fc_lport.o \
11 fc_rport.o \ 12 fc_rport.o \
12 fc_fcp.o 13 fc_fcp.o \
14 fc_npiv.o
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index c48799e9dd8e..1087a7f18e84 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -33,6 +33,7 @@
33 */ 33 */
34 34
35#include <linux/timer.h> 35#include <linux/timer.h>
36#include <linux/slab.h>
36#include <linux/err.h> 37#include <linux/err.h>
37#include <asm/unaligned.h> 38#include <asm/unaligned.h>
38 39
@@ -40,6 +41,8 @@
40 41
41#include <scsi/libfc.h> 42#include <scsi/libfc.h>
42 43
44#include "fc_libfc.h"
45
43#define FC_DISC_RETRY_LIMIT 3 /* max retries */ 46#define FC_DISC_RETRY_LIMIT 3 /* max retries */
44#define FC_DISC_RETRY_DELAY 500UL /* (msecs) delay */ 47#define FC_DISC_RETRY_DELAY 500UL /* (msecs) delay */
45 48
@@ -51,8 +54,8 @@ static int fc_disc_single(struct fc_lport *, struct fc_disc_port *);
51static void fc_disc_restart(struct fc_disc *); 54static void fc_disc_restart(struct fc_disc *);
52 55
53/** 56/**
54 * fc_disc_stop_rports() - delete all the remote ports associated with the lport 57 * fc_disc_stop_rports() - Delete all the remote ports associated with the lport
55 * @disc: The discovery job to stop rports on 58 * @disc: The discovery job to stop remote ports on
56 * 59 *
57 * Locking Note: This function expects that the lport mutex is locked before 60 * Locking Note: This function expects that the lport mutex is locked before
58 * calling it. 61 * calling it.
@@ -72,9 +75,9 @@ void fc_disc_stop_rports(struct fc_disc *disc)
72 75
73/** 76/**
74 * fc_disc_recv_rscn_req() - Handle Registered State Change Notification (RSCN) 77 * fc_disc_recv_rscn_req() - Handle Registered State Change Notification (RSCN)
75 * @sp: Current sequence of the RSCN exchange 78 * @sp: The sequence of the RSCN exchange
76 * @fp: RSCN Frame 79 * @fp: The RSCN frame
77 * @lport: Fibre Channel host port instance 80 * @lport: The local port that the request will be sent on
78 * 81 *
79 * Locking Note: This function expects that the disc_mutex is locked 82 * Locking Note: This function expects that the disc_mutex is locked
80 * before it is called. 83 * before it is called.
@@ -183,9 +186,9 @@ reject:
183 186
184/** 187/**
185 * fc_disc_recv_req() - Handle incoming requests 188 * fc_disc_recv_req() - Handle incoming requests
186 * @sp: Current sequence of the request exchange 189 * @sp: The sequence of the request exchange
187 * @fp: The frame 190 * @fp: The request frame
188 * @lport: The FC local port 191 * @lport: The local port receiving the request
189 * 192 *
190 * Locking Note: This function is called from the EM and will lock 193 * Locking Note: This function is called from the EM and will lock
191 * the disc_mutex before calling the handler for the 194 * the disc_mutex before calling the handler for the
@@ -213,7 +216,7 @@ static void fc_disc_recv_req(struct fc_seq *sp, struct fc_frame *fp,
213 216
214/** 217/**
215 * fc_disc_restart() - Restart discovery 218 * fc_disc_restart() - Restart discovery
216 * @lport: FC discovery context 219 * @disc: The discovery object to be restarted
217 * 220 *
218 * Locking Note: This function expects that the disc mutex 221 * Locking Note: This function expects that the disc mutex
219 * is already locked. 222 * is already locked.
@@ -240,9 +243,9 @@ static void fc_disc_restart(struct fc_disc *disc)
240} 243}
241 244
242/** 245/**
243 * fc_disc_start() - Fibre Channel Target discovery 246 * fc_disc_start() - Start discovery on a local port
244 * @lport: FC local port 247 * @lport: The local port to have discovery started on
245 * @disc_callback: function to be called when discovery is complete 248 * @disc_callback: Callback function to be called when discovery is complete
246 */ 249 */
247static void fc_disc_start(void (*disc_callback)(struct fc_lport *, 250static void fc_disc_start(void (*disc_callback)(struct fc_lport *,
248 enum fc_disc_event), 251 enum fc_disc_event),
@@ -263,8 +266,8 @@ static void fc_disc_start(void (*disc_callback)(struct fc_lport *,
263 266
264/** 267/**
265 * fc_disc_done() - Discovery has been completed 268 * fc_disc_done() - Discovery has been completed
266 * @disc: FC discovery context 269 * @disc: The discovery context
267 * @event: discovery completion status 270 * @event: The discovery completion status
268 * 271 *
269 * Locking Note: This function expects that the disc mutex is locked before 272 * Locking Note: This function expects that the disc mutex is locked before
270 * it is called. The discovery callback is then made with the lock released, 273 * it is called. The discovery callback is then made with the lock released,
@@ -284,8 +287,8 @@ static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event)
284 } 287 }
285 288
286 /* 289 /*
287 * Go through all remote ports. If they were found in the latest 290 * Go through all remote ports. If they were found in the latest
288 * discovery, reverify or log them in. Otherwise, log them out. 291 * discovery, reverify or log them in. Otherwise, log them out.
289 * Skip ports which were never discovered. These are the dNS port 292 * Skip ports which were never discovered. These are the dNS port
290 * and ports which were created by PLOGI. 293 * and ports which were created by PLOGI.
291 */ 294 */
@@ -305,8 +308,8 @@ static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event)
305 308
306/** 309/**
307 * fc_disc_error() - Handle error on dNS request 310 * fc_disc_error() - Handle error on dNS request
308 * @disc: FC discovery context 311 * @disc: The discovery context
309 * @fp: The frame pointer 312 * @fp: The error code encoded as a frame pointer
310 */ 313 */
311static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp) 314static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp)
312{ 315{
@@ -342,7 +345,7 @@ static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp)
342 345
343/** 346/**
344 * fc_disc_gpn_ft_req() - Send Get Port Names by FC-4 type (GPN_FT) request 347 * fc_disc_gpn_ft_req() - Send Get Port Names by FC-4 type (GPN_FT) request
345 * @lport: FC discovery context 348 * @disc: The discovery context
346 * 349 *
347 * Locking Note: This function expects that the disc_mutex is locked 350 * Locking Note: This function expects that the disc_mutex is locked
348 * before it is called. 351 * before it is called.
@@ -368,17 +371,17 @@ static void fc_disc_gpn_ft_req(struct fc_disc *disc)
368 if (lport->tt.elsct_send(lport, 0, fp, 371 if (lport->tt.elsct_send(lport, 0, fp,
369 FC_NS_GPN_FT, 372 FC_NS_GPN_FT,
370 fc_disc_gpn_ft_resp, 373 fc_disc_gpn_ft_resp,
371 disc, lport->e_d_tov)) 374 disc, 3 * lport->r_a_tov))
372 return; 375 return;
373err: 376err:
374 fc_disc_error(disc, fp); 377 fc_disc_error(disc, NULL);
375} 378}
376 379
377/** 380/**
378 * fc_disc_gpn_ft_parse() - Parse the body of the dNS GPN_FT response. 381 * fc_disc_gpn_ft_parse() - Parse the body of the dNS GPN_FT response.
379 * @lport: Fibre Channel host port instance 382 * @lport: The local port the GPN_FT was received on
380 * @buf: GPN_FT response buffer 383 * @buf: The GPN_FT response buffer
381 * @len: size of response buffer 384 * @len: The size of response buffer
382 * 385 *
383 * Goes through the list of IDs and names resulting from a request. 386 * Goes through the list of IDs and names resulting from a request.
384 */ 387 */
@@ -477,10 +480,8 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
477} 480}
478 481
479/** 482/**
480 * fc_disc_timeout() - Retry handler for the disc component 483 * fc_disc_timeout() - Handler for discovery timeouts
481 * @work: Structure holding disc obj that needs retry discovery 484 * @work: Structure holding discovery context that needs to retry discovery
482 *
483 * Handle retry of memory allocation for remote ports.
484 */ 485 */
485static void fc_disc_timeout(struct work_struct *work) 486static void fc_disc_timeout(struct work_struct *work)
486{ 487{
@@ -494,9 +495,9 @@ static void fc_disc_timeout(struct work_struct *work)
494 495
495/** 496/**
496 * fc_disc_gpn_ft_resp() - Handle a response frame from Get Port Names (GPN_FT) 497 * fc_disc_gpn_ft_resp() - Handle a response frame from Get Port Names (GPN_FT)
497 * @sp: Current sequence of GPN_FT exchange 498 * @sp: The sequence that the GPN_FT response was received on
498 * @fp: response frame 499 * @fp: The GPN_FT response frame
499 * @lp_arg: Fibre Channel host port instance 500 * @lp_arg: The discovery context
500 * 501 *
501 * Locking Note: This function is called without disc mutex held, and 502 * Locking Note: This function is called without disc mutex held, and
502 * should do all its processing with the mutex held 503 * should do all its processing with the mutex held
@@ -567,9 +568,9 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
567 568
568/** 569/**
569 * fc_disc_gpn_id_resp() - Handle a response frame from Get Port Names (GPN_ID) 570 * fc_disc_gpn_id_resp() - Handle a response frame from Get Port Names (GPN_ID)
570 * @sp: exchange sequence 571 * @sp: The sequence the GPN_ID is on
571 * @fp: response frame 572 * @fp: The response frame
572 * @rdata_arg: remote port private data 573 * @rdata_arg: The remote port that sent the GPN_ID response
573 * 574 *
574 * Locking Note: This function is called without disc mutex held. 575 * Locking Note: This function is called without disc mutex held.
575 */ 576 */
@@ -637,7 +638,7 @@ out:
637 638
638/** 639/**
639 * fc_disc_gpn_id_req() - Send Get Port Names by ID (GPN_ID) request 640 * fc_disc_gpn_id_req() - Send Get Port Names by ID (GPN_ID) request
640 * @lport: local port 641 * @lport: The local port to initiate discovery on
641 * @rdata: remote port private data 642 * @rdata: remote port private data
642 * 643 *
643 * Locking Note: This function expects that the disc_mutex is locked 644 * Locking Note: This function expects that the disc_mutex is locked
@@ -654,7 +655,8 @@ static int fc_disc_gpn_id_req(struct fc_lport *lport,
654 if (!fp) 655 if (!fp)
655 return -ENOMEM; 656 return -ENOMEM;
656 if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, FC_NS_GPN_ID, 657 if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, FC_NS_GPN_ID,
657 fc_disc_gpn_id_resp, rdata, lport->e_d_tov)) 658 fc_disc_gpn_id_resp, rdata,
659 3 * lport->r_a_tov))
658 return -ENOMEM; 660 return -ENOMEM;
659 kref_get(&rdata->kref); 661 kref_get(&rdata->kref);
660 return 0; 662 return 0;
@@ -662,8 +664,8 @@ static int fc_disc_gpn_id_req(struct fc_lport *lport,
662 664
663/** 665/**
664 * fc_disc_single() - Discover the directory information for a single target 666 * fc_disc_single() - Discover the directory information for a single target
665 * @lport: local port 667 * @lport: The local port the remote port is associated with
666 * @dp: The port to rediscover 668 * @dp: The port to rediscover
667 * 669 *
668 * Locking Note: This function expects that the disc_mutex is locked 670 * Locking Note: This function expects that the disc_mutex is locked
669 * before it is called. 671 * before it is called.
@@ -681,7 +683,7 @@ static int fc_disc_single(struct fc_lport *lport, struct fc_disc_port *dp)
681 683
682/** 684/**
683 * fc_disc_stop() - Stop discovery for a given lport 685 * fc_disc_stop() - Stop discovery for a given lport
684 * @lport: The lport that discovery should stop for 686 * @lport: The local port that discovery should stop on
685 */ 687 */
686void fc_disc_stop(struct fc_lport *lport) 688void fc_disc_stop(struct fc_lport *lport)
687{ 689{
@@ -695,7 +697,7 @@ void fc_disc_stop(struct fc_lport *lport)
695 697
696/** 698/**
697 * fc_disc_stop_final() - Stop discovery for a given lport 699 * fc_disc_stop_final() - Stop discovery for a given lport
698 * @lport: The lport that discovery should stop for 700 * @lport: The lport that discovery should stop on
699 * 701 *
700 * This function will block until discovery has been 702 * This function will block until discovery has been
701 * completely stopped and all rports have been deleted. 703 * completely stopped and all rports have been deleted.
@@ -707,8 +709,8 @@ void fc_disc_stop_final(struct fc_lport *lport)
707} 709}
708 710
709/** 711/**
710 * fc_disc_init() - Initialize the discovery block 712 * fc_disc_init() - Initialize the discovery layer for a local port
711 * @lport: FC local port 713 * @lport: The local port that needs the discovery layer to be initialized
712 */ 714 */
713int fc_disc_init(struct fc_lport *lport) 715int fc_disc_init(struct fc_lport *lport)
714{ 716{
diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c
index 5cfa68732e9d..53748724f2c5 100644
--- a/drivers/scsi/libfc/fc_elsct.c
+++ b/drivers/scsi/libfc/fc_elsct.c
@@ -28,17 +28,22 @@
28#include <scsi/libfc.h> 28#include <scsi/libfc.h>
29#include <scsi/fc_encode.h> 29#include <scsi/fc_encode.h>
30 30
31/* 31/**
32 * fc_elsct_send - sends ELS/CT frame 32 * fc_elsct_send() - Send an ELS or CT frame
33 * @lport: The local port to send the frame on
34 * @did: The destination ID for the frame
35 * @fp: The frame to be sent
36 * @op: The operational code
37 * @resp: The callback routine when the response is received
38 * @arg: The argument to pass to the response callback routine
39 * @timer_msec: The timeout period for the frame (in msecs)
33 */ 40 */
34static struct fc_seq *fc_elsct_send(struct fc_lport *lport, 41struct fc_seq *fc_elsct_send(struct fc_lport *lport, u32 did,
35 u32 did, 42 struct fc_frame *fp, unsigned int op,
36 struct fc_frame *fp, 43 void (*resp)(struct fc_seq *,
37 unsigned int op, 44 struct fc_frame *,
38 void (*resp)(struct fc_seq *, 45 void *),
39 struct fc_frame *fp, 46 void *arg, u32 timer_msec)
40 void *arg),
41 void *arg, u32 timer_msec)
42{ 47{
43 enum fc_rctl r_ctl; 48 enum fc_rctl r_ctl;
44 enum fc_fh_type fh_type; 49 enum fc_fh_type fh_type;
@@ -53,15 +58,22 @@ static struct fc_seq *fc_elsct_send(struct fc_lport *lport,
53 did = FC_FID_DIR_SERV; 58 did = FC_FID_DIR_SERV;
54 } 59 }
55 60
56 if (rc) 61 if (rc) {
62 fc_frame_free(fp);
57 return NULL; 63 return NULL;
64 }
58 65
59 fc_fill_fc_hdr(fp, r_ctl, did, fc_host_port_id(lport->host), fh_type, 66 fc_fill_fc_hdr(fp, r_ctl, did, fc_host_port_id(lport->host), fh_type,
60 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); 67 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
61 68
62 return lport->tt.exch_seq_send(lport, fp, resp, NULL, arg, timer_msec); 69 return lport->tt.exch_seq_send(lport, fp, resp, NULL, arg, timer_msec);
63} 70}
71EXPORT_SYMBOL(fc_elsct_send);
64 72
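With fc_elsct_send() now exported and taking the response callback directly, a name-server request from an upper layer looks like the discovery calls above; a sketch where payload_len, my_resp and my_arg are placeholders:

struct fc_frame *fp;

fp = fc_frame_alloc(lport, payload_len);
if (!fp)
	return -ENOMEM;
/* elsct_send now frees the frame itself on an encode failure. */
if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_GPN_FT,
			  my_resp, my_arg, 3 * lport->r_a_tov))
	return -ENOMEM;
return 0;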
73/**
74 * fc_elsct_init() - Initialize the ELS/CT layer
75 * @lport: The local port to initialize the ELS/CT layer for
76 */
65int fc_elsct_init(struct fc_lport *lport) 77int fc_elsct_init(struct fc_lport *lport)
66{ 78{
67 if (!lport->tt.elsct_send) 79 if (!lport->tt.elsct_send)
@@ -72,12 +84,15 @@ int fc_elsct_init(struct fc_lport *lport)
72EXPORT_SYMBOL(fc_elsct_init); 84EXPORT_SYMBOL(fc_elsct_init);
73 85
74/** 86/**
75 * fc_els_resp_type() - return string describing ELS response for debug. 87 * fc_els_resp_type() - Return a string describing the ELS response
76 * @fp: frame pointer with possible error code. 88 * @fp: The frame pointer or possible error code
77 */ 89 */
78const char *fc_els_resp_type(struct fc_frame *fp) 90const char *fc_els_resp_type(struct fc_frame *fp)
79{ 91{
80 const char *msg; 92 const char *msg;
93 struct fc_frame_header *fh;
94 struct fc_ct_hdr *ct;
95
81 if (IS_ERR(fp)) { 96 if (IS_ERR(fp)) {
82 switch (-PTR_ERR(fp)) { 97 switch (-PTR_ERR(fp)) {
83 case FC_NO_ERR: 98 case FC_NO_ERR:
@@ -94,15 +109,41 @@ const char *fc_els_resp_type(struct fc_frame *fp)
94 break; 109 break;
95 } 110 }
96 } else { 111 } else {
97 switch (fc_frame_payload_op(fp)) { 112 fh = fc_frame_header_get(fp);
98 case ELS_LS_ACC: 113 switch (fh->fh_type) {
99 msg = "accept"; 114 case FC_TYPE_ELS:
115 switch (fc_frame_payload_op(fp)) {
116 case ELS_LS_ACC:
117 msg = "accept";
118 break;
119 case ELS_LS_RJT:
120 msg = "reject";
121 break;
122 default:
123 msg = "response unknown ELS";
124 break;
125 }
100 break; 126 break;
101 case ELS_LS_RJT: 127 case FC_TYPE_CT:
102 msg = "reject"; 128 ct = fc_frame_payload_get(fp, sizeof(*ct));
129 if (ct) {
130 switch (ntohs(ct->ct_cmd)) {
131 case FC_FS_ACC:
132 msg = "CT accept";
133 break;
134 case FC_FS_RJT:
135 msg = "CT reject";
136 break;
137 default:
138 msg = "response unknown CT";
139 break;
140 }
141 } else {
142 msg = "short CT response";
143 }
103 break; 144 break;
104 default: 145 default:
105 msg = "response unknown ELS"; 146 msg = "response not ELS or CT";
106 break; 147 break;
107 } 148 }
108 } 149 }
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index c1c15748220c..e5df0d4db67e 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -24,7 +24,7 @@
24 */ 24 */
25 25
26#include <linux/timer.h> 26#include <linux/timer.h>
27#include <linux/gfp.h> 27#include <linux/slab.h>
28#include <linux/err.h> 28#include <linux/err.h>
29 29
30#include <scsi/fc/fc_fc2.h> 30#include <scsi/fc/fc_fc2.h>
@@ -32,10 +32,13 @@
32#include <scsi/libfc.h> 32#include <scsi/libfc.h>
33#include <scsi/fc_encode.h> 33#include <scsi/fc_encode.h>
34 34
35#include "fc_libfc.h"
36
35u16 fc_cpu_mask; /* cpu mask for possible cpus */ 37u16 fc_cpu_mask; /* cpu mask for possible cpus */
36EXPORT_SYMBOL(fc_cpu_mask); 38EXPORT_SYMBOL(fc_cpu_mask);
37static u16 fc_cpu_order; /* 2's power to represent total possible cpus */ 39static u16 fc_cpu_order; /* 2's power to represent total possible cpus */
38static struct kmem_cache *fc_em_cachep; /* cache for exchanges */ 40static struct kmem_cache *fc_em_cachep; /* cache for exchanges */
41struct workqueue_struct *fc_exch_workqueue;
39 42
40/* 43/*
41 * Structure and function definitions for managing Fibre Channel Exchanges 44 * Structure and function definitions for managing Fibre Channel Exchanges
@@ -50,35 +53,46 @@ static struct kmem_cache *fc_em_cachep; /* cache for exchanges */
50 * fc_seq holds the state for an individual sequence. 53 * fc_seq holds the state for an individual sequence.
51 */ 54 */
52 55
53/* 56/**
54 * Per cpu exchange pool 57 * struct fc_exch_pool - Per cpu exchange pool
58 * @next_index: Next possible free exchange index
59 * @total_exches: Total allocated exchanges
60 * @lock: Exch pool lock
61 * @ex_list: List of exchanges
55 * 62 *
56 * This structure manages per cpu exchanges in array of exchange pointers. 63 * This structure manages per cpu exchanges in array of exchange pointers.
57 * This array is allocated followed by struct fc_exch_pool memory for 64 * This array is allocated followed by struct fc_exch_pool memory for
58 * assigned range of exchanges to per cpu pool. 65 * assigned range of exchanges to per cpu pool.
59 */ 66 */
60struct fc_exch_pool { 67struct fc_exch_pool {
61 u16 next_index; /* next possible free exchange index */ 68 u16 next_index;
62 u16 total_exches; /* total allocated exchanges */ 69 u16 total_exches;
63 spinlock_t lock; /* exch pool lock */ 70 spinlock_t lock;
64 struct list_head ex_list; /* allocated exchanges list */ 71 struct list_head ex_list;
65}; 72};
66 73
67/* 74/**
68 * Exchange manager. 75 * struct fc_exch_mgr - The Exchange Manager (EM).
76 * @class: Default class for new sequences
77 * @kref: Reference counter
78 * @min_xid: Minimum exchange ID
79 * @max_xid: Maximum exchange ID
80 * @ep_pool: Reserved exchange pointers
81 * @pool_max_index: Max exch array index in exch pool
82 * @pool: Per cpu exch pool
83 * @stats: Statistics structure
69 * 84 *
70 * This structure is the center for creating exchanges and sequences. 85 * This structure is the center for creating exchanges and sequences.
71 * It manages the allocation of exchange IDs. 86 * It manages the allocation of exchange IDs.
72 */ 87 */
73struct fc_exch_mgr { 88struct fc_exch_mgr {
74 enum fc_class class; /* default class for sequences */ 89 enum fc_class class;
75 struct kref kref; /* exchange mgr reference count */ 90 struct kref kref;
76 u16 min_xid; /* min exchange ID */ 91 u16 min_xid;
77 u16 max_xid; /* max exchange ID */ 92 u16 max_xid;
78 struct list_head ex_list; /* allocated exchanges list */ 93 mempool_t *ep_pool;
79 mempool_t *ep_pool; /* reserve ep's */ 94 u16 pool_max_index;
80 u16 pool_max_index; /* max exch array index in exch pool */ 95 struct fc_exch_pool *pool;
81 struct fc_exch_pool *pool; /* per cpu exch pool */
82 96
83 /* 97 /*
84 * currently exchange mgr stats are updated but not used. 98 * currently exchange mgr stats are updated but not used.
@@ -96,6 +110,18 @@ struct fc_exch_mgr {
96}; 110};
97#define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq) 111#define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
98 112
113/**
114 * struct fc_exch_mgr_anchor - primary structure for list of EMs
115 * @ema_list: Exchange Manager Anchor list
116 * @mp: Exchange Manager associated with this anchor
117 * @match: Routine to determine if this anchor's EM should be used
118 *
119 * When walking the list of anchors the match routine will be called
120 * for each anchor to determine if that EM should be used. The last
121 * anchor in the list will always match to handle any exchanges not
122 * handled by other EMs. The non-default EMs would be added to the
123 * anchor list by HW that provides FCoE offloads.
124 */
99struct fc_exch_mgr_anchor { 125struct fc_exch_mgr_anchor {
100 struct list_head ema_list; 126 struct list_head ema_list;
101 struct fc_exch_mgr *mp; 127 struct fc_exch_mgr *mp;
@@ -108,7 +134,6 @@ static void fc_seq_ls_rjt(struct fc_seq *, enum fc_els_rjt_reason,
108 enum fc_els_rjt_explan); 134 enum fc_els_rjt_explan);
109static void fc_exch_els_rec(struct fc_seq *, struct fc_frame *); 135static void fc_exch_els_rec(struct fc_seq *, struct fc_frame *);
110static void fc_exch_els_rrq(struct fc_seq *, struct fc_frame *); 136static void fc_exch_els_rrq(struct fc_seq *, struct fc_frame *);
111static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp);
112 137
113/* 138/*
114 * Internal implementation notes. 139 * Internal implementation notes.
@@ -196,6 +221,15 @@ static char *fc_exch_rctl_names[] = FC_RCTL_NAMES_INIT;
196 221
197#define FC_TABLE_SIZE(x) (sizeof(x) / sizeof(x[0])) 222#define FC_TABLE_SIZE(x) (sizeof(x) / sizeof(x[0]))
198 223
224/**
225 * fc_exch_name_lookup() - Lookup name by opcode
226 * @op: Opcode to be looked up
227 * @table: Opcode/name table
228 * @max_index: Index not to be exceeded
229 *
230 * This routine is used to determine a human-readable string identifying
231 * an R_CTL opcode.
232 */
199static inline const char *fc_exch_name_lookup(unsigned int op, char **table, 233static inline const char *fc_exch_name_lookup(unsigned int op, char **table,
200 unsigned int max_index) 234 unsigned int max_index)
201{ 235{
@@ -208,25 +242,34 @@ static inline const char *fc_exch_name_lookup(unsigned int op, char **table,
208 return name; 242 return name;
209} 243}
210 244
245/**
246 * fc_exch_rctl_name() - Wrapper routine for fc_exch_name_lookup()
247 * @op: The opcode to be looked up
248 */
211static const char *fc_exch_rctl_name(unsigned int op) 249static const char *fc_exch_rctl_name(unsigned int op)
212{ 250{
213 return fc_exch_name_lookup(op, fc_exch_rctl_names, 251 return fc_exch_name_lookup(op, fc_exch_rctl_names,
214 FC_TABLE_SIZE(fc_exch_rctl_names)); 252 FC_TABLE_SIZE(fc_exch_rctl_names));
215} 253}
216 254
217/* 255/**
218 * Hold an exchange - keep it from being freed. 256 * fc_exch_hold() - Increment an exchange's reference count
257 * @ep: Exchange to be held
219 */ 258 */
220static void fc_exch_hold(struct fc_exch *ep) 259static inline void fc_exch_hold(struct fc_exch *ep)
221{ 260{
222 atomic_inc(&ep->ex_refcnt); 261 atomic_inc(&ep->ex_refcnt);
223} 262}
224 263
225/* 264/**
226 * setup fc hdr by initializing few more FC header fields and sof/eof. 265 * fc_exch_setup_hdr() - Initialize an FC header by initializing some fields
227 * Initialized fields by this func: 266 * and determining the SOF and EOF.
228 * - fh_ox_id, fh_rx_id, fh_seq_id, fh_seq_cnt 267 * @ep: The exchange that will use the header
229 * - sof and eof 268 * @fp: The frame whose header is to be modified
269 * @f_ctl: F_CTL bits that will be used for the frame header
270 *
271 * The fields initialized by this routine are: fh_ox_id, fh_rx_id,
272 * fh_seq_id, fh_seq_cnt and the SOF and EOF.
230 */ 273 */
231static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp, 274static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp,
232 u32 f_ctl) 275 u32 f_ctl)
@@ -243,7 +286,7 @@ static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp,
243 if (fc_sof_needs_ack(ep->class)) 286 if (fc_sof_needs_ack(ep->class))
244 fr_eof(fp) = FC_EOF_N; 287 fr_eof(fp) = FC_EOF_N;
245 /* 288 /*
246 * Form f_ctl. 289 * Form F_CTL.
247 * The number of fill bytes to make the length a 4-byte 290 * The number of fill bytes to make the length a 4-byte
248 * multiple is the low order 2-bits of the f_ctl. 291 * multiple is the low order 2-bits of the f_ctl.
249 * The fill itself will have been cleared by the frame 292 * The fill itself will have been cleared by the frame
@@ -273,10 +316,12 @@ static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp,
273 fh->fh_seq_cnt = htons(ep->seq.cnt); 316 fh->fh_seq_cnt = htons(ep->seq.cnt);
274} 317}
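The fill-byte rule in the comment above is easiest to see with numbers. A standalone arithmetic sketch (userspace C, values invented for illustration) of how the low-order two bits of F_CTL carry the pad that rounds a frame to a 4-byte multiple:

#include <stdio.h>

int main(void)
{
	unsigned int len = 61;         /* example frame length in bytes */
	unsigned int fill = -len & 3;  /* pad needed to reach a 4-byte multiple */
	unsigned int f_ctl = 0x290000; /* example F_CTL with fill bits clear */

	f_ctl |= fill;                 /* low 2 bits of F_CTL = fill count */
	printf("len=%u fill=%u f_ctl=%06x\n", len, fill, f_ctl);
	/* prints: len=61 fill=3 f_ctl=290003 */
	return 0;
}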
275 318
276 319/**
277/* 320 * fc_exch_release() - Decrement an exchange's reference count
278 * Release a reference to an exchange. 321 * @ep: Exchange to be released
279 * If the refcnt goes to zero and the exchange is complete, it is freed. 322 *
323 * If the reference count reaches zero and the exchange is complete,
324 * it is freed.
280 */ 325 */
281static void fc_exch_release(struct fc_exch *ep) 326static void fc_exch_release(struct fc_exch *ep)
282{ 327{
@@ -291,6 +336,10 @@ static void fc_exch_release(struct fc_exch *ep)
291 } 336 }
292} 337}
293 338
339/**
340 * fc_exch_done_locked() - Complete an exchange with the exchange lock held
341 * @ep: The exchange that is complete
342 */
294static int fc_exch_done_locked(struct fc_exch *ep) 343static int fc_exch_done_locked(struct fc_exch *ep)
295{ 344{
296 int rc = 1; 345 int rc = 1;
@@ -315,6 +364,15 @@ static int fc_exch_done_locked(struct fc_exch *ep)
315 return rc; 364 return rc;
316} 365}
317 366
367/**
368 * fc_exch_ptr_get() - Return an exchange from an exchange pool
369 * @pool: Exchange Pool to get an exchange from
370 * @index: Index of the exchange within the pool
371 *
372 * Use the index to get an exchange from within an exchange pool. exches
373 * will point to an array of exchange pointers. The index will select
374 * the exchange within the array.
375 */
318static inline struct fc_exch *fc_exch_ptr_get(struct fc_exch_pool *pool, 376static inline struct fc_exch *fc_exch_ptr_get(struct fc_exch_pool *pool,
319 u16 index) 377 u16 index)
320{ 378{
@@ -322,12 +380,22 @@ static inline struct fc_exch *fc_exch_ptr_get(struct fc_exch_pool *pool,
322 return exches[index]; 380 return exches[index];
323} 381}
324 382
383/**
384 * fc_exch_ptr_set() - Assign an exchange to a slot in an exchange pool
385 * @pool: The pool to assign the exchange to
386 * @index: The index in the pool where the exchange will be assigned
387 * @ep: The exchange to assign to the pool
388 */
325static inline void fc_exch_ptr_set(struct fc_exch_pool *pool, u16 index, 389static inline void fc_exch_ptr_set(struct fc_exch_pool *pool, u16 index,
326 struct fc_exch *ep) 390 struct fc_exch *ep)
327{ 391{
328 ((struct fc_exch **)(pool + 1))[index] = ep; 392 ((struct fc_exch **)(pool + 1))[index] = ep;
329} 393}
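Both helpers above depend on a single-allocation layout: the per-CPU pool header is immediately followed in memory by its array of exchange pointers, so (pool + 1) is the array base. A standalone sketch of the same trick with a stand-in header type (pool_hdr and nslots are hypothetical names):

#include <stdio.h>
#include <stdlib.h>

struct pool_hdr {              /* stand-in for struct fc_exch_pool */
	unsigned short total;
	unsigned short next_index;
};

int main(void)
{
	unsigned short nslots = 8;
	struct pool_hdr *pool;
	void **slots;
	int value = 42;

	/* One allocation: header, then the pointer array right behind it. */
	pool = calloc(1, sizeof(*pool) + nslots * sizeof(void *));
	if (!pool)
		return 1;
	slots = (void **)(pool + 1);     /* array base, as in fc_exch_ptr_*() */
	slots[3] = &value;               /* fc_exch_ptr_set() equivalent */
	printf("slot 3 holds %d\n", *(int *)slots[3]);
	free(pool);
	return 0;
}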
330 394
395/**
396 * fc_exch_delete() - Delete an exchange
397 * @ep: The exchange to be deleted
398 */
331static void fc_exch_delete(struct fc_exch *ep) 399static void fc_exch_delete(struct fc_exch *ep)
332{ 400{
333 struct fc_exch_pool *pool; 401 struct fc_exch_pool *pool;
@@ -343,8 +411,14 @@ static void fc_exch_delete(struct fc_exch *ep)
343 fc_exch_release(ep); /* drop hold for exch in mp */ 411 fc_exch_release(ep); /* drop hold for exch in mp */
344} 412}
345 413
346/* 414/**
347 * Internal version of fc_exch_timer_set - used with lock held. 415 * fc_exch_timer_set_locked() - Start a timer for an exchange w/ the
416 * the exchange lock held
417 * @ep: The exchange whose timer will start
418 * @timer_msec: The timeout period
419 *
420 * Used for upper level protocols to time out the exchange.
421 * The timer is cancelled when it fires or when the exchange completes.
348 */ 422 */
349static inline void fc_exch_timer_set_locked(struct fc_exch *ep, 423static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
350 unsigned int timer_msec) 424 unsigned int timer_msec)
@@ -354,17 +428,15 @@ static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
354 428
355 FC_EXCH_DBG(ep, "Exchange timer armed\n"); 429 FC_EXCH_DBG(ep, "Exchange timer armed\n");
356 430
357 if (schedule_delayed_work(&ep->timeout_work, 431 if (queue_delayed_work(fc_exch_workqueue, &ep->timeout_work,
358 msecs_to_jiffies(timer_msec))) 432 msecs_to_jiffies(timer_msec)))
359 fc_exch_hold(ep); /* hold for timer */ 433 fc_exch_hold(ep); /* hold for timer */
360} 434}
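The pairing above is deliberate: a reference is taken only when queue_delayed_work() reports the work was actually queued, and the matching drop happens when the timer fires or is cancelled. A hedged sketch of the cancel side, mirroring what fc_exch_reset() does later in this file:

/* Sketch of the cancel-side pairing; assumes the same rule as
 * struct fc_exch here: one reference is held per armed timer.
 */
static void example_timer_cancel(struct fc_exch *ep)
{
	/* cancel_delayed_work() returns true only if the work was still
	 * pending, exactly the case where the arming path took a hold,
	 * so the reference is dropped only then.
	 */
	if (cancel_delayed_work(&ep->timeout_work))
		fc_exch_release(ep);	/* drop hold for timer */
}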
361 435
362/* 436/**
363 * Set timer for an exchange. 437 * fc_exch_timer_set() - Lock the exchange and set the timer
364 * The time is a minimum delay in milliseconds until the timer fires. 438 * @ep: The exchange whose timer will start
365 * Used for upper level protocols to time out the exchange. 439 * @timer_msec: The timeout period
366 * The timer is cancelled when it fires or when the exchange completes.
367 * Returns non-zero if a timer couldn't be allocated.
368 */ 440 */
369static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec) 441static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec)
370{ 442{
@@ -373,7 +445,115 @@ static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec)
373 spin_unlock_bh(&ep->ex_lock); 445 spin_unlock_bh(&ep->ex_lock);
374} 446}
375 447
376int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec) 448/**
449 * fc_seq_send() - Send a frame using existing sequence/exchange pair
450 * @lport: The local port that the frame will be sent on
451 * @sp: The sequence to be sent
452 * @fp: The frame to be sent on the exchange
453 */
454static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
455 struct fc_frame *fp)
456{
457 struct fc_exch *ep;
458 struct fc_frame_header *fh = fc_frame_header_get(fp);
459 int error;
460 u32 f_ctl;
461
462 ep = fc_seq_exch(sp);
463 WARN_ON((ep->esb_stat & ESB_ST_SEQ_INIT) != ESB_ST_SEQ_INIT);
464
465 f_ctl = ntoh24(fh->fh_f_ctl);
466 fc_exch_setup_hdr(ep, fp, f_ctl);
467
468 /*
469 * update sequence count if this frame is carrying
470 * multiple FC frames when sequence offload is enabled
471 * by LLD.
472 */
473 if (fr_max_payload(fp))
474 sp->cnt += DIV_ROUND_UP((fr_len(fp) - sizeof(*fh)),
475 fr_max_payload(fp));
476 else
477 sp->cnt++;
478
479 /*
480 * Send the frame.
481 */
482 error = lport->tt.frame_send(lport, fp);
483
484 /*
485 * Update the exchange and sequence flags,
486 * assuming all frames for the sequence have been sent.
487 * We can only be called to send once for each sequence.
488 */
489 spin_lock_bh(&ep->ex_lock);
490 ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ; /* not first seq */
491 if (f_ctl & (FC_FC_END_SEQ | FC_FC_SEQ_INIT))
492 ep->esb_stat &= ~ESB_ST_SEQ_INIT;
493 spin_unlock_bh(&ep->ex_lock);
494 return error;
495}
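The sequence-count update above matters for sequence offload: a large send unit handed down by the LLD becomes several wire frames, one per fr_max_payload() chunk. A standalone arithmetic sketch with invented numbers:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int fr_len = 8048;      /* send unit: header + payload */
	unsigned int fh_len = 24;        /* FC frame header size in bytes */
	unsigned int max_payload = 2048; /* fr_max_payload() for this frame */

	/* 8024 payload bytes at 2048 per wire frame -> cnt advances by 4 */
	printf("sp->cnt += %u\n", DIV_ROUND_UP(fr_len - fh_len, max_payload));
	return 0;
}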
496
497/**
498 * fc_seq_alloc() - Allocate a sequence for a given exchange
499 * @ep: The exchange to allocate a new sequence for
500 * @seq_id: The sequence ID to be used
501 *
502 * We don't support multiple originated sequences on the same exchange.
503 * By implication, any previously originated sequence on this exchange
504 * is complete, and we reallocate the same sequence.
505 */
506static struct fc_seq *fc_seq_alloc(struct fc_exch *ep, u8 seq_id)
507{
508 struct fc_seq *sp;
509
510 sp = &ep->seq;
511 sp->ssb_stat = 0;
512 sp->cnt = 0;
513 sp->id = seq_id;
514 return sp;
515}
516
517/**
518 * fc_seq_start_next_locked() - Allocate a new sequence on the same
519 * exchange as the supplied sequence
520 * @sp: The sequence/exchange to get a new sequence for
521 */
522static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp)
523{
524 struct fc_exch *ep = fc_seq_exch(sp);
525
526 sp = fc_seq_alloc(ep, ep->seq_id++);
527 FC_EXCH_DBG(ep, "f_ctl %6x seq %2x\n",
528 ep->f_ctl, sp->id);
529 return sp;
530}
531
532/**
533 * fc_seq_start_next() - Lock the exchange and get a new sequence
534 * for a given sequence/exchange pair
535 * @sp: The sequence/exchange to get a new sequence for
536 */
537static struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
538{
539 struct fc_exch *ep = fc_seq_exch(sp);
540
541 spin_lock_bh(&ep->ex_lock);
542 sp = fc_seq_start_next_locked(sp);
543 spin_unlock_bh(&ep->ex_lock);
544
545 return sp;
546}
547
548/**
549 * fc_seq_exch_abort() - Abort an exchange and sequence
550 * @req_sp: The sequence to be aborted
551 * @timer_msec: The period of time to wait before aborting
552 *
553 * Generally called because of a timeout or an abort from the upper layer.
554 */
555static int fc_seq_exch_abort(const struct fc_seq *req_sp,
556 unsigned int timer_msec)
377{ 557{
378 struct fc_seq *sp; 558 struct fc_seq *sp;
379 struct fc_exch *ep; 559 struct fc_exch *ep;
@@ -422,11 +602,10 @@ int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec)
422 error = -ENOBUFS; 602 error = -ENOBUFS;
423 return error; 603 return error;
424} 604}
425EXPORT_SYMBOL(fc_seq_exch_abort);
426 605
427/* 606/**
428 * Exchange timeout - handle exchange timer expiration. 607 * fc_exch_timeout() - Handle exchange timer expiration
429 * The timer will have been cancelled before this is called. 608 * @work: The work_struct identifying the exchange that timed out
430 */ 609 */
431static void fc_exch_timeout(struct work_struct *work) 610static void fc_exch_timeout(struct work_struct *work)
432{ 611{
@@ -474,28 +653,10 @@ done:
474 fc_exch_release(ep); 653 fc_exch_release(ep);
475} 654}
476 655
477/*
478 * Allocate a sequence.
479 *
480 * We don't support multiple originated sequences on the same exchange.
481 * By implication, any previously originated sequence on this exchange
482 * is complete, and we reallocate the same sequence.
483 */
484static struct fc_seq *fc_seq_alloc(struct fc_exch *ep, u8 seq_id)
485{
486 struct fc_seq *sp;
487
488 sp = &ep->seq;
489 sp->ssb_stat = 0;
490 sp->cnt = 0;
491 sp->id = seq_id;
492 return sp;
493}
494
495/** 656/**
496 * fc_exch_em_alloc() - allocate an exchange from a specified EM. 657 * fc_exch_em_alloc() - Allocate an exchange from a specified EM.
497 * @lport: ptr to the local port 658 * @lport: The local port that the exchange is for
498 * @mp: ptr to the exchange manager 659 * @mp: The exchange manager that will allocate the exchange
499 * 660 *
500 * Returns pointer to allocated fc_exch with exch lock held. 661 * Returns pointer to allocated fc_exch with exch lock held.
501 */ 662 */
@@ -563,16 +724,18 @@ err:
563} 724}
564 725
565/** 726/**
566 * fc_exch_alloc() - allocate an exchange. 727 * fc_exch_alloc() - Allocate an exchange from an EM on a
567 * @lport: ptr to the local port 728 * local port's list of EMs.
568 * @fp: ptr to the FC frame 729 * @lport: The local port that will own the exchange
730 * @fp: The FC frame that the exchange will be for
569 * 731 *
570 * This function walks the list of the exchange manager(EM) 732 * This function walks the list of exchange manager (EM)
571 * anchors to select a EM for new exchange allocation. The 733 * anchors to select an EM for a new exchange allocation. The
572 * EM is selected having either a NULL match function pointer 734 * EM is selected when a NULL match function pointer is encountered
573 * or call to match function returning true. 735 * or when a call to a match function returns true.
574 */ 736 */
575struct fc_exch *fc_exch_alloc(struct fc_lport *lport, struct fc_frame *fp) 737static struct fc_exch *fc_exch_alloc(struct fc_lport *lport,
738 struct fc_frame *fp)
576{ 739{
577 struct fc_exch_mgr_anchor *ema; 740 struct fc_exch_mgr_anchor *ema;
578 struct fc_exch *ep; 741 struct fc_exch *ep;
@@ -586,10 +749,11 @@ struct fc_exch *fc_exch_alloc(struct fc_lport *lport, struct fc_frame *fp)
586 } 749 }
587 return NULL; 750 return NULL;
588} 751}
589EXPORT_SYMBOL(fc_exch_alloc);
590 752
591/* 753/**
592 * Lookup and hold an exchange. 754 * fc_exch_find() - Lookup and hold an exchange
755 * @mp: The exchange manager to lookup the exchange from
756 * @xid: The XID of the exchange to look up
593 */ 757 */
594static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid) 758static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
595{ 759{
@@ -609,7 +773,13 @@ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
609 return ep; 773 return ep;
610} 774}
611 775
612void fc_exch_done(struct fc_seq *sp) 776
777/**
778 * fc_exch_done() - Indicate that an exchange/sequence tuple is complete and
779 * the memory allocated for the related objects may be freed.
780 * @sp: The sequence that has completed
781 */
782static void fc_exch_done(struct fc_seq *sp)
613{ 783{
614 struct fc_exch *ep = fc_seq_exch(sp); 784 struct fc_exch *ep = fc_seq_exch(sp);
615 int rc; 785 int rc;
@@ -620,10 +790,13 @@ void fc_exch_done(struct fc_seq *sp)
620 if (!rc) 790 if (!rc)
621 fc_exch_delete(ep); 791 fc_exch_delete(ep);
622} 792}
623EXPORT_SYMBOL(fc_exch_done);
624 793
625/* 794/**
626 * Allocate a new exchange as responder. 795 * fc_exch_resp() - Allocate a new exchange for a response frame
796 * @lport: The local port that the exchange was for
797 * @mp: The exchange manager to allocate the exchange from
798 * @fp: The response frame
799 *
627 * Sets the responder ID in the frame header. 800 * Sets the responder ID in the frame header.
628 */ 801 */
629static struct fc_exch *fc_exch_resp(struct fc_lport *lport, 802static struct fc_exch *fc_exch_resp(struct fc_lport *lport,
@@ -664,8 +837,13 @@ static struct fc_exch *fc_exch_resp(struct fc_lport *lport,
664 return ep; 837 return ep;
665} 838}
666 839
667/* 840/**
668 * Find a sequence for receive where the other end is originating the sequence. 841 * fc_seq_lookup_recip() - Find a sequence where the other end
842 * originated the sequence
843 * @lport: The local port that the frame was sent to
844 * @mp: The Exchange Manager to lookup the exchange from
845 * @fp: The frame associated with the sequence we're looking for
846 *
669 * If fc_pf_rjt_reason is FC_RJT_NONE then this function will have a hold 847 * If fc_pf_rjt_reason is FC_RJT_NONE then this function will have a hold
670 * on the ep that should be released by the caller. 848 * on the ep that should be released by the caller.
671 */ 849 */
@@ -771,10 +949,12 @@ rel:
771 return reject; 949 return reject;
772} 950}
773 951
774/* 952/**
775 * Find the sequence for a frame being received. 953 * fc_seq_lookup_orig() - Find a sequence where this end
776 * We originated the sequence, so it should be found. 954 * originated the sequence
777 * We may or may not have originated the exchange. 955 * @mp: The Exchange Manager to lookup the exchange from
956 * @fp: The frame associated with the sequence we're looking for
957 *
778 * Does not hold the sequence for the caller. 958 * Does not hold the sequence for the caller.
779 */ 959 */
780static struct fc_seq *fc_seq_lookup_orig(struct fc_exch_mgr *mp, 960static struct fc_seq *fc_seq_lookup_orig(struct fc_exch_mgr *mp,
@@ -806,8 +986,12 @@ static struct fc_seq *fc_seq_lookup_orig(struct fc_exch_mgr *mp,
806 return sp; 986 return sp;
807} 987}
808 988
809/* 989/**
810 * Set addresses for an exchange. 990 * fc_exch_set_addr() - Set the source and destination IDs for an exchange
991 * @ep: The exchange to set the addresses for
992 * @orig_id: The originator's ID
993 * @resp_id: The responder's ID
994 *
811 * Note this must be done before the first sequence of the exchange is sent. 995 * Note this must be done before the first sequence of the exchange is sent.
812 */ 996 */
813static void fc_exch_set_addr(struct fc_exch *ep, 997static void fc_exch_set_addr(struct fc_exch *ep,
@@ -823,76 +1007,15 @@ static void fc_exch_set_addr(struct fc_exch *ep,
823 } 1007 }
824} 1008}
825 1009
826static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp) 1010/**
827{ 1011 * fc_seq_els_rsp_send() - Send an ELS response using infomation from
828 struct fc_exch *ep = fc_seq_exch(sp); 1012 * the existing sequence/exchange.
829 1013 * @sp: The sequence/exchange to get information from
830 sp = fc_seq_alloc(ep, ep->seq_id++); 1014 * @els_cmd: The ELS command to be sent
831 FC_EXCH_DBG(ep, "f_ctl %6x seq %2x\n", 1015 * @els_data: The ELS data to be sent
832 ep->f_ctl, sp->id);
833 return sp;
834}
835/*
836 * Allocate a new sequence on the same exchange as the supplied sequence.
837 * This will never return NULL.
838 */ 1016 */
839struct fc_seq *fc_seq_start_next(struct fc_seq *sp) 1017static void fc_seq_els_rsp_send(struct fc_seq *sp, enum fc_els_cmd els_cmd,
840{ 1018 struct fc_seq_els_data *els_data)
841 struct fc_exch *ep = fc_seq_exch(sp);
842
843 spin_lock_bh(&ep->ex_lock);
844 sp = fc_seq_start_next_locked(sp);
845 spin_unlock_bh(&ep->ex_lock);
846
847 return sp;
848}
849EXPORT_SYMBOL(fc_seq_start_next);
850
851int fc_seq_send(struct fc_lport *lp, struct fc_seq *sp, struct fc_frame *fp)
852{
853 struct fc_exch *ep;
854 struct fc_frame_header *fh = fc_frame_header_get(fp);
855 int error;
856 u32 f_ctl;
857
858 ep = fc_seq_exch(sp);
859 WARN_ON((ep->esb_stat & ESB_ST_SEQ_INIT) != ESB_ST_SEQ_INIT);
860
861 f_ctl = ntoh24(fh->fh_f_ctl);
862 fc_exch_setup_hdr(ep, fp, f_ctl);
863
864 /*
865 * update sequence count if this frame is carrying
866 * multiple FC frames when sequence offload is enabled
867 * by LLD.
868 */
869 if (fr_max_payload(fp))
870 sp->cnt += DIV_ROUND_UP((fr_len(fp) - sizeof(*fh)),
871 fr_max_payload(fp));
872 else
873 sp->cnt++;
874
875 /*
876 * Send the frame.
877 */
878 error = lp->tt.frame_send(lp, fp);
879
880 /*
881 * Update the exchange and sequence flags,
882 * assuming all frames for the sequence have been sent.
883 * We can only be called to send once for each sequence.
884 */
885 spin_lock_bh(&ep->ex_lock);
886 ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ; /* not first seq */
887 if (f_ctl & (FC_FC_END_SEQ | FC_FC_SEQ_INIT))
888 ep->esb_stat &= ~ESB_ST_SEQ_INIT;
889 spin_unlock_bh(&ep->ex_lock);
890 return error;
891}
892EXPORT_SYMBOL(fc_seq_send);
893
894void fc_seq_els_rsp_send(struct fc_seq *sp, enum fc_els_cmd els_cmd,
895 struct fc_seq_els_data *els_data)
896{ 1019{
897 switch (els_cmd) { 1020 switch (els_cmd) {
898 case ELS_LS_RJT: 1021 case ELS_LS_RJT:
@@ -911,10 +1034,13 @@ void fc_seq_els_rsp_send(struct fc_seq *sp, enum fc_els_cmd els_cmd,
911 FC_EXCH_DBG(fc_seq_exch(sp), "Invalid ELS CMD:%x\n", els_cmd); 1034 FC_EXCH_DBG(fc_seq_exch(sp), "Invalid ELS CMD:%x\n", els_cmd);
912 } 1035 }
913} 1036}
914EXPORT_SYMBOL(fc_seq_els_rsp_send);
915 1037
916/* 1038/**
917 * Send a sequence, which is also the last sequence in the exchange. 1039 * fc_seq_send_last() - Send a sequence that is the last in the exchange
1040 * @sp: The sequence that is to be sent
1041 * @fp: The frame that will be sent on the sequence
1042 * @rctl: The R_CTL information to be sent
1043 * @fh_type: The frame header type
918 */ 1044 */
919static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp, 1045static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp,
920 enum fc_rctl rctl, enum fc_fh_type fh_type) 1046 enum fc_rctl rctl, enum fc_fh_type fh_type)
@@ -928,9 +1054,12 @@ static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp,
928 fc_seq_send(ep->lp, sp, fp); 1054 fc_seq_send(ep->lp, sp, fp);
929} 1055}
930 1056
931/* 1057/**
1058 * fc_seq_send_ack() - Send an acknowledgement that we've received a frame
1059 * @sp: The sequence to send the ACK on
1060 * @rx_fp: The received frame that is being acknowledged
1061 *
932 * Send ACK_1 (or equiv.) indicating we received something. 1062 * Send ACK_1 (or equiv.) indicating we received something.
933 * The frame we're acking is supplied.
934 */ 1063 */
935static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp) 1064static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp)
936{ 1065{
@@ -938,14 +1067,14 @@ static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp)
938 struct fc_frame_header *rx_fh; 1067 struct fc_frame_header *rx_fh;
939 struct fc_frame_header *fh; 1068 struct fc_frame_header *fh;
940 struct fc_exch *ep = fc_seq_exch(sp); 1069 struct fc_exch *ep = fc_seq_exch(sp);
941 struct fc_lport *lp = ep->lp; 1070 struct fc_lport *lport = ep->lp;
942 unsigned int f_ctl; 1071 unsigned int f_ctl;
943 1072
944 /* 1073 /*
945 * Don't send ACKs for class 3. 1074 * Don't send ACKs for class 3.
946 */ 1075 */
947 if (fc_sof_needs_ack(fr_sof(rx_fp))) { 1076 if (fc_sof_needs_ack(fr_sof(rx_fp))) {
948 fp = fc_frame_alloc(lp, 0); 1077 fp = fc_frame_alloc(lport, 0);
949 if (!fp) 1078 if (!fp)
950 return; 1079 return;
951 1080
@@ -980,12 +1109,16 @@ static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp)
980 else 1109 else
981 fr_eof(fp) = FC_EOF_N; 1110 fr_eof(fp) = FC_EOF_N;
982 1111
983 (void) lp->tt.frame_send(lp, fp); 1112 lport->tt.frame_send(lport, fp);
984 } 1113 }
985} 1114}
986 1115
987/* 1116/**
988 * Send BLS Reject. 1117 * fc_exch_send_ba_rjt() - Send BLS Reject
1118 * @rx_fp: The frame being rejected
1119 * @reason: The reason the frame is being rejected
1120 * @explan: The explanation for the rejection
1121 *
989 * This is for rejecting BA_ABTS only. 1122 * This is for rejecting BA_ABTS only.
990 */ 1123 */
991static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp, 1124static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp,
@@ -996,11 +1129,11 @@ static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp,
996 struct fc_frame_header *rx_fh; 1129 struct fc_frame_header *rx_fh;
997 struct fc_frame_header *fh; 1130 struct fc_frame_header *fh;
998 struct fc_ba_rjt *rp; 1131 struct fc_ba_rjt *rp;
999 struct fc_lport *lp; 1132 struct fc_lport *lport;
1000 unsigned int f_ctl; 1133 unsigned int f_ctl;
1001 1134
1002 lp = fr_dev(rx_fp); 1135 lport = fr_dev(rx_fp);
1003 fp = fc_frame_alloc(lp, sizeof(*rp)); 1136 fp = fc_frame_alloc(lport, sizeof(*rp));
1004 if (!fp) 1137 if (!fp)
1005 return; 1138 return;
1006 fh = fc_frame_header_get(fp); 1139 fh = fc_frame_header_get(fp);
@@ -1045,13 +1178,17 @@ static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp,
1045 if (fc_sof_needs_ack(fr_sof(fp))) 1178 if (fc_sof_needs_ack(fr_sof(fp)))
1046 fr_eof(fp) = FC_EOF_N; 1179 fr_eof(fp) = FC_EOF_N;
1047 1180
1048 (void) lp->tt.frame_send(lp, fp); 1181 lport->tt.frame_send(lport, fp);
1049} 1182}
1050 1183
1051/* 1184/**
1052 * Handle an incoming ABTS. This would be for target mode usually, 1185 * fc_exch_recv_abts() - Handle an incoming ABTS
1053 * but could be due to lost FCP transfer ready, confirm or RRQ. 1186 * @ep: The exchange the abort was on
1054 * We always handle this as an exchange abort, ignoring the parameter. 1187 * @rx_fp: The ABTS frame
1188 *
1189 * This would be for target mode usually, but could be due to lost
1190 * FCP transfer ready, confirm or RRQ. We always handle this as an
1191 * exchange abort, ignoring the parameter.
1055 */ 1192 */
1056static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp) 1193static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
1057{ 1194{
@@ -1100,10 +1237,14 @@ free:
1100 fc_frame_free(rx_fp); 1237 fc_frame_free(rx_fp);
1101} 1238}
1102 1239
1103/* 1240/**
1104 * Handle receive where the other end is originating the sequence. 1241 * fc_exch_recv_req() - Handler for an incoming request where the other
1242 * end is originating the sequence
1243 * @lport: The local port that received the request
1244 * @mp: The EM that the exchange is on
1245 * @fp: The request frame
1105 */ 1246 */
1106static void fc_exch_recv_req(struct fc_lport *lp, struct fc_exch_mgr *mp, 1247static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp,
1107 struct fc_frame *fp) 1248 struct fc_frame *fp)
1108{ 1249{
1109 struct fc_frame_header *fh = fc_frame_header_get(fp); 1250 struct fc_frame_header *fh = fc_frame_header_get(fp);
@@ -1114,8 +1255,17 @@ static void fc_exch_recv_req(struct fc_lport *lp, struct fc_exch_mgr *mp,
1114 u32 f_ctl; 1255 u32 f_ctl;
1115 enum fc_pf_rjt_reason reject; 1256 enum fc_pf_rjt_reason reject;
1116 1257
1258 /* We can have the wrong fc_lport at this point with NPIV, which is a
1259 * problem now that we know a new exchange needs to be allocated.
1260 */
1261 lport = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id));
1262 if (!lport) {
1263 fc_frame_free(fp);
1264 return;
1265 }
1266
1117 fr_seq(fp) = NULL; 1267 fr_seq(fp) = NULL;
1118 reject = fc_seq_lookup_recip(lp, mp, fp); 1268 reject = fc_seq_lookup_recip(lport, mp, fp);
1119 if (reject == FC_RJT_NONE) { 1269 if (reject == FC_RJT_NONE) {
1120 sp = fr_seq(fp); /* sequence will be held */ 1270 sp = fr_seq(fp); /* sequence will be held */
1121 ep = fc_seq_exch(sp); 1271 ep = fc_seq_exch(sp);
@@ -1138,17 +1288,21 @@ static void fc_exch_recv_req(struct fc_lport *lp, struct fc_exch_mgr *mp,
1138 if (ep->resp) 1288 if (ep->resp)
1139 ep->resp(sp, fp, ep->arg); 1289 ep->resp(sp, fp, ep->arg);
1140 else 1290 else
1141 lp->tt.lport_recv(lp, sp, fp); 1291 lport->tt.lport_recv(lport, sp, fp);
1142 fc_exch_release(ep); /* release from lookup */ 1292 fc_exch_release(ep); /* release from lookup */
1143 } else { 1293 } else {
1144 FC_LPORT_DBG(lp, "exch/seq lookup failed: reject %x\n", reject); 1294 FC_LPORT_DBG(lport, "exch/seq lookup failed: reject %x\n",
1295 reject);
1145 fc_frame_free(fp); 1296 fc_frame_free(fp);
1146 } 1297 }
1147} 1298}
1148 1299
1149/* 1300/**
1150 * Handle receive where the other end is originating the sequence in 1301 * fc_exch_recv_seq_resp() - Handler for an incoming response where the other
1151 * response to our exchange. 1302 * end is the originator of the sequence that is a
1303 * response to our initial exchange
1304 * @mp: The EM that the exchange is on
1305 * @fp: The response frame
1152 */ 1306 */
1153static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) 1307static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1154{ 1308{
@@ -1239,8 +1393,11 @@ out:
1239 fc_frame_free(fp); 1393 fc_frame_free(fp);
1240} 1394}
1241 1395
1242/* 1396/**
1243 * Handle receive for a sequence where other end is responding to our sequence. 1397 * fc_exch_recv_resp() - Handler for a sequence where other end is
1398 * responding to our sequence
1399 * @mp: The EM that the exchange is on
1400 * @fp: The response frame
1244 */ 1401 */
1245static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) 1402static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1246{ 1403{
@@ -1256,9 +1413,13 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1256 fc_frame_free(fp); 1413 fc_frame_free(fp);
1257} 1414}
1258 1415
1259/* 1416/**
1260 * Handle the response to an ABTS for exchange or sequence. 1417 * fc_exch_abts_resp() - Handler for a response to an ABTS
1261 * This can be BA_ACC or BA_RJT. 1418 * @ep: The exchange that the frame is on
1419 * @fp: The response frame
1420 *
1421 * This response would be to an ABTS cancelling an exchange or sequence.
1422 * The response can be either BA_ACC or BA_RJT.
1262 */ 1423 */
1263static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp) 1424static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
1264{ 1425{
@@ -1333,9 +1494,12 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
1333 1494
1334} 1495}
1335 1496
1336/* 1497/**
1337 * Receive BLS sequence. 1498 * fc_exch_recv_bls() - Handler for a BLS sequence
1338 * This is always a sequence initiated by the remote side. 1499 * @mp: The EM that the exchange is on
1500 * @fp: The request frame
1501 *
1502 * The BLS frame is always a sequence initiated by the remote side.
1339 * We may be either the originator or recipient of the exchange. 1503 * We may be either the originator or recipient of the exchange.
1340 */ 1504 */
1341static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp) 1505static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
@@ -1392,8 +1556,10 @@ static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
1392 fc_exch_release(ep); /* release hold taken by fc_exch_find */ 1556 fc_exch_release(ep); /* release hold taken by fc_exch_find */
1393} 1557}
1394 1558
1395/* 1559/**
1396 * Accept sequence with LS_ACC. 1560 * fc_seq_ls_acc() - Accept sequence with LS_ACC
1561 * @req_sp: The request sequence
1562 *
1397 * If this fails due to allocation or transmit congestion, assume the 1563 * If this fails due to allocation or transmit congestion, assume the
1398 * originator will repeat the sequence. 1564 * originator will repeat the sequence.
1399 */ 1565 */
@@ -1413,8 +1579,12 @@ static void fc_seq_ls_acc(struct fc_seq *req_sp)
1413 } 1579 }
1414} 1580}
1415 1581
1416/* 1582/**
1417 * Reject sequence with ELS LS_RJT. 1583 * fc_seq_ls_rjt() - Reject a sequence with ELS LS_RJT
1584 * @req_sp: The request sequence
1585 * @reason: The reason the sequence is being rejected
1586 * @explan: The explanation for the rejection
1587 *
1418 * If this fails due to allocation or transmit congestion, assume the 1588 * If this fails due to allocation or transmit congestion, assume the
1419 * originator will repeat the sequence. 1589 * originator will repeat the sequence.
1420 */ 1590 */
@@ -1437,6 +1607,10 @@ static void fc_seq_ls_rjt(struct fc_seq *req_sp, enum fc_els_rjt_reason reason,
1437 } 1607 }
1438} 1608}
1439 1609
1610/**
1611 * fc_exch_reset() - Reset an exchange
1612 * @ep: The exchange to be reset
1613 */
1440static void fc_exch_reset(struct fc_exch *ep) 1614static void fc_exch_reset(struct fc_exch *ep)
1441{ 1615{
1442 struct fc_seq *sp; 1616 struct fc_seq *sp;
@@ -1446,12 +1620,6 @@ static void fc_exch_reset(struct fc_exch *ep)
1446 1620
1447 spin_lock_bh(&ep->ex_lock); 1621 spin_lock_bh(&ep->ex_lock);
1448 ep->state |= FC_EX_RST_CLEANUP; 1622 ep->state |= FC_EX_RST_CLEANUP;
1449 /*
1450 * we really want to call del_timer_sync, but cannot due
1451 * to the lport calling with the lport lock held (some resp
1452 * functions can also grab the lport lock which could cause
1453 * a deadlock).
1454 */
1455 if (cancel_delayed_work(&ep->timeout_work)) 1623 if (cancel_delayed_work(&ep->timeout_work))
1456 atomic_dec(&ep->ex_refcnt); /* drop hold for timer */ 1624 atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
1457 resp = ep->resp; 1625 resp = ep->resp;
@@ -1471,16 +1639,16 @@ static void fc_exch_reset(struct fc_exch *ep)
1471} 1639}
1472 1640
1473/** 1641/**
1474 * fc_exch_pool_reset() - Resets an per cpu exches pool. 1642 * fc_exch_pool_reset() - Reset a per cpu exchange pool
1475 * @lport: ptr to the local port 1643 * @lport: The local port that the exchange pool is on
1476 * @pool: ptr to the per cpu exches pool 1644 * @pool: The exchange pool to be reset
1477 * @sid: source FC ID 1645 * @sid: The source ID
1478 * @did: destination FC ID 1646 * @did: The destination ID
1479 * 1647 *
1480 * Resets an per cpu exches pool, releasing its all sequences 1648 * Resets a per-CPU exchange pool, releasing all of its sequences
1481 * and exchanges. If sid is non-zero, then reset only exchanges 1649 * and exchanges. If sid is non-zero then reset only exchanges
1482 * we sourced from that FID. If did is non-zero, reset only 1650 * we sourced from that FID. If did is non-zero then reset
1483 * exchanges destined to that FID. 1651 * only exchanges destined for that FID.
1484 */ 1652 */
1485static void fc_exch_pool_reset(struct fc_lport *lport, 1653static void fc_exch_pool_reset(struct fc_lport *lport,
1486 struct fc_exch_pool *pool, 1654 struct fc_exch_pool *pool,
@@ -1514,15 +1682,15 @@ restart:
1514} 1682}
1515 1683
1516/** 1684/**
1517 * fc_exch_mgr_reset() - Resets all EMs of a lport 1685 * fc_exch_mgr_reset() - Reset all EMs of a local port
1518 * @lport: ptr to the local port 1686 * @lport: The local port whose EMs are to be reset
1519 * @sid: source FC ID 1687 * @sid: The source ID
1520 * @did: destination FC ID 1688 * @did: The destination ID
1521 * 1689 *
1522 * Reset all EMs of a lport, releasing its all sequences and 1690 * Reset all EMs associated with a given local port. Release all
1523 * exchanges. If sid is non-zero, then reset only exchanges 1691 * sequences and exchanges. If sid is non-zero then reset only the
1524 * we sourced from that FID. If did is non-zero, reset only 1692 * exchanges sent from that FID. If did is non-zero then reset
1525 * exchanges destined to that FID. 1693 * only exchanges destined for that FID.
1526 */ 1694 */
1527void fc_exch_mgr_reset(struct fc_lport *lport, u32 sid, u32 did) 1695void fc_exch_mgr_reset(struct fc_lport *lport, u32 sid, u32 did)
1528{ 1696{
@@ -1538,8 +1706,11 @@ void fc_exch_mgr_reset(struct fc_lport *lport, u32 sid, u32 did)
1538} 1706}
1539EXPORT_SYMBOL(fc_exch_mgr_reset); 1707EXPORT_SYMBOL(fc_exch_mgr_reset);
1540 1708
1541/* 1709/**
1542 * Handle incoming ELS REC - Read Exchange Concise. 1710 * fc_exch_els_rec() - Handler for ELS REC (Read Exchange Concise) requests
1711 * @sp: The sequence the REC is on
1712 * @rfp: The REC frame
1713 *
1543 * Note that the requesting port may be different than the S_ID in the request. 1714 * Note that the requesting port may be different than the S_ID in the request.
1544 */ 1715 */
1545static void fc_exch_els_rec(struct fc_seq *sp, struct fc_frame *rfp) 1716static void fc_exch_els_rec(struct fc_seq *sp, struct fc_frame *rfp)
@@ -1621,10 +1792,11 @@ reject:
1621 fc_frame_free(rfp); 1792 fc_frame_free(rfp);
1622} 1793}
1623 1794
1624/* 1795/**
1625 * Handle response from RRQ. 1796 * fc_exch_rrq_resp() - Handler for RRQ responses
1626 * Not much to do here, really. 1797 * @sp: The sequence that the RRQ is on
1627 * Should report errors. 1798 * @fp: The RRQ frame
1799 * @arg: The exchange that the RRQ is on
1628 * 1800 *
1629 * TODO: fix error handler. 1801 * TODO: fix error handler.
1630 */ 1802 */
@@ -1664,21 +1836,99 @@ cleanup:
1664 fc_exch_release(aborted_ep); 1836 fc_exch_release(aborted_ep);
1665} 1837}
1666 1838
1667/* 1839
1668 * Send ELS RRQ - Reinstate Recovery Qualifier. 1840/**
1841 * fc_exch_seq_send() - Send a frame using a new exchange and sequence
1842 * @lport: The local port to send the frame on
1843 * @fp: The frame to be sent
1844 * @resp: The response handler for this request
1845 * @destructor: The destructor for the exchange
1846 * @arg: The argument to be passed to the response handler
1847 * @timer_msec: The timeout period for the exchange
1848 *
1849 * Some of the frame header's fields must be filled in
1850 * before calling this routine; those fields are:
1851 *
1852 * - routing control
1853 * - FC port did
1854 * - FC port sid
1855 * - FC header type
1856 * - frame control
1857 * - parameter or relative offset
1858 */
1859static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
1860 struct fc_frame *fp,
1861 void (*resp)(struct fc_seq *,
1862 struct fc_frame *fp,
1863 void *arg),
1864 void (*destructor)(struct fc_seq *,
1865 void *),
1866 void *arg, u32 timer_msec)
1867{
1868 struct fc_exch *ep;
1869 struct fc_seq *sp = NULL;
1870 struct fc_frame_header *fh;
1871 int rc = 1;
1872
1873 ep = fc_exch_alloc(lport, fp);
1874 if (!ep) {
1875 fc_frame_free(fp);
1876 return NULL;
1877 }
1878 ep->esb_stat |= ESB_ST_SEQ_INIT;
1879 fh = fc_frame_header_get(fp);
1880 fc_exch_set_addr(ep, ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id));
1881 ep->resp = resp;
1882 ep->destructor = destructor;
1883 ep->arg = arg;
1884 ep->r_a_tov = FC_DEF_R_A_TOV;
1885 ep->lp = lport;
1886 sp = &ep->seq;
1887
1888 ep->fh_type = fh->fh_type; /* save for possible timeout handling */
1889 ep->f_ctl = ntoh24(fh->fh_f_ctl);
1890 fc_exch_setup_hdr(ep, fp, ep->f_ctl);
1891 sp->cnt++;
1892
1893 if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD)
1894 fc_fcp_ddp_setup(fr_fsp(fp), ep->xid);
1895
1896 if (unlikely(lport->tt.frame_send(lport, fp)))
1897 goto err;
1898
1899 if (timer_msec)
1900 fc_exch_timer_set_locked(ep, timer_msec);
1901 ep->f_ctl &= ~FC_FC_FIRST_SEQ; /* not first seq */
1902
1903 if (ep->f_ctl & FC_FC_SEQ_INIT)
1904 ep->esb_stat &= ~ESB_ST_SEQ_INIT;
1905 spin_unlock_bh(&ep->ex_lock);
1906 return sp;
1907err:
1908 rc = fc_exch_done_locked(ep);
1909 spin_unlock_bh(&ep->ex_lock);
1910 if (!rc)
1911 fc_exch_delete(ep);
1912 return NULL;
1913}
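A hedged usage sketch for the routine above: the caller fills the listed header fields, typically via fc_fill_fc_hdr() exactly as fc_exch_rrq() does further down, and supplies a response handler. The example_* names and payload size are illustrative assumptions, as is the IS_ERR() check (libfc handlers may receive error-encoded frame pointers):

/* Illustrative only: send a one-frame ELS request on a new exchange,
 * following the same calling conventions fc_exch_rrq() uses below.
 */
static void example_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
{
	if (!IS_ERR(fp))
		fc_frame_free(fp);	/* consume the response frame */
}

static int example_els_send(struct fc_lport *lport, u32 did, size_t pay_len)
{
	struct fc_frame *fp;

	fp = fc_frame_alloc(lport, pay_len);
	if (!fp)
		return -ENOMEM;
	/* ... fill the ELS payload here ... */
	fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did,
		       fc_host_port_id(lport->host), FC_TYPE_ELS,
		       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
	if (!fc_exch_seq_send(lport, fp, example_resp, NULL, NULL,
			      lport->e_d_tov))
		return -ENOMEM;	/* the frame was consumed on failure */
	return 0;
}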
1914
1915/**
1916 * fc_exch_rrq() - Send an ELS RRQ (Reinstate Recovery Qualifier) command
1917 * @ep: The exchange to send the RRQ on
1918 *
1669 * This tells the remote port to stop blocking the use of 1919 * This tells the remote port to stop blocking the use of
1670 * the exchange and the seq_cnt range. 1920 * the exchange and the seq_cnt range.
1671 */ 1921 */
1672static void fc_exch_rrq(struct fc_exch *ep) 1922static void fc_exch_rrq(struct fc_exch *ep)
1673{ 1923{
1674 struct fc_lport *lp; 1924 struct fc_lport *lport;
1675 struct fc_els_rrq *rrq; 1925 struct fc_els_rrq *rrq;
1676 struct fc_frame *fp; 1926 struct fc_frame *fp;
1677 u32 did; 1927 u32 did;
1678 1928
1679 lp = ep->lp; 1929 lport = ep->lp;
1680 1930
1681 fp = fc_frame_alloc(lp, sizeof(*rrq)); 1931 fp = fc_frame_alloc(lport, sizeof(*rrq));
1682 if (!fp) 1932 if (!fp)
1683 goto retry; 1933 goto retry;
1684 1934
@@ -1694,10 +1944,11 @@ static void fc_exch_rrq(struct fc_exch *ep)
1694 did = ep->sid; 1944 did = ep->sid;
1695 1945
1696 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did, 1946 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did,
1697 fc_host_port_id(lp->host), FC_TYPE_ELS, 1947 fc_host_port_id(lport->host), FC_TYPE_ELS,
1698 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); 1948 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
1699 1949
1700 if (fc_exch_seq_send(lp, fp, fc_exch_rrq_resp, NULL, ep, lp->e_d_tov)) 1950 if (fc_exch_seq_send(lport, fp, fc_exch_rrq_resp, NULL, ep,
1951 lport->e_d_tov))
1701 return; 1952 return;
1702 1953
1703retry: 1954retry:
@@ -1714,12 +1965,14 @@ retry:
1714} 1965}
1715 1966
1716 1967
1717/* 1968/**
1718 * Handle incoming ELS RRQ - Reset Recovery Qualifier. 1969 * fc_exch_els_rrq() - Handler for ELS RRQ (Reset Recovery Qualifier) requests
1970 * @sp: The sequence that the RRQ is on
1971 * @fp: The RRQ frame
1719 */ 1972 */
1720static void fc_exch_els_rrq(struct fc_seq *sp, struct fc_frame *fp) 1973static void fc_exch_els_rrq(struct fc_seq *sp, struct fc_frame *fp)
1721{ 1974{
1722 struct fc_exch *ep; /* request or subject exchange */ 1975 struct fc_exch *ep = NULL; /* request or subject exchange */
1723 struct fc_els_rrq *rp; 1976 struct fc_els_rrq *rp;
1724 u32 sid; 1977 u32 sid;
1725 u16 xid; 1978 u16 xid;
@@ -1769,17 +2022,24 @@ static void fc_exch_els_rrq(struct fc_seq *sp, struct fc_frame *fp)
1769 * Send LS_ACC. 2022 * Send LS_ACC.
1770 */ 2023 */
1771 fc_seq_ls_acc(sp); 2024 fc_seq_ls_acc(sp);
1772 fc_frame_free(fp); 2025 goto out;
1773 return;
1774 2026
1775unlock_reject: 2027unlock_reject:
1776 spin_unlock_bh(&ep->ex_lock); 2028 spin_unlock_bh(&ep->ex_lock);
1777 fc_exch_release(ep); /* drop hold from fc_exch_find */
1778reject: 2029reject:
1779 fc_seq_ls_rjt(sp, ELS_RJT_LOGIC, explan); 2030 fc_seq_ls_rjt(sp, ELS_RJT_LOGIC, explan);
2031out:
1780 fc_frame_free(fp); 2032 fc_frame_free(fp);
2033 if (ep)
2034 fc_exch_release(ep); /* drop hold from fc_exch_find */
1781} 2035}
1782 2036
2037/**
2038 * fc_exch_mgr_add() - Add an exchange manager to a local port's list of EMs
2039 * @lport: The local port to add the exchange manager to
2040 * @mp: The exchange manager to be added to the local port
2041 * @match: The match routine that indicates when this EM should be used
2042 */
1783struct fc_exch_mgr_anchor *fc_exch_mgr_add(struct fc_lport *lport, 2043struct fc_exch_mgr_anchor *fc_exch_mgr_add(struct fc_lport *lport,
1784 struct fc_exch_mgr *mp, 2044 struct fc_exch_mgr *mp,
1785 bool (*match)(struct fc_frame *)) 2045 bool (*match)(struct fc_frame *))
@@ -1799,6 +2059,10 @@ struct fc_exch_mgr_anchor *fc_exch_mgr_add(struct fc_lport *lport,
1799} 2059}
1800EXPORT_SYMBOL(fc_exch_mgr_add); 2060EXPORT_SYMBOL(fc_exch_mgr_add);
1801 2061
2062/**
2063 * fc_exch_mgr_destroy() - Destroy an exchange manager
2064 * @kref: The reference to the EM to be destroyed
2065 */
1802static void fc_exch_mgr_destroy(struct kref *kref) 2066static void fc_exch_mgr_destroy(struct kref *kref)
1803{ 2067{
1804 struct fc_exch_mgr *mp = container_of(kref, struct fc_exch_mgr, kref); 2068 struct fc_exch_mgr *mp = container_of(kref, struct fc_exch_mgr, kref);
@@ -1808,6 +2072,10 @@ static void fc_exch_mgr_destroy(struct kref *kref)
1808 kfree(mp); 2072 kfree(mp);
1809} 2073}
1810 2074
2075/**
2076 * fc_exch_mgr_del() - Delete an EM from a local port's list
2077 * @ema: The exchange manager anchor identifying the EM to be deleted
2078 */
1811void fc_exch_mgr_del(struct fc_exch_mgr_anchor *ema) 2079void fc_exch_mgr_del(struct fc_exch_mgr_anchor *ema)
1812{ 2080{
1813 /* remove EM anchor from EM anchors list */ 2081 /* remove EM anchor from EM anchors list */
@@ -1817,7 +2085,35 @@ void fc_exch_mgr_del(struct fc_exch_mgr_anchor *ema)
1817} 2085}
1818EXPORT_SYMBOL(fc_exch_mgr_del); 2086EXPORT_SYMBOL(fc_exch_mgr_del);
1819 2087
1820struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp, 2088/**
2089 * fc_exch_mgr_list_clone() - Share all exchange manager objects
2090 * @src: Source lport to clone exchange managers from
2091 * @dst: New lport that takes references to all the exchange managers
2092 */
2093int fc_exch_mgr_list_clone(struct fc_lport *src, struct fc_lport *dst)
2094{
2095 struct fc_exch_mgr_anchor *ema, *tmp;
2096
2097 list_for_each_entry(ema, &src->ema_list, ema_list) {
2098 if (!fc_exch_mgr_add(dst, ema->mp, ema->match))
2099 goto err;
2100 }
2101 return 0;
2102err:
2103 list_for_each_entry_safe(ema, tmp, &dst->ema_list, ema_list)
2104 fc_exch_mgr_del(ema);
2105 return -ENOMEM;
2106}
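This clone routine exists mainly for NPIV: a newly created VN_Port can share its parent's exchange managers, and therefore its XID space, instead of allocating its own. A hedged sketch of a vport setup path (the function name is hypothetical; the rollback on failure is done by the helper itself):

/* Illustrative NPIV-style setup: the child lport takes references on
 * every EM of its physical parent via the clone helper above.
 */
static int example_vport_em_setup(struct fc_lport *parent,
				  struct fc_lport *vport)
{
	int err;

	err = fc_exch_mgr_list_clone(parent, vport);
	if (err)	/* -ENOMEM: partial anchor additions were unwound */
		return err;
	return 0;
}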
2107
2108/**
2109 * fc_exch_mgr_alloc() - Allocate an exchange manager
2110 * @lport: The local port that the new EM will be associated with
2111 * @class: The default FC class for new exchanges
2112 * @min_xid: The minimum XID for exchanges from the new EM
2113 * @max_xid: The maximum XID for exchanges from the new EM
2114 * @match: The match routine for the new EM
2115 */
2116struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lport,
1821 enum fc_class class, 2117 enum fc_class class,
1822 u16 min_xid, u16 max_xid, 2118 u16 min_xid, u16 max_xid,
1823 bool (*match)(struct fc_frame *)) 2119 bool (*match)(struct fc_frame *))
@@ -1830,7 +2126,7 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp,
1830 2126
1831 if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN || 2127 if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN ||
1832 (min_xid & fc_cpu_mask) != 0) { 2128 (min_xid & fc_cpu_mask) != 0) {
1833 FC_LPORT_DBG(lp, "Invalid min_xid 0x:%x and max_xid 0x:%x\n", 2129 FC_LPORT_DBG(lport, "Invalid min_xid 0x:%x and max_xid 0x:%x\n",
1834 min_xid, max_xid); 2130 min_xid, max_xid);
1835 return NULL; 2131 return NULL;
1836 } 2132 }
@@ -1873,7 +2169,7 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp,
1873 } 2169 }
1874 2170
1875 kref_init(&mp->kref); 2171 kref_init(&mp->kref);
1876 if (!fc_exch_mgr_add(lp, mp, match)) { 2172 if (!fc_exch_mgr_add(lport, mp, match)) {
1877 free_percpu(mp->pool); 2173 free_percpu(mp->pool);
1878 goto free_mempool; 2174 goto free_mempool;
1879 } 2175 }
@@ -1894,76 +2190,26 @@ free_mp:
1894} 2190}
1895EXPORT_SYMBOL(fc_exch_mgr_alloc); 2191EXPORT_SYMBOL(fc_exch_mgr_alloc);
1896 2192
2193/**
2194 * fc_exch_mgr_free() - Free all exchange managers on a local port
2195 * @lport: The local port whose EMs are to be freed
2196 */
1897void fc_exch_mgr_free(struct fc_lport *lport) 2197void fc_exch_mgr_free(struct fc_lport *lport)
1898{ 2198{
1899 struct fc_exch_mgr_anchor *ema, *next; 2199 struct fc_exch_mgr_anchor *ema, *next;
1900 2200
2201 flush_workqueue(fc_exch_workqueue);
1901 list_for_each_entry_safe(ema, next, &lport->ema_list, ema_list) 2202 list_for_each_entry_safe(ema, next, &lport->ema_list, ema_list)
1902 fc_exch_mgr_del(ema); 2203 fc_exch_mgr_del(ema);
1903} 2204}
1904EXPORT_SYMBOL(fc_exch_mgr_free); 2205EXPORT_SYMBOL(fc_exch_mgr_free);
1905 2206
1906 2207/**
1907struct fc_seq *fc_exch_seq_send(struct fc_lport *lp, 2208 * fc_exch_recv() - Handler for received frames
1908 struct fc_frame *fp, 2209 * @lport: The local port the frame was received on
1909 void (*resp)(struct fc_seq *, 2210 * @fp: The received frame
1910 struct fc_frame *fp,
1911 void *arg),
1912 void (*destructor)(struct fc_seq *, void *),
1913 void *arg, u32 timer_msec)
1914{
1915 struct fc_exch *ep;
1916 struct fc_seq *sp = NULL;
1917 struct fc_frame_header *fh;
1918 int rc = 1;
1919
1920 ep = fc_exch_alloc(lp, fp);
1921 if (!ep) {
1922 fc_frame_free(fp);
1923 return NULL;
1924 }
1925 ep->esb_stat |= ESB_ST_SEQ_INIT;
1926 fh = fc_frame_header_get(fp);
1927 fc_exch_set_addr(ep, ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id));
1928 ep->resp = resp;
1929 ep->destructor = destructor;
1930 ep->arg = arg;
1931 ep->r_a_tov = FC_DEF_R_A_TOV;
1932 ep->lp = lp;
1933 sp = &ep->seq;
1934
1935 ep->fh_type = fh->fh_type; /* save for possible timeout handling */
1936 ep->f_ctl = ntoh24(fh->fh_f_ctl);
1937 fc_exch_setup_hdr(ep, fp, ep->f_ctl);
1938 sp->cnt++;
1939
1940 if (ep->xid <= lp->lro_xid)
1941 fc_fcp_ddp_setup(fr_fsp(fp), ep->xid);
1942
1943 if (unlikely(lp->tt.frame_send(lp, fp)))
1944 goto err;
1945
1946 if (timer_msec)
1947 fc_exch_timer_set_locked(ep, timer_msec);
1948 ep->f_ctl &= ~FC_FC_FIRST_SEQ; /* not first seq */
1949
1950 if (ep->f_ctl & FC_FC_SEQ_INIT)
1951 ep->esb_stat &= ~ESB_ST_SEQ_INIT;
1952 spin_unlock_bh(&ep->ex_lock);
1953 return sp;
1954err:
1955 rc = fc_exch_done_locked(ep);
1956 spin_unlock_bh(&ep->ex_lock);
1957 if (!rc)
1958 fc_exch_delete(ep);
1959 return NULL;
1960}
1961EXPORT_SYMBOL(fc_exch_seq_send);
1962
1963/*
1964 * Receive a frame
1965 */ 2211 */
1966void fc_exch_recv(struct fc_lport *lp, struct fc_frame *fp) 2212void fc_exch_recv(struct fc_lport *lport, struct fc_frame *fp)
1967{ 2213{
1968 struct fc_frame_header *fh = fc_frame_header_get(fp); 2214 struct fc_frame_header *fh = fc_frame_header_get(fp);
1969 struct fc_exch_mgr_anchor *ema; 2215 struct fc_exch_mgr_anchor *ema;
@@ -1971,8 +2217,8 @@ void fc_exch_recv(struct fc_lport *lp, struct fc_frame *fp)
1971 u16 oxid; 2217 u16 oxid;
1972 2218
1973 /* lport lock ? */ 2219 /* lport lock ? */
1974 if (!lp || lp->state == LPORT_ST_DISABLED) { 2220 if (!lport || lport->state == LPORT_ST_DISABLED) {
1975 FC_LPORT_DBG(lp, "Receiving frames for an lport that " 2221 FC_LPORT_DBG(lport, "Receiving frames for an lport that "
1976 "has not been initialized correctly\n"); 2222 "has not been initialized correctly\n");
1977 fc_frame_free(fp); 2223 fc_frame_free(fp);
1978 return; 2224 return;
@@ -1981,7 +2227,7 @@ void fc_exch_recv(struct fc_lport *lp, struct fc_frame *fp)
1981 f_ctl = ntoh24(fh->fh_f_ctl); 2227 f_ctl = ntoh24(fh->fh_f_ctl);
1982 oxid = ntohs(fh->fh_ox_id); 2228 oxid = ntohs(fh->fh_ox_id);
1983 if (f_ctl & FC_FC_EX_CTX) { 2229 if (f_ctl & FC_FC_EX_CTX) {
1984 list_for_each_entry(ema, &lp->ema_list, ema_list) { 2230 list_for_each_entry(ema, &lport->ema_list, ema_list) {
1985 if ((oxid >= ema->mp->min_xid) && 2231 if ((oxid >= ema->mp->min_xid) &&
1986 (oxid <= ema->mp->max_xid)) { 2232 (oxid <= ema->mp->max_xid)) {
1987 found = 1; 2233 found = 1;
@@ -1990,13 +2236,13 @@ void fc_exch_recv(struct fc_lport *lp, struct fc_frame *fp)
1990 } 2236 }
1991 2237
1992 if (!found) { 2238 if (!found) {
1993 FC_LPORT_DBG(lp, "Received response for out " 2239 FC_LPORT_DBG(lport, "Received response for out "
1994 "of range oxid:%hx\n", oxid); 2240 "of range oxid:%hx\n", oxid);
1995 fc_frame_free(fp); 2241 fc_frame_free(fp);
1996 return; 2242 return;
1997 } 2243 }
1998 } else 2244 } else
1999 ema = list_entry(lp->ema_list.prev, typeof(*ema), ema_list); 2245 ema = list_entry(lport->ema_list.prev, typeof(*ema), ema_list);
2000 2246
2001 /* 2247 /*
2002 * If frame is marked invalid, just drop it. 2248 * If frame is marked invalid, just drop it.
@@ -2015,37 +2261,56 @@ void fc_exch_recv(struct fc_lport *lp, struct fc_frame *fp)
2015 else if (f_ctl & FC_FC_SEQ_CTX) 2261 else if (f_ctl & FC_FC_SEQ_CTX)
2016 fc_exch_recv_resp(ema->mp, fp); 2262 fc_exch_recv_resp(ema->mp, fp);
2017 else 2263 else
2018 fc_exch_recv_req(lp, ema->mp, fp); 2264 fc_exch_recv_req(lport, ema->mp, fp);
2019 break; 2265 break;
2020 default: 2266 default:
2021 FC_LPORT_DBG(lp, "dropping invalid frame (eof %x)", fr_eof(fp)); 2267 FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)",
2268 fr_eof(fp));
2022 fc_frame_free(fp); 2269 fc_frame_free(fp);
2023 } 2270 }
2024} 2271}
2025EXPORT_SYMBOL(fc_exch_recv); 2272EXPORT_SYMBOL(fc_exch_recv);
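The routing in fc_exch_recv() keys off two F_CTL bits: FC_FC_EX_CTX set means the sender is the exchange responder, so the frame answers an exchange we originated and OX_ID selects the EM; FC_FC_SEQ_CTX set means the sender is the sequence recipient. A summary in comment form, hedged as a reading of the hunks above:

/*
 * F_CTL dispatch as read from fc_exch_recv() (non-BLS frames):
 *
 *   EX_CTX=1, SEQ_CTX=0  ->  fc_exch_recv_seq_resp()
 *       remote originates a new sequence on an exchange we own
 *   SEQ_CTX=1            ->  fc_exch_recv_resp()
 *       remote responds within a sequence we originated
 *   EX_CTX=0, SEQ_CTX=0  ->  fc_exch_recv_req()
 *       brand-new request; we become the responder
 *
 * EX_CTX also picks the EM: when set, OX_ID must land in some
 * anchored EM's [min_xid, max_xid] range or the frame is dropped.
 */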
2026 2273
2027int fc_exch_init(struct fc_lport *lp) 2274/**
2275 * fc_exch_init() - Initialize the exchange layer for a local port
2276 * @lport: The local port to initialize the exchange layer for
2277 */
2278int fc_exch_init(struct fc_lport *lport)
2028{ 2279{
2029 if (!lp->tt.seq_start_next) 2280 if (!lport->tt.seq_start_next)
2030 lp->tt.seq_start_next = fc_seq_start_next; 2281 lport->tt.seq_start_next = fc_seq_start_next;
2031 2282
2032 if (!lp->tt.exch_seq_send) 2283 if (!lport->tt.exch_seq_send)
2033 lp->tt.exch_seq_send = fc_exch_seq_send; 2284 lport->tt.exch_seq_send = fc_exch_seq_send;
2034 2285
2035 if (!lp->tt.seq_send) 2286 if (!lport->tt.seq_send)
2036 lp->tt.seq_send = fc_seq_send; 2287 lport->tt.seq_send = fc_seq_send;
2037 2288
2038 if (!lp->tt.seq_els_rsp_send) 2289 if (!lport->tt.seq_els_rsp_send)
2039 lp->tt.seq_els_rsp_send = fc_seq_els_rsp_send; 2290 lport->tt.seq_els_rsp_send = fc_seq_els_rsp_send;
2040 2291
2041 if (!lp->tt.exch_done) 2292 if (!lport->tt.exch_done)
2042 lp->tt.exch_done = fc_exch_done; 2293 lport->tt.exch_done = fc_exch_done;
2043 2294
2044 if (!lp->tt.exch_mgr_reset) 2295 if (!lport->tt.exch_mgr_reset)
2045 lp->tt.exch_mgr_reset = fc_exch_mgr_reset; 2296 lport->tt.exch_mgr_reset = fc_exch_mgr_reset;
2046 2297
2047 if (!lp->tt.seq_exch_abort) 2298 if (!lport->tt.seq_exch_abort)
2048 lp->tt.seq_exch_abort = fc_seq_exch_abort; 2299 lport->tt.seq_exch_abort = fc_seq_exch_abort;
2300
2301 return 0;
2302}
2303EXPORT_SYMBOL(fc_exch_init);
2304
2305/**
2306 * fc_setup_exch_mgr() - Set up the exchange-manager slab cache and workqueue
2307 */
2308int fc_setup_exch_mgr(void)
2309{
2310 fc_em_cachep = kmem_cache_create("libfc_em", sizeof(struct fc_exch),
2311 0, SLAB_HWCACHE_ALIGN, NULL);
2312 if (!fc_em_cachep)
2313 return -ENOMEM;
2049 2314
2050 /* 2315 /*
2051 * Initialize fc_cpu_mask and fc_cpu_order. The 2316 * Initialize fc_cpu_mask and fc_cpu_order. The
@@ -2069,20 +2334,17 @@ int fc_exch_init(struct fc_lport *lp)
2069 } 2334 }
2070 fc_cpu_mask--; 2335 fc_cpu_mask--;
2071 2336
2072 return 0; 2337 fc_exch_workqueue = create_singlethread_workqueue("fc_exch_workqueue");
2073} 2338 if (!fc_exch_workqueue)
2074EXPORT_SYMBOL(fc_exch_init);
2075
2076int fc_setup_exch_mgr(void)
2077{
2078 fc_em_cachep = kmem_cache_create("libfc_em", sizeof(struct fc_exch),
2079 0, SLAB_HWCACHE_ALIGN, NULL);
2080 if (!fc_em_cachep)
2081 return -ENOMEM; 2339 return -ENOMEM;
2082 return 0; 2340 return 0;
2083} 2341}
2084 2342
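The fc_cpu_mask/fc_cpu_order pair computed in the block above embeds the owning CPU in the low bits of every XID, which is also why fc_exch_mgr_alloc() rejects a min_xid with any fc_cpu_mask bits set. A standalone sketch of the split; the pool-index formula is my reading of the per-CPU lookup, so treat it as an assumption:

#include <stdio.h>

int main(void)
{
	unsigned int nr_cpus = 6;	/* example CPU count */
	unsigned int order = 0, mask;
	unsigned int xid = 0x1a5, min_xid = 0;

	while ((1u << order) < nr_cpus)	/* round up to a power of two */
		order++;
	mask = (1u << order) - 1;	/* 6 CPUs -> order 3, mask 0x7 */

	printf("cpu=%u pool_index=%u\n",
	       xid & mask,		  /* low bits select the CPU pool */
	       (xid - min_xid) >> order); /* remaining bits index the pool */
	return 0;
}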
2085void fc_destroy_exch_mgr(void) 2343/**
2344 * fc_destroy_exch_mgr() - Destroy the exchange-manager workqueue and cache
2345 */
2346void fc_destroy_exch_mgr(void)
2086{ 2347{
2348 destroy_workqueue(fc_exch_workqueue);
2087 kmem_cache_destroy(fc_em_cachep); 2349 kmem_cache_destroy(fc_em_cachep);
2088} 2350}
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 59a4408b27b5..17396c708b08 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -27,6 +27,7 @@
27#include <linux/scatterlist.h> 27#include <linux/scatterlist.h>
28#include <linux/err.h> 28#include <linux/err.h>
29#include <linux/crc32.h> 29#include <linux/crc32.h>
30#include <linux/slab.h>
30 31
31#include <scsi/scsi_tcq.h> 32#include <scsi/scsi_tcq.h>
32#include <scsi/scsi.h> 33#include <scsi/scsi.h>
@@ -39,26 +40,19 @@
39#include <scsi/libfc.h> 40#include <scsi/libfc.h>
40#include <scsi/fc_encode.h> 41#include <scsi/fc_encode.h>
41 42
42MODULE_AUTHOR("Open-FCoE.org"); 43#include "fc_libfc.h"
43MODULE_DESCRIPTION("libfc");
44MODULE_LICENSE("GPL v2");
45 44
46unsigned int fc_debug_logging; 45struct kmem_cache *scsi_pkt_cachep;
47module_param_named(debug_logging, fc_debug_logging, int, S_IRUGO|S_IWUSR);
48MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
49
50static struct kmem_cache *scsi_pkt_cachep;
51 46
52/* SRB state definitions */ 47/* SRB state definitions */
53#define FC_SRB_FREE 0 /* cmd is free */ 48#define FC_SRB_FREE 0 /* cmd is free */
54#define FC_SRB_CMD_SENT (1 << 0) /* cmd has been sent */ 49#define FC_SRB_CMD_SENT (1 << 0) /* cmd has been sent */
55#define FC_SRB_RCV_STATUS (1 << 1) /* response has arrived */ 50#define FC_SRB_RCV_STATUS (1 << 1) /* response has arrived */
56#define FC_SRB_ABORT_PENDING (1 << 2) /* cmd abort sent to device */ 51#define FC_SRB_ABORT_PENDING (1 << 2) /* cmd abort sent to device */
57#define FC_SRB_ABORTED (1 << 3) /* abort acknowleged */ 52#define FC_SRB_ABORTED (1 << 3) /* abort acknowledged */
58#define FC_SRB_DISCONTIG (1 << 4) /* non-sequential data recvd */ 53#define FC_SRB_DISCONTIG (1 << 4) /* non-sequential data recvd */
59#define FC_SRB_COMPL (1 << 5) /* fc_io_compl has been run */ 54#define FC_SRB_COMPL (1 << 5) /* fc_io_compl has been run */
60#define FC_SRB_FCP_PROCESSING_TMO (1 << 6) /* timer function processing */ 55#define FC_SRB_FCP_PROCESSING_TMO (1 << 6) /* timer function processing */
61#define FC_SRB_NOMEM (1 << 7) /* dropped to out of mem */
62 56
63#define FC_SRB_READ (1 << 1) 57#define FC_SRB_READ (1 << 1)
64#define FC_SRB_WRITE (1 << 0) 58#define FC_SRB_WRITE (1 << 0)
@@ -73,10 +67,22 @@ static struct kmem_cache *scsi_pkt_cachep;
73#define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status) 67#define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status)
74#define CMD_RESID_LEN(Cmnd) ((Cmnd)->SCp.buffers_residual) 68#define CMD_RESID_LEN(Cmnd) ((Cmnd)->SCp.buffers_residual)
75 69
70/**
71 * struct fc_fcp_internal - FCP layer internal data
72 * @scsi_pkt_pool: Memory pool to draw FCP packets from
73 * @scsi_queue_lock: Protects the scsi_pkt_queue
74 * @scsi_pkt_queue: Current FCP packets
75 * @last_can_queue_ramp_down_time: time of the last can_queue ramp-down
76 * @last_can_queue_ramp_up_time: time of the last can_queue ramp-up
77 * @max_can_queue: maximum can_queue size
78 */
76struct fc_fcp_internal { 79struct fc_fcp_internal {
77 mempool_t *scsi_pkt_pool; 80 mempool_t *scsi_pkt_pool;
78 struct list_head scsi_pkt_queue; 81 spinlock_t scsi_queue_lock;
79 u8 throttled; 82 struct list_head scsi_pkt_queue;
83 unsigned long last_can_queue_ramp_down_time;
84 unsigned long last_can_queue_ramp_up_time;
85 int max_can_queue;
80}; 86};
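The three new ramp fields support time-based throttling of the SCSI host's can_queue: ramp down under memory pressure, ramp back up periodically once traffic flows. The helper below is a loud assumption, a minimal sketch of such gating using the FC_CAN_QUEUE_PERIOD constant added later in this patch, not the patch's actual implementation:

/* Hypothetical sketch only: rate-limit can_queue ramp-downs to one
 * per FC_CAN_QUEUE_PERIOD, in the spirit of the fields above.
 */
static void example_can_queue_ramp_down(struct fc_fcp_internal *si)
{
	unsigned long now = jiffies;

	if (time_before(now, si->last_can_queue_ramp_down_time +
			FC_CAN_QUEUE_PERIOD))
		return;			/* too soon since the last ramp-down */

	si->last_can_queue_ramp_down_time = now;
	if (si->max_can_queue > 1)
		si->max_can_queue >>= 1;	/* halve, but never below 1 */
}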
81 87
82#define fc_get_scsi_internal(x) ((struct fc_fcp_internal *)(x)->scsi_priv) 88#define fc_get_scsi_internal(x) ((struct fc_fcp_internal *)(x)->scsi_priv)
@@ -90,9 +96,9 @@ static void fc_fcp_recv(struct fc_seq *, struct fc_frame *, void *);
90static void fc_fcp_resp(struct fc_fcp_pkt *, struct fc_frame *); 96static void fc_fcp_resp(struct fc_fcp_pkt *, struct fc_frame *);
91static void fc_fcp_complete_locked(struct fc_fcp_pkt *); 97static void fc_fcp_complete_locked(struct fc_fcp_pkt *);
92static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *); 98static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *);
93static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp); 99static void fc_fcp_error(struct fc_fcp_pkt *, struct fc_frame *);
94static void fc_timeout_error(struct fc_fcp_pkt *); 100static void fc_timeout_error(struct fc_fcp_pkt *);
95static void fc_fcp_timeout(unsigned long data); 101static void fc_fcp_timeout(unsigned long);
96static void fc_fcp_rec(struct fc_fcp_pkt *); 102static void fc_fcp_rec(struct fc_fcp_pkt *);
97static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *); 103static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *);
98static void fc_fcp_rec_resp(struct fc_seq *, struct fc_frame *, void *); 104static void fc_fcp_rec_resp(struct fc_seq *, struct fc_frame *, void *);
@@ -124,6 +130,7 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *);
124#define FC_SCSI_TM_TOV (10 * HZ) 130#define FC_SCSI_TM_TOV (10 * HZ)
125#define FC_SCSI_REC_TOV (2 * HZ) 131#define FC_SCSI_REC_TOV (2 * HZ)
126#define FC_HOST_RESET_TIMEOUT (30 * HZ) 132#define FC_HOST_RESET_TIMEOUT (30 * HZ)
133#define FC_CAN_QUEUE_PERIOD (60 * HZ)
127 134
128#define FC_MAX_ERROR_CNT 5 135#define FC_MAX_ERROR_CNT 5
129#define FC_MAX_RECOV_RETRY 3 136#define FC_MAX_RECOV_RETRY 3
@@ -131,23 +138,22 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *);
131#define FC_FCP_DFLT_QUEUE_DEPTH 32 138#define FC_FCP_DFLT_QUEUE_DEPTH 32
132 139
133/** 140/**
134 * fc_fcp_pkt_alloc - allocation routine for scsi_pkt packet 141 * fc_fcp_pkt_alloc() - Allocate a fcp_pkt
135 * @lp: fc lport struct 142 * @lport: The local port that the FCP packet is for
136 * @gfp: gfp flags for allocation 143 * @gfp: GFP flags for allocation
137 * 144 *
138 * This is used by upper layer scsi driver. 145 * Return value: fcp_pkt structure or null on allocation failure.
139 * Return Value : scsi_pkt structure or null on allocation failure. 146 * Context: Can be called from process context, no lock is required.
140 * Context : call from process context. no locking required.
141 */ 147 */
142static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lp, gfp_t gfp) 148static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp)
143{ 149{
144 struct fc_fcp_internal *si = fc_get_scsi_internal(lp); 150 struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
145 struct fc_fcp_pkt *fsp; 151 struct fc_fcp_pkt *fsp;
146 152
147 fsp = mempool_alloc(si->scsi_pkt_pool, gfp); 153 fsp = mempool_alloc(si->scsi_pkt_pool, gfp);
148 if (fsp) { 154 if (fsp) {
149 memset(fsp, 0, sizeof(*fsp)); 155 memset(fsp, 0, sizeof(*fsp));
150 fsp->lp = lp; 156 fsp->lp = lport;
151 atomic_set(&fsp->ref_cnt, 1); 157 atomic_set(&fsp->ref_cnt, 1);
152 init_timer(&fsp->timer); 158 init_timer(&fsp->timer);
153 INIT_LIST_HEAD(&fsp->list); 159 INIT_LIST_HEAD(&fsp->list);
@@ -157,12 +163,11 @@ static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lp, gfp_t gfp)
157} 163}
158 164
159/** 165/**
160 * fc_fcp_pkt_release() - release hold on scsi_pkt packet 166 * fc_fcp_pkt_release() - Release hold on a fcp_pkt
161 * @fsp: fcp packet struct 167 * @fsp: The FCP packet to be released
162 * 168 *
163 * This is used by upper layer scsi driver. 169 * Context: Can be called from process or interrupt context,
164 * Context : call from process and interrupt context. 170 * no lock is required.
165 * no locking required
166 */ 171 */
167static void fc_fcp_pkt_release(struct fc_fcp_pkt *fsp) 172static void fc_fcp_pkt_release(struct fc_fcp_pkt *fsp)
168{ 173{
@@ -173,20 +178,25 @@ static void fc_fcp_pkt_release(struct fc_fcp_pkt *fsp)
173 } 178 }
174} 179}
175 180
181/**
182 * fc_fcp_pkt_hold() - Hold a fcp_pkt
183 * @fsp: The FCP packet to be held
184 */
176static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp) 185static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp)
177{ 186{
178 atomic_inc(&fsp->ref_cnt); 187 atomic_inc(&fsp->ref_cnt);
179} 188}
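fc_fcp_pkt_alloc(), fc_fcp_pkt_hold() and fc_fcp_pkt_release() together implement a plain reference-count lifecycle: the allocator hands the caller the first reference, other holders take extra ones, and the packet goes back to the pool when the count drops to zero. A minimal sketch of that lifecycle, assuming C11 atomics, with malloc()/free() standing in for the mempool calls:

#include <stdatomic.h>
#include <stdlib.h>
#include <string.h>

struct pkt {
	atomic_int ref_cnt;     /* one reference per holder */
	void *lport;
};

/* stands in for fc_fcp_pkt_alloc(): zero the packet and give the
 * caller the first reference */
static struct pkt *pkt_alloc(void *lport)
{
	struct pkt *fsp = malloc(sizeof(*fsp));  /* mempool_alloc() stand-in */

	if (fsp) {
		memset(fsp, 0, sizeof(*fsp));
		fsp->lport = lport;
		atomic_init(&fsp->ref_cnt, 1);
	}
	return fsp;
}

static void pkt_hold(struct pkt *fsp)
{
	atomic_fetch_add(&fsp->ref_cnt, 1);
}

/* free only when the last reference is dropped */
static void pkt_release(struct pkt *fsp)
{
	if (atomic_fetch_sub(&fsp->ref_cnt, 1) == 1)
		free(fsp);                       /* mempool_free() stand-in */
}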
180 189
181/** 190/**
182 * fc_fcp_pkt_destroy() - release hold on scsi_pkt packet 191 * fc_fcp_pkt_destroy() - Release hold on a fcp_pkt
183 * @seq: exchange sequence 192 * @seq: The sequence that the FCP packet is on (required by destructor API)
184 * @fsp: fcp packet struct 193 * @fsp: The FCP packet to be released
194 *
195 * This routine is called by a destructor callback in the exch_seq_send()
196 * routine of the libfc Transport Template. The 'struct fc_seq' is a required
197 * argument even though it is not used by this routine.
185 * 198 *
186 * Release hold on scsi_pkt packet set to keep scsi_pkt 199 * Context: No locking required.
187 * till EM layer exch resource is not freed.
188 * Context : called from from EM layer.
189 * no locking required
190 */ 200 */
191static void fc_fcp_pkt_destroy(struct fc_seq *seq, void *fsp) 201static void fc_fcp_pkt_destroy(struct fc_seq *seq, void *fsp)
192{ 202{
@@ -194,10 +204,10 @@ static void fc_fcp_pkt_destroy(struct fc_seq *seq, void *fsp)
194} 204}
195 205
196/** 206/**
197 * fc_fcp_lock_pkt() - lock a packet and get a ref to it. 207 * fc_fcp_lock_pkt() - Lock a fcp_pkt and increase its reference count
198 * @fsp: fcp packet 208 * @fsp: The FCP packet to be locked and incremented
199 * 209 *
200 * We should only return error if we return a command to scsi-ml before 210 * We should only return error if we return a command to SCSI-ml before
201 * getting a response. This can happen in cases where we send an abort, but 211
202 * do not wait for the response and the abort and command can be passing 212 * do not wait for the response and the abort and command can be passing
203 * each other on the wire/network-layer. 213 * each other on the wire/network-layer.
@@ -222,18 +232,33 @@ static inline int fc_fcp_lock_pkt(struct fc_fcp_pkt *fsp)
222 return 0; 232 return 0;
223} 233}
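The body of fc_fcp_lock_pkt() is not visible in this hunk; the sketch below models only the behaviour the comment describes — take the packet lock, refuse with -EPERM once the command has already been completed back to the midlayer, otherwise take a reference — with a pthread mutex standing in for the bottom-half spinlock (an assumption for illustration, not the kernel code):

#include <errno.h>
#include <pthread.h>

#define SRB_COMPL (1 << 5)       /* mirrors FC_SRB_COMPL above */

struct locked_pkt {
	pthread_mutex_t lock;    /* spin_lock_bh() stand-in */
	unsigned int state;
	int ref_cnt;
};

/* Lock the packet and take a reference, but refuse once the command
 * has already been completed back to the SCSI midlayer. */
static int lock_pkt(struct locked_pkt *fsp)
{
	pthread_mutex_lock(&fsp->lock);
	if (fsp->state & SRB_COMPL) {
		pthread_mutex_unlock(&fsp->lock);
		return -EPERM;
	}
	fsp->ref_cnt++;
	return 0;
}

/* sketch only: the real code unlocks first, then releases the hold */
static void unlock_pkt(struct locked_pkt *fsp)
{
	fsp->ref_cnt--;
	pthread_mutex_unlock(&fsp->lock);
}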
224 234
235/**
236 * fc_fcp_unlock_pkt() - Release a fcp_pkt's lock and decrement its
237 * reference count
238 * @fsp: The FCP packet to be unlocked and decremented
239 */
225static inline void fc_fcp_unlock_pkt(struct fc_fcp_pkt *fsp) 240static inline void fc_fcp_unlock_pkt(struct fc_fcp_pkt *fsp)
226{ 241{
227 spin_unlock_bh(&fsp->scsi_pkt_lock); 242 spin_unlock_bh(&fsp->scsi_pkt_lock);
228 fc_fcp_pkt_release(fsp); 243 fc_fcp_pkt_release(fsp);
229} 244}
230 245
246/**
247 * fc_fcp_timer_set() - Start a timer for a fcp_pkt
248 * @fsp: The FCP packet to start a timer for
249 * @delay: The timeout period for the timer
250 */
231static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay) 251static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay)
232{ 252{
233 if (!(fsp->state & FC_SRB_COMPL)) 253 if (!(fsp->state & FC_SRB_COMPL))
234 mod_timer(&fsp->timer, jiffies + delay); 254 mod_timer(&fsp->timer, jiffies + delay);
235} 255}
236 256
257/**
258 * fc_fcp_send_abort() - Send an abort for exchanges associated with a
259 * fcp_pkt
260 * @fsp: The FCP packet to abort exchanges on
261 */
237static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp) 262static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
238{ 263{
239 if (!fsp->seq_ptr) 264 if (!fsp->seq_ptr)
@@ -243,9 +268,14 @@ static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
243 return fsp->lp->tt.seq_exch_abort(fsp->seq_ptr, 0); 268 return fsp->lp->tt.seq_exch_abort(fsp->seq_ptr, 0);
244} 269}
245 270
246/* 271/**
247 * Retry command. 272 * fc_fcp_retry_cmd() - Retry a fcp_pkt
248 * An abort isn't needed. 273 * @fsp: The FCP packet to be retried
274 *
275 * Sets the status code to FC_ERROR and then calls
276 * fc_fcp_complete_locked() which in turn calls fc_io_compl().
277 * fc_io_compl() will notify the SCSI-ml that the I/O is done.
278 * The SCSI-ml will retry the command.
249 */ 279 */
250static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp) 280static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp)
251{ 281{
@@ -260,64 +290,145 @@ static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp)
260 fc_fcp_complete_locked(fsp); 290 fc_fcp_complete_locked(fsp);
261} 291}
262 292
263/* 293/**
264 * fc_fcp_ddp_setup - calls to LLD's ddp_setup to set up DDP 294 * fc_fcp_ddp_setup() - Calls an LLD's ddp_setup routine to set up DDP context
265 * transfer for a read I/O indicated by the fc_fcp_pkt. 295 * @fsp: The FCP packet that will manage the DDP frames
266 * @fsp: ptr to the fc_fcp_pkt 296 * @xid: The XID that will be used for the DDP exchange
267 *
268 * This is called in exch_seq_send() when we have a newly allocated
269 * exchange with a valid exchange id to setup ddp.
270 *
271 * returns: none
272 */ 297 */
273void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid) 298void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid)
274{ 299{
275 struct fc_lport *lp; 300 struct fc_lport *lport;
276
277 if (!fsp)
278 return;
279 301
280 lp = fsp->lp; 302 lport = fsp->lp;
281 if ((fsp->req_flags & FC_SRB_READ) && 303 if ((fsp->req_flags & FC_SRB_READ) &&
282 (lp->lro_enabled) && (lp->tt.ddp_setup)) { 304 (lport->lro_enabled) && (lport->tt.ddp_setup)) {
283 if (lp->tt.ddp_setup(lp, xid, scsi_sglist(fsp->cmd), 305 if (lport->tt.ddp_setup(lport, xid, scsi_sglist(fsp->cmd),
284 scsi_sg_count(fsp->cmd))) 306 scsi_sg_count(fsp->cmd)))
285 fsp->xfer_ddp = xid; 307 fsp->xfer_ddp = xid;
286 } 308 }
287} 309}
288EXPORT_SYMBOL(fc_fcp_ddp_setup);
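The gating in fc_fcp_ddp_setup() is worth noting: DDP is attempted only for reads, only when the port has LRO enabled, and only when the LLD actually supplies the hook; a truthy return from the hook records the XID of the offloaded exchange. A hedged stand-alone sketch of those checks (stub types, hypothetical names):

#include <stddef.h>

#define XID_UNKNOWN 0xffff      /* FC_XID_UNKNOWN stand-in */

struct ddp_lport {
	int lro_enabled;
	/* transport-template hook; may legitimately be NULL */
	int (*ddp_setup)(struct ddp_lport *lp, unsigned short xid);
};

/* Offload is attempted only for reads, only when the port enables LRO,
 * and only when the LLD provides the hook -- mirroring the checks in
 * fc_fcp_ddp_setup() above. */
static void ddp_setup_if_possible(struct ddp_lport *lp, int is_read,
				  unsigned short xid, unsigned short *xfer_ddp)
{
	*xfer_ddp = XID_UNKNOWN;
	if (is_read && lp->lro_enabled && lp->ddp_setup &&
	    lp->ddp_setup(lp, xid))
		*xfer_ddp = xid;        /* remember the offloaded exchange */
}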
289 310
290/* 311/**
291 * fc_fcp_ddp_done - calls to LLD's ddp_done to release any 312 * fc_fcp_ddp_done() - Calls an LLD's ddp_done routine to release any
292 * DDP related resources for this I/O if it is initialized 313 * DDP related resources for a fcp_pkt
293 * as a ddp transfer 314 * @fsp: The FCP packet that DDP had been used on
294 * @fsp: ptr to the fc_fcp_pkt
295 *
296 * returns: none
297 */ 315 */
298static void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp) 316static void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp)
299{ 317{
300 struct fc_lport *lp; 318 struct fc_lport *lport;
301 319
302 if (!fsp) 320 if (!fsp)
303 return; 321 return;
304 322
305 lp = fsp->lp; 323 if (fsp->xfer_ddp == FC_XID_UNKNOWN)
306 if (fsp->xfer_ddp && lp->tt.ddp_done) { 324 return;
307 fsp->xfer_len = lp->tt.ddp_done(lp, fsp->xfer_ddp); 325
308 fsp->xfer_ddp = 0; 326 lport = fsp->lp;
327 if (lport->tt.ddp_done) {
328 fsp->xfer_len = lport->tt.ddp_done(lport, fsp->xfer_ddp);
329 fsp->xfer_ddp = FC_XID_UNKNOWN;
330 }
331}
332
333/**
334 * fc_fcp_can_queue_ramp_up() - increases can_queue
335 * @lport: lport to ramp up can_queue
336 *
337 * Locking notes: Called with Scsi_Host lock held
338 */
339static void fc_fcp_can_queue_ramp_up(struct fc_lport *lport)
340{
341 struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
342 int can_queue;
343
344 if (si->last_can_queue_ramp_up_time &&
345 (time_before(jiffies, si->last_can_queue_ramp_up_time +
346 FC_CAN_QUEUE_PERIOD)))
347 return;
348
349 if (time_before(jiffies, si->last_can_queue_ramp_down_time +
350 FC_CAN_QUEUE_PERIOD))
351 return;
352
353 si->last_can_queue_ramp_up_time = jiffies;
354
355 can_queue = lport->host->can_queue << 1;
356 if (can_queue >= si->max_can_queue) {
357 can_queue = si->max_can_queue;
358 si->last_can_queue_ramp_down_time = 0;
309 } 359 }
360 lport->host->can_queue = can_queue;
361 shost_printk(KERN_ERR, lport->host, "libfc: increased "
362 "can_queue to %d.\n", can_queue);
310} 363}
311 364
365/**
366 * fc_fcp_can_queue_ramp_down() - reduces can_queue
367 * @lport: lport to reduce can_queue
368 *
369 * If we are getting memory allocation failures, then we may
370 * be trying to execute too many commands. We let the running
371 * commands complete or timeout, then try again with a reduced
372 * can_queue. Eventually we will hit the point where we run
373 * on all reserved structs.
374 *
375 * Locking notes: Called with Scsi_Host lock held
376 */
377static void fc_fcp_can_queue_ramp_down(struct fc_lport *lport)
378{
379 struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
380 int can_queue;
381
382 if (si->last_can_queue_ramp_down_time &&
383 (time_before(jiffies, si->last_can_queue_ramp_down_time +
384 FC_CAN_QUEUE_PERIOD)))
385 return;
386
387 si->last_can_queue_ramp_down_time = jiffies;
388
389 can_queue = lport->host->can_queue;
390 can_queue >>= 1;
391 if (!can_queue)
392 can_queue = 1;
393 lport->host->can_queue = can_queue;
394 shost_printk(KERN_ERR, lport->host, "libfc: Could not allocate frame.\n"
395 "Reducing can_queue to %d.\n", can_queue);
396}
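The two ramp routines above form a simple additive-backoff controller: allocation failures halve can_queue, quiet periods double it back toward the original ceiling, and both directions are rate-limited to one change per FC_CAN_QUEUE_PERIOD. A compilable userspace sketch of the same arithmetic, with time() standing in for jiffies and the time_before() helpers (an assumption made purely for illustration):

#include <time.h>

#define RAMP_PERIOD 60          /* seconds; FC_CAN_QUEUE_PERIOD stand-in */

struct ramp_state {
	time_t last_down;
	time_t last_up;
	int max_can_queue;
};

static void ramp_down(struct ramp_state *s, int *can_queue)
{
	time_t now = time(NULL);

	/* rate-limit: at most one reduction per period */
	if (s->last_down && now < s->last_down + RAMP_PERIOD)
		return;
	s->last_down = now;
	*can_queue >>= 1;               /* halve, but never drop to zero */
	if (!*can_queue)
		*can_queue = 1;
}

static void ramp_up(struct ramp_state *s, int *can_queue)
{
	time_t now = time(NULL);

	if (s->last_up && now < s->last_up + RAMP_PERIOD)
		return;
	/* hold off while a recent ramp-down is still settling */
	if (s->last_down && now < s->last_down + RAMP_PERIOD)
		return;
	s->last_up = now;
	*can_queue <<= 1;               /* double, clamped to the ceiling */
	if (*can_queue >= s->max_can_queue) {
		*can_queue = s->max_can_queue;
		s->last_down = 0;
	}
}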
312 397
313/* 398/**
314 * Receive SCSI data from target. 399 * fc_fcp_frame_alloc() - Allocates fc_frame structure and buffer.
315 * Called after receiving solicited data. 400 * @lport: fc lport struct
401 * @len: payload length
402 *
403 * Allocates an fc_frame structure and buffer; if the allocation
404 * fails, can_queue is reduced.
405 */
406static inline struct fc_frame *fc_fcp_frame_alloc(struct fc_lport *lport,
407 size_t len)
408{
409 struct fc_frame *fp;
410 unsigned long flags;
411
412 fp = fc_frame_alloc(lport, len);
413 if (likely(fp))
414 return fp;
415
416 /* error case */
417 spin_lock_irqsave(lport->host->host_lock, flags);
418 fc_fcp_can_queue_ramp_down(lport);
419 spin_unlock_irqrestore(lport->host->host_lock, flags);
420 return NULL;
421}
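fc_fcp_frame_alloc() couples allocation with back-pressure: a failed frame allocation immediately ramps can_queue down so the midlayer puts fewer commands in flight. A self-contained sketch of that fallback shape — malloc() stands in for fc_frame_alloc(), and the host_lock taken around the real ramp-down is elided:

#include <stdlib.h>

/* fc_frame_alloc() stand-in: on failure the caller's queue depth is
 * halved so fewer commands are outstanding on the next attempt */
static void *frame_alloc_or_backoff(size_t len, int *can_queue)
{
	void *fp = malloc(len);

	if (fp)
		return fp;
	*can_queue >>= 1;       /* done under host_lock in the real code */
	if (!*can_queue)
		*can_queue = 1;
	return NULL;
}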
422
423/**
424 * fc_fcp_recv_data() - Handler for receiving SCSI-FCP data from a target
425 * @fsp: The FCP packet the data is on
426 * @fp: The data frame
316 */ 427 */
317static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp) 428static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
318{ 429{
319 struct scsi_cmnd *sc = fsp->cmd; 430 struct scsi_cmnd *sc = fsp->cmd;
320 struct fc_lport *lp = fsp->lp; 431 struct fc_lport *lport = fsp->lp;
321 struct fcoe_dev_stats *stats; 432 struct fcoe_dev_stats *stats;
322 struct fc_frame_header *fh; 433 struct fc_frame_header *fh;
323 size_t start_offset; 434 size_t start_offset;
@@ -327,7 +438,7 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
327 size_t len; 438 size_t len;
328 void *buf; 439 void *buf;
329 struct scatterlist *sg; 440 struct scatterlist *sg;
330 size_t remaining; 441 u32 nents;
331 442
332 fh = fc_frame_header_get(fp); 443 fh = fc_frame_header_get(fp);
333 offset = ntohl(fh->fh_parm_offset); 444 offset = ntohl(fh->fh_parm_offset);
@@ -351,65 +462,29 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
351 if (offset != fsp->xfer_len) 462 if (offset != fsp->xfer_len)
352 fsp->state |= FC_SRB_DISCONTIG; 463 fsp->state |= FC_SRB_DISCONTIG;
353 464
354 crc = 0;
355 if (fr_flags(fp) & FCPHF_CRC_UNCHECKED)
356 crc = crc32(~0, (u8 *) fh, sizeof(*fh));
357
358 sg = scsi_sglist(sc); 465 sg = scsi_sglist(sc);
359 remaining = len; 466 nents = scsi_sg_count(sc);
360
361 while (remaining > 0 && sg) {
362 size_t off;
363 void *page_addr;
364 size_t sg_bytes;
365
366 if (offset >= sg->length) {
367 offset -= sg->length;
368 sg = sg_next(sg);
369 continue;
370 }
371 sg_bytes = min(remaining, sg->length - offset);
372
373 /*
374 * The scatterlist item may be bigger than PAGE_SIZE,
375 * but we are limited to mapping PAGE_SIZE at a time.
376 */
377 off = offset + sg->offset;
378 sg_bytes = min(sg_bytes, (size_t)
379 (PAGE_SIZE - (off & ~PAGE_MASK)));
380 page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT),
381 KM_SOFTIRQ0);
382 if (!page_addr)
383 break; /* XXX panic? */
384
385 if (fr_flags(fp) & FCPHF_CRC_UNCHECKED)
386 crc = crc32(crc, buf, sg_bytes);
387 memcpy((char *)page_addr + (off & ~PAGE_MASK), buf,
388 sg_bytes);
389
390 kunmap_atomic(page_addr, KM_SOFTIRQ0);
391 buf += sg_bytes;
392 offset += sg_bytes;
393 remaining -= sg_bytes;
394 copy_len += sg_bytes;
395 }
396 467
397 if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) { 468 if (!(fr_flags(fp) & FCPHF_CRC_UNCHECKED)) {
469 copy_len = fc_copy_buffer_to_sglist(buf, len, sg, &nents,
470 &offset, KM_SOFTIRQ0, NULL);
471 } else {
472 crc = crc32(~0, (u8 *) fh, sizeof(*fh));
473 copy_len = fc_copy_buffer_to_sglist(buf, len, sg, &nents,
474 &offset, KM_SOFTIRQ0, &crc);
398 buf = fc_frame_payload_get(fp, 0); 475 buf = fc_frame_payload_get(fp, 0);
399 if (len % 4) { 476 if (len % 4)
400 crc = crc32(crc, buf + len, 4 - (len % 4)); 477 crc = crc32(crc, buf + len, 4 - (len % 4));
401 len += 4 - (len % 4);
402 }
403 478
404 if (~crc != le32_to_cpu(fr_crc(fp))) { 479 if (~crc != le32_to_cpu(fr_crc(fp))) {
405crc_err: 480crc_err:
406 stats = fc_lport_get_stats(lp); 481 stats = fc_lport_get_stats(lport);
407 stats->ErrorFrames++; 482 stats->ErrorFrames++;
408 /* FIXME - per cpu count, not total count! */ 483 /* FIXME - per cpu count, not total count! */
409 if (stats->InvalidCRCCount++ < 5) 484 if (stats->InvalidCRCCount++ < 5)
410 printk(KERN_WARNING "libfc: CRC error on data " 485 printk(KERN_WARNING "libfc: CRC error on data "
411 "frame for port (%6x)\n", 486 "frame for port (%6x)\n",
412 fc_host_port_id(lp->host)); 487 fc_host_port_id(lport->host));
413 /* 488 /*
414 * Assume the frame is total garbage. 489 * Assume the frame is total garbage.
415 * We may have copied it over the good part 490 * We may have copied it over the good part
@@ -437,18 +512,17 @@ crc_err:
437} 512}
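The refactor above replaces the open-coded scatterlist walk with fc_copy_buffer_to_sglist(), which copies the payload and, only for FCPHF_CRC_UNCHECKED frames, folds the bytes into a running CRC. The sketch below shows just that copy-with-optional-CRC shape; the checksum function is a toy stand-in, not the kernel's crc32():

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* toy checksum for illustration; the kernel path uses lib/crc32 */
static uint32_t crc_accum(uint32_t crc, const uint8_t *p, size_t n)
{
	while (n--)
		crc = (crc << 1) ^ *p++;
	return crc;
}

/* Copy a received payload into the destination buffer, folding the
 * bytes into a running CRC only when the caller passes one in (the
 * FCPHF_CRC_UNCHECKED case above).  Returns the number of bytes copied. */
static size_t copy_with_optional_crc(uint8_t *dst, const uint8_t *src,
				     size_t len, uint32_t *crc)
{
	if (crc)
		*crc = crc_accum(*crc, src, len);
	memcpy(dst, src, len);
	return len;
}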
438 513
439/** 514/**
440 * fc_fcp_send_data() - Send SCSI data to target. 515 * fc_fcp_send_data() - Send SCSI data to a target
441 * @fsp: ptr to fc_fcp_pkt 516 * @fsp: The FCP packet the data is on
442 * @sp: ptr to this sequence 517 * @sp: The sequence the data is to be sent on
443 * @offset: starting offset for this data request 518 * @offset: The starting offset for this data request
444 * @seq_blen: the burst length for this data request 519 * @seq_blen: The burst length for this data request
445 * 520 *
446 * Called after receiving a Transfer Ready data descriptor. 521 * Called after receiving a Transfer Ready data descriptor.
447 * if LLD is capable of seq offload then send down seq_blen 522 * If the LLD is capable of sequence offload then send down the
448 * size of data in single frame, otherwise send multiple FC 523 * seq_blen amount of data in a single frame, otherwise send
449 * frames of max FC frame payload supported by target port. 524 * multiple frames of the maximum frame payload supported by
450 * 525 * the target port.
451 * Returns : 0 for success.
452 */ 526 */
453static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, 527static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
454 size_t offset, size_t seq_blen) 528 size_t offset, size_t seq_blen)
@@ -457,16 +531,18 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
457 struct scsi_cmnd *sc; 531 struct scsi_cmnd *sc;
458 struct scatterlist *sg; 532 struct scatterlist *sg;
459 struct fc_frame *fp = NULL; 533 struct fc_frame *fp = NULL;
460 struct fc_lport *lp = fsp->lp; 534 struct fc_lport *lport = fsp->lp;
535 struct page *page;
461 size_t remaining; 536 size_t remaining;
462 size_t t_blen; 537 size_t t_blen;
463 size_t tlen; 538 size_t tlen;
464 size_t sg_bytes; 539 size_t sg_bytes;
465 size_t frame_offset, fh_parm_offset; 540 size_t frame_offset, fh_parm_offset;
541 size_t off;
466 int error; 542 int error;
467 void *data = NULL; 543 void *data = NULL;
468 void *page_addr; 544 void *page_addr;
469 int using_sg = lp->sg_supp; 545 int using_sg = lport->sg_supp;
470 u32 f_ctl; 546 u32 f_ctl;
471 547
472 WARN_ON(seq_blen <= 0); 548 WARN_ON(seq_blen <= 0);
@@ -488,10 +564,10 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
488 * to max FC frame payload previously set in fsp->max_payload. 564 * to max FC frame payload previously set in fsp->max_payload.
489 */ 565 */
490 t_blen = fsp->max_payload; 566 t_blen = fsp->max_payload;
491 if (lp->seq_offload) { 567 if (lport->seq_offload) {
492 t_blen = min(seq_blen, (size_t)lp->lso_max); 568 t_blen = min(seq_blen, (size_t)lport->lso_max);
493 FC_FCP_DBG(fsp, "fsp=%p:lso:blen=%zx lso_max=0x%x t_blen=%zx\n", 569 FC_FCP_DBG(fsp, "fsp=%p:lso:blen=%zx lso_max=0x%x t_blen=%zx\n",
494 fsp, seq_blen, lp->lso_max, t_blen); 570 fsp, seq_blen, lport->lso_max, t_blen);
495 } 571 }
496 572
497 WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD); 573 WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD);
@@ -503,7 +579,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
503 remaining = seq_blen; 579 remaining = seq_blen;
504 fh_parm_offset = frame_offset = offset; 580 fh_parm_offset = frame_offset = offset;
505 tlen = 0; 581 tlen = 0;
506 seq = lp->tt.seq_start_next(seq); 582 seq = lport->tt.seq_start_next(seq);
507 f_ctl = FC_FC_REL_OFF; 583 f_ctl = FC_FC_REL_OFF;
508 WARN_ON(!seq); 584 WARN_ON(!seq);
509 585
@@ -525,43 +601,34 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
525 */ 601 */
526 if (tlen % 4) 602 if (tlen % 4)
527 using_sg = 0; 603 using_sg = 0;
528 if (using_sg) { 604 fp = fc_frame_alloc(lport, using_sg ? 0 : tlen);
529 fp = _fc_frame_alloc(lp, 0); 605 if (!fp)
530 if (!fp) 606 return -ENOMEM;
531 return -ENOMEM;
532 } else {
533 fp = fc_frame_alloc(lp, tlen);
534 if (!fp)
535 return -ENOMEM;
536 607
537 data = (void *)(fr_hdr(fp)) + 608 data = fc_frame_header_get(fp) + 1;
538 sizeof(struct fc_frame_header);
539 }
540 fh_parm_offset = frame_offset; 609 fh_parm_offset = frame_offset;
541 fr_max_payload(fp) = fsp->max_payload; 610 fr_max_payload(fp) = fsp->max_payload;
542 } 611 }
612
613 off = offset + sg->offset;
543 sg_bytes = min(tlen, sg->length - offset); 614 sg_bytes = min(tlen, sg->length - offset);
615 sg_bytes = min(sg_bytes,
616 (size_t) (PAGE_SIZE - (off & ~PAGE_MASK)));
617 page = sg_page(sg) + (off >> PAGE_SHIFT);
544 if (using_sg) { 618 if (using_sg) {
545 get_page(sg_page(sg)); 619 get_page(page);
546 skb_fill_page_desc(fp_skb(fp), 620 skb_fill_page_desc(fp_skb(fp),
547 skb_shinfo(fp_skb(fp))->nr_frags, 621 skb_shinfo(fp_skb(fp))->nr_frags,
548 sg_page(sg), sg->offset + offset, 622 page, off & ~PAGE_MASK, sg_bytes);
549 sg_bytes);
550 fp_skb(fp)->data_len += sg_bytes; 623 fp_skb(fp)->data_len += sg_bytes;
551 fr_len(fp) += sg_bytes; 624 fr_len(fp) += sg_bytes;
552 fp_skb(fp)->truesize += PAGE_SIZE; 625 fp_skb(fp)->truesize += PAGE_SIZE;
553 } else { 626 } else {
554 size_t off = offset + sg->offset;
555
556 /* 627 /*
557 * The scatterlist item may be bigger than PAGE_SIZE, 628 * The scatterlist item may be bigger than PAGE_SIZE,
558 * but we must not cross pages inside the kmap. 629 * but we must not cross pages inside the kmap.
559 */ 630 */
560 sg_bytes = min(sg_bytes, (size_t) (PAGE_SIZE - 631 page_addr = kmap_atomic(page, KM_SOFTIRQ0);
561 (off & ~PAGE_MASK)));
562 page_addr = kmap_atomic(sg_page(sg) +
563 (off >> PAGE_SHIFT),
564 KM_SOFTIRQ0);
565 memcpy(data, (char *)page_addr + (off & ~PAGE_MASK), 632 memcpy(data, (char *)page_addr + (off & ~PAGE_MASK),
566 sg_bytes); 633 sg_bytes);
567 kunmap_atomic(page_addr, KM_SOFTIRQ0); 634 kunmap_atomic(page_addr, KM_SOFTIRQ0);
@@ -572,7 +639,8 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
572 tlen -= sg_bytes; 639 tlen -= sg_bytes;
573 remaining -= sg_bytes; 640 remaining -= sg_bytes;
574 641
575 if (tlen) 642 if ((skb_shinfo(fp_skb(fp))->nr_frags < FC_FRAME_SG_LEN) &&
643 (tlen))
576 continue; 644 continue;
577 645
578 /* 646 /*
@@ -589,7 +657,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
589 /* 657 /*
590 * send this fragment as part of the sequence. 658
591 */ 659 */
592 error = lp->tt.seq_send(lp, seq, fp); 660 error = lport->tt.seq_send(lport, seq, fp);
593 if (error) { 661 if (error) {
594 WARN_ON(1); /* send error should be rare */ 662 WARN_ON(1); /* send error should be rare */
595 fc_fcp_retry_cmd(fsp); 663 fc_fcp_retry_cmd(fsp);
@@ -601,6 +669,11 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
601 return 0; 669 return 0;
602} 670}
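The new off/sg_bytes computation clamps every copy to the current page, since kmap_atomic() can map only one page at a time: PAGE_SIZE - (off & ~PAGE_MASK) is the room left in the page containing off. The arithmetic is easy to verify in isolation:

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

/* Clamp a copy so it never crosses a page boundary, matching the
 * kmap-based path above. */
static size_t chunk_len(size_t off, size_t want)
{
	return min_sz(want, PAGE_SIZE - (off & ~PAGE_MASK));
}

int main(void)
{
	/* 100 bytes before a page boundary: a 300-byte copy is split */
	printf("%zu\n", chunk_len(PAGE_SIZE - 100, 300));   /* prints 100 */
	printf("%zu\n", chunk_len(PAGE_SIZE, 300));         /* prints 300 */
	return 0;
}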
603 671
672/**
673 * fc_fcp_abts_resp() - Send an ABTS response
674 * @fsp: The FCP packet that is being aborted
675 * @fp: The response frame
676 */
604static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp) 677static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
605{ 678{
606 int ba_done = 1; 679 int ba_done = 1;
@@ -637,46 +710,13 @@ static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
637} 710}
638 711
639/** 712/**
640 * fc_fcp_reduce_can_queue() - drop can_queue 713 * fc_fcp_recv() - Receive an FCP frame
641 * @lp: lport to drop queueing for
642 *
643 * If we are getting memory allocation failures, then we may
644 * be trying to execute too many commands. We let the running
645 * commands complete or timeout, then try again with a reduced
646 * can_queue. Eventually we will hit the point where we run
647 * on all reserved structs.
648 */
649static void fc_fcp_reduce_can_queue(struct fc_lport *lp)
650{
651 struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
652 unsigned long flags;
653 int can_queue;
654
655 spin_lock_irqsave(lp->host->host_lock, flags);
656 if (si->throttled)
657 goto done;
658 si->throttled = 1;
659
660 can_queue = lp->host->can_queue;
661 can_queue >>= 1;
662 if (!can_queue)
663 can_queue = 1;
664 lp->host->can_queue = can_queue;
665 shost_printk(KERN_ERR, lp->host, "libfc: Could not allocate frame.\n"
666 "Reducing can_queue to %d.\n", can_queue);
667done:
668 spin_unlock_irqrestore(lp->host->host_lock, flags);
669}
670
671/**
672 * fc_fcp_recv() - Receive FCP frames
673 * @seq: The sequence the frame is on 714 * @seq: The sequence the frame is on
674 * @fp: The FC frame 715 * @fp: The received frame
675 * @arg: The related FCP packet 716 * @arg: The related FCP packet
676 * 717 *
677 * Return : None 718 * Context: Called from Soft IRQ context. Cannot be called
678 * Context : called from Soft IRQ context 719 * while holding the FCP packet list lock.
679 * can not called holding list lock
680 */ 720 */
681static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg) 721static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
682{ 722{
@@ -687,8 +727,10 @@ static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
687 u8 r_ctl; 727 u8 r_ctl;
688 int rc = 0; 728 int rc = 0;
689 729
690 if (IS_ERR(fp)) 730 if (IS_ERR(fp)) {
691 goto errout; 731 fc_fcp_error(fsp, fp);
732 return;
733 }
692 734
693 fh = fc_frame_header_get(fp); 735 fh = fc_frame_header_get(fp);
694 r_ctl = fh->fh_r_ctl; 736 r_ctl = fh->fh_r_ctl;
@@ -721,8 +763,6 @@ static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
721 (size_t) ntohl(dd->ft_burst_len)); 763 (size_t) ntohl(dd->ft_burst_len));
722 if (!rc) 764 if (!rc)
723 seq->rec_data = fsp->xfer_len; 765 seq->rec_data = fsp->xfer_len;
724 else if (rc == -ENOMEM)
725 fsp->state |= FC_SRB_NOMEM;
726 } else if (r_ctl == FC_RCTL_DD_SOL_DATA) { 766 } else if (r_ctl == FC_RCTL_DD_SOL_DATA) {
727 /* 767 /*
728 * received a DATA frame 768 * received a DATA frame
@@ -742,13 +782,13 @@ unlock:
742 fc_fcp_unlock_pkt(fsp); 782 fc_fcp_unlock_pkt(fsp);
743out: 783out:
744 fc_frame_free(fp); 784 fc_frame_free(fp);
745errout:
746 if (IS_ERR(fp))
747 fc_fcp_error(fsp, fp);
748 else if (rc == -ENOMEM)
749 fc_fcp_reduce_can_queue(lport);
750} 785}
751 786
787/**
788 * fc_fcp_resp() - Handler for FCP responses
789 * @fsp: The FCP packet the response is for
790 * @fp: The response frame
791 */
752static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp) 792static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
753{ 793{
754 struct fc_frame_header *fh; 794 struct fc_frame_header *fh;
@@ -862,15 +902,16 @@ err:
862} 902}
863 903
864/** 904/**
865 * fc_fcp_complete_locked() - complete processing of a fcp packet 905 * fc_fcp_complete_locked() - Complete processing of a fcp_pkt with the
866 * @fsp: fcp packet 906 * fcp_pkt lock held
907 * @fsp: The FCP packet to be completed
867 * 908 *
868 * This function may sleep if a timer is pending. The packet lock must be 909 * This function may sleep if a timer is pending. The packet lock must be
869 * held, and the host lock must not be held. 910 * held, and the host lock must not be held.
870 */ 911 */
871static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp) 912static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
872{ 913{
873 struct fc_lport *lp = fsp->lp; 914 struct fc_lport *lport = fsp->lp;
874 struct fc_seq *seq; 915 struct fc_seq *seq;
875 struct fc_exch *ep; 916 struct fc_exch *ep;
876 u32 f_ctl; 917 u32 f_ctl;
@@ -901,8 +942,8 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
901 struct fc_frame *conf_frame; 942 struct fc_frame *conf_frame;
902 struct fc_seq *csp; 943 struct fc_seq *csp;
903 944
904 csp = lp->tt.seq_start_next(seq); 945 csp = lport->tt.seq_start_next(seq);
905 conf_frame = fc_frame_alloc(fsp->lp, 0); 946 conf_frame = fc_fcp_frame_alloc(fsp->lp, 0);
906 if (conf_frame) { 947 if (conf_frame) {
907 f_ctl = FC_FC_SEQ_INIT; 948 f_ctl = FC_FC_SEQ_INIT;
908 f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ; 949 f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
@@ -910,43 +951,48 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
910 fc_fill_fc_hdr(conf_frame, FC_RCTL_DD_SOL_CTL, 951 fc_fill_fc_hdr(conf_frame, FC_RCTL_DD_SOL_CTL,
911 ep->did, ep->sid, 952 ep->did, ep->sid,
912 FC_TYPE_FCP, f_ctl, 0); 953 FC_TYPE_FCP, f_ctl, 0);
913 lp->tt.seq_send(lp, csp, conf_frame); 954 lport->tt.seq_send(lport, csp, conf_frame);
914 } 955 }
915 } 956 }
916 lp->tt.exch_done(seq); 957 lport->tt.exch_done(seq);
917 } 958 }
918 fc_io_compl(fsp); 959 fc_io_compl(fsp);
919} 960}
920 961
962/**
963 * fc_fcp_cleanup_cmd() - Cancel the active exchange on a fcp_pkt
964 * @fsp: The FCP packet whose exchanges should be canceled
965 * @error: The reason for the cancellation
966 */
921static void fc_fcp_cleanup_cmd(struct fc_fcp_pkt *fsp, int error) 967static void fc_fcp_cleanup_cmd(struct fc_fcp_pkt *fsp, int error)
922{ 968{
923 struct fc_lport *lp = fsp->lp; 969 struct fc_lport *lport = fsp->lp;
924 970
925 if (fsp->seq_ptr) { 971 if (fsp->seq_ptr) {
926 lp->tt.exch_done(fsp->seq_ptr); 972 lport->tt.exch_done(fsp->seq_ptr);
927 fsp->seq_ptr = NULL; 973 fsp->seq_ptr = NULL;
928 } 974 }
929 fsp->status_code = error; 975 fsp->status_code = error;
930} 976}
931 977
932/** 978/**
933 * fc_fcp_cleanup_each_cmd() - Cleanup active commads 979 * fc_fcp_cleanup_each_cmd() - Cancel all exchanges on a local port
934 * @lp: logical port 980 * @lport: The local port whose exchanges should be canceled
935 * @id: target id 981 * @id: The target's ID
936 * @lun: lun 982 * @lun: The LUN
937 * @error: fsp status code 983 * @error: The reason for cancellation
938 * 984 *
939 * If lun or id is -1, they are ignored. 985 * If lun or id is -1, they are ignored.
940 */ 986 */
941static void fc_fcp_cleanup_each_cmd(struct fc_lport *lp, unsigned int id, 987static void fc_fcp_cleanup_each_cmd(struct fc_lport *lport, unsigned int id,
942 unsigned int lun, int error) 988 unsigned int lun, int error)
943{ 989{
944 struct fc_fcp_internal *si = fc_get_scsi_internal(lp); 990 struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
945 struct fc_fcp_pkt *fsp; 991 struct fc_fcp_pkt *fsp;
946 struct scsi_cmnd *sc_cmd; 992 struct scsi_cmnd *sc_cmd;
947 unsigned long flags; 993 unsigned long flags;
948 994
949 spin_lock_irqsave(lp->host->host_lock, flags); 995 spin_lock_irqsave(&si->scsi_queue_lock, flags);
950restart: 996restart:
951 list_for_each_entry(fsp, &si->scsi_pkt_queue, list) { 997 list_for_each_entry(fsp, &si->scsi_pkt_queue, list) {
952 sc_cmd = fsp->cmd; 998 sc_cmd = fsp->cmd;
@@ -957,7 +1003,7 @@ restart:
957 continue; 1003 continue;
958 1004
959 fc_fcp_pkt_hold(fsp); 1005 fc_fcp_pkt_hold(fsp);
960 spin_unlock_irqrestore(lp->host->host_lock, flags); 1006 spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
961 1007
962 if (!fc_fcp_lock_pkt(fsp)) { 1008 if (!fc_fcp_lock_pkt(fsp)) {
963 fc_fcp_cleanup_cmd(fsp, error); 1009 fc_fcp_cleanup_cmd(fsp, error);
@@ -966,35 +1012,37 @@ restart:
966 } 1012 }
967 1013
968 fc_fcp_pkt_release(fsp); 1014 fc_fcp_pkt_release(fsp);
969 spin_lock_irqsave(lp->host->host_lock, flags); 1015 spin_lock_irqsave(&si->scsi_queue_lock, flags);
970 /* 1016 /*
971 * while we dropped the lock multiple pkts could 1017 * while we dropped the lock multiple pkts could
972 * have been released, so we have to start over. 1018 * have been released, so we have to start over.
973 */ 1019 */
974 goto restart; 1020 goto restart;
975 } 1021 }
976 spin_unlock_irqrestore(lp->host->host_lock, flags); 1022 spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
977} 1023}
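fc_fcp_cleanup_each_cmd() uses the classic drop-lock-and-restart walk: each matching packet is held, the queue lock is released around the per-packet work, and the scan restarts from the head because the list may have changed in the meantime. A compilable sketch of that shape, with a pthread mutex standing in for the new scsi_queue_lock and the per-entry reference elided:

#include <pthread.h>
#include <stddef.h>

struct qnode {
	struct qnode *next;
	int needs_cleanup;
};

struct queue {
	pthread_mutex_t lock;   /* scsi_queue_lock stand-in */
	struct qnode *head;
};

static void cleanup_one(struct qnode *n)
{
	n->needs_cleanup = 0;   /* may sleep or take other locks */
}

/* The per-entry work must run without the list lock, and entries can
 * be removed while it is dropped, so the scan restarts from the head
 * after every unlocked section -- the same goto-restart shape as
 * fc_fcp_cleanup_each_cmd() above. */
static void cleanup_each(struct queue *q)
{
	struct qnode *n;

	pthread_mutex_lock(&q->lock);
restart:
	for (n = q->head; n; n = n->next) {
		if (!n->needs_cleanup)
			continue;
		pthread_mutex_unlock(&q->lock);
		cleanup_one(n);
		pthread_mutex_lock(&q->lock);
		goto restart;
	}
	pthread_mutex_unlock(&q->lock);
}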
978 1024
979static void fc_fcp_abort_io(struct fc_lport *lp) 1025/**
1026 * fc_fcp_abort_io() - Abort all FCP-SCSI exchanges on a local port
1027 * @lport: The local port whose exchanges are to be aborted
1028 */
1029static void fc_fcp_abort_io(struct fc_lport *lport)
980{ 1030{
981 fc_fcp_cleanup_each_cmd(lp, -1, -1, FC_HRD_ERROR); 1031 fc_fcp_cleanup_each_cmd(lport, -1, -1, FC_HRD_ERROR);
982} 1032}
983 1033
984/** 1034/**
985 * fc_fcp_pkt_send() - send a fcp packet to the lower level. 1035 * fc_fcp_pkt_send() - Send a fcp_pkt
986 * @lp: fc lport 1036 * @lport: The local port to send the FCP packet on
987 * @fsp: fc packet. 1037 * @fsp: The FCP packet to send
988 * 1038 *
989 * This is called by upper layer protocol. 1039 * Return: Zero for success and -1 for failure
990 * Return : zero for success and -1 for failure 1040 * Locks: Called without locks held
991 * Context : called from queuecommand which can be called from process
992 * or scsi soft irq.
993 * Locks : called with the host lock and irqs disabled.
994 */ 1041 */
995static int fc_fcp_pkt_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp) 1042static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
996{ 1043{
997 struct fc_fcp_internal *si = fc_get_scsi_internal(lp); 1044 struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
1045 unsigned long flags;
998 int rc; 1046 int rc;
999 1047
1000 fsp->cmd->SCp.ptr = (char *)fsp; 1048 fsp->cmd->SCp.ptr = (char *)fsp;
@@ -1004,18 +1052,27 @@ static int fc_fcp_pkt_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp)
1004 int_to_scsilun(fsp->cmd->device->lun, 1052 int_to_scsilun(fsp->cmd->device->lun,
1005 (struct scsi_lun *)fsp->cdb_cmd.fc_lun); 1053 (struct scsi_lun *)fsp->cdb_cmd.fc_lun);
1006 memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len); 1054 memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len);
1007 list_add_tail(&fsp->list, &si->scsi_pkt_queue);
1008 1055
1009 spin_unlock_irq(lp->host->host_lock); 1056 spin_lock_irqsave(&si->scsi_queue_lock, flags);
1010 rc = lp->tt.fcp_cmd_send(lp, fsp, fc_fcp_recv); 1057 list_add_tail(&fsp->list, &si->scsi_pkt_queue);
1011 spin_lock_irq(lp->host->host_lock); 1058 spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
1012 if (rc) 1059 rc = lport->tt.fcp_cmd_send(lport, fsp, fc_fcp_recv);
1060 if (unlikely(rc)) {
1061 spin_lock_irqsave(&si->scsi_queue_lock, flags);
1013 list_del(&fsp->list); 1062 list_del(&fsp->list);
1063 spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
1064 }
1014 1065
1015 return rc; 1066 return rc;
1016} 1067}
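With this change fc_fcp_pkt_send() publishes the packet on the queue under the new scsi_queue_lock, sends without any lock held, and unlinks the packet again if the send fails. A stand-alone sketch of that enqueue/send/rollback ordering, with stub types and a pthread mutex as the lock:

#include <pthread.h>
#include <stddef.h>

struct snode {
	struct snode *next;
};

struct sendq {
	pthread_mutex_t lock;
	struct snode *head;
};

static int do_send(struct snode *n)
{
	(void)n;
	return 0;               /* fcp_cmd_send() stand-in */
}

/* Publish the packet on the queue first, send without the lock, and
 * unlink it again if the send fails -- the ordering used by
 * fc_fcp_pkt_send() above. */
static int pkt_send(struct sendq *q, struct snode *n)
{
	struct snode **pp;
	int rc;

	pthread_mutex_lock(&q->lock);
	n->next = q->head;
	q->head = n;
	pthread_mutex_unlock(&q->lock);

	rc = do_send(n);
	if (rc) {
		pthread_mutex_lock(&q->lock);
		for (pp = &q->head; *pp; pp = &(*pp)->next)
			if (*pp == n) {
				*pp = n->next;   /* roll back the enqueue */
				break;
			}
		pthread_mutex_unlock(&q->lock);
	}
	return rc;
}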
1017 1068
1018static int fc_fcp_cmd_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp, 1069/**
1070 * fc_fcp_cmd_send() - Send a FCP command
1071 * @lport: The local port to send the command on
1072 * @fsp: The FCP packet the command is on
1073 * @resp: The handler for the response
1074 */
1075static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
1019 void (*resp)(struct fc_seq *, 1076 void (*resp)(struct fc_seq *,
1020 struct fc_frame *fp, 1077 struct fc_frame *fp,
1021 void *arg)) 1078 void *arg))
@@ -1023,14 +1080,14 @@ static int fc_fcp_cmd_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
1023 struct fc_frame *fp; 1080 struct fc_frame *fp;
1024 struct fc_seq *seq; 1081 struct fc_seq *seq;
1025 struct fc_rport *rport; 1082 struct fc_rport *rport;
1026 struct fc_rport_libfc_priv *rp; 1083 struct fc_rport_libfc_priv *rpriv;
1027 const size_t len = sizeof(fsp->cdb_cmd); 1084 const size_t len = sizeof(fsp->cdb_cmd);
1028 int rc = 0; 1085 int rc = 0;
1029 1086
1030 if (fc_fcp_lock_pkt(fsp)) 1087 if (fc_fcp_lock_pkt(fsp))
1031 return 0; 1088 return 0;
1032 1089
1033 fp = fc_frame_alloc(lp, sizeof(fsp->cdb_cmd)); 1090 fp = fc_fcp_frame_alloc(lport, sizeof(fsp->cdb_cmd));
1034 if (!fp) { 1091 if (!fp) {
1035 rc = -1; 1092 rc = -1;
1036 goto unlock; 1093 goto unlock;
@@ -1040,15 +1097,15 @@ static int fc_fcp_cmd_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
1040 fr_fsp(fp) = fsp; 1097 fr_fsp(fp) = fsp;
1041 rport = fsp->rport; 1098 rport = fsp->rport;
1042 fsp->max_payload = rport->maxframe_size; 1099 fsp->max_payload = rport->maxframe_size;
1043 rp = rport->dd_data; 1100 rpriv = rport->dd_data;
1044 1101
1045 fc_fill_fc_hdr(fp, FC_RCTL_DD_UNSOL_CMD, rport->port_id, 1102 fc_fill_fc_hdr(fp, FC_RCTL_DD_UNSOL_CMD, rport->port_id,
1046 fc_host_port_id(rp->local_port->host), FC_TYPE_FCP, 1103 fc_host_port_id(rpriv->local_port->host), FC_TYPE_FCP,
1047 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); 1104 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
1048 1105
1049 seq = lp->tt.exch_seq_send(lp, fp, resp, fc_fcp_pkt_destroy, fsp, 0); 1106 seq = lport->tt.exch_seq_send(lport, fp, resp, fc_fcp_pkt_destroy,
1107 fsp, 0);
1050 if (!seq) { 1108 if (!seq) {
1051 fc_frame_free(fp);
1052 rc = -1; 1109 rc = -1;
1053 goto unlock; 1110 goto unlock;
1054 } 1111 }
@@ -1065,8 +1122,10 @@ unlock:
1065 return rc; 1122 return rc;
1066} 1123}
1067 1124
1068/* 1125/**
1069 * transport error handler 1126 * fc_fcp_error() - Handler for FCP layer errors
1127 * @fsp: The FCP packet the error is on
1128 * @fp: The frame that has errored
1070 */ 1129 */
1071static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) 1130static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
1072{ 1131{
@@ -1091,11 +1150,13 @@ unlock:
1091 fc_fcp_unlock_pkt(fsp); 1150 fc_fcp_unlock_pkt(fsp);
1092} 1151}
1093 1152
1094/* 1153/**
1095 * Scsi abort handler- calls to send an abort 1154 * fc_fcp_pkt_abort() - Abort a fcp_pkt
1096 * and then wait for abort completion 1155 * @fsp: The FCP packet to abort on
1156 *
1157 * Called to send an abort and then wait for abort completion
1097 */ 1158 */
1098static int fc_fcp_pkt_abort(struct fc_lport *lp, struct fc_fcp_pkt *fsp) 1159static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp)
1099{ 1160{
1100 int rc = FAILED; 1161 int rc = FAILED;
1101 1162
@@ -1122,14 +1183,15 @@ static int fc_fcp_pkt_abort(struct fc_lport *lp, struct fc_fcp_pkt *fsp)
1122 return rc; 1183 return rc;
1123} 1184}
1124 1185
1125/* 1186/**
1126 * Retry LUN reset after resource allocation failed. 1187 * fc_lun_reset_send() - Send LUN reset command
1188 * @data: The FCP packet that identifies the LUN to be reset
1127 */ 1189 */
1128static void fc_lun_reset_send(unsigned long data) 1190static void fc_lun_reset_send(unsigned long data)
1129{ 1191{
1130 struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data; 1192 struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
1131 struct fc_lport *lp = fsp->lp; 1193 struct fc_lport *lport = fsp->lp;
1132 if (lp->tt.fcp_cmd_send(lp, fsp, fc_tm_done)) { 1194 if (lport->tt.fcp_cmd_send(lport, fsp, fc_tm_done)) {
1133 if (fsp->recov_retry++ >= FC_MAX_RECOV_RETRY) 1195 if (fsp->recov_retry++ >= FC_MAX_RECOV_RETRY)
1134 return; 1196 return;
1135 if (fc_fcp_lock_pkt(fsp)) 1197 if (fc_fcp_lock_pkt(fsp))
@@ -1140,11 +1202,15 @@ static void fc_lun_reset_send(unsigned long data)
1140 } 1202 }
1141} 1203}
1142 1204
1143/* 1205/**
1144 * Scsi device reset handler- send a LUN RESET to the device 1206 * fc_lun_reset() - Send a LUN RESET command to a device
1145 * and wait for reset reply 1207 * and wait for the reply
1208 * @lport: The local port to send the command on
1209 * @fsp: The FCP packet that identifies the LUN to be reset
1210 * @id: The target's ID
1211 * @lun: The LUN ID to be reset
1146 */ 1212 */
1147static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp, 1213static int fc_lun_reset(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
1148 unsigned int id, unsigned int lun) 1214 unsigned int id, unsigned int lun)
1149{ 1215{
1150 int rc; 1216 int rc;
@@ -1172,14 +1238,14 @@ static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
1172 1238
1173 spin_lock_bh(&fsp->scsi_pkt_lock); 1239 spin_lock_bh(&fsp->scsi_pkt_lock);
1174 if (fsp->seq_ptr) { 1240 if (fsp->seq_ptr) {
1175 lp->tt.exch_done(fsp->seq_ptr); 1241 lport->tt.exch_done(fsp->seq_ptr);
1176 fsp->seq_ptr = NULL; 1242 fsp->seq_ptr = NULL;
1177 } 1243 }
1178 fsp->wait_for_comp = 0; 1244 fsp->wait_for_comp = 0;
1179 spin_unlock_bh(&fsp->scsi_pkt_lock); 1245 spin_unlock_bh(&fsp->scsi_pkt_lock);
1180 1246
1181 if (!rc) { 1247 if (!rc) {
1182 FC_SCSI_DBG(lp, "lun reset failed\n"); 1248 FC_SCSI_DBG(lport, "lun reset failed\n");
1183 return FAILED; 1249 return FAILED;
1184 } 1250 }
1185 1251
@@ -1187,13 +1253,16 @@ static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
1187 if (fsp->cdb_status != FCP_TMF_CMPL) 1253 if (fsp->cdb_status != FCP_TMF_CMPL)
1188 return FAILED; 1254 return FAILED;
1189 1255
1190 FC_SCSI_DBG(lp, "lun reset to lun %u completed\n", lun); 1256 FC_SCSI_DBG(lport, "lun reset to lun %u completed\n", lun);
1191 fc_fcp_cleanup_each_cmd(lp, id, lun, FC_CMD_ABORTED); 1257 fc_fcp_cleanup_each_cmd(lport, id, lun, FC_CMD_ABORTED);
1192 return SUCCESS; 1258 return SUCCESS;
1193} 1259}
1194 1260
1195/* 1261/**
1196 * Task Management response handler 1262 * fc_tm_done() - Task Management response handler
1263 * @seq: The sequence that the response is on
1264 * @fp: The response frame
1265 * @arg: The FCP packet the response is for
1197 */ 1266 */
1198static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg) 1267static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg)
1199{ 1268{
@@ -1230,34 +1299,31 @@ static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg)
1230 fc_fcp_unlock_pkt(fsp); 1299 fc_fcp_unlock_pkt(fsp);
1231} 1300}
1232 1301
1233static void fc_fcp_cleanup(struct fc_lport *lp) 1302/**
1303 * fc_fcp_cleanup() - Cleanup all FCP exchanges on a local port
1304 * @lport: The local port to be cleaned up
1305 */
1306static void fc_fcp_cleanup(struct fc_lport *lport)
1234{ 1307{
1235 fc_fcp_cleanup_each_cmd(lp, -1, -1, FC_ERROR); 1308 fc_fcp_cleanup_each_cmd(lport, -1, -1, FC_ERROR);
1236} 1309}
1237 1310
1238/* 1311/**
1239 * fc_fcp_timeout: called by OS timer function. 1312 * fc_fcp_timeout() - Handler for fcp_pkt timeouts
1240 * 1313 * @data: The FCP packet that has timed out
1241 * The timer has been inactivated and must be reactivated if desired
1242 * using fc_fcp_timer_set().
1243 *
1244 * Algorithm:
1245 *
1246 * If REC is supported, just issue it, and return. The REC exchange will
1247 * complete or time out, and recovery can continue at that point.
1248 *
1249 * Otherwise, if the response has been received without all the data,
1250 * it has been ER_TIMEOUT since the response was received.
1251 * 1314 *
1252 * If the response has not been received, 1315 * If REC is supported then just issue it and return. The REC exchange will
1253 * we see if data was received recently. If it has been, we continue waiting, 1316 * complete or time out and recovery can continue at that point. Otherwise,
1254 * otherwise, we abort the command. 1317 * if the response has been received without all the data it has been
1318 * ER_TIMEOUT since the response was received. If the response has not been
1319 * received we see if data was received recently. If it has been then we
1320 * continue waiting, otherwise, we abort the command.
1255 */ 1321 */
1256static void fc_fcp_timeout(unsigned long data) 1322static void fc_fcp_timeout(unsigned long data)
1257{ 1323{
1258 struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data; 1324 struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
1259 struct fc_rport *rport = fsp->rport; 1325 struct fc_rport *rport = fsp->rport;
1260 struct fc_rport_libfc_priv *rp = rport->dd_data; 1326 struct fc_rport_libfc_priv *rpriv = rport->dd_data;
1261 1327
1262 if (fc_fcp_lock_pkt(fsp)) 1328 if (fc_fcp_lock_pkt(fsp))
1263 return; 1329 return;
@@ -1267,7 +1333,7 @@ static void fc_fcp_timeout(unsigned long data)
1267 1333
1268 fsp->state |= FC_SRB_FCP_PROCESSING_TMO; 1334 fsp->state |= FC_SRB_FCP_PROCESSING_TMO;
1269 1335
1270 if (rp->flags & FC_RP_FLAGS_REC_SUPPORTED) 1336 if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
1271 fc_fcp_rec(fsp); 1337 fc_fcp_rec(fsp);
1272 else if (time_after_eq(fsp->last_pkt_time + (FC_SCSI_ER_TIMEOUT / 2), 1338 else if (time_after_eq(fsp->last_pkt_time + (FC_SCSI_ER_TIMEOUT / 2),
1273 jiffies)) 1339 jiffies))
@@ -1281,39 +1347,40 @@ unlock:
1281 fc_fcp_unlock_pkt(fsp); 1347 fc_fcp_unlock_pkt(fsp);
1282} 1348}
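The decision tree in fc_fcp_timeout() reduces to three outcomes. The sketch below captures just that decision logic, with time() standing in for jiffies and all names illustrative; the real handler also locks the packet, sets FC_SRB_FCP_PROCESSING_TMO, and re-arms the timer when it keeps waiting:

#include <time.h>

#define ER_TIMEOUT 10           /* seconds; FC_SCSI_ER_TIMEOUT stand-in */

enum tmo_action { SEND_REC, KEEP_WAITING, SEND_ABORT };

/* Decision logic only, mirroring the REC-supported / data-seen-recently /
 * abort branches in fc_fcp_timeout() above. */
static enum tmo_action on_timeout(int rec_supported, time_t last_pkt_time)
{
	if (rec_supported)
		return SEND_REC;        /* let the REC exchange recover */
	if (time(NULL) <= last_pkt_time + ER_TIMEOUT / 2)
		return KEEP_WAITING;    /* data arrived recently */
	return SEND_ABORT;
}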
1283 1349
1284/* 1350/**
1285 * Send a REC ELS request 1351 * fc_fcp_rec() - Send a REC ELS request
1352 * @fsp: The FCP packet to send the REC request on
1286 */ 1353 */
1287static void fc_fcp_rec(struct fc_fcp_pkt *fsp) 1354static void fc_fcp_rec(struct fc_fcp_pkt *fsp)
1288{ 1355{
1289 struct fc_lport *lp; 1356 struct fc_lport *lport;
1290 struct fc_frame *fp; 1357 struct fc_frame *fp;
1291 struct fc_rport *rport; 1358 struct fc_rport *rport;
1292 struct fc_rport_libfc_priv *rp; 1359 struct fc_rport_libfc_priv *rpriv;
1293 1360
1294 lp = fsp->lp; 1361 lport = fsp->lp;
1295 rport = fsp->rport; 1362 rport = fsp->rport;
1296 rp = rport->dd_data; 1363 rpriv = rport->dd_data;
1297 if (!fsp->seq_ptr || rp->rp_state != RPORT_ST_READY) { 1364 if (!fsp->seq_ptr || rpriv->rp_state != RPORT_ST_READY) {
1298 fsp->status_code = FC_HRD_ERROR; 1365 fsp->status_code = FC_HRD_ERROR;
1299 fsp->io_status = 0; 1366 fsp->io_status = 0;
1300 fc_fcp_complete_locked(fsp); 1367 fc_fcp_complete_locked(fsp);
1301 return; 1368 return;
1302 } 1369 }
1303 fp = fc_frame_alloc(lp, sizeof(struct fc_els_rec)); 1370 fp = fc_fcp_frame_alloc(lport, sizeof(struct fc_els_rec));
1304 if (!fp) 1371 if (!fp)
1305 goto retry; 1372 goto retry;
1306 1373
1307 fr_seq(fp) = fsp->seq_ptr; 1374 fr_seq(fp) = fsp->seq_ptr;
1308 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rport->port_id, 1375 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rport->port_id,
1309 fc_host_port_id(rp->local_port->host), FC_TYPE_ELS, 1376 fc_host_port_id(rpriv->local_port->host), FC_TYPE_ELS,
1310 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); 1377 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
1311 if (lp->tt.elsct_send(lp, rport->port_id, fp, ELS_REC, fc_fcp_rec_resp, 1378 if (lport->tt.elsct_send(lport, rport->port_id, fp, ELS_REC,
1312 fsp, jiffies_to_msecs(FC_SCSI_REC_TOV))) { 1379 fc_fcp_rec_resp, fsp,
1380 jiffies_to_msecs(FC_SCSI_REC_TOV))) {
1313 fc_fcp_pkt_hold(fsp); /* hold while REC outstanding */ 1381 fc_fcp_pkt_hold(fsp); /* hold while REC outstanding */
1314 return; 1382 return;
1315 } 1383 }
1316 fc_frame_free(fp);
1317retry: 1384retry:
1318 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY) 1385 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
1319 fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV); 1386 fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
@@ -1321,12 +1388,16 @@ retry:
1321 fc_timeout_error(fsp); 1388 fc_timeout_error(fsp);
1322} 1389}
1323 1390
1324/* 1391/**
1325 * Receive handler for REC ELS frame 1392 * fc_fcp_rec_resp() - Handler for REC ELS responses
1326 * if it is a reject then let the scsi layer to handle 1393 * @seq: The sequence the response is on
1327 * the timeout. if it is a LS_ACC then if the io was not completed 1394 * @fp: The response frame
1328 * then set the timeout and return otherwise complete the exchange 1395 * @arg: The FCP packet the response is on
1329 * and tell the scsi layer to restart the I/O. 1396 *
1397 * If the response is a reject then the SCSI layer will handle
1398 * the timeout. If the response is an LS_ACC and the I/O was not completed,
1399 * set the timeout and return. If the I/O was completed then complete the
1400 * exchange and tell the SCSI layer.
1330 */ 1401 */
1331static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) 1402static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
1332{ 1403{
@@ -1338,7 +1409,7 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
1338 u32 offset; 1409 u32 offset;
1339 enum dma_data_direction data_dir; 1410 enum dma_data_direction data_dir;
1340 enum fc_rctl r_ctl; 1411 enum fc_rctl r_ctl;
1341 struct fc_rport_libfc_priv *rp; 1412 struct fc_rport_libfc_priv *rpriv;
1342 1413
1343 if (IS_ERR(fp)) { 1414 if (IS_ERR(fp)) {
1344 fc_fcp_rec_error(fsp, fp); 1415 fc_fcp_rec_error(fsp, fp);
@@ -1361,13 +1432,13 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
1361 /* fall through */ 1432 /* fall through */
1362 case ELS_RJT_UNSUP: 1433 case ELS_RJT_UNSUP:
1363 FC_FCP_DBG(fsp, "device does not support REC\n"); 1434 FC_FCP_DBG(fsp, "device does not support REC\n");
1364 rp = fsp->rport->dd_data; 1435 rpriv = fsp->rport->dd_data;
1365 /* 1436 /*
1366 * if we do not support RECs or got some bogus 1437
1367 * reason then re-set up the timer so we check for 1438
1368 * making progress. 1439 * making progress.
1369 */ 1440 */
1370 rp->flags &= ~FC_RP_FLAGS_REC_SUPPORTED; 1441 rpriv->flags &= ~FC_RP_FLAGS_REC_SUPPORTED;
1371 fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT); 1442 fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT);
1372 break; 1443 break;
1373 case ELS_RJT_LOGIC: 1444 case ELS_RJT_LOGIC:
@@ -1464,8 +1535,10 @@ out:
1464 fc_frame_free(fp); 1535 fc_frame_free(fp);
1465} 1536}
1466 1537
1467/* 1538/**
1468 * Handle error response or timeout for REC exchange. 1539 * fc_fcp_rec_error() - Handler for REC errors
1540 * @fsp: The FCP packet the error is on
1541 * @fp: The REC frame
1469 */ 1542 */
1470static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) 1543static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
1471{ 1544{
@@ -1504,10 +1577,9 @@ out:
1504 fc_fcp_pkt_release(fsp); /* drop hold for outstanding REC */ 1577 fc_fcp_pkt_release(fsp); /* drop hold for outstanding REC */
1505} 1578}
1506 1579
1507/* 1580/**
1508 * Time out error routine: 1581 * fc_timeout_error() - Handler for fcp_pkt timeouts
1509 * abort's the I/O close the exchange and 1582 * @fsp: The FCP packet that has timed out
1510 * send completion notification to scsi layer
1511 */ 1583 */
1512static void fc_timeout_error(struct fc_fcp_pkt *fsp) 1584static void fc_timeout_error(struct fc_fcp_pkt *fsp)
1513{ 1585{
@@ -1521,16 +1593,18 @@ static void fc_timeout_error(struct fc_fcp_pkt *fsp)
1521 fc_fcp_send_abort(fsp); 1593 fc_fcp_send_abort(fsp);
1522} 1594}
1523 1595
1524/* 1596/**
1525 * Sequence retransmission request. 1597 * fc_fcp_srr() - Send a SRR request (Sequence Retransmission Request)
1598 * @fsp: The FCP packet the SRR is to be sent on
1599 * @r_ctl: The R_CTL field for the SRR request
1526 * This is called after receiving status but insufficient data, or 1600 * This is called after receiving status but insufficient data, or
1527 * when expecting status but the request has timed out. 1601 * when expecting status but the request has timed out.
1528 */ 1602 */
1529static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset) 1603static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
1530{ 1604{
1531 struct fc_lport *lp = fsp->lp; 1605 struct fc_lport *lport = fsp->lp;
1532 struct fc_rport *rport; 1606 struct fc_rport *rport;
1533 struct fc_rport_libfc_priv *rp; 1607 struct fc_rport_libfc_priv *rpriv;
1534 struct fc_exch *ep = fc_seq_exch(fsp->seq_ptr); 1608 struct fc_exch *ep = fc_seq_exch(fsp->seq_ptr);
1535 struct fc_seq *seq; 1609 struct fc_seq *seq;
1536 struct fcp_srr *srr; 1610 struct fcp_srr *srr;
@@ -1538,12 +1612,13 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
1538 u8 cdb_op; 1612 u8 cdb_op;
1539 1613
1540 rport = fsp->rport; 1614 rport = fsp->rport;
1541 rp = rport->dd_data; 1615 rpriv = rport->dd_data;
1542 cdb_op = fsp->cdb_cmd.fc_cdb[0]; 1616 cdb_op = fsp->cdb_cmd.fc_cdb[0];
1543 1617
1544 if (!(rp->flags & FC_RP_FLAGS_RETRY) || rp->rp_state != RPORT_ST_READY) 1618 if (!(rpriv->flags & FC_RP_FLAGS_RETRY) ||
1619 rpriv->rp_state != RPORT_ST_READY)
1545 goto retry; /* shouldn't happen */ 1620 goto retry; /* shouldn't happen */
1546 fp = fc_frame_alloc(lp, sizeof(*srr)); 1621 fp = fc_fcp_frame_alloc(lport, sizeof(*srr));
1547 if (!fp) 1622 if (!fp)
1548 goto retry; 1623 goto retry;
1549 1624
@@ -1556,15 +1631,14 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
1556 srr->srr_rel_off = htonl(offset); 1631 srr->srr_rel_off = htonl(offset);
1557 1632
1558 fc_fill_fc_hdr(fp, FC_RCTL_ELS4_REQ, rport->port_id, 1633 fc_fill_fc_hdr(fp, FC_RCTL_ELS4_REQ, rport->port_id,
1559 fc_host_port_id(rp->local_port->host), FC_TYPE_FCP, 1634 fc_host_port_id(rpriv->local_port->host), FC_TYPE_FCP,
1560 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); 1635 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
1561 1636
1562 seq = lp->tt.exch_seq_send(lp, fp, fc_fcp_srr_resp, NULL, 1637 seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp, NULL,
1563 fsp, jiffies_to_msecs(FC_SCSI_REC_TOV)); 1638 fsp, jiffies_to_msecs(FC_SCSI_REC_TOV));
1564 if (!seq) { 1639 if (!seq)
1565 fc_frame_free(fp);
1566 goto retry; 1640 goto retry;
1567 } 1641
1568 fsp->recov_seq = seq; 1642 fsp->recov_seq = seq;
1569 fsp->xfer_len = offset; 1643 fsp->xfer_len = offset;
1570 fsp->xfer_contig_end = offset; 1644 fsp->xfer_contig_end = offset;
@@ -1575,8 +1649,11 @@ retry:
1575 fc_fcp_retry_cmd(fsp); 1649 fc_fcp_retry_cmd(fsp);
1576} 1650}
1577 1651
1578/* 1652/**
1579 * Handle response from SRR. 1653 * fc_fcp_srr_resp() - Handler for SRR response
1654 * @seq: The sequence the SRR is on
1655 * @fp: The SRR frame
1656 * @arg: The FCP packet the SRR is on
1580 */ 1657 */
1581static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) 1658static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
1582{ 1659{
@@ -1622,6 +1699,11 @@ out:
1622 fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */ 1699 fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */
1623} 1700}
1624 1701
1702/**
1703 * fc_fcp_srr_error() - Handler for SRR errors
1704 * @fsp: The FCP packet that the SRR error is on
1705 * @fp: The SRR frame
1706 */
1625static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) 1707static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
1626{ 1708{
1627 if (fc_fcp_lock_pkt(fsp)) 1709 if (fc_fcp_lock_pkt(fsp))
@@ -1646,31 +1728,37 @@ out:
1646 fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */ 1728 fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */
1647} 1729}
1648 1730
1649static inline int fc_fcp_lport_queue_ready(struct fc_lport *lp) 1731/**
1732 * fc_fcp_lport_queue_ready() - Determine if the lport and its queue are ready
1733 * @lport: The local port to be checked
1734 */
1735static inline int fc_fcp_lport_queue_ready(struct fc_lport *lport)
1650{ 1736{
1651 /* lock ? */ 1737 /* lock ? */
1652 return (lp->state == LPORT_ST_READY) && lp->link_up && !lp->qfull; 1738 return (lport->state == LPORT_ST_READY) &&
1739 lport->link_up && !lport->qfull;
1653} 1740}
1654 1741
1655/** 1742/**
1656 * fc_queuecommand - The queuecommand function of the scsi template 1743 * fc_queuecommand() - The queuecommand function of the SCSI template
1657 * @cmd: struct scsi_cmnd to be executed 1744 * @cmd: The scsi_cmnd to be executed
1658 * @done: Callback function to be called when cmd is completed 1745 * @done: The callback function to be called when the scsi_cmnd is complete
1659 * 1746 *
1660 * this is the i/o strategy routine, called by the scsi layer 1747 * This is the i/o strategy routine, called by the SCSI layer. This routine
1661 * this routine is called with holding the host_lock. 1748 * is called with the host_lock held.
1662 */ 1749 */
1663int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *)) 1750int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
1664{ 1751{
1665 struct fc_lport *lp; 1752 struct fc_lport *lport;
1666 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); 1753 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
1667 struct fc_fcp_pkt *fsp; 1754 struct fc_fcp_pkt *fsp;
1668 struct fc_rport_libfc_priv *rp; 1755 struct fc_rport_libfc_priv *rpriv;
1669 int rval; 1756 int rval;
1670 int rc = 0; 1757 int rc = 0;
1671 struct fcoe_dev_stats *stats; 1758 struct fcoe_dev_stats *stats;
1672 1759
1673 lp = shost_priv(sc_cmd->device->host); 1760 lport = shost_priv(sc_cmd->device->host);
1761 spin_unlock_irq(lport->host->host_lock);
1674 1762
1675 rval = fc_remote_port_chkready(rport); 1763 rval = fc_remote_port_chkready(rport);
1676 if (rval) { 1764 if (rval) {
@@ -1689,14 +1777,16 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
1689 goto out; 1777 goto out;
1690 } 1778 }
1691 1779
1692 rp = rport->dd_data; 1780 rpriv = rport->dd_data;
1693 1781
1694 if (!fc_fcp_lport_queue_ready(lp)) { 1782 if (!fc_fcp_lport_queue_ready(lport)) {
1783 if (lport->qfull)
1784 fc_fcp_can_queue_ramp_down(lport);
1695 rc = SCSI_MLQUEUE_HOST_BUSY; 1785 rc = SCSI_MLQUEUE_HOST_BUSY;
1696 goto out; 1786 goto out;
1697 } 1787 }
1698 1788
1699 fsp = fc_fcp_pkt_alloc(lp, GFP_ATOMIC); 1789 fsp = fc_fcp_pkt_alloc(lport, GFP_ATOMIC);
1700 if (fsp == NULL) { 1790 if (fsp == NULL) {
1701 rc = SCSI_MLQUEUE_HOST_BUSY; 1791 rc = SCSI_MLQUEUE_HOST_BUSY;
1702 goto out; 1792 goto out;
@@ -1706,8 +1796,9 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
1706 * build the libfc request pkt 1796 * build the libfc request pkt
1707 */ 1797 */
1708 fsp->cmd = sc_cmd; /* save the cmd */ 1798 fsp->cmd = sc_cmd; /* save the cmd */
1709 fsp->lp = lp; /* save the softc ptr */ 1799 fsp->lp = lport; /* save the softc ptr */
1710 fsp->rport = rport; /* set the remote port ptr */ 1800 fsp->rport = rport; /* set the remote port ptr */
1801 fsp->xfer_ddp = FC_XID_UNKNOWN;
1711 sc_cmd->scsi_done = done; 1802 sc_cmd->scsi_done = done;
1712 1803
1713 /* 1804 /*
@@ -1719,7 +1810,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
1719 /* 1810 /*
1720 * setup the data direction 1811 * setup the data direction
1721 */ 1812 */
1722 stats = fc_lport_get_stats(lp); 1813 stats = fc_lport_get_stats(lport);
1723 if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) { 1814 if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
1724 fsp->req_flags = FC_SRB_READ; 1815 fsp->req_flags = FC_SRB_READ;
1725 stats->InputRequests++; 1816 stats->InputRequests++;
@@ -1733,7 +1824,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
1733 stats->ControlRequests++; 1824 stats->ControlRequests++;
1734 } 1825 }
1735 1826
1736 fsp->tgt_flags = rp->flags; 1827 fsp->tgt_flags = rpriv->flags;
1737 1828
1738 init_timer(&fsp->timer); 1829 init_timer(&fsp->timer);
1739 fsp->timer.data = (unsigned long)fsp; 1830 fsp->timer.data = (unsigned long)fsp;
@@ -1743,30 +1834,30 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
1743 * if we get -1 return then put the request in the pending 1834 * if we get -1 return then put the request in the pending
1744 * queue. 1835 * queue.
1745 */ 1836 */
1746 rval = fc_fcp_pkt_send(lp, fsp); 1837 rval = fc_fcp_pkt_send(lport, fsp);
1747 if (rval != 0) { 1838 if (rval != 0) {
1748 fsp->state = FC_SRB_FREE; 1839 fsp->state = FC_SRB_FREE;
1749 fc_fcp_pkt_release(fsp); 1840 fc_fcp_pkt_release(fsp);
1750 rc = SCSI_MLQUEUE_HOST_BUSY; 1841 rc = SCSI_MLQUEUE_HOST_BUSY;
1751 } 1842 }
1752out: 1843out:
1844 spin_lock_irq(lport->host->host_lock);
1753 return rc; 1845 return rc;
1754} 1846}
1755EXPORT_SYMBOL(fc_queuecommand); 1847EXPORT_SYMBOL(fc_queuecommand);
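
The hunk above has fc_queuecommand() run the entire FCP send path without the host lock: it is entered with host_lock held, drops it immediately, and re-takes it just before returning. A minimal user-space sketch of that discipline, with a pthread mutex standing in for host_lock (names here are illustrative, not libfc API):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;

    static int send_cmd(int cmd)
    {
            /* the slow path: allocation, framing, transmit */
            printf("sending cmd %d\n", cmd);
            return 0;
    }

    static int queuecommand(int cmd)
    {
            int rc;

            /* caller holds host_lock on entry, as the SCSI layer does */
            pthread_mutex_unlock(&host_lock);
            rc = send_cmd(cmd);             /* runs unlocked */
            pthread_mutex_lock(&host_lock);
            return rc;                      /* caller still holds the lock */
    }

    int main(void)
    {
            pthread_mutex_lock(&host_lock);
            queuecommand(42);
            pthread_mutex_unlock(&host_lock);
            return 0;
    }

The cost of the unlocked window is that every structure touched in between needs its own protection, which is why the completion path below gains a dedicated scsi_queue_lock.
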
1756 1848
1757/** 1849/**
1758 * fc_io_compl() - Handle responses for completed commands 1850 * fc_io_compl() - Handle responses for completed commands
1759 * @fsp: scsi packet 1851 * @fsp: The FCP packet that is complete
1760 *
1761 * Translates a error to a Linux SCSI error.
1762 * 1852 *
1853 * Translates fcp_pkt errors into Linux SCSI errors.
1763 * The fcp packet lock must be held when calling. 1854 * The fcp packet lock must be held when calling.
1764 */ 1855 */
1765static void fc_io_compl(struct fc_fcp_pkt *fsp) 1856static void fc_io_compl(struct fc_fcp_pkt *fsp)
1766{ 1857{
1767 struct fc_fcp_internal *si; 1858 struct fc_fcp_internal *si;
1768 struct scsi_cmnd *sc_cmd; 1859 struct scsi_cmnd *sc_cmd;
1769 struct fc_lport *lp; 1860 struct fc_lport *lport;
1770 unsigned long flags; 1861 unsigned long flags;
1771 1862
1772 /* release outstanding ddp context */ 1863 /* release outstanding ddp context */
@@ -1779,30 +1870,23 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
1779 spin_lock_bh(&fsp->scsi_pkt_lock); 1870 spin_lock_bh(&fsp->scsi_pkt_lock);
1780 } 1871 }
1781 1872
1782 lp = fsp->lp; 1873 lport = fsp->lp;
1783 si = fc_get_scsi_internal(lp); 1874 si = fc_get_scsi_internal(lport);
1784 spin_lock_irqsave(lp->host->host_lock, flags); 1875 if (!fsp->cmd)
1785 if (!fsp->cmd) {
1786 spin_unlock_irqrestore(lp->host->host_lock, flags);
1787 return; 1876 return;
1788 }
1789 1877
1790 /* 1878 /*
1791 * if a command timed out while we had to try and throttle IO 1879 * if can_queue ramp down is done then try can_queue ramp up
1792 * and it is now getting cleaned up, then we are about to 1880 * since commands are completing now.
1793 * try again so clear the throttled flag incase we get more
1794 * time outs.
1795 */ 1881 */
1796 if (si->throttled && fsp->state & FC_SRB_NOMEM) 1882 if (si->last_can_queue_ramp_down_time)
1797 si->throttled = 0; 1883 fc_fcp_can_queue_ramp_up(lport);
1798 1884
1799 sc_cmd = fsp->cmd; 1885 sc_cmd = fsp->cmd;
1800 fsp->cmd = NULL; 1886 fsp->cmd = NULL;
1801 1887
1802 if (!sc_cmd->SCp.ptr) { 1888 if (!sc_cmd->SCp.ptr)
1803 spin_unlock_irqrestore(lp->host->host_lock, flags);
1804 return; 1889 return;
1805 }
1806 1890
1807 CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status; 1891 CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status;
1808 switch (fsp->status_code) { 1892 switch (fsp->status_code) {
@@ -1814,21 +1898,6 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
1814 sc_cmd->result = DID_OK << 16; 1898 sc_cmd->result = DID_OK << 16;
1815 if (fsp->scsi_resid) 1899 if (fsp->scsi_resid)
1816 CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid; 1900 CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid;
1817 } else if (fsp->cdb_status == QUEUE_FULL) {
1818 struct scsi_device *tmp_sdev;
1819 struct scsi_device *sdev = sc_cmd->device;
1820
1821 shost_for_each_device(tmp_sdev, sdev->host) {
1822 if (tmp_sdev->id != sdev->id)
1823 continue;
1824
1825 if (tmp_sdev->queue_depth > 1) {
1826 scsi_track_queue_full(tmp_sdev,
1827 tmp_sdev->
1828 queue_depth - 1);
1829 }
1830 }
1831 sc_cmd->result = (DID_OK << 16) | fsp->cdb_status;
1832 } else { 1901 } else {
1833 /* 1902 /*
1834 * transport level I/O was ok but scsi 1903 * transport level I/O was ok but scsi
@@ -1846,7 +1915,8 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
1846 * scsi status is good but transport level 1915 * scsi status is good but transport level
1847 * underrun. 1916 * underrun.
1848 */ 1917 */
1849 sc_cmd->result = DID_OK << 16; 1918 sc_cmd->result = (fsp->state & FC_SRB_RCV_STATUS ?
1919 DID_OK : DID_ERROR) << 16;
1850 } else { 1920 } else {
1851 /* 1921 /*
1852 * scsi got underrun, this is an error 1922 * scsi got underrun, this is an error
@@ -1878,63 +1948,46 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
1878 break; 1948 break;
1879 } 1949 }
1880 1950
1951 spin_lock_irqsave(&si->scsi_queue_lock, flags);
1881 list_del(&fsp->list); 1952 list_del(&fsp->list);
1953 spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
1882 sc_cmd->SCp.ptr = NULL; 1954 sc_cmd->SCp.ptr = NULL;
1883 sc_cmd->scsi_done(sc_cmd); 1955 sc_cmd->scsi_done(sc_cmd);
1884 spin_unlock_irqrestore(lp->host->host_lock, flags);
1885 1956
1886 /* release ref from initial allocation in queue command */ 1957 /* release ref from initial allocation in queue command */
1887 fc_fcp_pkt_release(fsp); 1958 fc_fcp_pkt_release(fsp);
1888} 1959}
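
fc_io_compl() now drives congestion recovery: a completion triggers fc_fcp_can_queue_ramp_up() whenever an earlier queue-full event forced fc_fcp_can_queue_ramp_down() (recorded in last_can_queue_ramp_down_time). A self-contained model of that feedback loop follows; the halving step, the doubling recovery and the two-second hold-off are assumptions for illustration, not the constants used by fc_fcp.c:

    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    #define RAMP_HOLDOFF_SEC 2

    static int max_can_queue = 256;
    static int can_queue = 256;
    static time_t last_ramp_down;

    static void can_queue_ramp_down(void)
    {
            last_ramp_down = time(NULL);
            can_queue /= 2;                 /* assumed: halve on queue-full */
            if (can_queue < 1)
                    can_queue = 1;
    }

    static void can_queue_ramp_up(void)
    {
            /* only ramp up after a ramp-down, and let things settle first */
            if (!last_ramp_down ||
                time(NULL) - last_ramp_down < RAMP_HOLDOFF_SEC)
                    return;
            can_queue *= 2;                 /* assumed: double back toward max */
            if (can_queue >= max_can_queue) {
                    can_queue = max_can_queue;
                    last_ramp_down = 0;     /* fully recovered */
            }
    }

    int main(void)
    {
            can_queue_ramp_down();          /* as when lport->qfull is seen */
            printf("after ramp down: %d\n", can_queue);
            sleep(RAMP_HOLDOFF_SEC);
            can_queue_ramp_up();            /* as when a command completes */
            printf("after ramp up:   %d\n", can_queue);
            return 0;
    }
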
1889 1960
1890/** 1961/**
1891 * fc_fcp_complete() - complete processing of a fcp packet
1892 * @fsp: fcp packet
1893 *
1894 * This function may sleep if a fsp timer is pending.
1895 * The host lock must not be held by caller.
1896 */
1897void fc_fcp_complete(struct fc_fcp_pkt *fsp)
1898{
1899 if (fc_fcp_lock_pkt(fsp))
1900 return;
1901
1902 fc_fcp_complete_locked(fsp);
1903 fc_fcp_unlock_pkt(fsp);
1904}
1905EXPORT_SYMBOL(fc_fcp_complete);
1906
1907/**
1908 * fc_eh_abort() - Abort a command 1962 * fc_eh_abort() - Abort a command
1909 * @sc_cmd: scsi command to abort 1963 * @sc_cmd: The SCSI command to abort
1910 * 1964 *
1911 * From scsi host template. 1965 * From SCSI host template.
1912 * send ABTS to the target device and wait for the response 1966 * Send an ABTS to the target device and wait for the response.
1913 * sc_cmd is the pointer to the command to be aborted.
1914 */ 1967 */
1915int fc_eh_abort(struct scsi_cmnd *sc_cmd) 1968int fc_eh_abort(struct scsi_cmnd *sc_cmd)
1916{ 1969{
1917 struct fc_fcp_pkt *fsp; 1970 struct fc_fcp_pkt *fsp;
1918 struct fc_lport *lp; 1971 struct fc_lport *lport;
1919 int rc = FAILED; 1972 int rc = FAILED;
1920 unsigned long flags; 1973 unsigned long flags;
1921 1974
1922 lp = shost_priv(sc_cmd->device->host); 1975 lport = shost_priv(sc_cmd->device->host);
1923 if (lp->state != LPORT_ST_READY) 1976 if (lport->state != LPORT_ST_READY)
1924 return rc; 1977 return rc;
1925 else if (!lp->link_up) 1978 else if (!lport->link_up)
1926 return rc; 1979 return rc;
1927 1980
1928 spin_lock_irqsave(lp->host->host_lock, flags); 1981 spin_lock_irqsave(lport->host->host_lock, flags);
1929 fsp = CMD_SP(sc_cmd); 1982 fsp = CMD_SP(sc_cmd);
1930 if (!fsp) { 1983 if (!fsp) {
1931 /* command completed while scsi eh was setting up */ 1984 /* command completed while scsi eh was setting up */
1932 spin_unlock_irqrestore(lp->host->host_lock, flags); 1985 spin_unlock_irqrestore(lport->host->host_lock, flags);
1933 return SUCCESS; 1986 return SUCCESS;
1934 } 1987 }
1935 /* grab a ref so the fsp and sc_cmd cannot be released from under us */ 1988 /* grab a ref so the fsp and sc_cmd cannot be released from under us */
1936 fc_fcp_pkt_hold(fsp); 1989 fc_fcp_pkt_hold(fsp);
1937 spin_unlock_irqrestore(lp->host->host_lock, flags); 1990 spin_unlock_irqrestore(lport->host->host_lock, flags);
1938 1991
1939 if (fc_fcp_lock_pkt(fsp)) { 1992 if (fc_fcp_lock_pkt(fsp)) {
1940 /* completed while we were waiting for timer to be deleted */ 1993 /* completed while we were waiting for timer to be deleted */
@@ -1942,7 +1995,7 @@ int fc_eh_abort(struct scsi_cmnd *sc_cmd)
1942 goto release_pkt; 1995 goto release_pkt;
1943 } 1996 }
1944 1997
1945 rc = fc_fcp_pkt_abort(lp, fsp); 1998 rc = fc_fcp_pkt_abort(fsp);
1946 fc_fcp_unlock_pkt(fsp); 1999 fc_fcp_unlock_pkt(fsp);
1947 2000
1948release_pkt: 2001release_pkt:
@@ -1952,37 +2005,34 @@ release_pkt:
1952EXPORT_SYMBOL(fc_eh_abort); 2005EXPORT_SYMBOL(fc_eh_abort);
1953 2006
1954/** 2007/**
1955 * fc_eh_device_reset() Reset a single LUN 2008 * fc_eh_device_reset() - Reset a single LUN
1956 * @sc_cmd: scsi command 2009 * @sc_cmd: The SCSI command which identifies the device whose
2010 * LUN is to be reset
1957 * 2011 *
1958 * Set from scsi host template to send tm cmd to the target and wait for the 2012 * Set from SCSI host template.
1959 * response.
1960 */ 2013 */
1961int fc_eh_device_reset(struct scsi_cmnd *sc_cmd) 2014int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
1962{ 2015{
1963 struct fc_lport *lp; 2016 struct fc_lport *lport;
1964 struct fc_fcp_pkt *fsp; 2017 struct fc_fcp_pkt *fsp;
1965 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); 2018 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
1966 int rc = FAILED; 2019 int rc = FAILED;
1967 struct fc_rport_libfc_priv *rp;
1968 int rval; 2020 int rval;
1969 2021
1970 rval = fc_remote_port_chkready(rport); 2022 rval = fc_remote_port_chkready(rport);
1971 if (rval) 2023 if (rval)
1972 goto out; 2024 goto out;
1973 2025
1974 rp = rport->dd_data; 2026 lport = shost_priv(sc_cmd->device->host);
1975 lp = shost_priv(sc_cmd->device->host);
1976 2027
1977 if (lp->state != LPORT_ST_READY) 2028 if (lport->state != LPORT_ST_READY)
1978 return rc; 2029 return rc;
1979 2030
1980 FC_SCSI_DBG(lp, "Resetting rport (%6x)\n", rport->port_id); 2031 FC_SCSI_DBG(lport, "Resetting rport (%6x)\n", rport->port_id);
1981 2032
1982 fsp = fc_fcp_pkt_alloc(lp, GFP_NOIO); 2033 fsp = fc_fcp_pkt_alloc(lport, GFP_NOIO);
1983 if (fsp == NULL) { 2034 if (fsp == NULL) {
1984 printk(KERN_WARNING "libfc: could not allocate scsi_pkt\n"); 2035 printk(KERN_WARNING "libfc: could not allocate scsi_pkt\n");
1985 sc_cmd->result = DID_NO_CONNECT << 16;
1986 goto out; 2036 goto out;
1987 } 2037 }
1988 2038
@@ -1991,13 +2041,13 @@ int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
1991 * the sc passed in is not setup for execution like when sent 2041 * the sc passed in is not setup for execution like when sent
1992 * through the queuecommand callout. 2042 * through the queuecommand callout.
1993 */ 2043 */
1994 fsp->lp = lp; /* save the softc ptr */ 2044 fsp->lp = lport; /* save the softc ptr */
1995 fsp->rport = rport; /* set the remote port ptr */ 2045 fsp->rport = rport; /* set the remote port ptr */
1996 2046
1997 /* 2047 /*
1998 * flush outstanding commands 2048 * flush outstanding commands
1999 */ 2049 */
2000 rc = fc_lun_reset(lp, fsp, scmd_id(sc_cmd), sc_cmd->device->lun); 2050 rc = fc_lun_reset(lport, fsp, scmd_id(sc_cmd), sc_cmd->device->lun);
2001 fsp->state = FC_SRB_FREE; 2051 fsp->state = FC_SRB_FREE;
2002 fc_fcp_pkt_release(fsp); 2052 fc_fcp_pkt_release(fsp);
2003 2053
@@ -2007,38 +2057,39 @@ out:
2007EXPORT_SYMBOL(fc_eh_device_reset); 2057EXPORT_SYMBOL(fc_eh_device_reset);
2008 2058
2009/** 2059/**
2010 * fc_eh_host_reset() - The reset function will reset the ports on the host. 2060 * fc_eh_host_reset() - Reset a Scsi_Host.
2011 * @sc_cmd: scsi command 2061 * @sc_cmd: The SCSI command that identifies the SCSI host to be reset
2012 */ 2062 */
2013int fc_eh_host_reset(struct scsi_cmnd *sc_cmd) 2063int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)
2014{ 2064{
2015 struct Scsi_Host *shost = sc_cmd->device->host; 2065 struct Scsi_Host *shost = sc_cmd->device->host;
2016 struct fc_lport *lp = shost_priv(shost); 2066 struct fc_lport *lport = shost_priv(shost);
2017 unsigned long wait_tmo; 2067 unsigned long wait_tmo;
2018 2068
2019 FC_SCSI_DBG(lp, "Resetting host\n"); 2069 FC_SCSI_DBG(lport, "Resetting host\n");
2020 2070
2021 lp->tt.lport_reset(lp); 2071 lport->tt.lport_reset(lport);
2022 wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT; 2072 wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT;
2023 while (!fc_fcp_lport_queue_ready(lp) && time_before(jiffies, wait_tmo)) 2073 while (!fc_fcp_lport_queue_ready(lport) && time_before(jiffies,
2074 wait_tmo))
2024 msleep(1000); 2075 msleep(1000);
2025 2076
2026 if (fc_fcp_lport_queue_ready(lp)) { 2077 if (fc_fcp_lport_queue_ready(lport)) {
2027 shost_printk(KERN_INFO, shost, "libfc: Host reset succeeded " 2078 shost_printk(KERN_INFO, shost, "libfc: Host reset succeeded "
2028 "on port (%6x)\n", fc_host_port_id(lp->host)); 2079 "on port (%6x)\n", fc_host_port_id(lport->host));
2029 return SUCCESS; 2080 return SUCCESS;
2030 } else { 2081 } else {
2031 shost_printk(KERN_INFO, shost, "libfc: Host reset failed, " 2082 shost_printk(KERN_INFO, shost, "libfc: Host reset failed, "
2032 "port (%6x) is not ready.\n", 2083 "port (%6x) is not ready.\n",
2033 fc_host_port_id(lp->host)); 2084 fc_host_port_id(lport->host));
2034 return FAILED; 2085 return FAILED;
2035 } 2086 }
2036} 2087}
2037EXPORT_SYMBOL(fc_eh_host_reset); 2088EXPORT_SYMBOL(fc_eh_host_reset);
2038 2089
2039/** 2090/**
2040 * fc_slave_alloc() - configure queue depth 2091 * fc_slave_alloc() - Configure the queue depth of a Scsi_Host
2041 * @sdev: scsi device 2092 * @sdev: The SCSI device that identifies the SCSI host
2042 * 2093 *
2043 * Configures queue depth based on host's cmd_per_lun. If not set 2094 * Configures queue depth based on host's cmd_per_lun. If not set
2044 * then we use the libfc default. 2095 * then we use the libfc default.
@@ -2046,29 +2097,50 @@ EXPORT_SYMBOL(fc_eh_host_reset);
2046int fc_slave_alloc(struct scsi_device *sdev) 2097int fc_slave_alloc(struct scsi_device *sdev)
2047{ 2098{
2048 struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); 2099 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
2049 int queue_depth;
2050 2100
2051 if (!rport || fc_remote_port_chkready(rport)) 2101 if (!rport || fc_remote_port_chkready(rport))
2052 return -ENXIO; 2102 return -ENXIO;
2053 2103
2054 if (sdev->tagged_supported) { 2104 if (sdev->tagged_supported)
2055 if (sdev->host->hostt->cmd_per_lun) 2105 scsi_activate_tcq(sdev, FC_FCP_DFLT_QUEUE_DEPTH);
2056 queue_depth = sdev->host->hostt->cmd_per_lun; 2106 else
2057 else 2107 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev),
2058 queue_depth = FC_FCP_DFLT_QUEUE_DEPTH; 2108 FC_FCP_DFLT_QUEUE_DEPTH);
2059 scsi_activate_tcq(sdev, queue_depth); 2109
2060 }
2061 return 0; 2110 return 0;
2062} 2111}
2063EXPORT_SYMBOL(fc_slave_alloc); 2112EXPORT_SYMBOL(fc_slave_alloc);
2064 2113
2065int fc_change_queue_depth(struct scsi_device *sdev, int qdepth) 2114/**
2115 * fc_change_queue_depth() - Change a device's queue depth
2116 * @sdev: The SCSI device whose queue depth is to change
2117 * @qdepth: The new queue depth
2118 * @reason: The reason for the change
2119 */
2120int fc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
2066{ 2121{
2067 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); 2122 switch (reason) {
2123 case SCSI_QDEPTH_DEFAULT:
2124 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
2125 break;
2126 case SCSI_QDEPTH_QFULL:
2127 scsi_track_queue_full(sdev, qdepth);
2128 break;
2129 case SCSI_QDEPTH_RAMP_UP:
2130 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
2131 break;
2132 default:
2133 return -EOPNOTSUPP;
2134 }
2068 return sdev->queue_depth; 2135 return sdev->queue_depth;
2069} 2136}
2070EXPORT_SYMBOL(fc_change_queue_depth); 2137EXPORT_SYMBOL(fc_change_queue_depth);
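
The new reason argument mirrors the midlayer's SCSI_QDEPTH_* contract: default and ramp-up requests apply the depth directly, queue-full feedback goes to the tracking helper, anything else is rejected. A compilable miniature of the same dispatch (the enum echoes the SCSI reason codes; the depth bookkeeping is deliberately simplified):

    #include <stdio.h>
    #include <errno.h>

    enum qdepth_reason { QDEPTH_DEFAULT, QDEPTH_QFULL, QDEPTH_RAMP_UP };

    static int queue_depth = 32;

    static int change_queue_depth(int qdepth, enum qdepth_reason reason)
    {
            switch (reason) {
            case QDEPTH_DEFAULT:            /* sysfs/user request */
            case QDEPTH_RAMP_UP:            /* midlayer ramping back up */
                    queue_depth = qdepth;
                    break;
            case QDEPTH_QFULL:              /* queue-full feedback: only shrink */
                    if (qdepth < queue_depth)
                            queue_depth = qdepth;
                    break;
            default:
                    return -EOPNOTSUPP;
            }
            return queue_depth;
    }

    int main(void)
    {
            printf("%d\n", change_queue_depth(16, QDEPTH_QFULL));   /* 16 */
            printf("%d\n", change_queue_depth(32, QDEPTH_RAMP_UP)); /* 32 */
            return 0;
    }
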
2071 2138
2139/**
2140 * fc_change_queue_type() - Change a device's queue type
2141 * @sdev: The SCSI device whose queue type is to change
2142 * @tag_type: Identifier for queue type
2143 */
2072int fc_change_queue_type(struct scsi_device *sdev, int tag_type) 2144int fc_change_queue_type(struct scsi_device *sdev, int tag_type)
2073{ 2145{
2074 if (sdev->tagged_supported) { 2146 if (sdev->tagged_supported) {
@@ -2084,39 +2156,71 @@ int fc_change_queue_type(struct scsi_device *sdev, int tag_type)
2084} 2156}
2085EXPORT_SYMBOL(fc_change_queue_type); 2157EXPORT_SYMBOL(fc_change_queue_type);
2086 2158
2087void fc_fcp_destroy(struct fc_lport *lp) 2159/**
2160 * fc_fcp_destroy() - Tear down the FCP layer for a given local port
2161 * @lport: The local port that no longer needs the FCP layer
2162 */
2163void fc_fcp_destroy(struct fc_lport *lport)
2088{ 2164{
2089 struct fc_fcp_internal *si = fc_get_scsi_internal(lp); 2165 struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
2090 2166
2091 if (!list_empty(&si->scsi_pkt_queue)) 2167 if (!list_empty(&si->scsi_pkt_queue))
2092 printk(KERN_ERR "libfc: Leaked SCSI packets when destroying " 2168 printk(KERN_ERR "libfc: Leaked SCSI packets when destroying "
2093 "port (%6x)\n", fc_host_port_id(lp->host)); 2169 "port (%6x)\n", fc_host_port_id(lport->host));
2094 2170
2095 mempool_destroy(si->scsi_pkt_pool); 2171 mempool_destroy(si->scsi_pkt_pool);
2096 kfree(si); 2172 kfree(si);
2097 lp->scsi_priv = NULL; 2173 lport->scsi_priv = NULL;
2098} 2174}
2099EXPORT_SYMBOL(fc_fcp_destroy); 2175EXPORT_SYMBOL(fc_fcp_destroy);
2100 2176
2101int fc_fcp_init(struct fc_lport *lp) 2177int fc_setup_fcp()
2178{
2179 int rc = 0;
2180
2181 scsi_pkt_cachep = kmem_cache_create("libfc_fcp_pkt",
2182 sizeof(struct fc_fcp_pkt),
2183 0, SLAB_HWCACHE_ALIGN, NULL);
2184 if (!scsi_pkt_cachep) {
2185 printk(KERN_ERR "libfc: Unable to allocate SRB cache, "
2186 "module load failed!");
2187 rc = -ENOMEM;
2188 }
2189
2190 return rc;
2191}
2192
2193void fc_destroy_fcp()
2194{
2195 if (scsi_pkt_cachep)
2196 kmem_cache_destroy(scsi_pkt_cachep);
2197}
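
fc_setup_fcp() and fc_destroy_fcp() tie the global scsi_pkt_cachep slab to module load and unload; each lport's mempool in fc_fcp_init() then draws from that cache. A user-space analogue of the pairing, with a trivial free list standing in for the kmem_cache (names illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct pkt { struct pkt *next; char payload[64]; };

    static struct pkt *pkt_cache;           /* global, like scsi_pkt_cachep */

    static void destroy_pkt_cache(void)     /* like fc_destroy_fcp() */
    {
            while (pkt_cache) {
                    struct pkt *p = pkt_cache;
                    pkt_cache = p->next;
                    free(p);
            }
    }

    static int setup_pkt_cache(int n)       /* like fc_setup_fcp() */
    {
            while (n--) {
                    struct pkt *p = malloc(sizeof(*p));
                    if (!p) {
                            destroy_pkt_cache();
                            return -1;      /* setup failed, nothing leaks */
                    }
                    p->next = pkt_cache;
                    pkt_cache = p;
            }
            return 0;
    }

    int main(void)
    {
            if (setup_pkt_cache(8))         /* at module load */
                    return 1;
            /* per-lport pools would allocate from the cache here */
            destroy_pkt_cache();            /* at module unload */
            return 0;
    }
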
2198
2199/**
2200 * fc_fcp_init() - Initialize the FCP layer for a local port
2201 * @lport: The local port to initialize the FCP layer for
2202 */
2203int fc_fcp_init(struct fc_lport *lport)
2102{ 2204{
2103 int rc; 2205 int rc;
2104 struct fc_fcp_internal *si; 2206 struct fc_fcp_internal *si;
2105 2207
2106 if (!lp->tt.fcp_cmd_send) 2208 if (!lport->tt.fcp_cmd_send)
2107 lp->tt.fcp_cmd_send = fc_fcp_cmd_send; 2209 lport->tt.fcp_cmd_send = fc_fcp_cmd_send;
2108 2210
2109 if (!lp->tt.fcp_cleanup) 2211 if (!lport->tt.fcp_cleanup)
2110 lp->tt.fcp_cleanup = fc_fcp_cleanup; 2212 lport->tt.fcp_cleanup = fc_fcp_cleanup;
2111 2213
2112 if (!lp->tt.fcp_abort_io) 2214 if (!lport->tt.fcp_abort_io)
2113 lp->tt.fcp_abort_io = fc_fcp_abort_io; 2215 lport->tt.fcp_abort_io = fc_fcp_abort_io;
2114 2216
2115 si = kzalloc(sizeof(struct fc_fcp_internal), GFP_KERNEL); 2217 si = kzalloc(sizeof(struct fc_fcp_internal), GFP_KERNEL);
2116 if (!si) 2218 if (!si)
2117 return -ENOMEM; 2219 return -ENOMEM;
2118 lp->scsi_priv = si; 2220 lport->scsi_priv = si;
2221 si->max_can_queue = lport->host->can_queue;
2119 INIT_LIST_HEAD(&si->scsi_pkt_queue); 2222 INIT_LIST_HEAD(&si->scsi_pkt_queue);
2223 spin_lock_init(&si->scsi_queue_lock);
2120 2224
2121 si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep); 2225 si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep);
2122 if (!si->scsi_pkt_pool) { 2226 if (!si->scsi_pkt_pool) {
@@ -2130,42 +2234,3 @@ free_internal:
2130 return rc; 2234 return rc;
2131} 2235}
2132EXPORT_SYMBOL(fc_fcp_init); 2236EXPORT_SYMBOL(fc_fcp_init);
2133
2134static int __init libfc_init(void)
2135{
2136 int rc;
2137
2138 scsi_pkt_cachep = kmem_cache_create("libfc_fcp_pkt",
2139 sizeof(struct fc_fcp_pkt),
2140 0, SLAB_HWCACHE_ALIGN, NULL);
2141 if (scsi_pkt_cachep == NULL) {
2142 printk(KERN_ERR "libfc: Unable to allocate SRB cache, "
2143 "module load failed!");
2144 return -ENOMEM;
2145 }
2146
2147 rc = fc_setup_exch_mgr();
2148 if (rc)
2149 goto destroy_pkt_cache;
2150
2151 rc = fc_setup_rport();
2152 if (rc)
2153 goto destroy_em;
2154
2155 return rc;
2156destroy_em:
2157 fc_destroy_exch_mgr();
2158destroy_pkt_cache:
2159 kmem_cache_destroy(scsi_pkt_cachep);
2160 return rc;
2161}
2162
2163static void __exit libfc_exit(void)
2164{
2165 kmem_cache_destroy(scsi_pkt_cachep);
2166 fc_destroy_exch_mgr();
2167 fc_destroy_rport();
2168}
2169
2170module_init(libfc_init);
2171module_exit(libfc_exit);
diff --git a/drivers/scsi/libfc/fc_frame.c b/drivers/scsi/libfc/fc_frame.c
index 63fe00cfe667..981329a17c48 100644
--- a/drivers/scsi/libfc/fc_frame.c
+++ b/drivers/scsi/libfc/fc_frame.c
@@ -24,6 +24,7 @@
24#include <linux/kernel.h> 24#include <linux/kernel.h>
25#include <linux/skbuff.h> 25#include <linux/skbuff.h>
26#include <linux/crc32.h> 26#include <linux/crc32.h>
27#include <linux/gfp.h>
27 28
28#include <scsi/fc_frame.h> 29#include <scsi/fc_frame.h>
29 30
@@ -51,24 +52,24 @@ EXPORT_SYMBOL(fc_frame_crc_check);
51 * Allocate a frame intended to be sent via fcoe_xmit. 52 * Allocate a frame intended to be sent via fcoe_xmit.
52 * Get an sk_buff for the frame and set the length. 53 * Get an sk_buff for the frame and set the length.
53 */ 54 */
54struct fc_frame *__fc_frame_alloc(size_t len) 55struct fc_frame *_fc_frame_alloc(size_t len)
55{ 56{
56 struct fc_frame *fp; 57 struct fc_frame *fp;
57 struct sk_buff *skb; 58 struct sk_buff *skb;
58 59
59 WARN_ON((len % sizeof(u32)) != 0); 60 WARN_ON((len % sizeof(u32)) != 0);
60 len += sizeof(struct fc_frame_header); 61 len += sizeof(struct fc_frame_header);
61 skb = dev_alloc_skb(len + FC_FRAME_HEADROOM + FC_FRAME_TAILROOM); 62 skb = alloc_skb_fclone(len + FC_FRAME_HEADROOM + FC_FRAME_TAILROOM +
63 NET_SKB_PAD, GFP_ATOMIC);
62 if (!skb) 64 if (!skb)
63 return NULL; 65 return NULL;
66 skb_reserve(skb, NET_SKB_PAD + FC_FRAME_HEADROOM);
64 fp = (struct fc_frame *) skb; 67 fp = (struct fc_frame *) skb;
65 fc_frame_init(fp); 68 fc_frame_init(fp);
66 skb_reserve(skb, FC_FRAME_HEADROOM);
67 skb_put(skb, len); 69 skb_put(skb, len);
68 return fp; 70 return fp;
69} 71}
70EXPORT_SYMBOL(__fc_frame_alloc); 72EXPORT_SYMBOL(_fc_frame_alloc);
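
The reworked _fc_frame_alloc() reserves NET_SKB_PAD plus FC_FRAME_HEADROOM in front of the frame and leaves FC_FRAME_TAILROOM behind it; skb_put() then claims the payload. The same pointer arithmetic can be sketched without sk_buffs (the sizes below are stand-ins, not the real constants):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define PAD      32     /* stands in for NET_SKB_PAD */
    #define HEADROOM 28     /* stands in for FC_FRAME_HEADROOM */
    #define TAILROOM  4     /* stands in for FC_FRAME_TAILROOM (room for CRC) */

    int main(void)
    {
            size_t len = 64;                /* frame, including FC header */
            size_t total = PAD + HEADROOM + len + TAILROOM;
            unsigned char *buf = calloc(1, total);
            unsigned char *data, *tail;

            if (!buf)
                    return 1;
            data = buf + PAD + HEADROOM;    /* the skb_reserve() step */
            tail = data + len;              /* the skb_put() step */
            memset(data, 0xAB, len);        /* frame contents */
            printf("headroom=%td tailroom=%td\n",
                   data - buf, (buf + total) - tail);
            free(buf);
            return 0;
    }
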
71
72 73
73struct fc_frame *fc_frame_alloc_fill(struct fc_lport *lp, size_t payload_len) 74struct fc_frame *fc_frame_alloc_fill(struct fc_lport *lp, size_t payload_len)
74{ 75{
@@ -78,7 +79,7 @@ struct fc_frame *fc_frame_alloc_fill(struct fc_lport *lp, size_t payload_len)
78 fill = payload_len % 4; 79 fill = payload_len % 4;
79 if (fill != 0) 80 if (fill != 0)
80 fill = 4 - fill; 81 fill = 4 - fill;
81 fp = __fc_frame_alloc(payload_len + fill); 82 fp = _fc_frame_alloc(payload_len + fill);
82 if (fp) { 83 if (fp) {
83 memset((char *) fr_hdr(fp) + payload_len, 0, fill); 84 memset((char *) fr_hdr(fp) + payload_len, 0, fill);
84 /* trim is OK, we just allocated it so there are no fragments */ 85 /* trim is OK, we just allocated it so there are no fragments */
@@ -87,3 +88,4 @@ struct fc_frame *fc_frame_alloc_fill(struct fc_lport *lp, size_t payload_len)
87 } 88 }
88 return fp; 89 return fp;
89} 90}
91EXPORT_SYMBOL(fc_frame_alloc_fill);
diff --git a/drivers/scsi/libfc/fc_libfc.c b/drivers/scsi/libfc/fc_libfc.c
new file mode 100644
index 000000000000..39f4b6ab04b4
--- /dev/null
+++ b/drivers/scsi/libfc/fc_libfc.c
@@ -0,0 +1,134 @@
1/*
2 * Copyright(c) 2009 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20#include <linux/kernel.h>
21#include <linux/types.h>
22#include <linux/scatterlist.h>
23#include <linux/crc32.h>
24
25#include <scsi/libfc.h>
26
27#include "fc_libfc.h"
28
29MODULE_AUTHOR("Open-FCoE.org");
30MODULE_DESCRIPTION("libfc");
31MODULE_LICENSE("GPL v2");
32
33unsigned int fc_debug_logging;
34module_param_named(debug_logging, fc_debug_logging, int, S_IRUGO|S_IWUSR);
35MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
36
37/**
38 * libfc_init() - Initialize libfc.ko
39 */
40static int __init libfc_init(void)
41{
42 int rc = 0;
43
44 rc = fc_setup_fcp();
45 if (rc)
46 return rc;
47
48 rc = fc_setup_exch_mgr();
49 if (rc)
50 goto destroy_pkt_cache;
51
52 rc = fc_setup_rport();
53 if (rc)
54 goto destroy_em;
55
56 return rc;
57destroy_em:
58 fc_destroy_exch_mgr();
59destroy_pkt_cache:
60 fc_destroy_fcp();
61 return rc;
62}
63module_init(libfc_init);
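
libfc_init() uses the kernel's goto-unwind idiom: each setup step gets a matching teardown label, and a failure jumps to the label that undoes everything already done, in reverse order. A compilable miniature of the shape (the three steps are placeholders, not libfc functions):

    #include <stdio.h>

    static int setup_a(void) { return 0; }
    static int setup_b(void) { return 0; }
    static int setup_c(void) { return -1; }   /* pretend the last step fails */
    static void teardown_b(void) { puts("undo b"); }
    static void teardown_a(void) { puts("undo a"); }

    static int init(void)
    {
            int rc;

            rc = setup_a();
            if (rc)
                    return rc;
            rc = setup_b();
            if (rc)
                    goto undo_a;
            rc = setup_c();
            if (rc)
                    goto undo_b;
            return 0;
    undo_b:
            teardown_b();
    undo_a:
            teardown_a();
            return rc;
    }

    int main(void)
    {
            return init() ? 1 : 0;
    }
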
64
65/**
66 * libfc_exit() - Tear down libfc.ko
67 */
68static void __exit libfc_exit(void)
69{
70 fc_destroy_fcp();
71 fc_destroy_exch_mgr();
72 fc_destroy_rport();
73}
74module_exit(libfc_exit);
75
76/**
77 * fc_copy_buffer_to_sglist() - Copy the contents of a buffer into a
78 * scatter-gather list (SG list).
79 *
80 * @buf: pointer to the data buffer.
81 * @len: the byte-length of the data buffer.
82 * @sg: pointer to the SG list.
83 * @nents: pointer to the remaining number of entries in the SG list.
84 * @offset: pointer to the current offset in the SG list.
85 * @km_type: dedicated page table slot type for kmap_atomic.
86 * @crc: pointer to the 32-bit crc value.
87 * If crc is NULL, CRC is not calculated.
88 */
89u32 fc_copy_buffer_to_sglist(void *buf, size_t len,
90 struct scatterlist *sg,
91 u32 *nents, size_t *offset,
92 enum km_type km_type, u32 *crc)
93{
94 size_t remaining = len;
95 u32 copy_len = 0;
96
97 while (remaining > 0 && sg) {
98 size_t off, sg_bytes;
99 void *page_addr;
100
101 if (*offset >= sg->length) {
102 /*
103 * Check for end and drop resources
104 * from the last iteration.
105 */
106 if (!(*nents))
107 break;
108 --(*nents);
109 *offset -= sg->length;
110 sg = sg_next(sg);
111 continue;
112 }
113 sg_bytes = min(remaining, sg->length - *offset);
114
115 /*
116 * The scatterlist item may be bigger than PAGE_SIZE,
117 * but we are limited to mapping PAGE_SIZE at a time.
118 */
119 off = *offset + sg->offset;
120 sg_bytes = min(sg_bytes,
121 (size_t)(PAGE_SIZE - (off & ~PAGE_MASK)));
122 page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT),
123 km_type);
124 if (crc)
125 *crc = crc32(*crc, buf, sg_bytes);
126 memcpy((char *)page_addr + (off & ~PAGE_MASK), buf, sg_bytes);
127 kunmap_atomic(page_addr, km_type);
128 buf += sg_bytes;
129 *offset += sg_bytes;
130 remaining -= sg_bytes;
131 copy_len += sg_bytes;
132 }
133 return copy_len;
134}
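
A user-space model of the copy loop above, with a short segment array in place of the scatterlist; *offset and *nents advance in the same spirit as in fc_copy_buffer_to_sglist(), while the CRC and the kmap_atomic() page mapping are omitted:

    #include <stdio.h>
    #include <string.h>

    struct seg { char *base; size_t length; };

    /* nents counts the valid entries remaining from sg onward */
    static size_t copy_to_segs(const char *buf, size_t len,
                               struct seg *sg, unsigned *nents, size_t *offset)
    {
            size_t copied = 0;

            while (len && *nents) {
                    size_t n;

                    if (*offset >= sg->length) {    /* current entry is full */
                            *offset -= sg->length;
                            sg++;
                            --(*nents);
                            continue;
                    }
                    n = len < sg->length - *offset ?
                        len : sg->length - *offset;
                    memcpy(sg->base + *offset, buf, n);
                    buf += n;
                    *offset += n;
                    len -= n;
                    copied += n;
            }
            return copied;
    }

    int main(void)
    {
            char a[4], b[8];
            struct seg sg[] = { { a, sizeof(a) }, { b, sizeof(b) } };
            unsigned nents = 2;
            size_t off = 0;

            printf("copied %zu bytes\n",
                   copy_to_segs("hello world", 11, sg, &nents, &off));
            return 0;
    }
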
diff --git a/drivers/scsi/libfc/fc_libfc.h b/drivers/scsi/libfc/fc_libfc.h
new file mode 100644
index 000000000000..741fd5c72e13
--- /dev/null
+++ b/drivers/scsi/libfc/fc_libfc.h
@@ -0,0 +1,112 @@
1/*
2 * Copyright(c) 2009 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20#ifndef _FC_LIBFC_H_
21#define _FC_LIBFC_H_
22
23#define FC_LIBFC_LOGGING 0x01 /* General logging, not categorized */
24#define FC_LPORT_LOGGING 0x02 /* lport layer logging */
25#define FC_DISC_LOGGING 0x04 /* discovery layer logging */
26#define FC_RPORT_LOGGING 0x08 /* rport layer logging */
27#define FC_FCP_LOGGING 0x10 /* I/O path logging */
28#define FC_EM_LOGGING 0x20 /* Exchange Manager logging */
29#define FC_EXCH_LOGGING 0x40 /* Exchange/Sequence logging */
30#define FC_SCSI_LOGGING 0x80 /* SCSI logging (mostly error handling) */
31
32extern unsigned int fc_debug_logging;
33
34#define FC_CHECK_LOGGING(LEVEL, CMD) \
35 do { \
36 if (unlikely(fc_debug_logging & LEVEL)) \
37 do { \
38 CMD; \
39 } while (0); \
40 } while (0)
41
42#define FC_LIBFC_DBG(fmt, args...) \
43 FC_CHECK_LOGGING(FC_LIBFC_LOGGING, \
44 printk(KERN_INFO "libfc: " fmt, ##args))
45
46#define FC_LPORT_DBG(lport, fmt, args...) \
47 FC_CHECK_LOGGING(FC_LPORT_LOGGING, \
48 printk(KERN_INFO "host%u: lport %6x: " fmt, \
49 (lport)->host->host_no, \
50 fc_host_port_id((lport)->host), ##args))
51
52#define FC_DISC_DBG(disc, fmt, args...) \
53 FC_CHECK_LOGGING(FC_DISC_LOGGING, \
54 printk(KERN_INFO "host%u: disc: " fmt, \
55 (disc)->lport->host->host_no, \
56 ##args))
57
58#define FC_RPORT_ID_DBG(lport, port_id, fmt, args...) \
59 FC_CHECK_LOGGING(FC_RPORT_LOGGING, \
60 printk(KERN_INFO "host%u: rport %6x: " fmt, \
61 (lport)->host->host_no, \
62 (port_id), ##args))
63
64#define FC_RPORT_DBG(rdata, fmt, args...) \
65 FC_RPORT_ID_DBG((rdata)->local_port, (rdata)->ids.port_id, fmt, ##args)
66
67#define FC_FCP_DBG(pkt, fmt, args...) \
68 FC_CHECK_LOGGING(FC_FCP_LOGGING, \
69 printk(KERN_INFO "host%u: fcp: %6x: " fmt, \
70 (pkt)->lp->host->host_no, \
71 pkt->rport->port_id, ##args))
72
73#define FC_EXCH_DBG(exch, fmt, args...) \
74 FC_CHECK_LOGGING(FC_EXCH_LOGGING, \
75 printk(KERN_INFO "host%u: xid %4x: " fmt, \
76 (exch)->lp->host->host_no, \
77 exch->xid, ##args))
78
79#define FC_SCSI_DBG(lport, fmt, args...) \
80 FC_CHECK_LOGGING(FC_SCSI_LOGGING, \
81 printk(KERN_INFO "host%u: scsi: " fmt, \
82 (lport)->host->host_no, ##args))
83
84/*
85 * Set up direct-data placement for this I/O request
86 */
87void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid);
88
89/*
90 * Module setup functions
91 */
92int fc_setup_exch_mgr(void);
93void fc_destroy_exch_mgr(void);
94int fc_setup_rport(void);
95void fc_destroy_rport(void);
96int fc_setup_fcp(void);
97void fc_destroy_fcp(void);
98
99/*
100 * Internal libfc functions
101 */
102const char *fc_els_resp_type(struct fc_frame *);
103
104/*
105 * Copies a buffer into an sg list
106 */
107u32 fc_copy_buffer_to_sglist(void *buf, size_t len,
108 struct scatterlist *sg,
109 u32 *nents, size_t *offset,
110 enum km_type km_type, u32 *crc);
111
112#endif /* _FC_LIBFC_H_ */
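
These macros compile down to one masked test plus a printk, so categories that are switched off cost a single branch. A user-space rendering of the same scheme (fprintf replaces printk; the mask values match the header above, the rest is illustrative, and ##__VA_ARGS__ is the same GNU C extension the kernel's ##args relies on):

    #include <stdio.h>

    #define FC_LPORT_LOGGING 0x02
    #define FC_FCP_LOGGING   0x10

    static unsigned int fc_debug_logging = FC_LPORT_LOGGING | FC_FCP_LOGGING;

    #define FC_CHECK_LOGGING(level, cmd)                    \
            do {                                            \
                    if (fc_debug_logging & (level))         \
                            do { cmd; } while (0);          \
            } while (0)

    #define FC_LPORT_DBG(port_id, fmt, ...)                         \
            FC_CHECK_LOGGING(FC_LPORT_LOGGING,                      \
                    fprintf(stderr, "lport %6x: " fmt,              \
                            (port_id), ##__VA_ARGS__))

    int main(void)
    {
            FC_LPORT_DBG(0x010101, "link %s\n", "up");   /* printed */
            fc_debug_logging = 0;
            FC_LPORT_DBG(0x010101, "link %s\n", "down"); /* masked off */
            return 0;
    }

Because debug_logging is registered with S_IRUGO|S_IWUSR, the mask can also be changed on a running system through /sys/module/libfc/parameters/debug_logging.
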
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index bd2f77197447..d126ecfff704 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -56,7 +56,7 @@
56 * at the same time. 56 * at the same time.
57 * 57 *
58 * When discovery succeeds or fails a callback is made to the lport as 58 * When discovery succeeds or fails a callback is made to the lport as
59 * notification. Currently, succesful discovery causes the lport to take no 59 * notification. Currently, successful discovery causes the lport to take no
60 * action. A failure will cause the lport to reset. There is likely a circular 60 * action. A failure will cause the lport to reset. There is likely a circular
61 * locking problem with this implementation. 61 * locking problem with this implementation.
62 */ 62 */
@@ -88,12 +88,16 @@
88 */ 88 */
89 89
90#include <linux/timer.h> 90#include <linux/timer.h>
91#include <linux/slab.h>
91#include <asm/unaligned.h> 92#include <asm/unaligned.h>
92 93
93#include <scsi/fc/fc_gs.h> 94#include <scsi/fc/fc_gs.h>
94 95
95#include <scsi/libfc.h> 96#include <scsi/libfc.h>
96#include <scsi/fc_encode.h> 97#include <scsi/fc_encode.h>
98#include <linux/scatterlist.h>
99
100#include "fc_libfc.h"
97 101
98/* Fabric IDs to use for point-to-point mode, chosen on whims. */ 102/* Fabric IDs to use for point-to-point mode, chosen on whims. */
99#define FC_LOCAL_PTP_FID_LO 0x010101 103#define FC_LOCAL_PTP_FID_LO 0x010101
@@ -106,8 +110,7 @@ static void fc_lport_error(struct fc_lport *, struct fc_frame *);
106static void fc_lport_enter_reset(struct fc_lport *); 110static void fc_lport_enter_reset(struct fc_lport *);
107static void fc_lport_enter_flogi(struct fc_lport *); 111static void fc_lport_enter_flogi(struct fc_lport *);
108static void fc_lport_enter_dns(struct fc_lport *); 112static void fc_lport_enter_dns(struct fc_lport *);
109static void fc_lport_enter_rpn_id(struct fc_lport *); 113static void fc_lport_enter_ns(struct fc_lport *, enum fc_lport_state);
110static void fc_lport_enter_rft_id(struct fc_lport *);
111static void fc_lport_enter_scr(struct fc_lport *); 114static void fc_lport_enter_scr(struct fc_lport *);
112static void fc_lport_enter_ready(struct fc_lport *); 115static void fc_lport_enter_ready(struct fc_lport *);
113static void fc_lport_enter_logo(struct fc_lport *); 116static void fc_lport_enter_logo(struct fc_lport *);
@@ -116,14 +119,40 @@ static const char *fc_lport_state_names[] = {
116 [LPORT_ST_DISABLED] = "disabled", 119 [LPORT_ST_DISABLED] = "disabled",
117 [LPORT_ST_FLOGI] = "FLOGI", 120 [LPORT_ST_FLOGI] = "FLOGI",
118 [LPORT_ST_DNS] = "dNS", 121 [LPORT_ST_DNS] = "dNS",
119 [LPORT_ST_RPN_ID] = "RPN_ID", 122 [LPORT_ST_RNN_ID] = "RNN_ID",
123 [LPORT_ST_RSNN_NN] = "RSNN_NN",
124 [LPORT_ST_RSPN_ID] = "RSPN_ID",
120 [LPORT_ST_RFT_ID] = "RFT_ID", 125 [LPORT_ST_RFT_ID] = "RFT_ID",
126 [LPORT_ST_RFF_ID] = "RFF_ID",
121 [LPORT_ST_SCR] = "SCR", 127 [LPORT_ST_SCR] = "SCR",
122 [LPORT_ST_READY] = "Ready", 128 [LPORT_ST_READY] = "Ready",
123 [LPORT_ST_LOGO] = "LOGO", 129 [LPORT_ST_LOGO] = "LOGO",
124 [LPORT_ST_RESET] = "reset", 130 [LPORT_ST_RESET] = "reset",
125}; 131};
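
The table pairs each name with its state through C99 designated initializers, so the new name-server states (RNN_ID, RSNN_NN, RSPN_ID, RFF_ID) can be inserted mid-enum without desynchronizing the strings. A minimal illustration with a cut-down enum (not the real fc_lport_state):

    #include <stdio.h>

    enum lport_state { ST_DISABLED, ST_FLOGI, ST_RNN_ID, ST_READY };

    static const char *state_names[] = {
            [ST_DISABLED] = "disabled",
            [ST_FLOGI]    = "FLOGI",
            [ST_RNN_ID]   = "RNN_ID",   /* new entries slot in safely */
            [ST_READY]    = "Ready",
    };

    int main(void)
    {
            printf("%s\n", state_names[ST_RNN_ID]);
            return 0;
    }
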
126 132
133/**
134 * struct fc_bsg_info - FC Passthrough management structure
135 * @job: The passthrough job
136 * @lport: The local port to pass through a command
137 * @rsp_code: The expected response code
138 * @sg: job->reply_payload.sg_list
139 * @nents: job->reply_payload.sg_cnt
140 * @offset: The offset into the response data
141 */
142struct fc_bsg_info {
143 struct fc_bsg_job *job;
144 struct fc_lport *lport;
145 u16 rsp_code;
146 struct scatterlist *sg;
147 u32 nents;
148 size_t offset;
149};
150
151/**
152 * fc_frame_drop() - Dummy frame handler
153 * @lport: The local port the frame was received on
154 * @fp: The received frame
155 */
127static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp) 156static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp)
128{ 157{
129 fc_frame_free(fp); 158 fc_frame_free(fp);
@@ -150,8 +179,8 @@ static void fc_lport_rport_callback(struct fc_lport *lport,
150 switch (event) { 179 switch (event) {
151 case RPORT_EV_READY: 180 case RPORT_EV_READY:
152 if (lport->state == LPORT_ST_DNS) { 181 if (lport->state == LPORT_ST_DNS) {
153 lport->dns_rp = rdata; 182 lport->dns_rdata = rdata;
154 fc_lport_enter_rpn_id(lport); 183 fc_lport_enter_ns(lport, LPORT_ST_RNN_ID);
155 } else { 184 } else {
156 FC_LPORT_DBG(lport, "Received a READY event " 185 FC_LPORT_DBG(lport, "Received a READY event "
157 "on port (%6x) for the directory " 186 "on port (%6x) for the directory "
@@ -165,7 +194,7 @@ static void fc_lport_rport_callback(struct fc_lport *lport,
165 case RPORT_EV_LOGO: 194 case RPORT_EV_LOGO:
166 case RPORT_EV_FAILED: 195 case RPORT_EV_FAILED:
167 case RPORT_EV_STOP: 196 case RPORT_EV_STOP:
168 lport->dns_rp = NULL; 197 lport->dns_rdata = NULL;
169 break; 198 break;
170 case RPORT_EV_NONE: 199 case RPORT_EV_NONE:
171 break; 200 break;
@@ -189,8 +218,8 @@ static const char *fc_lport_state(struct fc_lport *lport)
189 218
190/** 219/**
191 * fc_lport_ptp_setup() - Create an rport for point-to-point mode 220 * fc_lport_ptp_setup() - Create an rport for point-to-point mode
192 * @lport: The lport to attach the ptp rport to 221 * @lport: The lport to attach the ptp rport to
193 * @fid: The FID of the ptp rport 222 * @remote_fid: The FID of the ptp rport
194 * @remote_wwpn: The WWPN of the ptp rport 223 * @remote_wwpn: The WWPN of the ptp rport
195 * @remote_wwnn: The WWNN of the ptp rport 224 * @remote_wwnn: The WWNN of the ptp rport
196 */ 225 */
@@ -199,18 +228,22 @@ static void fc_lport_ptp_setup(struct fc_lport *lport,
199 u64 remote_wwnn) 228 u64 remote_wwnn)
200{ 229{
201 mutex_lock(&lport->disc.disc_mutex); 230 mutex_lock(&lport->disc.disc_mutex);
202 if (lport->ptp_rp) 231 if (lport->ptp_rdata)
203 lport->tt.rport_logoff(lport->ptp_rp); 232 lport->tt.rport_logoff(lport->ptp_rdata);
204 lport->ptp_rp = lport->tt.rport_create(lport, remote_fid); 233 lport->ptp_rdata = lport->tt.rport_create(lport, remote_fid);
205 lport->ptp_rp->ids.port_name = remote_wwpn; 234 lport->ptp_rdata->ids.port_name = remote_wwpn;
206 lport->ptp_rp->ids.node_name = remote_wwnn; 235 lport->ptp_rdata->ids.node_name = remote_wwnn;
207 mutex_unlock(&lport->disc.disc_mutex); 236 mutex_unlock(&lport->disc.disc_mutex);
208 237
209 lport->tt.rport_login(lport->ptp_rp); 238 lport->tt.rport_login(lport->ptp_rdata);
210 239
211 fc_lport_enter_ready(lport); 240 fc_lport_enter_ready(lport);
212} 241}
213 242
243/**
244 * fc_get_host_port_type() - Return the port type of the given Scsi_Host
245 * @shost: The SCSI host whose port type is to be determined
246 */
214void fc_get_host_port_type(struct Scsi_Host *shost) 247void fc_get_host_port_type(struct Scsi_Host *shost)
215{ 248{
216 /* TODO - currently just NPORT */ 249 /* TODO - currently just NPORT */
@@ -218,17 +251,33 @@ void fc_get_host_port_type(struct Scsi_Host *shost)
218} 251}
219EXPORT_SYMBOL(fc_get_host_port_type); 252EXPORT_SYMBOL(fc_get_host_port_type);
220 253
254/**
255 * fc_get_host_port_state() - Return the port state of the given Scsi_Host
256 * @shost: The SCSI host whose port state is to be determined
257 */
221void fc_get_host_port_state(struct Scsi_Host *shost) 258void fc_get_host_port_state(struct Scsi_Host *shost)
222{ 259{
223 struct fc_lport *lp = shost_priv(shost); 260 struct fc_lport *lport = shost_priv(shost);
224 261
225 if (lp->link_up) 262 mutex_lock(&lport->lp_mutex);
226 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE; 263 if (!lport->link_up)
264 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
227 else 265 else
228 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; 266 switch (lport->state) {
267 case LPORT_ST_READY:
268 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
269 break;
270 default:
271 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
272 }
273 mutex_unlock(&lport->lp_mutex);
229} 274}
230EXPORT_SYMBOL(fc_get_host_port_state); 275EXPORT_SYMBOL(fc_get_host_port_state);
231 276
277/**
278 * fc_get_host_speed() - Return the speed of the given Scsi_Host
279 * @shost: The SCSI host whose port speed is to be determined
280 */
232void fc_get_host_speed(struct Scsi_Host *shost) 281void fc_get_host_speed(struct Scsi_Host *shost)
233{ 282{
234 struct fc_lport *lport = shost_priv(shost); 283 struct fc_lport *lport = shost_priv(shost);
@@ -237,24 +286,28 @@ void fc_get_host_speed(struct Scsi_Host *shost)
237} 286}
238EXPORT_SYMBOL(fc_get_host_speed); 287EXPORT_SYMBOL(fc_get_host_speed);
239 288
289/**
290 * fc_get_host_stats() - Return the Scsi_Host's statistics
291 * @shost: The SCSI host whose statistics are to be returned
292 */
240struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost) 293struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
241{ 294{
242 struct fc_host_statistics *fcoe_stats; 295 struct fc_host_statistics *fcoe_stats;
243 struct fc_lport *lp = shost_priv(shost); 296 struct fc_lport *lport = shost_priv(shost);
244 struct timespec v0, v1; 297 struct timespec v0, v1;
245 unsigned int cpu; 298 unsigned int cpu;
246 299
247 fcoe_stats = &lp->host_stats; 300 fcoe_stats = &lport->host_stats;
248 memset(fcoe_stats, 0, sizeof(struct fc_host_statistics)); 301 memset(fcoe_stats, 0, sizeof(struct fc_host_statistics));
249 302
250 jiffies_to_timespec(jiffies, &v0); 303 jiffies_to_timespec(jiffies, &v0);
251 jiffies_to_timespec(lp->boot_time, &v1); 304 jiffies_to_timespec(lport->boot_time, &v1);
252 fcoe_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec); 305 fcoe_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec);
253 306
254 for_each_possible_cpu(cpu) { 307 for_each_possible_cpu(cpu) {
255 struct fcoe_dev_stats *stats; 308 struct fcoe_dev_stats *stats;
256 309
257 stats = per_cpu_ptr(lp->dev_stats, cpu); 310 stats = per_cpu_ptr(lport->dev_stats, cpu);
258 311
259 fcoe_stats->tx_frames += stats->TxFrames; 312 fcoe_stats->tx_frames += stats->TxFrames;
260 fcoe_stats->tx_words += stats->TxWords; 313 fcoe_stats->tx_words += stats->TxWords;
@@ -279,12 +332,15 @@ struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
279} 332}
280EXPORT_SYMBOL(fc_get_host_stats); 333EXPORT_SYMBOL(fc_get_host_stats);
281 334
282/* 335/**
283 * Fill in FLOGI command for request. 336 * fc_lport_flogi_fill() - Fill in FLOGI command for request
337 * @lport: The local port the FLOGI is for
338 * @flogi: The FLOGI command
339 * @op: The opcode
284 */ 340 */
285static void 341static void fc_lport_flogi_fill(struct fc_lport *lport,
286fc_lport_flogi_fill(struct fc_lport *lport, struct fc_els_flogi *flogi, 342 struct fc_els_flogi *flogi,
287 unsigned int op) 343 unsigned int op)
288{ 344{
289 struct fc_els_csp *sp; 345 struct fc_els_csp *sp;
290 struct fc_els_cssp *cp; 346 struct fc_els_cssp *cp;
@@ -312,8 +368,10 @@ fc_lport_flogi_fill(struct fc_lport *lport, struct fc_els_flogi *flogi,
312 } 368 }
313} 369}
314 370
315/* 371/**
316 * Add a supported FC-4 type. 372 * fc_lport_add_fc4_type() - Add a supported FC-4 type to a local port
373 * @lport: The local port to add a new FC-4 type to
374 * @type: The new FC-4 type
317 */ 375 */
318static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type) 376static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type)
319{ 377{
@@ -325,11 +383,11 @@ static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type)
325 383
326/** 384/**
327 * fc_lport_recv_rlir_req() - Handle received Registered Link Incident Report. 385 * fc_lport_recv_rlir_req() - Handle received Registered Link Incident Report.
386 * @sp: The sequence in the RLIR exchange
387 * @fp: The RLIR request frame
328 * @lport: Fibre Channel local port receiving the RLIR 388 * @lport: Fibre Channel local port receiving the RLIR
329 * @sp: current sequence in the RLIR exchange
330 * @fp: RLIR request frame
331 * 389 *
332 * Locking Note: The lport lock is exected to be held before calling 390 * Locking Note: The lport lock is expected to be held before calling
333 * this function. 391 * this function.
334 */ 392 */
335static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp, 393static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp,
@@ -344,11 +402,11 @@ static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp,
344 402
345/** 403/**
346 * fc_lport_recv_echo_req() - Handle received ECHO request 404 * fc_lport_recv_echo_req() - Handle received ECHO request
347 * @lport: Fibre Channel local port recieving the ECHO 405 * @sp: The sequence in the ECHO exchange
348 * @sp: current sequence in the ECHO exchange 406 * @fp: ECHO request frame
349 * @fp: ECHO request frame 407 * @lport: The local port receiving the ECHO
350 * 408 *
351 * Locking Note: The lport lock is exected to be held before calling 409 * Locking Note: The lport lock is expected to be held before calling
352 * this function. 410 * this function.
353 */ 411 */
354static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp, 412static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp,
@@ -361,7 +419,7 @@ static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp,
361 void *dp; 419 void *dp;
362 u32 f_ctl; 420 u32 f_ctl;
363 421
364 FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n", 422 FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n",
365 fc_lport_state(lport)); 423 fc_lport_state(lport));
366 424
367 len = fr_len(in_fp) - sizeof(struct fc_frame_header); 425 len = fr_len(in_fp) - sizeof(struct fc_frame_header);
@@ -374,7 +432,7 @@ static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp,
374 if (fp) { 432 if (fp) {
375 dp = fc_frame_payload_get(fp, len); 433 dp = fc_frame_payload_get(fp, len);
376 memcpy(dp, pp, len); 434 memcpy(dp, pp, len);
377 *((u32 *)dp) = htonl(ELS_LS_ACC << 24); 435 *((__be32 *)dp) = htonl(ELS_LS_ACC << 24);
378 sp = lport->tt.seq_start_next(sp); 436 sp = lport->tt.seq_start_next(sp);
379 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ; 437 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
380 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid, 438 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
@@ -385,12 +443,12 @@ static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp,
385} 443}
386 444
387/** 445/**
388 * fc_lport_recv_echo_req() - Handle received Request Node ID data request 446 * fc_lport_recv_rnid_req() - Handle received Request Node ID data request
389 * @lport: Fibre Channel local port recieving the RNID 447 * @sp: The sequence in the RNID exchange
390 * @sp: current sequence in the RNID exchange 448 * @fp: The RNID request frame
391 * @fp: RNID request frame 449 * @lport: The local port receiving the RNID
392 * 450 *
393 * Locking Note: The lport lock is exected to be held before calling 451 * Locking Note: The lport lock is expected to be held before calling
394 * this function. 452 * this function.
395 */ 453 */
396static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp, 454static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp,
@@ -453,9 +511,9 @@ static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp,
453 511
454/** 512/**
455 * fc_lport_recv_logo_req() - Handle received fabric LOGO request 513 * fc_lport_recv_logo_req() - Handle received fabric LOGO request
456 * @lport: Fibre Channel local port recieving the LOGO 514 * @sp: The sequence in the LOGO exchange
457 * @sp: current sequence in the LOGO exchange 515 * @fp: The LOGO request frame
458 * @fp: LOGO request frame 516 * @lport: The local port receiving the LOGO
459 * 517 *
460 * Locking Note: The lport lock is expected to be held before calling 518 * Locking Note: The lport lock is expected to be held before calling
461 * this function. 519 * this function.
@@ -470,7 +528,7 @@ static void fc_lport_recv_logo_req(struct fc_seq *sp, struct fc_frame *fp,
470 528
471/** 529/**
472 * fc_fabric_login() - Start the lport state machine 530 * fc_fabric_login() - Start the lport state machine
473 * @lport: The lport that should log into the fabric 531 * @lport: The local port that should log into the fabric
474 * 532 *
475 * Locking Note: This function should not be called 533 * Locking Note: This function should not be called
476 * with the lport lock held. 534 * with the lport lock held.
@@ -480,7 +538,9 @@ int fc_fabric_login(struct fc_lport *lport)
480 int rc = -1; 538 int rc = -1;
481 539
482 mutex_lock(&lport->lp_mutex); 540 mutex_lock(&lport->lp_mutex);
483 if (lport->state == LPORT_ST_DISABLED) { 541 if (lport->state == LPORT_ST_DISABLED ||
542 lport->state == LPORT_ST_LOGO) {
543 fc_lport_state_enter(lport, LPORT_ST_RESET);
484 fc_lport_enter_reset(lport); 544 fc_lport_enter_reset(lport);
485 rc = 0; 545 rc = 0;
486 } 546 }
@@ -491,47 +551,69 @@ int fc_fabric_login(struct fc_lport *lport)
491EXPORT_SYMBOL(fc_fabric_login); 551EXPORT_SYMBOL(fc_fabric_login);
492 552
493/** 553/**
494 * fc_linkup() - Handler for transport linkup events 554 * __fc_linkup() - Handler for transport linkup events
495 * @lport: The lport whose link is up 555 * @lport: The lport whose link is up
556 *
557 * Locking: must be called with the lp_mutex held
496 */ 558 */
497void fc_linkup(struct fc_lport *lport) 559void __fc_linkup(struct fc_lport *lport)
498{ 560{
499 printk(KERN_INFO "libfc: Link up on port (%6x)\n",
500 fc_host_port_id(lport->host));
501
502 mutex_lock(&lport->lp_mutex);
503 if (!lport->link_up) { 561 if (!lport->link_up) {
504 lport->link_up = 1; 562 lport->link_up = 1;
505 563
506 if (lport->state == LPORT_ST_RESET) 564 if (lport->state == LPORT_ST_RESET)
507 fc_lport_enter_flogi(lport); 565 fc_lport_enter_flogi(lport);
508 } 566 }
567}
568
569/**
570 * fc_linkup() - Handler for transport linkup events
571 * @lport: The local port whose link is up
572 */
573void fc_linkup(struct fc_lport *lport)
574{
575 printk(KERN_INFO "host%d: libfc: Link up on port (%6x)\n",
576 lport->host->host_no, fc_host_port_id(lport->host));
577
578 mutex_lock(&lport->lp_mutex);
579 __fc_linkup(lport);
509 mutex_unlock(&lport->lp_mutex); 580 mutex_unlock(&lport->lp_mutex);
510} 581}
511EXPORT_SYMBOL(fc_linkup); 582EXPORT_SYMBOL(fc_linkup);
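
The __fc_linkup()/fc_linkup() split follows the kernel's double-underscore convention: the __ variant requires lp_mutex to already be held, while the plain variant is the locking wrapper. Paths that hold the mutex (other lport handlers, for instance) can then reuse the logic without self-deadlock. A pthreads miniature of the split (names illustrative):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lp_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int link_up;

    static void __linkup(void)      /* caller must hold lp_mutex */
    {
            if (!link_up) {
                    link_up = 1;
                    puts("link now up");
            }
    }

    static void linkup(void)        /* locking wrapper for external callers */
    {
            pthread_mutex_lock(&lp_mutex);
            __linkup();
            pthread_mutex_unlock(&lp_mutex);
    }

    int main(void)
    {
            linkup();
            return 0;
    }
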
512 583
513/** 584/**
514 * fc_linkdown() - Handler for transport linkdown events 585 * __fc_linkdown() - Handler for transport linkdown events
515 * @lport: The lport whose link is down 586 * @lport: The lport whose link is down
587 *
588 * Locking: must be called with the lp_mutex held
516 */ 589 */
517void fc_linkdown(struct fc_lport *lport) 590void __fc_linkdown(struct fc_lport *lport)
518{ 591{
519 mutex_lock(&lport->lp_mutex);
520 printk(KERN_INFO "libfc: Link down on port (%6x)\n",
521 fc_host_port_id(lport->host));
522
523 if (lport->link_up) { 592 if (lport->link_up) {
524 lport->link_up = 0; 593 lport->link_up = 0;
525 fc_lport_enter_reset(lport); 594 fc_lport_enter_reset(lport);
526 lport->tt.fcp_cleanup(lport); 595 lport->tt.fcp_cleanup(lport);
527 } 596 }
597}
598
599/**
600 * fc_linkdown() - Handler for transport linkdown events
601 * @lport: The local port whose link is down
602 */
603void fc_linkdown(struct fc_lport *lport)
604{
605 printk(KERN_INFO "host%d: libfc: Link down on port (%6x)\n",
606 lport->host->host_no, fc_host_port_id(lport->host));
607
608 mutex_lock(&lport->lp_mutex);
609 __fc_linkdown(lport);
528 mutex_unlock(&lport->lp_mutex); 610 mutex_unlock(&lport->lp_mutex);
529} 611}
530EXPORT_SYMBOL(fc_linkdown); 612EXPORT_SYMBOL(fc_linkdown);
531 613
532/** 614/**
533 * fc_fabric_logoff() - Logout of the fabric 615 * fc_fabric_logoff() - Logout of the fabric
534 * @lport: fc_lport pointer to logoff the fabric 616 * @lport: The local port to logoff the fabric
535 * 617 *
536 * Return value: 618 * Return value:
537 * 0 for success, -1 for failure 619 * 0 for success, -1 for failure
@@ -540,8 +622,8 @@ int fc_fabric_logoff(struct fc_lport *lport)
540{ 622{
541 lport->tt.disc_stop_final(lport); 623 lport->tt.disc_stop_final(lport);
542 mutex_lock(&lport->lp_mutex); 624 mutex_lock(&lport->lp_mutex);
543 if (lport->dns_rp) 625 if (lport->dns_rdata)
544 lport->tt.rport_logoff(lport->dns_rp); 626 lport->tt.rport_logoff(lport->dns_rdata);
545 mutex_unlock(&lport->lp_mutex); 627 mutex_unlock(&lport->lp_mutex);
546 lport->tt.rport_flush_queue(); 628 lport->tt.rport_flush_queue();
547 mutex_lock(&lport->lp_mutex); 629 mutex_lock(&lport->lp_mutex);
@@ -553,11 +635,9 @@ int fc_fabric_logoff(struct fc_lport *lport)
553EXPORT_SYMBOL(fc_fabric_logoff); 635EXPORT_SYMBOL(fc_fabric_logoff);
554 636
555/** 637/**
556 * fc_lport_destroy() - unregister a fc_lport 638 * fc_lport_destroy() - Unregister a fc_lport
557 * @lport: fc_lport pointer to unregister 639 * @lport: The local port to unregister
558 * 640 *
559 * Return value:
560 * None
561 * Note: 641 * Note:
562 * exit routine for fc_lport instance 642 * exit routine for fc_lport instance
563 * clean-up all the allocated memory 643 * clean-up all the allocated memory
@@ -580,13 +660,9 @@ int fc_lport_destroy(struct fc_lport *lport)
580EXPORT_SYMBOL(fc_lport_destroy); 660EXPORT_SYMBOL(fc_lport_destroy);
581 661
582/** 662/**
583 * fc_set_mfs() - sets up the mfs for the corresponding fc_lport 663 * fc_set_mfs() - Set the maximum frame size for a local port
584 * @lport: fc_lport pointer to unregister 664 * @lport: The local port to set the MFS for
585 * @mfs: the new mfs for fc_lport 665 * @mfs: The new MFS
586 *
587 * Set mfs for the given fc_lport to the new mfs.
588 *
589 * Return: 0 for success
590 */ 666 */
591int fc_set_mfs(struct fc_lport *lport, u32 mfs) 667int fc_set_mfs(struct fc_lport *lport, u32 mfs)
592{ 668{
@@ -617,7 +693,7 @@ EXPORT_SYMBOL(fc_set_mfs);
617 693
618/** 694/**
619 * fc_lport_disc_callback() - Callback for discovery events 695 * fc_lport_disc_callback() - Callback for discovery events
620 * @lport: FC local port 696 * @lport: The local port receiving the event
621 * @event: The discovery event 697 * @event: The discovery event
622 */ 698 */
623void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event) 699void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event)
@@ -627,8 +703,9 @@ void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event)
627 FC_LPORT_DBG(lport, "Discovery succeeded\n"); 703 FC_LPORT_DBG(lport, "Discovery succeeded\n");
628 break; 704 break;
629 case DISC_EV_FAILED: 705 case DISC_EV_FAILED:
630 printk(KERN_ERR "libfc: Discovery failed for port (%6x)\n", 706 printk(KERN_ERR "host%d: libfc: "
631 fc_host_port_id(lport->host)); 707 "Discovery failed for port (%6x)\n",
708 lport->host->host_no, fc_host_port_id(lport->host));
632 mutex_lock(&lport->lp_mutex); 709 mutex_lock(&lport->lp_mutex);
633 fc_lport_enter_reset(lport); 710 fc_lport_enter_reset(lport);
634 mutex_unlock(&lport->lp_mutex); 711 mutex_unlock(&lport->lp_mutex);
@@ -641,7 +718,7 @@ void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event)
641 718
642/** 719/**
643 * fc_lport_enter_ready() - Enter the ready state and start discovery 720 * fc_lport_enter_ready() - Enter the ready state and start discovery
644 * @lport: Fibre Channel local port that is ready 721 * @lport: The local port that is ready
645 * 722 *
646 * Locking Note: The lport lock is expected to be held before calling 723 * Locking Note: The lport lock is expected to be held before calling
647 * this routine. 724 * this routine.
@@ -652,22 +729,46 @@ static void fc_lport_enter_ready(struct fc_lport *lport)
652 fc_lport_state(lport)); 729 fc_lport_state(lport));
653 730
654 fc_lport_state_enter(lport, LPORT_ST_READY); 731 fc_lport_state_enter(lport, LPORT_ST_READY);
732 if (lport->vport)
733 fc_vport_set_state(lport->vport, FC_VPORT_ACTIVE);
734 fc_vports_linkchange(lport);
655 735
656 if (!lport->ptp_rp) 736 if (!lport->ptp_rdata)
657 lport->tt.disc_start(fc_lport_disc_callback, lport); 737 lport->tt.disc_start(fc_lport_disc_callback, lport);
658} 738}
659 739
660/** 740/**
741 * fc_lport_set_port_id() - Set the local port Port ID
742 * @lport: The local port which will have its Port ID set.
743 * @port_id: The new port ID.
744 * @fp: The frame containing the incoming request, or NULL.
745 *
746 * Locking Note: The lport lock is expected to be held before calling
747 * this function.
748 */
749static void fc_lport_set_port_id(struct fc_lport *lport, u32 port_id,
750 struct fc_frame *fp)
751{
752 if (port_id)
753 printk(KERN_INFO "host%d: Assigned Port ID %6x\n",
754 lport->host->host_no, port_id);
755
756 fc_host_port_id(lport->host) = port_id;
757 if (lport->tt.lport_set_port_id)
758 lport->tt.lport_set_port_id(lport, port_id, fp);
759}
760
761/**
661 * fc_lport_recv_flogi_req() - Receive a FLOGI request 762 * fc_lport_recv_flogi_req() - Receive a FLOGI request
662 * @sp_in: The sequence the FLOGI is on 763 * @sp_in: The sequence the FLOGI is on
663 * @rx_fp: The frame the FLOGI is in 764 * @rx_fp: The FLOGI frame
664 * @lport: The lport that recieved the request 765 * @lport: The local port that received the request
665 * 766 *
666 * A received FLOGI request indicates a point-to-point connection. 767 * A received FLOGI request indicates a point-to-point connection.
667 * Accept it with the common service parameters indicating our N port. 768 * Accept it with the common service parameters indicating our N port.
668 * Set up to do a PLOGI if we have the higher-number WWPN. 769 * Set up to do a PLOGI if we have the higher-number WWPN.
669 * 770 *
670 * Locking Note: The lport lock is exected to be held before calling 771 * Locking Note: The lport lock is expected to be held before calling
671 * this function. 772 * this function.
672 */ 773 */
673static void fc_lport_recv_flogi_req(struct fc_seq *sp_in, 774static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
@@ -695,8 +796,9 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
695 goto out; 796 goto out;
696 remote_wwpn = get_unaligned_be64(&flp->fl_wwpn); 797 remote_wwpn = get_unaligned_be64(&flp->fl_wwpn);
697 if (remote_wwpn == lport->wwpn) { 798 if (remote_wwpn == lport->wwpn) {
698 printk(KERN_WARNING "libfc: Received FLOGI from port " 799 printk(KERN_WARNING "host%d: libfc: Received FLOGI from port "
699 "with same WWPN %llx\n", remote_wwpn); 800 "with same WWPN %llx\n",
801 lport->host->host_no, remote_wwpn);
700 goto out; 802 goto out;
701 } 803 }
702 FC_LPORT_DBG(lport, "FLOGI from port WWPN %llx\n", remote_wwpn); 804 FC_LPORT_DBG(lport, "FLOGI from port WWPN %llx\n", remote_wwpn);
@@ -715,7 +817,7 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
715 remote_fid = FC_LOCAL_PTP_FID_HI; 817 remote_fid = FC_LOCAL_PTP_FID_HI;
716 } 818 }
717 819
718 fc_host_port_id(lport->host) = local_fid; 820 fc_lport_set_port_id(lport, local_fid, rx_fp);
719 821
720 fp = fc_frame_alloc(lport, sizeof(*flp)); 822 fp = fc_frame_alloc(lport, sizeof(*flp));
721 if (fp) { 823 if (fp) {
@@ -747,9 +849,9 @@ out:
747 849
748/** 850/**
749 * fc_lport_recv_req() - The generic lport request handler 851 * fc_lport_recv_req() - The generic lport request handler
750 * @lport: The lport that received the request 852 * @lport: The local port that received the request
751 * @sp: The sequence the request is on 853 * @sp: The sequence the request is on
752 * @fp: The frame the request is in 854 * @fp: The request frame
753 * 855 *
754 * This function will see if the lport handles the request or 856 * This function will see if the lport handles the request or
755 * if an rport should handle the request. 857 * if an rport should handle the request.
@@ -817,8 +919,8 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp,
817} 919}
818 920
819/** 921/**
820 * fc_lport_reset() - Reset an lport 922 * fc_lport_reset() - Reset a local port
821 * @lport: The lport which should be reset 923 * @lport: The local port which should be reset
822 * 924 *
823 * Locking Note: This function should not be called with the 925 * Locking Note: This function should not be called with the
824 * lport lock held. 926 * lport lock held.
@@ -834,29 +936,31 @@ int fc_lport_reset(struct fc_lport *lport)
834EXPORT_SYMBOL(fc_lport_reset); 936EXPORT_SYMBOL(fc_lport_reset);
835 937
836/** 938/**
837 * fc_lport_reset_locked() - Reset the local port 939 * fc_lport_reset_locked() - Reset the local port w/ the lport lock held
838 * @lport: Fibre Channel local port to be reset 940 * @lport: The local port to be reset
839 * 941 *
840 * Locking Note: The lport lock is expected to be held before calling 942 * Locking Note: The lport lock is expected to be held before calling
841 * this routine. 943 * this routine.
842 */ 944 */
843static void fc_lport_reset_locked(struct fc_lport *lport) 945static void fc_lport_reset_locked(struct fc_lport *lport)
844{ 946{
845 if (lport->dns_rp) 947 if (lport->dns_rdata)
846 lport->tt.rport_logoff(lport->dns_rp); 948 lport->tt.rport_logoff(lport->dns_rdata);
847 949
848 lport->ptp_rp = NULL; 950 lport->ptp_rdata = NULL;
849 951
850 lport->tt.disc_stop(lport); 952 lport->tt.disc_stop(lport);
851 953
852 lport->tt.exch_mgr_reset(lport, 0, 0); 954 lport->tt.exch_mgr_reset(lport, 0, 0);
853 fc_host_fabric_name(lport->host) = 0; 955 fc_host_fabric_name(lport->host) = 0;
854 fc_host_port_id(lport->host) = 0; 956
957 if (fc_host_port_id(lport->host))
958 fc_lport_set_port_id(lport, 0, NULL);
855} 959}
856 960
857/** 961/**
858 * fc_lport_enter_reset() - Reset the local port 962 * fc_lport_enter_reset() - Reset the local port
859 * @lport: Fibre Channel local port to be reset 963 * @lport: The local port to be reset
860 * 964 *
861 * Locking Note: The lport lock is expected to be held before calling 965 * Locking Note: The lport lock is expected to be held before calling
862 * this routine. 966 * this routine.
@@ -866,15 +970,25 @@ static void fc_lport_enter_reset(struct fc_lport *lport)
866 FC_LPORT_DBG(lport, "Entered RESET state from %s state\n", 970 FC_LPORT_DBG(lport, "Entered RESET state from %s state\n",
867 fc_lport_state(lport)); 971 fc_lport_state(lport));
868 972
973 if (lport->state == LPORT_ST_DISABLED || lport->state == LPORT_ST_LOGO)
974 return;
975
976 if (lport->vport) {
977 if (lport->link_up)
978 fc_vport_set_state(lport->vport, FC_VPORT_INITIALIZING);
979 else
980 fc_vport_set_state(lport->vport, FC_VPORT_LINKDOWN);
981 }
869 fc_lport_state_enter(lport, LPORT_ST_RESET); 982 fc_lport_state_enter(lport, LPORT_ST_RESET);
983 fc_vports_linkchange(lport);
870 fc_lport_reset_locked(lport); 984 fc_lport_reset_locked(lport);
871 if (lport->link_up) 985 if (lport->link_up)
872 fc_lport_enter_flogi(lport); 986 fc_lport_enter_flogi(lport);
873} 987}
874 988
875/** 989/**
876 * fc_lport_enter_disabled() - disable the local port 990 * fc_lport_enter_disabled() - Disable the local port
877 * @lport: Fibre Channel local port to be reset 991 * @lport: The local port to be reset
878 * 992 *
879 * Locking Note: The lport lock is expected to be held before calling 993 * Locking Note: The lport lock is expected to be held before calling
880 * this routine. 994 * this routine.
@@ -885,13 +999,14 @@ static void fc_lport_enter_disabled(struct fc_lport *lport)
885 fc_lport_state(lport)); 999 fc_lport_state(lport));
886 1000
887 fc_lport_state_enter(lport, LPORT_ST_DISABLED); 1001 fc_lport_state_enter(lport, LPORT_ST_DISABLED);
1002 fc_vports_linkchange(lport);
888 fc_lport_reset_locked(lport); 1003 fc_lport_reset_locked(lport);
889} 1004}
890 1005
891/** 1006/**
892 * fc_lport_error() - Handler for any errors 1007 * fc_lport_error() - Handler for any errors
893 * @lport: The fc_lport object 1008 * @lport: The local port that the error was on
894 * @fp: The frame pointer 1009 * @fp: The error code encoded in a frame pointer
895 * 1010 *
896 * If the error was caused by a resource allocation failure 1011 * If the error was caused by a resource allocation failure
897 * then wait for half a second and retry, otherwise retry 1012 * then wait for half a second and retry, otherwise retry
@@ -922,8 +1037,11 @@ static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp)
922 case LPORT_ST_DISABLED: 1037 case LPORT_ST_DISABLED:
923 case LPORT_ST_READY: 1038 case LPORT_ST_READY:
924 case LPORT_ST_RESET: 1039 case LPORT_ST_RESET:
925 case LPORT_ST_RPN_ID: 1040 case LPORT_ST_RNN_ID:
1041 case LPORT_ST_RSNN_NN:
1042 case LPORT_ST_RSPN_ID:
926 case LPORT_ST_RFT_ID: 1043 case LPORT_ST_RFT_ID:
1044 case LPORT_ST_RFF_ID:
927 case LPORT_ST_SCR: 1045 case LPORT_ST_SCR:
928 case LPORT_ST_DNS: 1046 case LPORT_ST_DNS:
929 case LPORT_ST_FLOGI: 1047 case LPORT_ST_FLOGI:
@@ -936,33 +1054,33 @@ static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp)
936} 1054}
937 1055
938/** 1056/**
939 * fc_lport_rft_id_resp() - Handle response to Register Fibre 1057 * fc_lport_ns_resp() - Handle response to a name server
940 * Channel Types by ID (RFT_ID) request 1058 * registration exchange
941 * @sp: current sequence in RPN_ID exchange 1059 * @sp: current sequence in exchange
942 * @fp: response frame 1060 * @fp: response frame
943 * @lp_arg: Fibre Channel host port instance 1061 * @lp_arg: Fibre Channel host port instance
944 * 1062 *
945 * Locking Note: This function will be called without the lport lock 1063 * Locking Note: This function will be called without the lport lock
946 * held, but it will lock, call an _enter_* function or fc_lport_error 1064 * held, but it will lock, call an _enter_* function or fc_lport_error()
947 * and then unlock the lport. 1065 * and then unlock the lport.
948 */ 1066 */
949static void fc_lport_rft_id_resp(struct fc_seq *sp, struct fc_frame *fp, 1067static void fc_lport_ns_resp(struct fc_seq *sp, struct fc_frame *fp,
950 void *lp_arg) 1068 void *lp_arg)
951{ 1069{
952 struct fc_lport *lport = lp_arg; 1070 struct fc_lport *lport = lp_arg;
953 struct fc_frame_header *fh; 1071 struct fc_frame_header *fh;
954 struct fc_ct_hdr *ct; 1072 struct fc_ct_hdr *ct;
955 1073
956 FC_LPORT_DBG(lport, "Received a RFT_ID %s\n", fc_els_resp_type(fp)); 1074 FC_LPORT_DBG(lport, "Received a ns %s\n", fc_els_resp_type(fp));
957 1075
958 if (fp == ERR_PTR(-FC_EX_CLOSED)) 1076 if (fp == ERR_PTR(-FC_EX_CLOSED))
959 return; 1077 return;
960 1078
961 mutex_lock(&lport->lp_mutex); 1079 mutex_lock(&lport->lp_mutex);
962 1080
963 if (lport->state != LPORT_ST_RFT_ID) { 1081 if (lport->state < LPORT_ST_RNN_ID || lport->state > LPORT_ST_RFF_ID) {
964 FC_LPORT_DBG(lport, "Received a RFT_ID response, but in state " 1082 FC_LPORT_DBG(lport, "Received a name server response, "
965 "%s\n", fc_lport_state(lport)); 1083 "but in state %s\n", fc_lport_state(lport));
966 if (IS_ERR(fp)) 1084 if (IS_ERR(fp))
967 goto err; 1085 goto err;
968 goto out; 1086 goto out;
@@ -980,63 +1098,28 @@ static void fc_lport_rft_id_resp(struct fc_seq *sp, struct fc_frame *fp,
980 ct->ct_fs_type == FC_FST_DIR && 1098 ct->ct_fs_type == FC_FST_DIR &&
981 ct->ct_fs_subtype == FC_NS_SUBTYPE && 1099 ct->ct_fs_subtype == FC_NS_SUBTYPE &&
982 ntohs(ct->ct_cmd) == FC_FS_ACC) 1100 ntohs(ct->ct_cmd) == FC_FS_ACC)
983 fc_lport_enter_scr(lport); 1101 switch (lport->state) {
984 else 1102 case LPORT_ST_RNN_ID:
985 fc_lport_error(lport, fp); 1103 fc_lport_enter_ns(lport, LPORT_ST_RSNN_NN);
986out: 1104 break;
987 fc_frame_free(fp); 1105 case LPORT_ST_RSNN_NN:
988err: 1106 fc_lport_enter_ns(lport, LPORT_ST_RSPN_ID);
989 mutex_unlock(&lport->lp_mutex); 1107 break;
990} 1108 case LPORT_ST_RSPN_ID:
991 1109 fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
992/** 1110 break;
993 * fc_lport_rpn_id_resp() - Handle response to Register Port 1111 case LPORT_ST_RFT_ID:
994 * Name by ID (RPN_ID) request 1112 fc_lport_enter_ns(lport, LPORT_ST_RFF_ID);
995 * @sp: current sequence in RPN_ID exchange 1113 break;
996 * @fp: response frame 1114 case LPORT_ST_RFF_ID:
997 * @lp_arg: Fibre Channel host port instance 1115 fc_lport_enter_scr(lport);
998 * 1116 break;
999 * Locking Note: This function will be called without the lport lock 1117 default:
1000 * held, but it will lock, call an _enter_* function or fc_lport_error 1118 /* should have already been caught by state checks */
1001 * and then unlock the lport. 1119 break;
1002 */ 1120 }
1003static void fc_lport_rpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
1004 void *lp_arg)
1005{
1006 struct fc_lport *lport = lp_arg;
1007 struct fc_frame_header *fh;
1008 struct fc_ct_hdr *ct;
1009
1010 FC_LPORT_DBG(lport, "Received a RPN_ID %s\n", fc_els_resp_type(fp));
1011
1012 if (fp == ERR_PTR(-FC_EX_CLOSED))
1013 return;
1014
1015 mutex_lock(&lport->lp_mutex);
1016
1017 if (lport->state != LPORT_ST_RPN_ID) {
1018 FC_LPORT_DBG(lport, "Received a RPN_ID response, but in state "
1019 "%s\n", fc_lport_state(lport));
1020 if (IS_ERR(fp))
1021 goto err;
1022 goto out;
1023 }
1024
1025 if (IS_ERR(fp)) {
1026 fc_lport_error(lport, fp);
1027 goto err;
1028 }
1029
1030 fh = fc_frame_header_get(fp);
1031 ct = fc_frame_payload_get(fp, sizeof(*ct));
1032 if (fh && ct && fh->fh_type == FC_TYPE_CT &&
1033 ct->ct_fs_type == FC_FST_DIR &&
1034 ct->ct_fs_subtype == FC_NS_SUBTYPE &&
1035 ntohs(ct->ct_cmd) == FC_FS_ACC)
1036 fc_lport_enter_rft_id(lport);
1037 else 1121 else
1038 fc_lport_error(lport, fp); 1122 fc_lport_error(lport, fp);
1039
1040out: 1123out:
1041 fc_frame_free(fp); 1124 fc_frame_free(fp);
1042err: 1125err:
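
The switch above encodes a fixed ladder of name-server registrations, each accepted CT response advancing one rung until SCR. A hypothetical helper (not part of the patch) that restates the progression:

static enum fc_lport_state next_ns_state(enum fc_lport_state cur)
{
        switch (cur) {
        case LPORT_ST_RNN_ID:   return LPORT_ST_RSNN_NN;
        case LPORT_ST_RSNN_NN:  return LPORT_ST_RSPN_ID;
        case LPORT_ST_RSPN_ID:  return LPORT_ST_RFT_ID;
        case LPORT_ST_RFT_ID:   return LPORT_ST_RFF_ID;
        default:                return LPORT_ST_SCR;    /* ladder complete */
        }
}
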
@@ -1045,8 +1128,8 @@ err:
1045 1128
1046/** 1129/**
1047 * fc_lport_scr_resp() - Handle response to State Change Register (SCR) request 1130 * fc_lport_scr_resp() - Handle response to State Change Register (SCR) request
1048 * @sp: current sequence in SCR exchange 1131 * @sp: current sequence in SCR exchange
1049 * @fp: response frame 1132 * @fp: response frame
1050 * @lp_arg: Fibre Channel local port instance that sent the registration request 1133 * @lp_arg: Fibre Channel local port instance that sent the registration request
1051 * 1134 *
1052 * Locking Note: This function will be called without the lport lock 1135 * Locking Note: This function will be called without the lport lock
@@ -1092,8 +1175,8 @@ err:
1092} 1175}
1093 1176
1094/** 1177/**
1095 * fc_lport_enter_scr() - Send a State Change Register (SCR) request 1178 * fc_lport_enter_scr() - Send a SCR (State Change Register) request
1096 * @lport: Fibre Channel local port to register for state changes 1179 * @lport: The local port to register for state changes
1097 * 1180 *
1098 * Locking Note: The lport lock is expected to be held before calling 1181 * Locking Note: The lport lock is expected to be held before calling
1099 * this routine. 1182 * this routine.
@@ -1114,78 +1197,74 @@ static void fc_lport_enter_scr(struct fc_lport *lport)
1114 } 1197 }
1115 1198
1116 if (!lport->tt.elsct_send(lport, FC_FID_FCTRL, fp, ELS_SCR, 1199 if (!lport->tt.elsct_send(lport, FC_FID_FCTRL, fp, ELS_SCR,
1117 fc_lport_scr_resp, lport, lport->e_d_tov)) 1200 fc_lport_scr_resp, lport,
1118 fc_lport_error(lport, fp); 1201 2 * lport->r_a_tov))
1202 fc_lport_error(lport, NULL);
1119} 1203}
1120 1204
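
Note the timeout change in this hunk: SCR now waits 2 * R_A_TOV instead of E_D_TOV, and later hunks apply the same rule to LOGO and FDISC, with 3 * R_A_TOV for name-server CT traffic. A hypothetical helper expressing the convention (illustrative only; the kernel code inlines these expressions):

static unsigned int my_fabric_tov(struct fc_lport *lport, int is_ct)
{
        /* fabric services are bounded by R_A_TOV, not E_D_TOV */
        return (is_ct ? 3 : 2) * lport->r_a_tov;
}
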
1121/** 1205/**
1122 * fc_lport_enter_rft_id() - Register FC4-types with the name server 1206 * fc_lport_enter_ns() - register some object with the name server
1123 * @lport: Fibre Channel local port to register 1207 * @lport: Fibre Channel local port to register
1124 * 1208 *
1125 * Locking Note: The lport lock is expected to be held before calling 1209 * Locking Note: The lport lock is expected to be held before calling
1126 * this routine. 1210 * this routine.
1127 */ 1211 */
1128static void fc_lport_enter_rft_id(struct fc_lport *lport) 1212static void fc_lport_enter_ns(struct fc_lport *lport, enum fc_lport_state state)
1129{ 1213{
1130 struct fc_frame *fp; 1214 struct fc_frame *fp;
1131 struct fc_ns_fts *lps; 1215 enum fc_ns_req cmd;
1132 int i; 1216 int size = sizeof(struct fc_ct_hdr);
1217 size_t len;
1133 1218
1134 FC_LPORT_DBG(lport, "Entered RFT_ID state from %s state\n", 1219 FC_LPORT_DBG(lport, "Entered %s state from %s state\n",
1220 fc_lport_state_names[state],
1135 fc_lport_state(lport)); 1221 fc_lport_state(lport));
1136 1222
1137 fc_lport_state_enter(lport, LPORT_ST_RFT_ID); 1223 fc_lport_state_enter(lport, state);
1138 1224
1139 lps = &lport->fcts; 1225 switch (state) {
1140 i = sizeof(lps->ff_type_map) / sizeof(lps->ff_type_map[0]); 1226 case LPORT_ST_RNN_ID:
1141 while (--i >= 0) 1227 cmd = FC_NS_RNN_ID;
1142 if (ntohl(lps->ff_type_map[i]) != 0) 1228 size += sizeof(struct fc_ns_rn_id);
1143 break; 1229 break;
1144 if (i < 0) { 1230 case LPORT_ST_RSNN_NN:
1145 /* nothing to register, move on to SCR */ 1231 len = strnlen(fc_host_symbolic_name(lport->host), 255);
1146 fc_lport_enter_scr(lport); 1232 /* if there is no symbolic name, skip to RFT_ID */
1147 return; 1233 if (!len)
1148 } 1234 return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
1149 1235 cmd = FC_NS_RSNN_NN;
1150 fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) + 1236 size += sizeof(struct fc_ns_rsnn) + len;
1151 sizeof(struct fc_ns_rft)); 1237 break;
1152 if (!fp) { 1238 case LPORT_ST_RSPN_ID:
1153 fc_lport_error(lport, fp); 1239 len = strnlen(fc_host_symbolic_name(lport->host), 255);
1240 /* if there is no symbolic name, skip to RFT_ID */
1241 if (!len)
1242 return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
1243 cmd = FC_NS_RSPN_ID;
1244 size += sizeof(struct fc_ns_rspn) + len;
1245 break;
1246 case LPORT_ST_RFT_ID:
1247 cmd = FC_NS_RFT_ID;
1248 size += sizeof(struct fc_ns_rft);
1249 break;
1250 case LPORT_ST_RFF_ID:
1251 cmd = FC_NS_RFF_ID;
1252 size += sizeof(struct fc_ns_rff_id);
1253 break;
1254 default:
1255 fc_lport_error(lport, NULL);
1154 return; 1256 return;
1155 } 1257 }
1156 1258
1157 if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RFT_ID, 1259 fp = fc_frame_alloc(lport, size);
1158 fc_lport_rft_id_resp,
1159 lport, lport->e_d_tov))
1160 fc_lport_error(lport, fp);
1161}
1162
1163/**
1164 * fc_lport_enter_rpn_id() - Register port name with the name server
1165 * @lport: Fibre Channel local port to register
1166 *
1167 * Locking Note: The lport lock is expected to be held before calling
1168 * this routine.
1169 */
1170static void fc_lport_enter_rpn_id(struct fc_lport *lport)
1171{
1172 struct fc_frame *fp;
1173
1174 FC_LPORT_DBG(lport, "Entered RPN_ID state from %s state\n",
1175 fc_lport_state(lport));
1176
1177 fc_lport_state_enter(lport, LPORT_ST_RPN_ID);
1178
1179 fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
1180 sizeof(struct fc_ns_rn_id));
1181 if (!fp) { 1260 if (!fp) {
1182 fc_lport_error(lport, fp); 1261 fc_lport_error(lport, fp);
1183 return; 1262 return;
1184 } 1263 }
1185 1264
1186 if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RPN_ID, 1265 if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, cmd,
1187 fc_lport_rpn_id_resp, 1266 fc_lport_ns_resp,
1188 lport, lport->e_d_tov)) 1267 lport, 3 * lport->r_a_tov))
1189 fc_lport_error(lport, fp); 1268 fc_lport_error(lport, fp);
1190} 1269}
1191 1270
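
fc_lport_enter_ns() sizes each CT frame as a common header plus a per-request payload, and the symbolic-name registrations add the name length, capped at 255 bytes. A sketch of the RSNN_NN case, with a hypothetical helper name:

static size_t my_rsnn_nn_size(struct fc_lport *lport)
{
        /* a 12-character symbolic name yields
         * sizeof(struct fc_ct_hdr) + sizeof(struct fc_ns_rsnn) + 12 */
        size_t len = strnlen(fc_host_symbolic_name(lport->host), 255);

        return sizeof(struct fc_ct_hdr) + sizeof(struct fc_ns_rsnn) + len;
}
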
@@ -1194,8 +1273,8 @@ static struct fc_rport_operations fc_lport_rport_ops = {
1194}; 1273};
1195 1274
1196/** 1275/**
1197 * fc_lport_enter_dns() - Create a rport to the name server 1276 * fc_lport_enter_dns() - Create a fc_rport for the name server
1198 * @lport: Fibre Channel local port requesting a rport for the name server 1277 * @lport: The local port requesting a remote port for the name server
1199 * 1278 *
1200 * Locking Note: The lport lock is expected to be held before calling 1279 * Locking Note: The lport lock is expected to be held before calling
1201 * this routine. 1280 * this routine.
@@ -1224,8 +1303,8 @@ err:
1224} 1303}
1225 1304
1226/** 1305/**
1227 * fc_lport_timeout() - Handler for the retry_work timer. 1306 * fc_lport_timeout() - Handler for the retry_work timer
1228 * @work: The work struct of the fc_lport 1307 * @work: The work struct of the local port
1229 */ 1308 */
1230static void fc_lport_timeout(struct work_struct *work) 1309static void fc_lport_timeout(struct work_struct *work)
1231{ 1310{
@@ -1237,21 +1316,25 @@ static void fc_lport_timeout(struct work_struct *work)
1237 1316
1238 switch (lport->state) { 1317 switch (lport->state) {
1239 case LPORT_ST_DISABLED: 1318 case LPORT_ST_DISABLED:
1319 WARN_ON(1);
1320 break;
1240 case LPORT_ST_READY: 1321 case LPORT_ST_READY:
1241 case LPORT_ST_RESET:
1242 WARN_ON(1); 1322 WARN_ON(1);
1243 break; 1323 break;
1324 case LPORT_ST_RESET:
1325 break;
1244 case LPORT_ST_FLOGI: 1326 case LPORT_ST_FLOGI:
1245 fc_lport_enter_flogi(lport); 1327 fc_lport_enter_flogi(lport);
1246 break; 1328 break;
1247 case LPORT_ST_DNS: 1329 case LPORT_ST_DNS:
1248 fc_lport_enter_dns(lport); 1330 fc_lport_enter_dns(lport);
1249 break; 1331 break;
1250 case LPORT_ST_RPN_ID: 1332 case LPORT_ST_RNN_ID:
1251 fc_lport_enter_rpn_id(lport); 1333 case LPORT_ST_RSNN_NN:
1252 break; 1334 case LPORT_ST_RSPN_ID:
1253 case LPORT_ST_RFT_ID: 1335 case LPORT_ST_RFT_ID:
1254 fc_lport_enter_rft_id(lport); 1336 case LPORT_ST_RFF_ID:
1337 fc_lport_enter_ns(lport, lport->state);
1255 break; 1338 break;
1256 case LPORT_ST_SCR: 1339 case LPORT_ST_SCR:
1257 fc_lport_enter_scr(lport); 1340 fc_lport_enter_scr(lport);
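
Each retryable case above is re-entered from retry_work after a delay armed in the error path. The arming side is not part of this hunk; a plausible sketch, grounded only in what is visible here (a NULL frame reaches fc_lport_error() on fc_frame_alloc()/elsct_send() failure, the "wait for half a second" case its comment describes):

static void my_lport_retry(struct fc_lport *lport, struct fc_frame *fp)
{
        unsigned long delay = 0;

        if (!fp)                        /* resource failure: back off */
                delay = msecs_to_jiffies(500);
        schedule_delayed_work(&lport->retry_work, delay);
}
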
@@ -1266,16 +1349,16 @@ static void fc_lport_timeout(struct work_struct *work)
1266 1349
1267/** 1350/**
1268 * fc_lport_logo_resp() - Handle response to LOGO request 1351 * fc_lport_logo_resp() - Handle response to LOGO request
1269 * @sp: current sequence in LOGO exchange 1352 * @sp: The sequence that the LOGO was on
1270 * @fp: response frame 1353 * @fp: The LOGO frame
1271 * @lp_arg: Fibre Channel local port instance that sent the LOGO request 1354 * @lp_arg: The local port that received the LOGO response
1272 * 1355 *
1273 * Locking Note: This function will be called without the lport lock 1356 * Locking Note: This function will be called without the lport lock
1274 * held, but it will lock, call an _enter_* function or fc_lport_error 1357 * held, but it will lock, call an _enter_* function or fc_lport_error()
1275 * and then unlock the lport. 1358 * and then unlock the lport.
1276 */ 1359 */
1277static void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, 1360void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
1278 void *lp_arg) 1361 void *lp_arg)
1279{ 1362{
1280 struct fc_lport *lport = lp_arg; 1363 struct fc_lport *lport = lp_arg;
1281 u8 op; 1364 u8 op;
@@ -1311,10 +1394,11 @@ out:
1311err: 1394err:
1312 mutex_unlock(&lport->lp_mutex); 1395 mutex_unlock(&lport->lp_mutex);
1313} 1396}
1397EXPORT_SYMBOL(fc_lport_logo_resp);
1314 1398
1315/** 1399/**
1316 * fc_lport_enter_logo() - Logout of the fabric 1400 * fc_lport_enter_logo() - Logout of the fabric
1317 * @lport: Fibre Channel local port to be logged out 1401 * @lport: The local port to be logged out
1318 * 1402 *
1319 * Locking Note: The lport lock is expected to be held before calling 1403 * Locking Note: The lport lock is expected to be held before calling
1320 * this routine. 1404 * this routine.
@@ -1328,6 +1412,7 @@ static void fc_lport_enter_logo(struct fc_lport *lport)
1328 fc_lport_state(lport)); 1412 fc_lport_state(lport));
1329 1413
1330 fc_lport_state_enter(lport, LPORT_ST_LOGO); 1414 fc_lport_state_enter(lport, LPORT_ST_LOGO);
1415 fc_vports_linkchange(lport);
1331 1416
1332 fp = fc_frame_alloc(lport, sizeof(*logo)); 1417 fp = fc_frame_alloc(lport, sizeof(*logo));
1333 if (!fp) { 1418 if (!fp) {
@@ -1336,22 +1421,23 @@ static void fc_lport_enter_logo(struct fc_lport *lport)
1336 } 1421 }
1337 1422
1338 if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_LOGO, 1423 if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_LOGO,
1339 fc_lport_logo_resp, lport, lport->e_d_tov)) 1424 fc_lport_logo_resp, lport,
1340 fc_lport_error(lport, fp); 1425 2 * lport->r_a_tov))
1426 fc_lport_error(lport, NULL);
1341} 1427}
1342 1428
1343/** 1429/**
1344 * fc_lport_flogi_resp() - Handle response to FLOGI request 1430 * fc_lport_flogi_resp() - Handle response to FLOGI request
1345 * @sp: current sequence in FLOGI exchange 1431 * @sp: The sequence that the FLOGI was on
1346 * @fp: response frame 1432 * @fp: The FLOGI response frame
1347 * @lp_arg: Fibre Channel local port instance that sent the FLOGI request 1433 * @lp_arg: The local port that received the FLOGI response
1348 * 1434 *
1349 * Locking Note: This function will be called without the lport lock 1435 * Locking Note: This function will be called without the lport lock
1350 * held, but it will lock, call an _enter_* function or fc_lport_error 1436 * held, but it will lock, call an _enter_* function or fc_lport_error()
1351 * and then unlock the lport. 1437 * and then unlock the lport.
1352 */ 1438 */
1353static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, 1439void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
1354 void *lp_arg) 1440 void *lp_arg)
1355{ 1441{
1356 struct fc_lport *lport = lp_arg; 1442 struct fc_lport *lport = lp_arg;
1357 struct fc_frame_header *fh; 1443 struct fc_frame_header *fh;
@@ -1385,11 +1471,6 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
1385 fh = fc_frame_header_get(fp); 1471 fh = fc_frame_header_get(fp);
1386 did = ntoh24(fh->fh_d_id); 1472 did = ntoh24(fh->fh_d_id);
1387 if (fc_frame_payload_op(fp) == ELS_LS_ACC && did != 0) { 1473 if (fc_frame_payload_op(fp) == ELS_LS_ACC && did != 0) {
1388
1389 printk(KERN_INFO "libfc: Assigned FID (%6x) in FLOGI response\n",
1390 did);
1391 fc_host_port_id(lport->host) = did;
1392
1393 flp = fc_frame_payload_get(fp, sizeof(*flp)); 1474 flp = fc_frame_payload_get(fp, sizeof(*flp));
1394 if (flp) { 1475 if (flp) {
1395 mfs = ntohs(flp->fl_csp.sp_bb_data) & 1476 mfs = ntohs(flp->fl_csp.sp_bb_data) &
@@ -1402,12 +1483,18 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
1402 e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov); 1483 e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
1403 if (csp_flags & FC_SP_FT_EDTR) 1484 if (csp_flags & FC_SP_FT_EDTR)
1404 e_d_tov /= 1000000; 1485 e_d_tov /= 1000000;
1486
1487 lport->npiv_enabled = !!(csp_flags & FC_SP_FT_NPIV_ACC);
1488
1405 if ((csp_flags & FC_SP_FT_FPORT) == 0) { 1489 if ((csp_flags & FC_SP_FT_FPORT) == 0) {
1406 if (e_d_tov > lport->e_d_tov) 1490 if (e_d_tov > lport->e_d_tov)
1407 lport->e_d_tov = e_d_tov; 1491 lport->e_d_tov = e_d_tov;
1408 lport->r_a_tov = 2 * e_d_tov; 1492 lport->r_a_tov = 2 * e_d_tov;
1409 printk(KERN_INFO "libfc: Port (%6x) entered " 1493 fc_lport_set_port_id(lport, did, fp);
1410 "point to point mode\n", did); 1494 printk(KERN_INFO "host%d: libfc: "
1495 "Port (%6x) entered "
1496 "point-to-point mode\n",
1497 lport->host->host_no, did);
1411 fc_lport_ptp_setup(lport, ntoh24(fh->fh_s_id), 1498 fc_lport_ptp_setup(lport, ntoh24(fh->fh_s_id),
1412 get_unaligned_be64( 1499 get_unaligned_be64(
1413 &flp->fl_wwpn), 1500 &flp->fl_wwpn),
@@ -1418,6 +1505,7 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
1418 lport->r_a_tov = r_a_tov; 1505 lport->r_a_tov = r_a_tov;
1419 fc_host_fabric_name(lport->host) = 1506 fc_host_fabric_name(lport->host) =
1420 get_unaligned_be64(&flp->fl_wwnn); 1507 get_unaligned_be64(&flp->fl_wwnn);
1508 fc_lport_set_port_id(lport, did, fp);
1421 fc_lport_enter_dns(lport); 1509 fc_lport_enter_dns(lport);
1422 } 1510 }
1423 } 1511 }
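
Three decisions above hang off the FLOGI common service parameters: E_D_TOV resolution, the new NPIV-accepted flag, and the F_Port bit separating fabric from point-to-point. A condensed restatement (hypothetical function; flag and field names as used in the hunk):

static void my_parse_flogi_csp(struct fc_lport *lport,
                               struct fc_els_flogi *flp)
{
        u16 csp_flags = ntohs(flp->fl_csp.sp_features);
        unsigned int e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);

        if (csp_flags & FC_SP_FT_EDTR)
                e_d_tov /= 1000000;     /* advertised in ns; libfc keeps ms */

        lport->npiv_enabled = !!(csp_flags & FC_SP_FT_NPIV_ACC);

        if (csp_flags & FC_SP_FT_FPORT) {
                /* fabric F_Port: adopt R_A_TOV and fabric name, query dNS */
        } else {
                /* point-to-point: the higher WWPN initiates PLOGI */
                if (e_d_tov > lport->e_d_tov)
                        lport->e_d_tov = e_d_tov;
                lport->r_a_tov = 2 * e_d_tov;
        }
}
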
@@ -1430,6 +1518,7 @@ out:
1430err: 1518err:
1431 mutex_unlock(&lport->lp_mutex); 1519 mutex_unlock(&lport->lp_mutex);
1432} 1520}
1521EXPORT_SYMBOL(fc_lport_flogi_resp);
1433 1522
1434/** 1523/**
1435 * fc_lport_enter_flogi() - Send a FLOGI request to the fabric manager 1524 * fc_lport_enter_flogi() - Send a FLOGI request to the fabric manager
@@ -1451,12 +1540,18 @@ void fc_lport_enter_flogi(struct fc_lport *lport)
1451 if (!fp) 1540 if (!fp)
1452 return fc_lport_error(lport, fp); 1541 return fc_lport_error(lport, fp);
1453 1542
1454 if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_FLOGI, 1543 if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp,
1455 fc_lport_flogi_resp, lport, lport->e_d_tov)) 1544 lport->vport ? ELS_FDISC : ELS_FLOGI,
1456 fc_lport_error(lport, fp); 1545 fc_lport_flogi_resp, lport,
1546 lport->vport ? 2 * lport->r_a_tov :
1547 lport->e_d_tov))
1548 fc_lport_error(lport, NULL);
1457} 1549}
1458 1550
1459/* Configure a fc_lport */ 1551/**
1552 * fc_lport_config() - Configure a fc_lport
1553 * @lport: The local port to be configured
1554 */
1460int fc_lport_config(struct fc_lport *lport) 1555int fc_lport_config(struct fc_lport *lport)
1461{ 1556{
1462 INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout); 1557 INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout);
@@ -1471,6 +1566,10 @@ int fc_lport_config(struct fc_lport *lport)
1471} 1566}
1472EXPORT_SYMBOL(fc_lport_config); 1567EXPORT_SYMBOL(fc_lport_config);
1473 1568
1569/**
1570 * fc_lport_init() - Initialize the lport layer for a local port
1571 * @lport: The local port to initialize the exchange layer for
1572 */
1474int fc_lport_init(struct fc_lport *lport) 1573int fc_lport_init(struct fc_lport *lport)
1475{ 1574{
1476 if (!lport->tt.lport_recv) 1575 if (!lport->tt.lport_recv)
@@ -1500,7 +1599,254 @@ int fc_lport_init(struct fc_lport *lport)
1500 if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT) 1599 if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT)
1501 fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT; 1600 fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT;
1502 1601
1503 INIT_LIST_HEAD(&lport->ema_list);
1504 return 0; 1602 return 0;
1505} 1603}
1506EXPORT_SYMBOL(fc_lport_init); 1604EXPORT_SYMBOL(fc_lport_init);
1605
1606/**
1607 * fc_lport_bsg_resp() - The common response handler for FC Passthrough requests
1608 * @sp: The sequence for the FC Passthrough response
1609 * @fp: The response frame
1610 * @info_arg: The BSG info that the response is for
1611 */
1612static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
1613 void *info_arg)
1614{
1615 struct fc_bsg_info *info = info_arg;
1616 struct fc_bsg_job *job = info->job;
1617 struct fc_lport *lport = info->lport;
1618 struct fc_frame_header *fh;
1619 size_t len;
1620 void *buf;
1621
1622 if (IS_ERR(fp)) {
1623 job->reply->result = (PTR_ERR(fp) == -FC_EX_CLOSED) ?
1624 -ECONNABORTED : -ETIMEDOUT;
1625 job->reply_len = sizeof(uint32_t);
1626 job->state_flags |= FC_RQST_STATE_DONE;
1627 job->job_done(job);
1628 kfree(info);
1629 return;
1630 }
1631
1632 mutex_lock(&lport->lp_mutex);
1633 fh = fc_frame_header_get(fp);
1634 len = fr_len(fp) - sizeof(*fh);
1635 buf = fc_frame_payload_get(fp, 0);
1636
1637 if (fr_sof(fp) == FC_SOF_I3 && !ntohs(fh->fh_seq_cnt)) {
1638 /* Get the response code from the first frame payload */
1639 unsigned short cmd = (info->rsp_code == FC_FS_ACC) ?
1640 ntohs(((struct fc_ct_hdr *)buf)->ct_cmd) :
1641 (unsigned short)fc_frame_payload_op(fp);
1642
1643 /* Save the reply status of the job */
1644 job->reply->reply_data.ctels_reply.status =
1645 (cmd == info->rsp_code) ?
1646 FC_CTELS_STATUS_OK : FC_CTELS_STATUS_REJECT;
1647 }
1648
1649 job->reply->reply_payload_rcv_len +=
1650 fc_copy_buffer_to_sglist(buf, len, info->sg, &info->nents,
1651 &info->offset, KM_BIO_SRC_IRQ, NULL);
1652
1653 if (fr_eof(fp) == FC_EOF_T &&
1654 (ntoh24(fh->fh_f_ctl) & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
1655 (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
1656 if (job->reply->reply_payload_rcv_len >
1657 job->reply_payload.payload_len)
1658 job->reply->reply_payload_rcv_len =
1659 job->reply_payload.payload_len;
1660 job->reply->result = 0;
1661 job->state_flags |= FC_RQST_STATE_DONE;
1662 job->job_done(job);
1663 kfree(info);
1664 }
1665 fc_frame_free(fp);
1666 mutex_unlock(&lport->lp_mutex);
1667}
1668
1669/**
1670 * fc_lport_els_request() - Send ELS passthrough request
1671 * @job: The BSG Passthrough job
1672 * @lport: The local port sending the request
1673 * @did: The destination port id
1674 *
1675 * Locking Note: The lport lock is expected to be held before calling
1676 * this routine.
1677 */
1678static int fc_lport_els_request(struct fc_bsg_job *job,
1679 struct fc_lport *lport,
1680 u32 did, u32 tov)
1681{
1682 struct fc_bsg_info *info;
1683 struct fc_frame *fp;
1684 struct fc_frame_header *fh;
1685 char *pp;
1686 int len;
1687
1688 fp = fc_frame_alloc(lport, job->request_payload.payload_len);
1689 if (!fp)
1690 return -ENOMEM;
1691
1692 len = job->request_payload.payload_len;
1693 pp = fc_frame_payload_get(fp, len);
1694
1695 sg_copy_to_buffer(job->request_payload.sg_list,
1696 job->request_payload.sg_cnt,
1697 pp, len);
1698
1699 fh = fc_frame_header_get(fp);
1700 fh->fh_r_ctl = FC_RCTL_ELS_REQ;
1701 hton24(fh->fh_d_id, did);
1702 hton24(fh->fh_s_id, fc_host_port_id(lport->host));
1703 fh->fh_type = FC_TYPE_ELS;
1704 hton24(fh->fh_f_ctl, FC_FC_FIRST_SEQ |
1705 FC_FC_END_SEQ | FC_FC_SEQ_INIT);
1706 fh->fh_cs_ctl = 0;
1707 fh->fh_df_ctl = 0;
1708 fh->fh_parm_offset = 0;
1709
1710 info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL);
1711 if (!info) {
1712 fc_frame_free(fp);
1713 return -ENOMEM;
1714 }
1715
1716 info->job = job;
1717 info->lport = lport;
1718 info->rsp_code = ELS_LS_ACC;
1719 info->nents = job->reply_payload.sg_cnt;
1720 info->sg = job->reply_payload.sg_list;
1721
1722 if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
1723 NULL, info, tov))
1724 return -ECOMM;
1725 return 0;
1726}
1727
1728/**
1729 * fc_lport_ct_request() - Send CT Passthrough request
1730 * @job: The BSG Passthrough job
1731 * @lport: The local port sending the request
1732 * @did: The destination FC-ID
1733 * @tov: The timeout period to wait for the response
1734 *
1735 * Locking Note: The lport lock is expected to be held before calling
1736 * this routine.
1737 */
1738static int fc_lport_ct_request(struct fc_bsg_job *job,
1739 struct fc_lport *lport, u32 did, u32 tov)
1740{
1741 struct fc_bsg_info *info;
1742 struct fc_frame *fp;
1743 struct fc_frame_header *fh;
1744 struct fc_ct_req *ct;
1745 size_t len;
1746
1747 fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
1748 job->request_payload.payload_len);
1749 if (!fp)
1750 return -ENOMEM;
1751
1752 len = job->request_payload.payload_len;
1753 ct = fc_frame_payload_get(fp, len);
1754
1755 sg_copy_to_buffer(job->request_payload.sg_list,
1756 job->request_payload.sg_cnt,
1757 ct, len);
1758
1759 fh = fc_frame_header_get(fp);
1760 fh->fh_r_ctl = FC_RCTL_DD_UNSOL_CTL;
1761 hton24(fh->fh_d_id, did);
1762 hton24(fh->fh_s_id, fc_host_port_id(lport->host));
1763 fh->fh_type = FC_TYPE_CT;
1764 hton24(fh->fh_f_ctl, FC_FC_FIRST_SEQ |
1765 FC_FC_END_SEQ | FC_FC_SEQ_INIT);
1766 fh->fh_cs_ctl = 0;
1767 fh->fh_df_ctl = 0;
1768 fh->fh_parm_offset = 0;
1769
1770 info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL);
1771 if (!info) {
1772 fc_frame_free(fp);
1773 return -ENOMEM;
1774 }
1775
1776 info->job = job;
1777 info->lport = lport;
1778 info->rsp_code = FC_FS_ACC;
1779 info->nents = job->reply_payload.sg_cnt;
1780 info->sg = job->reply_payload.sg_list;
1781
1782 if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
1783 NULL, info, tov))
1784 return -ECOMM;
1785 return 0;
1786}
1787
1788/**
1789 * fc_lport_bsg_request() - The common entry point for sending
1790 * FC Passthrough requests
1791 * @job: The BSG passthrough job
1792 */
1793int fc_lport_bsg_request(struct fc_bsg_job *job)
1794{
1795 struct request *rsp = job->req->next_rq;
1796 struct Scsi_Host *shost = job->shost;
1797 struct fc_lport *lport = shost_priv(shost);
1798 struct fc_rport *rport;
1799 struct fc_rport_priv *rdata;
1800 int rc = -EINVAL;
1801 u32 did;
1802
1803 job->reply->reply_payload_rcv_len = 0;
1804 if (rsp)
1805 rsp->resid_len = job->reply_payload.payload_len;
1806
1807 mutex_lock(&lport->lp_mutex);
1808
1809 switch (job->request->msgcode) {
1810 case FC_BSG_RPT_ELS:
1811 rport = job->rport;
1812 if (!rport)
1813 break;
1814
1815 rdata = rport->dd_data;
1816 rc = fc_lport_els_request(job, lport, rport->port_id,
1817 rdata->e_d_tov);
1818 break;
1819
1820 case FC_BSG_RPT_CT:
1821 rport = job->rport;
1822 if (!rport)
1823 break;
1824
1825 rdata = rport->dd_data;
1826 rc = fc_lport_ct_request(job, lport, rport->port_id,
1827 rdata->e_d_tov);
1828 break;
1829
1830 case FC_BSG_HST_CT:
1831 did = ntoh24(job->request->rqst_data.h_ct.port_id);
1832 if (did == FC_FID_DIR_SERV)
1833 rdata = lport->dns_rdata;
1834 else
1835 rdata = lport->tt.rport_lookup(lport, did);
1836
1837 if (!rdata)
1838 break;
1839
1840 rc = fc_lport_ct_request(job, lport, did, rdata->e_d_tov);
1841 break;
1842
1843 case FC_BSG_HST_ELS_NOLOGIN:
1844 did = ntoh24(job->request->rqst_data.h_els.port_id);
1845 rc = fc_lport_els_request(job, lport, did, lport->e_d_tov);
1846 break;
1847 }
1848
1849 mutex_unlock(&lport->lp_mutex);
1850 return rc;
1851}
1852EXPORT_SYMBOL(fc_lport_bsg_request);
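
fc_lport_bsg_request() is reached from user space through the SCSI host's bsg node. A rough sketch of driving the FC_BSG_HST_ELS_NOLOGIN path with SG_IO on /dev/bsg/hostN; structure and flag names follow linux/bsg.h and scsi/scsi_bsg_fc.h of this era, and the code is illustrative, not a tested tool:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>            /* SG_IO */
#include <linux/bsg.h>          /* struct sg_io_v4 */
#include <scsi/scsi_bsg_fc.h>   /* struct fc_bsg_request/fc_bsg_reply */

static int send_els_nologin(int bsg_fd, const unsigned char port_id[3],
                            void *els, unsigned int els_len,
                            void *rsp, unsigned int rsp_len)
{
        struct fc_bsg_request req;
        struct fc_bsg_reply reply;
        struct sg_io_v4 sgio;

        memset(&req, 0, sizeof(req));
        req.msgcode = FC_BSG_HST_ELS_NOLOGIN;
        memcpy(req.rqst_data.h_els.port_id, port_id, 3);

        memset(&sgio, 0, sizeof(sgio));
        sgio.guard = 'Q';
        sgio.protocol = BSG_PROTOCOL_SCSI;
        sgio.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
        sgio.request = (uintptr_t)&req;
        sgio.request_len = sizeof(req);
        sgio.dout_xferp = (uintptr_t)els;       /* ELS payload out */
        sgio.dout_xfer_len = els_len;
        sgio.din_xferp = (uintptr_t)rsp;        /* response payload in */
        sgio.din_xfer_len = rsp_len;
        sgio.response = (uintptr_t)&reply;
        sgio.max_response_len = sizeof(reply);
        sgio.timeout = 30000;                   /* ms */

        return ioctl(bsg_fd, SG_IO, &sgio);
}
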
diff --git a/drivers/scsi/libfc/fc_npiv.c b/drivers/scsi/libfc/fc_npiv.c
new file mode 100644
index 000000000000..c68f6c7341c2
--- /dev/null
+++ b/drivers/scsi/libfc/fc_npiv.c
@@ -0,0 +1,161 @@
1/*
2 * Copyright(c) 2009 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20/*
21 * NPIV VN_Port helper functions for libfc
22 */
23
24#include <scsi/libfc.h>
25
26/**
27 * libfc_vport_create() - Create a new NPIV vport instance
28 * @vport: fc_vport structure from scsi_transport_fc
29 * @privsize: driver private data size to allocate along with the Scsi_Host
30 */
31
32struct fc_lport *libfc_vport_create(struct fc_vport *vport, int privsize)
33{
34 struct Scsi_Host *shost = vport_to_shost(vport);
35 struct fc_lport *n_port = shost_priv(shost);
36 struct fc_lport *vn_port;
37
38 vn_port = libfc_host_alloc(shost->hostt, privsize);
39 if (!vn_port)
40 goto err_out;
41 if (fc_exch_mgr_list_clone(n_port, vn_port))
42 goto err_put;
43
44 vn_port->vport = vport;
45 vport->dd_data = vn_port;
46
47 mutex_lock(&n_port->lp_mutex);
48 list_add_tail(&vn_port->list, &n_port->vports);
49 mutex_unlock(&n_port->lp_mutex);
50
51 return vn_port;
52
53err_put:
54 scsi_host_put(vn_port->host);
55err_out:
56 return NULL;
57}
58EXPORT_SYMBOL(libfc_vport_create);
59
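
A lower-level driver exposes this through the fc transport's vport_create handler. Hypothetical glue, sketched (my_lld_port and the setup step are invented; libfc_vport_create() and fc_vport_setlink() are the functions defined in this file):

static int my_lld_vport_create(struct fc_vport *vport, bool disabled)
{
        struct fc_lport *vn_port;

        vn_port = libfc_vport_create(vport, sizeof(struct my_lld_port));
        if (!vn_port)
                return -ENOMEM;

        /* driver-specific VN_Port setup (queues, MAC filters, ...) */

        if (!disabled)
                fc_vport_setlink(vn_port);      /* track the N_Port state */
        return 0;
}
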
60/**
61 * fc_vport_id_lookup() - find NPIV lport that matches a given fabric ID
62 * @n_port: Top level N_Port which may have multiple NPIV VN_Ports
63 * @port_id: Fabric ID to find a match for
64 *
65 * Returns: matching lport pointer or NULL if there is no match
66 */
67struct fc_lport *fc_vport_id_lookup(struct fc_lport *n_port, u32 port_id)
68{
69 struct fc_lport *lport = NULL;
70 struct fc_lport *vn_port;
71
72 if (fc_host_port_id(n_port->host) == port_id)
73 return n_port;
74
75 mutex_lock(&n_port->lp_mutex);
76 list_for_each_entry(vn_port, &n_port->vports, list) {
77 if (fc_host_port_id(vn_port->host) == port_id) {
78 lport = vn_port;
79 break;
80 }
81 }
82 mutex_unlock(&n_port->lp_mutex);
83
84 return lport;
85}
86
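
The receive path is the expected consumer: with NPIV active, every inbound frame must be steered to the lport owning its destination ID. A hypothetical sketch that ignores locking and context constraints (fc_exch_recv() is libfc's normal entry point for received frames):

static void my_lld_recv(struct fc_lport *n_port, struct fc_frame *fp)
{
        struct fc_frame_header *fh = fc_frame_header_get(fp);
        struct fc_lport *rx_lport;

        rx_lport = fc_vport_id_lookup(n_port, ntoh24(fh->fh_d_id));
        if (rx_lport)
                fc_exch_recv(rx_lport, fp);
        else
                fc_frame_free(fp);      /* no lport owns this D_ID */
}
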
87/*
88 * When setting the link state of vports during an lport state change, it's
89 * necessary to hold the lp_mutex of both the N_Port and the VN_Port.
90 * This tells the lockdep engine to treat the nested locking of the VN_Port
91 * as a different lock class.
92 */
93enum libfc_lport_mutex_class {
94 LPORT_MUTEX_NORMAL = 0,
95 LPORT_MUTEX_VN_PORT = 1,
96};
97
98/**
99 * __fc_vport_setlink() - update link and status on a VN_Port
100 * @n_port: parent N_Port
101 * @vn_port: VN_Port to update
102 *
103 * Locking: must be called with both the N_Port and VN_Port lp_mutex held
104 */
105static void __fc_vport_setlink(struct fc_lport *n_port,
106 struct fc_lport *vn_port)
107{
108 struct fc_vport *vport = vn_port->vport;
109
110 if (vn_port->state == LPORT_ST_DISABLED)
111 return;
112
113 if (n_port->state == LPORT_ST_READY) {
114 if (n_port->npiv_enabled) {
115 fc_vport_set_state(vport, FC_VPORT_INITIALIZING);
116 __fc_linkup(vn_port);
117 } else {
118 fc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
119 __fc_linkdown(vn_port);
120 }
121 } else {
122 fc_vport_set_state(vport, FC_VPORT_LINKDOWN);
123 __fc_linkdown(vn_port);
124 }
125}
126
127/**
128 * fc_vport_setlink() - update link and status on a VN_Port
129 * @vn_port: virtual port to update
130 */
131void fc_vport_setlink(struct fc_lport *vn_port)
132{
133 struct fc_vport *vport = vn_port->vport;
134 struct Scsi_Host *shost = vport_to_shost(vport);
135 struct fc_lport *n_port = shost_priv(shost);
136
137 mutex_lock(&n_port->lp_mutex);
138 mutex_lock_nested(&vn_port->lp_mutex, LPORT_MUTEX_VN_PORT);
139 __fc_vport_setlink(n_port, vn_port);
140 mutex_unlock(&vn_port->lp_mutex);
141 mutex_unlock(&n_port->lp_mutex);
142}
143EXPORT_SYMBOL(fc_vport_setlink);
144
145/**
146 * fc_vports_linkchange() - change the link state of all vports
147 * @n_port: Parent N_Port that has changed state
148 *
149 * Locking: called with the n_port lp_mutex held
150 */
151void fc_vports_linkchange(struct fc_lport *n_port)
152{
153 struct fc_lport *vn_port;
154
155 list_for_each_entry(vn_port, &n_port->vports, list) {
156 mutex_lock_nested(&vn_port->lp_mutex, LPORT_MUTEX_VN_PORT);
157 __fc_vport_setlink(n_port, vn_port);
158 mutex_unlock(&vn_port->lp_mutex);
159 }
160}
161
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 03ea6748e7ee..b37d0ff28b35 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -47,6 +47,7 @@
47#include <linux/kernel.h> 47#include <linux/kernel.h>
48#include <linux/spinlock.h> 48#include <linux/spinlock.h>
49#include <linux/interrupt.h> 49#include <linux/interrupt.h>
50#include <linux/slab.h>
50#include <linux/rcupdate.h> 51#include <linux/rcupdate.h>
51#include <linux/timer.h> 52#include <linux/timer.h>
52#include <linux/workqueue.h> 53#include <linux/workqueue.h>
@@ -55,6 +56,8 @@
55#include <scsi/libfc.h> 56#include <scsi/libfc.h>
56#include <scsi/fc_encode.h> 57#include <scsi/fc_encode.h>
57 58
59#include "fc_libfc.h"
60
58struct workqueue_struct *rport_event_queue; 61struct workqueue_struct *rport_event_queue;
59 62
60static void fc_rport_enter_plogi(struct fc_rport_priv *); 63static void fc_rport_enter_plogi(struct fc_rport_priv *);
@@ -86,12 +89,13 @@ static const char *fc_rport_state_names[] = {
86 [RPORT_ST_LOGO] = "LOGO", 89 [RPORT_ST_LOGO] = "LOGO",
87 [RPORT_ST_ADISC] = "ADISC", 90 [RPORT_ST_ADISC] = "ADISC",
88 [RPORT_ST_DELETE] = "Delete", 91 [RPORT_ST_DELETE] = "Delete",
92 [RPORT_ST_RESTART] = "Restart",
89}; 93};
90 94
91/** 95/**
92 * fc_rport_lookup() - lookup a remote port by port_id 96 * fc_rport_lookup() - Lookup a remote port by port_id
93 * @lport: Fibre Channel host port instance 97 * @lport: The local port to lookup the remote port on
94 * @port_id: remote port port_id to match 98 * @port_id: The remote port ID to look up
95 */ 99 */
96static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport, 100static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
97 u32 port_id) 101 u32 port_id)
@@ -99,16 +103,17 @@ static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
99 struct fc_rport_priv *rdata; 103 struct fc_rport_priv *rdata;
100 104
101 list_for_each_entry(rdata, &lport->disc.rports, peers) 105 list_for_each_entry(rdata, &lport->disc.rports, peers)
102 if (rdata->ids.port_id == port_id && 106 if (rdata->ids.port_id == port_id)
103 rdata->rp_state != RPORT_ST_DELETE)
104 return rdata; 107 return rdata;
105 return NULL; 108 return NULL;
106} 109}
107 110
108/** 111/**
109 * fc_rport_create() - Create a new remote port 112 * fc_rport_create() - Create a new remote port
110 * @lport: The local port that the new remote port is for 113 * @lport: The local port this remote port will be associated with
111 * @port_id: The port ID for the new remote port 114 * @ids: The identifiers for the new remote port
115 *
116 * The remote port will start in the INIT state.
112 * 117 *
113 * Locking note: must be called with the disc_mutex held. 118 * Locking note: must be called with the disc_mutex held.
114 */ 119 */
@@ -147,8 +152,8 @@ static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport,
147} 152}
148 153
149/** 154/**
150 * fc_rport_destroy() - free a remote port after last reference is released. 155 * fc_rport_destroy() - Free a remote port after last reference is released
151 * @kref: pointer to kref inside struct fc_rport_priv 156 * @kref: The remote port's kref
152 */ 157 */
153static void fc_rport_destroy(struct kref *kref) 158static void fc_rport_destroy(struct kref *kref)
154{ 159{
@@ -159,8 +164,8 @@ static void fc_rport_destroy(struct kref *kref)
159} 164}
160 165
161/** 166/**
162 * fc_rport_state() - return a string for the state the rport is in 167 * fc_rport_state() - Return a string identifying the remote port's state
163 * @rdata: remote port private data 168 * @rdata: The remote port
164 */ 169 */
165static const char *fc_rport_state(struct fc_rport_priv *rdata) 170static const char *fc_rport_state(struct fc_rport_priv *rdata)
166{ 171{
@@ -173,9 +178,9 @@ static const char *fc_rport_state(struct fc_rport_priv *rdata)
173} 178}
174 179
175/** 180/**
176 * fc_set_rport_loss_tmo() - Set the remote port loss timeout in seconds. 181 * fc_set_rport_loss_tmo() - Set the remote port loss timeout
177 * @rport: Pointer to Fibre Channel remote port structure 182 * @rport: The remote port that gets a new timeout value
178 * @timeout: timeout in seconds 183 * @timeout: The new timeout value (in seconds)
179 */ 184 */
180void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout) 185void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
181{ 186{
@@ -187,9 +192,11 @@ void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
187EXPORT_SYMBOL(fc_set_rport_loss_tmo); 192EXPORT_SYMBOL(fc_set_rport_loss_tmo);
188 193
189/** 194/**
190 * fc_plogi_get_maxframe() - Get max payload from the common service parameters 195 * fc_plogi_get_maxframe() - Get the maximum payload from the common service
191 * @flp: FLOGI payload structure 196 * parameters in a FLOGI frame
192 * @maxval: upper limit, may be less than what is in the service parameters 197 * @flp: The FLOGI payload
198 * @maxval: The maximum frame size upper limit; this may be less than what
199 * is in the service parameters
193 */ 200 */
194static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp, 201static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp,
195 unsigned int maxval) 202 unsigned int maxval)
@@ -210,9 +217,9 @@ static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp,
210} 217}
211 218
212/** 219/**
213 * fc_rport_state_enter() - Change the rport's state 220 * fc_rport_state_enter() - Change the state of a remote port
214 * @rdata: The rport whose state should change 221 * @rdata: The remote port whose state should change
215 * @new: The new state of the rport 222 * @new: The new state
216 * 223 *
217 * Locking Note: Called with the rport lock held 224 * Locking Note: Called with the rport lock held
218 */ 225 */
@@ -224,17 +231,22 @@ static void fc_rport_state_enter(struct fc_rport_priv *rdata,
224 rdata->rp_state = new; 231 rdata->rp_state = new;
225} 232}
226 233
234/**
235 * fc_rport_work() - Handler for remote port events in the rport_event_queue
236 * @work: Handle to the remote port being dequeued
237 */
227static void fc_rport_work(struct work_struct *work) 238static void fc_rport_work(struct work_struct *work)
228{ 239{
229 u32 port_id; 240 u32 port_id;
230 struct fc_rport_priv *rdata = 241 struct fc_rport_priv *rdata =
231 container_of(work, struct fc_rport_priv, event_work); 242 container_of(work, struct fc_rport_priv, event_work);
232 struct fc_rport_libfc_priv *rp; 243 struct fc_rport_libfc_priv *rpriv;
233 enum fc_rport_event event; 244 enum fc_rport_event event;
234 struct fc_lport *lport = rdata->local_port; 245 struct fc_lport *lport = rdata->local_port;
235 struct fc_rport_operations *rport_ops; 246 struct fc_rport_operations *rport_ops;
236 struct fc_rport_identifiers ids; 247 struct fc_rport_identifiers ids;
237 struct fc_rport *rport; 248 struct fc_rport *rport;
249 int restart = 0;
238 250
239 mutex_lock(&rdata->rp_mutex); 251 mutex_lock(&rdata->rp_mutex);
240 event = rdata->event; 252 event = rdata->event;
@@ -265,12 +277,12 @@ static void fc_rport_work(struct work_struct *work)
265 rport->maxframe_size = rdata->maxframe_size; 277 rport->maxframe_size = rdata->maxframe_size;
266 rport->supported_classes = rdata->supported_classes; 278 rport->supported_classes = rdata->supported_classes;
267 279
268 rp = rport->dd_data; 280 rpriv = rport->dd_data;
269 rp->local_port = lport; 281 rpriv->local_port = lport;
270 rp->rp_state = rdata->rp_state; 282 rpriv->rp_state = rdata->rp_state;
271 rp->flags = rdata->flags; 283 rpriv->flags = rdata->flags;
272 rp->e_d_tov = rdata->e_d_tov; 284 rpriv->e_d_tov = rdata->e_d_tov;
273 rp->r_a_tov = rdata->r_a_tov; 285 rpriv->r_a_tov = rdata->r_a_tov;
274 mutex_unlock(&rdata->rp_mutex); 286 mutex_unlock(&rdata->rp_mutex);
275 287
276 if (rport_ops && rport_ops->event_callback) { 288 if (rport_ops && rport_ops->event_callback) {
@@ -287,8 +299,20 @@ static void fc_rport_work(struct work_struct *work)
287 mutex_unlock(&rdata->rp_mutex); 299 mutex_unlock(&rdata->rp_mutex);
288 300
289 if (port_id != FC_FID_DIR_SERV) { 301 if (port_id != FC_FID_DIR_SERV) {
302 /*
303 * We must drop rp_mutex before taking disc_mutex.
304 * Re-evaluate state to allow for restart.
305 * A transition to RESTART state must only happen
306 * while disc_mutex is held and rdata is on the list.
307 */
290 mutex_lock(&lport->disc.disc_mutex); 308 mutex_lock(&lport->disc.disc_mutex);
291 list_del(&rdata->peers); 309 mutex_lock(&rdata->rp_mutex);
310 if (rdata->rp_state == RPORT_ST_RESTART)
311 restart = 1;
312 else
313 list_del(&rdata->peers);
314 rdata->event = RPORT_EV_NONE;
315 mutex_unlock(&rdata->rp_mutex);
292 mutex_unlock(&lport->disc.disc_mutex); 316 mutex_unlock(&lport->disc.disc_mutex);
293 } 317 }
294 318
@@ -305,14 +329,20 @@ static void fc_rport_work(struct work_struct *work)
305 lport->tt.exch_mgr_reset(lport, port_id, 0); 329 lport->tt.exch_mgr_reset(lport, port_id, 0);
306 330
307 if (rport) { 331 if (rport) {
308 rp = rport->dd_data; 332 rpriv = rport->dd_data;
309 rp->rp_state = RPORT_ST_DELETE; 333 rpriv->rp_state = RPORT_ST_DELETE;
310 mutex_lock(&rdata->rp_mutex); 334 mutex_lock(&rdata->rp_mutex);
311 rdata->rport = NULL; 335 rdata->rport = NULL;
312 mutex_unlock(&rdata->rp_mutex); 336 mutex_unlock(&rdata->rp_mutex);
313 fc_remote_port_delete(rport); 337 fc_remote_port_delete(rport);
314 } 338 }
315 kref_put(&rdata->kref, lport->tt.rport_destroy); 339 if (restart) {
340 mutex_lock(&rdata->rp_mutex);
341 FC_RPORT_DBG(rdata, "work restart\n");
342 fc_rport_enter_plogi(rdata);
343 mutex_unlock(&rdata->rp_mutex);
344 } else
345 kref_put(&rdata->kref, lport->tt.rport_destroy);
316 break; 346 break;
317 347
318 default: 348 default:
@@ -323,7 +353,7 @@ static void fc_rport_work(struct work_struct *work)
323 353
324/** 354/**
325 * fc_rport_login() - Start the remote port login state machine 355 * fc_rport_login() - Start the remote port login state machine
326 * @rdata: private remote port 356 * @rdata: The remote port to be logged in to
327 * 357 *
328 * Locking Note: Called without the rport lock held. This 358 * Locking Note: Called without the rport lock held. This
329 * function will hold the rport lock, call an _enter_* 359 * function will hold the rport lock, call an _enter_*
@@ -342,6 +372,12 @@ int fc_rport_login(struct fc_rport_priv *rdata)
342 FC_RPORT_DBG(rdata, "ADISC port\n"); 372 FC_RPORT_DBG(rdata, "ADISC port\n");
343 fc_rport_enter_adisc(rdata); 373 fc_rport_enter_adisc(rdata);
344 break; 374 break;
375 case RPORT_ST_RESTART:
376 break;
377 case RPORT_ST_DELETE:
378 FC_RPORT_DBG(rdata, "Restart deleted port\n");
379 fc_rport_state_enter(rdata, RPORT_ST_RESTART);
380 break;
345 default: 381 default:
346 FC_RPORT_DBG(rdata, "Login to port\n"); 382 FC_RPORT_DBG(rdata, "Login to port\n");
347 fc_rport_enter_plogi(rdata); 383 fc_rport_enter_plogi(rdata);
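
With the new RESTART case, logging in to a port that is mid-teardown no longer races with delete: the request is recorded and fc_rport_work() re-issues PLOGI once the old session is gone. A hypothetical caller using the rport template entries seen elsewhere in this file:

        struct fc_rport_priv *rdata;

        rdata = lport->tt.rport_lookup(lport, port_id);
        if (rdata)
                lport->tt.rport_login(rdata);   /* PLOGI, or RESTART if deleting */
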
@@ -353,9 +389,9 @@ int fc_rport_login(struct fc_rport_priv *rdata)
353} 389}
354 390
355/** 391/**
356 * fc_rport_enter_delete() - schedule a remote port to be deleted. 392 * fc_rport_enter_delete() - Schedule a remote port to be deleted
357 * @rdata: private remote port 393 * @rdata: The remote port to be deleted
358 * @event: event to report as the reason for deletion 394 * @event: The event to report as the reason for deletion
359 * 395 *
360 * Locking Note: Called with the rport lock held. 396 * Locking Note: Called with the rport lock held.
361 * 397 *
@@ -382,8 +418,8 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
382} 418}
383 419
384/** 420/**
385 * fc_rport_logoff() - Logoff and remove an rport 421 * fc_rport_logoff() - Logoff and remove a remote port
386 * @rdata: private remote port 422 * @rdata: The remote port to be logged off
387 * 423 *
388 * Locking Note: Called without the rport lock held. This 424 * Locking Note: Called without the rport lock held. This
389 * function will hold the rport lock, call an _enter_* 425 * function will hold the rport lock, call an _enter_*
@@ -397,26 +433,27 @@ int fc_rport_logoff(struct fc_rport_priv *rdata)
397 433
398 if (rdata->rp_state == RPORT_ST_DELETE) { 434 if (rdata->rp_state == RPORT_ST_DELETE) {
399 FC_RPORT_DBG(rdata, "Port in Delete state, not removing\n"); 435 FC_RPORT_DBG(rdata, "Port in Delete state, not removing\n");
400 mutex_unlock(&rdata->rp_mutex);
401 goto out; 436 goto out;
402 } 437 }
403 438
404 fc_rport_enter_logo(rdata); 439 if (rdata->rp_state == RPORT_ST_RESTART)
440 FC_RPORT_DBG(rdata, "Port in Restart state, deleting\n");
441 else
442 fc_rport_enter_logo(rdata);
405 443
406 /* 444 /*
407 * Change the state to Delete so that we discard 445 * Change the state to Delete so that we discard
408 * the response. 446 * the response.
409 */ 447 */
410 fc_rport_enter_delete(rdata, RPORT_EV_STOP); 448 fc_rport_enter_delete(rdata, RPORT_EV_STOP);
411 mutex_unlock(&rdata->rp_mutex);
412
413out: 449out:
450 mutex_unlock(&rdata->rp_mutex);
414 return 0; 451 return 0;
415} 452}
416 453
417/** 454/**
418 * fc_rport_enter_ready() - The rport is ready 455 * fc_rport_enter_ready() - Transition to the RPORT_ST_READY state
419 * @rdata: private remote port 456 * @rdata: The remote port that is ready
420 * 457 *
421 * Locking Note: The rport lock is expected to be held before calling 458 * Locking Note: The rport lock is expected to be held before calling
422 * this routine. 459 * this routine.
@@ -433,8 +470,8 @@ static void fc_rport_enter_ready(struct fc_rport_priv *rdata)
433} 470}
434 471
435/** 472/**
436 * fc_rport_timeout() - Handler for the retry_work timer. 473 * fc_rport_timeout() - Handler for the retry_work timer
437 * @work: The work struct of the fc_rport_priv 474 * @work: Handle to the remote port that has timed out
438 * 475 *
439 * Locking Note: Called without the rport lock held. This 476 * Locking Note: Called without the rport lock held. This
440 * function will hold the rport lock, call an _enter_* 477 * function will hold the rport lock, call an _enter_*
@@ -466,6 +503,7 @@ static void fc_rport_timeout(struct work_struct *work)
466 case RPORT_ST_READY: 503 case RPORT_ST_READY:
467 case RPORT_ST_INIT: 504 case RPORT_ST_INIT:
468 case RPORT_ST_DELETE: 505 case RPORT_ST_DELETE:
506 case RPORT_ST_RESTART:
469 break; 507 break;
470 } 508 }
471 509
@@ -474,8 +512,8 @@ static void fc_rport_timeout(struct work_struct *work)
474 512
475/** 513/**
476 * fc_rport_error() - Error handler, called once retries have been exhausted 514 * fc_rport_error() - Error handler, called once retries have been exhausted
477 * @rdata: private remote port 515 * @rdata: The remote port the error happened on
478 * @fp: The frame pointer 516 * @fp: The error code encapsulated in a frame pointer
479 * 517 *
480 * Locking Note: The rport lock is expected to be held before 518 * Locking Note: The rport lock is expected to be held before
481 * calling this routine 519 * calling this routine
@@ -499,6 +537,7 @@ static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
499 fc_rport_enter_logo(rdata); 537 fc_rport_enter_logo(rdata);
500 break; 538 break;
501 case RPORT_ST_DELETE: 539 case RPORT_ST_DELETE:
540 case RPORT_ST_RESTART:
502 case RPORT_ST_READY: 541 case RPORT_ST_READY:
503 case RPORT_ST_INIT: 542 case RPORT_ST_INIT:
504 break; 543 break;
@@ -506,9 +545,9 @@ static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
506} 545}
507 546
508/** 547/**
509 * fc_rport_error_retry() - Error handler when retries are desired 548 * fc_rport_error_retry() - Handler for remote port state retries
510 * @rdata: private remote port data 549 * @rdata: The remote port whose state is to be retried
511 * @fp: The frame pointer 550 * @fp: The error code encapsulated in a frame pointer
512 * 551 *
513 * If the error was an exchange timeout retry immediately, 552 * If the error was an exchange timeout retry immediately,
514 * otherwise wait for E_D_TOV. 553 * otherwise wait for E_D_TOV.
@@ -540,10 +579,10 @@ static void fc_rport_error_retry(struct fc_rport_priv *rdata,
540} 579}
541 580
542/** 581/**
543 * fc_rport_plogi_resp() - Handle incoming ELS PLOGI response 582 * fc_rport_plogi_resp() - Handler for ELS PLOGI responses
544 * @sp: current sequence in the PLOGI exchange 583 * @sp: The sequence the PLOGI is on
545 * @fp: response frame 584 * @fp: The PLOGI response frame
546 * @rdata_arg: private remote port data 585 * @rdata_arg: The remote port that sent the PLOGI response
547 * 586 *
548 * Locking Note: This function will be called without the rport lock 587 * Locking Note: This function will be called without the rport lock
549 * held, but it will lock, call an _enter_* function or fc_rport_error 588 * held, but it will lock, call an _enter_* function or fc_rport_error
@@ -585,7 +624,7 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
585 624
586 tov = ntohl(plp->fl_csp.sp_e_d_tov); 625 tov = ntohl(plp->fl_csp.sp_e_d_tov);
587 if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR) 626 if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR)
588 tov /= 1000; 627 tov /= 1000000;
589 if (tov > rdata->e_d_tov) 628 if (tov > rdata->e_d_tov)
590 rdata->e_d_tov = tov; 629 rdata->e_d_tov = tov;
591 csp_seq = ntohs(plp->fl_csp.sp_tot_seq); 630 csp_seq = ntohs(plp->fl_csp.sp_tot_seq);
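
The divisor change above is the substance of this hunk: the common service parameters advertise E_D_TOV in nanoseconds when the FC_SP_FT_EDTR resolution bit is set, and in milliseconds otherwise, so the old divisor of 1000 left the value in microseconds. A minimal sketch of the corrected logic (fc_e_d_tov_ms() is an illustrative helper, not a libfc function; it assumes rdata->e_d_tov is kept in milliseconds, as the surrounding code suggests):

	static u32 fc_e_d_tov_ms(u32 tov, u16 sp_features)
	{
		if (sp_features & FC_SP_FT_EDTR)
			tov /= 1000000;	/* advertised in ns, convert to ms */
		return tov;		/* otherwise already in ms */
	}
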
@@ -606,8 +645,8 @@ err:
606} 645}
607 646
608/** 647/**
609 * fc_rport_enter_plogi() - Send Port Login (PLOGI) request to peer 648 * fc_rport_enter_plogi() - Send Port Login (PLOGI) request
610 * @rdata: private remote port data 649 * @rdata: The remote port to send a PLOGI to
611 * 650 *
612 * Locking Note: The rport lock is expected to be held before calling 651 * Locking Note: The rport lock is expected to be held before calling
613 * this routine. 652 * this routine.
@@ -631,17 +670,18 @@ static void fc_rport_enter_plogi(struct fc_rport_priv *rdata)
631 rdata->e_d_tov = lport->e_d_tov; 670 rdata->e_d_tov = lport->e_d_tov;
632 671
633 if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PLOGI, 672 if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PLOGI,
634 fc_rport_plogi_resp, rdata, lport->e_d_tov)) 673 fc_rport_plogi_resp, rdata,
635 fc_rport_error_retry(rdata, fp); 674 2 * lport->r_a_tov))
675 fc_rport_error_retry(rdata, NULL);
636 else 676 else
637 kref_get(&rdata->kref); 677 kref_get(&rdata->kref);
638} 678}
639 679
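This is the first of five identical changes in this file (PLOGI here, then PRLI, RTV, LOGO and ADISC below): the ELS response timeout moves from E_D_TOV to 2 * R_A_TOV, and the error path passes NULL instead of fp because the frame has already been consumed by the failed send. A sketch of the shared pattern, under the assumption that FC-LS sizes the ELS timeout at twice the resource allocation timeout:

	/* illustrative helper, not part of libfc */
	static inline unsigned int fc_els_tov(const struct fc_lport *lport)
	{
		return 2 * lport->r_a_tov;	/* assumed: LS_TOV = 2 * R_A_TOV */
	}

	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PLOGI,
				  fc_rport_plogi_resp, rdata, fc_els_tov(lport)))
		fc_rport_error_retry(rdata, NULL);	/* fp no longer valid */
	else
		kref_get(&rdata->kref);
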
640/** 680/**
641 * fc_rport_prli_resp() - Process Login (PRLI) response handler 681 * fc_rport_prli_resp() - Process Login (PRLI) response handler
642 * @sp: current sequence in the PRLI exchange 682 * @sp: The sequence the PRLI response was on
643 * @fp: response frame 683 * @fp: The PRLI response frame
644 * @rdata_arg: private remote port data 684 * @rdata_arg: The remote port that sent the PRLI response
645 * 685 *
646 * Locking Note: This function will be called without the rport lock 686 * Locking Note: This function will be called without the rport lock
647 * held, but it will lock, call an _enter_* function or fc_rport_error 687 * held, but it will lock, call an _enter_* function or fc_rport_error
@@ -710,10 +750,10 @@ err:
710} 750}
711 751
712/** 752/**
713 * fc_rport_logo_resp() - Logout (LOGO) response handler 753 * fc_rport_logo_resp() - Handler for logout (LOGO) responses
714 * @sp: current sequence in the LOGO exchange 754 * @sp: The sequence the LOGO was on
715 * @fp: response frame 755 * @fp: The LOGO response frame
716 * @rdata_arg: private remote port data 756 * @rdata_arg: The remote port that sent the LOGO response
717 * 757 *
718 * Locking Note: This function will be called without the rport lock 758 * Locking Note: This function will be called without the rport lock
719 * held, but it will lock, call an _enter_* function or fc_rport_error 759 * held, but it will lock, call an _enter_* function or fc_rport_error
@@ -756,8 +796,8 @@ err:
756} 796}
757 797
758/** 798/**
759 * fc_rport_enter_prli() - Send Process Login (PRLI) request to peer 799 * fc_rport_enter_prli() - Send Process Login (PRLI) request
760 * @rdata: private remote port data 800 * @rdata: The remote port to send the PRLI request to
761 * 801 *
762 * Locking Note: The rport lock is expected to be held before calling 802 * Locking Note: The rport lock is expected to be held before calling
763 * this routine. 803 * this routine.
@@ -792,17 +832,18 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
792 } 832 }
793 833
794 if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PRLI, 834 if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PRLI,
795 fc_rport_prli_resp, rdata, lport->e_d_tov)) 835 fc_rport_prli_resp, rdata,
796 fc_rport_error_retry(rdata, fp); 836 2 * lport->r_a_tov))
837 fc_rport_error_retry(rdata, NULL);
797 else 838 else
798 kref_get(&rdata->kref); 839 kref_get(&rdata->kref);
799} 840}
800 841
801/** 842/**
802 * fc_rport_rtv_resp() - Request Timeout Value response handler 843 * fc_rport_rtv_resp() - Handler for Request Timeout Value (RTV) responses
803 * @sp: current sequence in the RTV exchange 844 * @sp: The sequence the RTV was on
804 * @fp: response frame 845 * @fp: The RTV response frame
805 * @rdata_arg: private remote port data 846 * @rdata_arg: The remote port that sent the RTV response
806 * 847 *
807 * Many targets don't seem to support this. 848 * Many targets don't seem to support this.
808 * 849 *
@@ -865,8 +906,8 @@ err:
865} 906}
866 907
867/** 908/**
868 * fc_rport_enter_rtv() - Send Request Timeout Value (RTV) request to peer 909 * fc_rport_enter_rtv() - Send Request Timeout Value (RTV) request
869 * @rdata: private remote port data 910 * @rdata: The remote port to send the RTV request to
870 * 911 *
871 * Locking Note: The rport lock is expected to be held before calling 912 * Locking Note: The rport lock is expected to be held before calling
872 * this routine. 913 * this routine.
@@ -888,15 +929,16 @@ static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
888 } 929 }
889 930
890 if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_RTV, 931 if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_RTV,
891 fc_rport_rtv_resp, rdata, lport->e_d_tov)) 932 fc_rport_rtv_resp, rdata,
892 fc_rport_error_retry(rdata, fp); 933 2 * lport->r_a_tov))
934 fc_rport_error_retry(rdata, NULL);
893 else 935 else
894 kref_get(&rdata->kref); 936 kref_get(&rdata->kref);
895} 937}
896 938
897/** 939/**
898 * fc_rport_enter_logo() - Send Logout (LOGO) request to peer 940 * fc_rport_enter_logo() - Send a logout (LOGO) request
899 * @rdata: private remote port data 941 * @rdata: The remote port to send the LOGO request to
900 * 942 *
901 * Locking Note: The rport lock is expected to be held before calling 943 * Locking Note: The rport lock is expected to be held before calling
902 * this routine. 944 * this routine.
@@ -918,24 +960,25 @@ static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
918 } 960 }
919 961
920 if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO, 962 if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO,
921 fc_rport_logo_resp, rdata, lport->e_d_tov)) 963 fc_rport_logo_resp, rdata,
922 fc_rport_error_retry(rdata, fp); 964 2 * lport->r_a_tov))
965 fc_rport_error_retry(rdata, NULL);
923 else 966 else
924 kref_get(&rdata->kref); 967 kref_get(&rdata->kref);
925} 968}
926 969
927/** 970/**
928 * fc_rport_adisc_resp() - Address Discovery response handler 971 * fc_rport_adisc_resp() - Handler for Address Discovery (ADISC) responses
929 * @sp: current sequence in the ADISC exchange 972 * @sp: The sequence the ADISC response was on
930 * @fp: response frame 973 * @fp: The ADISC response frame
931 * @rdata_arg: remote port private. 974 * @rdata_arg: The remote port that sent the ADISC response
932 * 975 *
933 * Locking Note: This function will be called without the rport lock 976 * Locking Note: This function will be called without the rport lock
934 * held, but it will lock, call an _enter_* function or fc_rport_error 977 * held, but it will lock, call an _enter_* function or fc_rport_error
935 * and then unlock the rport. 978 * and then unlock the rport.
936 */ 979 */
937static void fc_rport_adisc_resp(struct fc_seq *sp, struct fc_frame *fp, 980static void fc_rport_adisc_resp(struct fc_seq *sp, struct fc_frame *fp,
938 void *rdata_arg) 981 void *rdata_arg)
939{ 982{
940 struct fc_rport_priv *rdata = rdata_arg; 983 struct fc_rport_priv *rdata = rdata_arg;
941 struct fc_els_adisc *adisc; 984 struct fc_els_adisc *adisc;
@@ -983,8 +1026,8 @@ err:
983} 1026}
984 1027
985/** 1028/**
986 * fc_rport_enter_adisc() - Send Address Discover (ADISC) request to peer 1029 * fc_rport_enter_adisc() - Send Address Discover (ADISC) request
987 * @rdata: remote port private data 1030 * @rdata: The remote port to send the ADISC request to
988 * 1031 *
989 * Locking Note: The rport lock is expected to be held before calling 1032 * Locking Note: The rport lock is expected to be held before calling
990 * this routine. 1033 * this routine.
@@ -1005,17 +1048,18 @@ static void fc_rport_enter_adisc(struct fc_rport_priv *rdata)
1005 return; 1048 return;
1006 } 1049 }
1007 if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_ADISC, 1050 if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_ADISC,
1008 fc_rport_adisc_resp, rdata, lport->e_d_tov)) 1051 fc_rport_adisc_resp, rdata,
1009 fc_rport_error_retry(rdata, fp); 1052 2 * lport->r_a_tov))
1053 fc_rport_error_retry(rdata, NULL);
1010 else 1054 else
1011 kref_get(&rdata->kref); 1055 kref_get(&rdata->kref);
1012} 1056}
1013 1057
1014/** 1058/**
1015 * fc_rport_recv_adisc_req() - Handle incoming Address Discovery (ADISC) Request 1059 * fc_rport_recv_adisc_req() - Handler for Address Discovery (ADISC) requests
1016 * @rdata: remote port private 1060 * @rdata: The remote port that sent the ADISC request
1017 * @sp: current sequence in the ADISC exchange 1061 * @sp: The sequence the ADISC request was on
1018 * @in_fp: ADISC request frame 1062 * @in_fp: The ADISC request frame
1019 * 1063 *
1020 * Locking Note: Called with the lport and rport locks held. 1064 * Locking Note: Called with the lport and rport locks held.
1021 */ 1065 */
@@ -1056,10 +1100,82 @@ drop:
1056} 1100}
1057 1101
1058/** 1102/**
1059 * fc_rport_recv_els_req() - handle a validated ELS request. 1103 * fc_rport_recv_rls_req() - Handle received Read Link Status request
1060 * @lport: Fibre Channel local port 1104 * @rdata: The remote port that sent the RLS request
1061 * @sp: current sequence in the PLOGI exchange 1105 * @sp: The sequence that the RLS was on
1062 * @fp: response frame 1106 * @rx_fp: The RLS request frame
1107 *
1108 * Locking Note: The rport lock is expected to be held before calling
1109 * this function.
1110 */
1111static void fc_rport_recv_rls_req(struct fc_rport_priv *rdata,
1112 struct fc_seq *sp, struct fc_frame *rx_fp)
1113
1114{
1115 struct fc_lport *lport = rdata->local_port;
1116 struct fc_frame *fp;
1117 struct fc_exch *ep = fc_seq_exch(sp);
1118 struct fc_els_rls *rls;
1119 struct fc_els_rls_resp *rsp;
1120 struct fc_els_lesb *lesb;
1121 struct fc_seq_els_data rjt_data;
1122 struct fc_host_statistics *hst;
1123 u32 f_ctl;
1124
1125 FC_RPORT_DBG(rdata, "Received RLS request while in state %s\n",
1126 fc_rport_state(rdata));
1127
1128 rls = fc_frame_payload_get(rx_fp, sizeof(*rls));
1129 if (!rls) {
1130 rjt_data.reason = ELS_RJT_PROT;
1131 rjt_data.explan = ELS_EXPL_INV_LEN;
1132 goto out_rjt;
1133 }
1134
1135 fp = fc_frame_alloc(lport, sizeof(*rsp));
1136 if (!fp) {
1137 rjt_data.reason = ELS_RJT_UNAB;
1138 rjt_data.explan = ELS_EXPL_INSUF_RES;
1139 goto out_rjt;
1140 }
1141
1142 rsp = fc_frame_payload_get(fp, sizeof(*rsp));
1143 memset(rsp, 0, sizeof(*rsp));
1144 rsp->rls_cmd = ELS_LS_ACC;
1145 lesb = &rsp->rls_lesb;
1146 if (lport->tt.get_lesb) {
1147 /* get LESB from LLD if it supports it */
1148 lport->tt.get_lesb(lport, lesb);
1149 } else {
1150 fc_get_host_stats(lport->host);
1151 hst = &lport->host_stats;
1152 lesb->lesb_link_fail = htonl(hst->link_failure_count);
1153 lesb->lesb_sync_loss = htonl(hst->loss_of_sync_count);
1154 lesb->lesb_sig_loss = htonl(hst->loss_of_signal_count);
1155 lesb->lesb_prim_err = htonl(hst->prim_seq_protocol_err_count);
1156 lesb->lesb_inv_word = htonl(hst->invalid_tx_word_count);
1157 lesb->lesb_inv_crc = htonl(hst->invalid_crc_count);
1158 }
1159
1160 sp = lport->tt.seq_start_next(sp);
1161 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
1162 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
1163 FC_TYPE_ELS, f_ctl, 0);
1164 lport->tt.seq_send(lport, sp, fp);
1165 goto out;
1166
1167out_rjt:
1168 rjt_data.fp = NULL;
1169 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1170out:
1171 fc_frame_free(rx_fp);
1172}
1173
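For context, the Link Error Status Block filled in above is what the requesting port receives back in the LS_ACC. A hypothetical requester-side sketch of decoding it (fc_rls_print_lesb() is illustrative only; the structures and field names are the real ones used above, and all counters are big-endian on the wire):

	static void fc_rls_print_lesb(const struct fc_els_rls_resp *rsp)
	{
		const struct fc_els_lesb *lesb = &rsp->rls_lesb;

		pr_info("RLS: link_fail %u sync_loss %u sig_loss %u "
			"prim_err %u inv_word %u inv_crc %u\n",
			ntohl(lesb->lesb_link_fail),
			ntohl(lesb->lesb_sync_loss),
			ntohl(lesb->lesb_sig_loss),
			ntohl(lesb->lesb_prim_err),
			ntohl(lesb->lesb_inv_word),
			ntohl(lesb->lesb_inv_crc));
	}
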
1174/**
1175 * fc_rport_recv_els_req() - Handler for validated ELS requests
1176 * @lport: The local port that received the ELS request
1177 * @sp: The sequence that the ELS request was on
1178 * @fp: The ELS request frame
1063 * 1179 *
1064 * Handle incoming ELS requests that require port login. 1180 * Handle incoming ELS requests that require port login.
1065 * The ELS opcode has already been validated by the caller. 1181 * The ELS opcode has already been validated by the caller.
@@ -1117,6 +1233,9 @@ static void fc_rport_recv_els_req(struct fc_lport *lport,
1117 els_data.fp = fp; 1233 els_data.fp = fp;
1118 lport->tt.seq_els_rsp_send(sp, ELS_REC, &els_data); 1234 lport->tt.seq_els_rsp_send(sp, ELS_REC, &els_data);
1119 break; 1235 break;
1236 case ELS_RLS:
1237 fc_rport_recv_rls_req(rdata, sp, fp);
1238 break;
1120 default: 1239 default:
1121 fc_frame_free(fp); /* can't happen */ 1240 fc_frame_free(fp); /* can't happen */
1122 break; 1241 break;
@@ -1131,10 +1250,10 @@ reject:
1131} 1250}
1132 1251
1133/** 1252/**
1134 * fc_rport_recv_req() - Handle a received ELS request from a rport 1253 * fc_rport_recv_req() - Handler for requests
1135 * @sp: current sequence in the PLOGI exchange 1254 * @sp: The sequence the request was on
1136 * @fp: response frame 1255 * @fp: The request frame
1137 * @lport: Fibre Channel local port 1256 * @lport: The local port that received the request
1138 * 1257 *
1139 * Locking Note: Called with the lport lock held. 1258 * Locking Note: Called with the lport lock held.
1140 */ 1259 */
@@ -1161,6 +1280,7 @@ void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp,
1161 case ELS_ADISC: 1280 case ELS_ADISC:
1162 case ELS_RRQ: 1281 case ELS_RRQ:
1163 case ELS_REC: 1282 case ELS_REC:
1283 case ELS_RLS:
1164 fc_rport_recv_els_req(lport, sp, fp); 1284 fc_rport_recv_els_req(lport, sp, fp);
1165 break; 1285 break;
1166 default: 1286 default:
@@ -1174,10 +1294,10 @@ void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp,
1174} 1294}
1175 1295
1176/** 1296/**
1177 * fc_rport_recv_plogi_req() - Handle incoming Port Login (PLOGI) request 1297 * fc_rport_recv_plogi_req() - Handler for Port Login (PLOGI) requests
1178 * @lport: local port 1298 * @lport: The local port that received the PLOGI request
1179 * @sp: current sequence in the PLOGI exchange 1299 * @sp: The sequence that the PLOGI request was on
1180 * @fp: PLOGI request frame 1300 * @rx_fp: The PLOGI request frame
1181 * 1301 *
1182 * Locking Note: The rport lock is held before calling this function. 1302 * Locking Note: The rport lock is held before calling this function.
1183 */ 1303 */
@@ -1248,6 +1368,7 @@ static void fc_rport_recv_plogi_req(struct fc_lport *lport,
1248 } 1368 }
1249 break; 1369 break;
1250 case RPORT_ST_PRLI: 1370 case RPORT_ST_PRLI:
1371 case RPORT_ST_RTV:
1251 case RPORT_ST_READY: 1372 case RPORT_ST_READY:
1252 case RPORT_ST_ADISC: 1373 case RPORT_ST_ADISC:
1253 FC_RPORT_DBG(rdata, "Received PLOGI in logged-in state %d " 1374 FC_RPORT_DBG(rdata, "Received PLOGI in logged-in state %d "
@@ -1255,11 +1376,14 @@ static void fc_rport_recv_plogi_req(struct fc_lport *lport,
1255 /* XXX TBD - should reset */ 1376 /* XXX TBD - should reset */
1256 break; 1377 break;
1257 case RPORT_ST_DELETE: 1378 case RPORT_ST_DELETE:
1258 default: 1379 case RPORT_ST_LOGO:
1259 FC_RPORT_DBG(rdata, "Received PLOGI in unexpected state %d\n", 1380 case RPORT_ST_RESTART:
1260 rdata->rp_state); 1381 FC_RPORT_DBG(rdata, "Received PLOGI in state %s - send busy\n",
1261 fc_frame_free(rx_fp); 1382 fc_rport_state(rdata));
1262 goto out; 1383 mutex_unlock(&rdata->rp_mutex);
1384 rjt_data.reason = ELS_RJT_BUSY;
1385 rjt_data.explan = ELS_EXPL_NONE;
1386 goto reject;
1263 } 1387 }
1264 1388
1265 /* 1389 /*
@@ -1295,10 +1419,10 @@ reject:
1295} 1419}
1296 1420
1297/** 1421/**
1298 * fc_rport_recv_prli_req() - Handle incoming Process Login (PRLI) request 1422 * fc_rport_recv_prli_req() - Handler for process login (PRLI) requests
1299 * @rdata: private remote port data 1423 * @rdata: The remote port that sent the PRLI request
1300 * @sp: current sequence in the PRLI exchange 1424 * @sp: The sequence that the PRLI was on
1301 * @fp: PRLI request frame 1425 * @rx_fp: The PRLI request frame
1302 * 1426 *
1303 * Locking Note: The rport lock is expected to be held before calling 1427 * Locking Note: The rport lock is expected to be held before calling
1304 * this function. 1428 * this function.
@@ -1402,7 +1526,7 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
1402 break; 1526 break;
1403 case FC_TYPE_FCP: 1527 case FC_TYPE_FCP:
1404 fcp_parm = ntohl(rspp->spp_params); 1528 fcp_parm = ntohl(rspp->spp_params);
1405 if (fcp_parm * FCP_SPPF_RETRY) 1529 if (fcp_parm & FCP_SPPF_RETRY)
1406 rdata->flags |= FC_RP_FLAGS_RETRY; 1530 rdata->flags |= FC_RP_FLAGS_RETRY;
1407 rdata->supported_classes = FC_COS_CLASS3; 1531 rdata->supported_classes = FC_COS_CLASS3;
1408 if (fcp_parm & FCP_SPPF_INIT_FCN) 1532 if (fcp_parm & FCP_SPPF_INIT_FCN)
@@ -1452,10 +1576,10 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
1452} 1576}
1453 1577
1454/** 1578/**
1455 * fc_rport_recv_prlo_req() - Handle incoming Process Logout (PRLO) request 1579 * fc_rport_recv_prlo_req() - Handler for process logout (PRLO) requests
1456 * @rdata: private remote port data 1580 * @rdata: The remote port that sent the PRLO request
1457 * @sp: current sequence in the PRLO exchange 1581 * @sp: The sequence that the PRLO was on
1458 * @fp: PRLO request frame 1582 * @fp: The PRLO request frame
1459 * 1583 *
1460 * Locking Note: The rport lock is expected to be held before calling 1584 * Locking Note: The rport lock is expected to be held before calling
1461 * this function. 1585 * this function.
@@ -1482,10 +1606,10 @@ static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata,
1482} 1606}
1483 1607
1484/** 1608/**
1485 * fc_rport_recv_logo_req() - Handle incoming Logout (LOGO) request 1609 * fc_rport_recv_logo_req() - Handler for logout (LOGO) requests
1486 * @lport: local port. 1610 * @lport: The local port that received the LOGO request
1487 * @sp: current sequence in the LOGO exchange 1611 * @sp: The sequence that the LOGO request was on
1488 * @fp: LOGO request frame 1612 * @fp: The LOGO request frame
1489 * 1613 *
1490 * Locking Note: The rport lock is expected to be held before calling 1614 * Locking Note: The rport lock is expected to be held before calling
1491 * this function. 1615 * this function.
@@ -1510,14 +1634,14 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport,
1510 FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n", 1634 FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
1511 fc_rport_state(rdata)); 1635 fc_rport_state(rdata));
1512 1636
1637 fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
1638
1513 /* 1639 /*
1514 * If the remote port was created due to discovery, 1640 * If the remote port was created due to discovery, set state
1515 * log back in. It may have seen a stale RSCN about us. 1641 * to log back in. It may have seen a stale RSCN about us.
1516 */ 1642 */
1517 if (rdata->rp_state != RPORT_ST_DELETE && rdata->disc_id) 1643 if (rdata->disc_id)
1518 fc_rport_enter_plogi(rdata); 1644 fc_rport_state_enter(rdata, RPORT_ST_RESTART);
1519 else
1520 fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
1521 mutex_unlock(&rdata->rp_mutex); 1645 mutex_unlock(&rdata->rp_mutex);
1522 } else 1646 } else
1523 FC_RPORT_ID_DBG(lport, sid, 1647 FC_RPORT_ID_DBG(lport, sid,
@@ -1526,11 +1650,18 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport,
1526 fc_frame_free(fp); 1650 fc_frame_free(fp);
1527} 1651}
1528 1652
1653/**
1654 * fc_rport_flush_queue() - Flush the rport_event_queue
1655 */
1529static void fc_rport_flush_queue(void) 1656static void fc_rport_flush_queue(void)
1530{ 1657{
1531 flush_workqueue(rport_event_queue); 1658 flush_workqueue(rport_event_queue);
1532} 1659}
1533 1660
1661/**
1662 * fc_rport_init() - Initialize the remote port layer for a local port
1663 * @lport: The local port to initialize the remote port layer for
1664 */
1534int fc_rport_init(struct fc_lport *lport) 1665int fc_rport_init(struct fc_lport *lport)
1535{ 1666{
1536 if (!lport->tt.rport_lookup) 1667 if (!lport->tt.rport_lookup)
@@ -1558,25 +1689,33 @@ int fc_rport_init(struct fc_lport *lport)
1558} 1689}
1559EXPORT_SYMBOL(fc_rport_init); 1690EXPORT_SYMBOL(fc_rport_init);
1560 1691
1561int fc_setup_rport(void) 1692/**
1693 * fc_setup_rport() - Initialize the rport_event_queue
1694 */
1695int fc_setup_rport(void)
1562{ 1696{
1563 rport_event_queue = create_singlethread_workqueue("fc_rport_eq"); 1697 rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
1564 if (!rport_event_queue) 1698 if (!rport_event_queue)
1565 return -ENOMEM; 1699 return -ENOMEM;
1566 return 0; 1700 return 0;
1567} 1701}
1568EXPORT_SYMBOL(fc_setup_rport);
1569 1702
1570void fc_destroy_rport(void) 1703/**
1704 * fc_destroy_rport() - Destroy the rport_event_queue
1705 */
1706void fc_destroy_rport(void)
1571{ 1707{
1572 destroy_workqueue(rport_event_queue); 1708 destroy_workqueue(rport_event_queue);
1573} 1709}
1574EXPORT_SYMBOL(fc_destroy_rport);
1575 1710
1711/**
1712 * fc_rport_terminate_io() - Stop all outstanding I/O on a remote port
1713 * @rport: The remote port whose I/O should be terminated
1714 */
1576void fc_rport_terminate_io(struct fc_rport *rport) 1715void fc_rport_terminate_io(struct fc_rport *rport)
1577{ 1716{
1578 struct fc_rport_libfc_priv *rp = rport->dd_data; 1717 struct fc_rport_libfc_priv *rpriv = rport->dd_data;
1579 struct fc_lport *lport = rp->local_port; 1718 struct fc_lport *lport = rpriv->local_port;
1580 1719
1581 lport->tt.exch_mgr_reset(lport, 0, rport->port_id); 1720 lport->tt.exch_mgr_reset(lport, 0, rport->port_id);
1582 lport->tt.exch_mgr_reset(lport, rport->port_id, 0); 1721 lport->tt.exch_mgr_reset(lport, rport->port_id, 0);
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index f1a4246f890c..633e09036357 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -25,6 +25,7 @@
25#include <linux/kfifo.h> 25#include <linux/kfifo.h>
26#include <linux/delay.h> 26#include <linux/delay.h>
27#include <linux/log2.h> 27#include <linux/log2.h>
28#include <linux/slab.h>
28#include <asm/unaligned.h> 29#include <asm/unaligned.h>
29#include <net/tcp.h> 30#include <net/tcp.h>
30#include <scsi/scsi_cmnd.h> 31#include <scsi/scsi_cmnd.h>
@@ -266,6 +267,88 @@ static int iscsi_prep_bidi_ahs(struct iscsi_task *task)
266} 267}
267 268
268/** 269/**
270 * iscsi_check_tmf_restrictions - check if a task is affected by TMF
271 * @task: iscsi task
272 * @opcode: opcode to check for
273 *
274 * While a TMF is outstanding, each task must be checked to see whether it is affected.
275 * All unrelated I/O can be passed through, but I/O to the
276 * affected LUN should be restricted.
277 * If 'fast_abort' is set we won't be sending any I/O to the
278 * affected LUN.
279 * Otherwise the target is waiting for all TTTs to be completed,
280 * so we have to send all outstanding Data-Out PDUs to the target.
281 */
282static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
283{
284 struct iscsi_conn *conn = task->conn;
285 struct iscsi_tm *tmf = &conn->tmhdr;
286 unsigned int hdr_lun;
287
288 if (conn->tmf_state == TMF_INITIAL)
289 return 0;
290
291 if ((tmf->opcode & ISCSI_OPCODE_MASK) != ISCSI_OP_SCSI_TMFUNC)
292 return 0;
293
294 switch (ISCSI_TM_FUNC_VALUE(tmf)) {
295 case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
296 /*
297 * Allow PDUs for unrelated LUNs
298 */
299 hdr_lun = scsilun_to_int((struct scsi_lun *)tmf->lun);
300 if (hdr_lun != task->sc->device->lun)
301 return 0;
302 /* fall through */
303 case ISCSI_TM_FUNC_TARGET_WARM_RESET:
304 /*
305 * Fail all SCSI cmd PDUs
306 */
307 if (opcode != ISCSI_OP_SCSI_DATA_OUT) {
308 iscsi_conn_printk(KERN_INFO, conn,
309 "task [op %x/%x itt "
310 "0x%x/0x%x] "
311 "rejected.\n",
312 task->hdr->opcode, opcode,
313 task->itt, task->hdr_itt);
314 return -EACCES;
315 }
316 /*
317 * And also all data-out PDUs in response to R2T
318 * if fast_abort is set.
319 */
320 if (conn->session->fast_abort) {
321 iscsi_conn_printk(KERN_INFO, conn,
322 "task [op %x/%x itt "
323 "0x%x/0x%x] fast abort.\n",
324 task->hdr->opcode, opcode,
325 task->itt, task->hdr_itt);
326 return -EACCES;
327 }
328 break;
329 case ISCSI_TM_FUNC_ABORT_TASK:
330 /*
331 * the caller has already checked if the task
332 * they want to abort was in the pending queue so if
333 * we are here the cmd pdu has gone out already, and
334 * we will only hit this for data-outs
335 */
336 if (opcode == ISCSI_OP_SCSI_DATA_OUT &&
337 task->hdr_itt == tmf->rtt) {
338 ISCSI_DBG_SESSION(conn->session,
339 "Preventing task %x/%x from sending "
340 "data-out due to abort task in "
341 "progress\n", task->itt,
342 task->hdr_itt);
343 return -EACCES;
344 }
345 break;
346 }
347
348 return 0;
349}
350
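How this gate is used by the callers added later in this patch: iscsi_prep_scsi_cmd_pdu() checks new commands with ISCSI_OP_SCSI_CMD, and iscsi_data_xmit() checks requeued tasks with ISCSI_OP_SCSI_DATA_OUT. The -EACCES return is a soft failure; condensed from the iscsi_data_xmit() hunk further down:

	/* requeue loop: leave the task queued until the TMF window closes */
	task = list_entry(conn->requeue.next, struct iscsi_task, running);
	if (iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_DATA_OUT))
		break;
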
351/**
269 * iscsi_prep_scsi_cmd_pdu - prep iscsi scsi cmd pdu 352 * iscsi_prep_scsi_cmd_pdu - prep iscsi scsi cmd pdu
270 * @task: iscsi task 353 * @task: iscsi task
271 * 354 *
@@ -282,6 +365,10 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
282 itt_t itt; 365 itt_t itt;
283 int rc; 366 int rc;
284 367
368 rc = iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_CMD);
369 if (rc)
370 return rc;
371
285 if (conn->session->tt->alloc_pdu) { 372 if (conn->session->tt->alloc_pdu) {
286 rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD); 373 rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD);
287 if (rc) 374 if (rc)
@@ -384,12 +471,12 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
384 471
385 WARN_ON(hdrlength >= 256); 472 WARN_ON(hdrlength >= 256);
386 hdr->hlength = hdrlength & 0xFF; 473 hdr->hlength = hdrlength & 0xFF;
474 hdr->cmdsn = task->cmdsn = cpu_to_be32(session->cmdsn);
387 475
388 if (session->tt->init_task && session->tt->init_task(task)) 476 if (session->tt->init_task && session->tt->init_task(task))
389 return -EIO; 477 return -EIO;
390 478
391 task->state = ISCSI_TASK_RUNNING; 479 task->state = ISCSI_TASK_RUNNING;
392 hdr->cmdsn = task->cmdsn = cpu_to_be32(session->cmdsn);
393 session->cmdsn++; 480 session->cmdsn++;
394 481
395 conn->scsicmd_pdus_cnt++; 482 conn->scsicmd_pdus_cnt++;
@@ -431,7 +518,7 @@ static void iscsi_free_task(struct iscsi_task *task)
431 if (conn->login_task == task) 518 if (conn->login_task == task)
432 return; 519 return;
433 520
434 __kfifo_put(session->cmdpool.queue, (void*)&task, sizeof(void*)); 521 kfifo_in(&session->cmdpool.queue, (void*)&task, sizeof(void*));
435 522
436 if (sc) { 523 if (sc) {
437 task->sc = NULL; 524 task->sc = NULL;
@@ -577,12 +664,12 @@ static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
577 struct iscsi_session *session = conn->session; 664 struct iscsi_session *session = conn->session;
578 struct iscsi_hdr *hdr = task->hdr; 665 struct iscsi_hdr *hdr = task->hdr;
579 struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr; 666 struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
667 uint8_t opcode = hdr->opcode & ISCSI_OPCODE_MASK;
580 668
581 if (conn->session->state == ISCSI_STATE_LOGGING_OUT) 669 if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
582 return -ENOTCONN; 670 return -ENOTCONN;
583 671
584 if (hdr->opcode != (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) && 672 if (opcode != ISCSI_OP_LOGIN && opcode != ISCSI_OP_TEXT)
585 hdr->opcode != (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
586 nop->exp_statsn = cpu_to_be32(conn->exp_statsn); 673 nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
587 /* 674 /*
588 * pre-format CmdSN for outgoing PDU. 675 * pre-format CmdSN for outgoing PDU.
@@ -590,9 +677,12 @@ static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
590 nop->cmdsn = cpu_to_be32(session->cmdsn); 677 nop->cmdsn = cpu_to_be32(session->cmdsn);
591 if (hdr->itt != RESERVED_ITT) { 678 if (hdr->itt != RESERVED_ITT) {
592 /* 679 /*
593 * TODO: We always use immediate, so we never hit this. 680 * TODO: We always use immediate for normal session pdus.
594 * If we start to send tmfs or nops as non-immediate then 681 * If we start to send tmfs or nops as non-immediate then
595 * we should start checking the cmdsn numbers for mgmt tasks. 682 * we should start checking the cmdsn numbers for mgmt tasks.
683 *
684 * During discovery sessions iscsid sends TEXT as non-immediate,
685 * but we always only send one PDU at a time.
596 */ 686 */
597 if (conn->c_stage == ISCSI_CONN_STARTED && 687 if (conn->c_stage == ISCSI_CONN_STARTED &&
598 !(hdr->opcode & ISCSI_OP_IMMEDIATE)) { 688 !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
@@ -620,29 +710,35 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
620{ 710{
621 struct iscsi_session *session = conn->session; 711 struct iscsi_session *session = conn->session;
622 struct iscsi_host *ihost = shost_priv(session->host); 712 struct iscsi_host *ihost = shost_priv(session->host);
713 uint8_t opcode = hdr->opcode & ISCSI_OPCODE_MASK;
623 struct iscsi_task *task; 714 struct iscsi_task *task;
624 itt_t itt; 715 itt_t itt;
625 716
626 if (session->state == ISCSI_STATE_TERMINATE) 717 if (session->state == ISCSI_STATE_TERMINATE)
627 return NULL; 718 return NULL;
628 719
629 if (hdr->opcode == (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) || 720 if (opcode == ISCSI_OP_LOGIN || opcode == ISCSI_OP_TEXT) {
630 hdr->opcode == (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
631 /* 721 /*
632 * Login and Text are sent serially, in 722 * Login and Text are sent serially, in
633 * request-followed-by-response sequence. 723 * request-followed-by-response sequence.
634 * Same task can be used. Same ITT must be used. 724 * Same task can be used. Same ITT must be used.
635 * Note that login_task is preallocated at conn_create(). 725 * Note that login_task is preallocated at conn_create().
636 */ 726 */
727 if (conn->login_task->state != ISCSI_TASK_FREE) {
728 iscsi_conn_printk(KERN_ERR, conn, "Login/Text in "
729 "progress. Cannot start new task.\n");
730 return NULL;
731 }
732
637 task = conn->login_task; 733 task = conn->login_task;
638 else { 734 } else {
639 if (session->state != ISCSI_STATE_LOGGED_IN) 735 if (session->state != ISCSI_STATE_LOGGED_IN)
640 return NULL; 736 return NULL;
641 737
642 BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE); 738 BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
643 BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED); 739 BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
644 740
645 if (!__kfifo_get(session->cmdpool.queue, 741 if (!kfifo_out(&session->cmdpool.queue,
646 (void*)&task, sizeof(void*))) 742 (void*)&task, sizeof(void*)))
647 return NULL; 743 return NULL;
648 } 744 }
@@ -1357,6 +1453,7 @@ EXPORT_SYMBOL_GPL(iscsi_requeue_task);
1357 **/ 1453 **/
1358static int iscsi_data_xmit(struct iscsi_conn *conn) 1454static int iscsi_data_xmit(struct iscsi_conn *conn)
1359{ 1455{
1456 struct iscsi_task *task;
1360 int rc = 0; 1457 int rc = 0;
1361 1458
1362 spin_lock_bh(&conn->session->lock); 1459 spin_lock_bh(&conn->session->lock);
@@ -1394,11 +1491,8 @@ check_mgmt:
1394 1491
1395 /* process pending command queue */ 1492 /* process pending command queue */
1396 while (!list_empty(&conn->cmdqueue)) { 1493 while (!list_empty(&conn->cmdqueue)) {
1397 if (conn->tmf_state == TMF_QUEUED) 1494 conn->task = list_entry(conn->cmdqueue.next, struct iscsi_task,
1398 break; 1495 running);
1399
1400 conn->task = list_entry(conn->cmdqueue.next,
1401 struct iscsi_task, running);
1402 list_del_init(&conn->task->running); 1496 list_del_init(&conn->task->running);
1403 if (conn->session->state == ISCSI_STATE_LOGGING_OUT) { 1497 if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
1404 fail_scsi_task(conn->task, DID_IMM_RETRY); 1498 fail_scsi_task(conn->task, DID_IMM_RETRY);
@@ -1406,7 +1500,7 @@ check_mgmt:
1406 } 1500 }
1407 rc = iscsi_prep_scsi_cmd_pdu(conn->task); 1501 rc = iscsi_prep_scsi_cmd_pdu(conn->task);
1408 if (rc) { 1502 if (rc) {
1409 if (rc == -ENOMEM) { 1503 if (rc == -ENOMEM || rc == -EACCES) {
1410 list_add_tail(&conn->task->running, 1504 list_add_tail(&conn->task->running,
1411 &conn->cmdqueue); 1505 &conn->cmdqueue);
1412 conn->task = NULL; 1506 conn->task = NULL;
@@ -1428,17 +1522,18 @@ check_mgmt:
1428 } 1522 }
1429 1523
1430 while (!list_empty(&conn->requeue)) { 1524 while (!list_empty(&conn->requeue)) {
1431 if (conn->session->fast_abort && conn->tmf_state != TMF_INITIAL)
1432 break;
1433
1434 /* 1525 /*
1435 * we always do fastlogout - conn stop code will clean up. 1526 * we always do fastlogout - conn stop code will clean up.
1436 */ 1527 */
1437 if (conn->session->state == ISCSI_STATE_LOGGING_OUT) 1528 if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
1438 break; 1529 break;
1439 1530
1440 conn->task = list_entry(conn->requeue.next, 1531 task = list_entry(conn->requeue.next, struct iscsi_task,
1441 struct iscsi_task, running); 1532 running);
1533 if (iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_DATA_OUT))
1534 break;
1535
1536 conn->task = task;
1442 list_del_init(&conn->task->running); 1537 list_del_init(&conn->task->running);
1443 conn->task->state = ISCSI_TASK_RUNNING; 1538 conn->task->state = ISCSI_TASK_RUNNING;
1444 rc = iscsi_xmit_task(conn); 1539 rc = iscsi_xmit_task(conn);
@@ -1473,7 +1568,7 @@ static inline struct iscsi_task *iscsi_alloc_task(struct iscsi_conn *conn,
1473{ 1568{
1474 struct iscsi_task *task; 1569 struct iscsi_task *task;
1475 1570
1476 if (!__kfifo_get(conn->session->cmdpool.queue, 1571 if (!kfifo_out(&conn->session->cmdpool.queue,
1477 (void *) &task, sizeof(void *))) 1572 (void *) &task, sizeof(void *)))
1478 return NULL; 1573 return NULL;
1479 1574
@@ -1591,7 +1686,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1591 if (!ihost->workq) { 1686 if (!ihost->workq) {
1592 reason = iscsi_prep_scsi_cmd_pdu(task); 1687 reason = iscsi_prep_scsi_cmd_pdu(task);
1593 if (reason) { 1688 if (reason) {
1594 if (reason == -ENOMEM) { 1689 if (reason == -ENOMEM || reason == -EACCES) {
1595 reason = FAILURE_OOM; 1690 reason = FAILURE_OOM;
1596 goto prepd_reject; 1691 goto prepd_reject;
1597 } else { 1692 } else {
@@ -1643,9 +1738,21 @@ fault:
1643} 1738}
1644EXPORT_SYMBOL_GPL(iscsi_queuecommand); 1739EXPORT_SYMBOL_GPL(iscsi_queuecommand);
1645 1740
1646int iscsi_change_queue_depth(struct scsi_device *sdev, int depth) 1741int iscsi_change_queue_depth(struct scsi_device *sdev, int depth, int reason)
1647{ 1742{
1648 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth); 1743 switch (reason) {
1744 case SCSI_QDEPTH_DEFAULT:
1745 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
1746 break;
1747 case SCSI_QDEPTH_QFULL:
1748 scsi_track_queue_full(sdev, depth);
1749 break;
1750 case SCSI_QDEPTH_RAMP_UP:
1751 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
1752 break;
1753 default:
1754 return -EOPNOTSUPP;
1755 }
1649 return sdev->queue_depth; 1756 return sdev->queue_depth;
1650} 1757}
1651EXPORT_SYMBOL_GPL(iscsi_change_queue_depth); 1758EXPORT_SYMBOL_GPL(iscsi_change_queue_depth);
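
The callback now dispatches on why scsi-ml asked for a change. A loose sketch of the calling convention, not the exact scsi-ml call sites (sdev and depth are assumed to be in scope): SCSI_QDEPTH_DEFAULT and SCSI_QDEPTH_RAMP_UP adjust the depth directly, while SCSI_QDEPTH_QFULL hands the event to scsi_track_queue_full(), which lowers the depth and later triggers the ramp-up:

	int new_depth;

	/* after the target returns QUEUE FULL */
	new_depth = iscsi_change_queue_depth(sdev, depth, SCSI_QDEPTH_QFULL);

	/* once I/O has been completing cleanly for a while */
	new_depth = iscsi_change_queue_depth(sdev, depth + 1, SCSI_QDEPTH_RAMP_UP);
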
@@ -1660,72 +1767,6 @@ int iscsi_target_alloc(struct scsi_target *starget)
1660} 1767}
1661EXPORT_SYMBOL_GPL(iscsi_target_alloc); 1768EXPORT_SYMBOL_GPL(iscsi_target_alloc);
1662 1769
1663void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session)
1664{
1665 struct iscsi_session *session = cls_session->dd_data;
1666
1667 spin_lock_bh(&session->lock);
1668 if (session->state != ISCSI_STATE_LOGGED_IN) {
1669 session->state = ISCSI_STATE_RECOVERY_FAILED;
1670 if (session->leadconn)
1671 wake_up(&session->leadconn->ehwait);
1672 }
1673 spin_unlock_bh(&session->lock);
1674}
1675EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout);
1676
1677int iscsi_eh_target_reset(struct scsi_cmnd *sc)
1678{
1679 struct iscsi_cls_session *cls_session;
1680 struct iscsi_session *session;
1681 struct iscsi_conn *conn;
1682
1683 cls_session = starget_to_session(scsi_target(sc->device));
1684 session = cls_session->dd_data;
1685 conn = session->leadconn;
1686
1687 mutex_lock(&session->eh_mutex);
1688 spin_lock_bh(&session->lock);
1689 if (session->state == ISCSI_STATE_TERMINATE) {
1690failed:
1691 ISCSI_DBG_EH(session,
1692 "failing target reset: Could not log back into "
1693 "target [age %d]\n",
1694 session->age);
1695 spin_unlock_bh(&session->lock);
1696 mutex_unlock(&session->eh_mutex);
1697 return FAILED;
1698 }
1699
1700 spin_unlock_bh(&session->lock);
1701 mutex_unlock(&session->eh_mutex);
1702 /*
1703 * we drop the lock here but the leadconn cannot be destroyed while
1704 * we are in the scsi eh
1705 */
1706 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1707
1708 ISCSI_DBG_EH(session, "wait for relogin\n");
1709 wait_event_interruptible(conn->ehwait,
1710 session->state == ISCSI_STATE_TERMINATE ||
1711 session->state == ISCSI_STATE_LOGGED_IN ||
1712 session->state == ISCSI_STATE_RECOVERY_FAILED);
1713 if (signal_pending(current))
1714 flush_signals(current);
1715
1716 mutex_lock(&session->eh_mutex);
1717 spin_lock_bh(&session->lock);
1718 if (session->state == ISCSI_STATE_LOGGED_IN) {
1719 ISCSI_DBG_EH(session,
1720 "target reset succeeded\n");
1721 } else
1722 goto failed;
1723 spin_unlock_bh(&session->lock);
1724 mutex_unlock(&session->eh_mutex);
1725 return SUCCESS;
1726}
1727EXPORT_SYMBOL_GPL(iscsi_eh_target_reset);
1728
1729static void iscsi_tmf_timedout(unsigned long data) 1770static void iscsi_tmf_timedout(unsigned long data)
1730{ 1771{
1731 struct iscsi_conn *conn = (struct iscsi_conn *)data; 1772 struct iscsi_conn *conn = (struct iscsi_conn *)data;
@@ -1879,10 +1920,11 @@ static int iscsi_has_ping_timed_out(struct iscsi_conn *conn)
1879static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) 1920static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
1880{ 1921{
1881 enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED; 1922 enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED;
1882 struct iscsi_task *task = NULL; 1923 struct iscsi_task *task = NULL, *running_task;
1883 struct iscsi_cls_session *cls_session; 1924 struct iscsi_cls_session *cls_session;
1884 struct iscsi_session *session; 1925 struct iscsi_session *session;
1885 struct iscsi_conn *conn; 1926 struct iscsi_conn *conn;
1927 int i;
1886 1928
1887 cls_session = starget_to_session(scsi_target(sc->device)); 1929 cls_session = starget_to_session(scsi_target(sc->device));
1888 session = cls_session->dd_data; 1930 session = cls_session->dd_data;
@@ -1907,8 +1949,15 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
1907 } 1949 }
1908 1950
1909 task = (struct iscsi_task *)sc->SCp.ptr; 1951 task = (struct iscsi_task *)sc->SCp.ptr;
1910 if (!task) 1952 if (!task) {
1953 /*
1954 * Raced with completion. Just reset timer, and let it
1955 * complete normally
1956 */
1957 rc = BLK_EH_RESET_TIMER;
1911 goto done; 1958 goto done;
1959 }
1960
1912 /* 1961 /*
1913 * If we have sent (at least queued to the network layer) a pdu or 1962 * If we have sent (at least queued to the network layer) a pdu or
1914 * recvd one for the task since the last timeout ask for 1963 * recvd one for the task since the last timeout ask for
@@ -1916,10 +1965,10 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
1916 * we can check if it is the task or connection when we send the 1965 * we can check if it is the task or connection when we send the
1917 * nop as a ping. 1966 * nop as a ping.
1918 */ 1967 */
1919 if (time_after_eq(task->last_xfer, task->last_timeout)) { 1968 if (time_after(task->last_xfer, task->last_timeout)) {
1920 ISCSI_DBG_EH(session, "Command making progress. Asking " 1969 ISCSI_DBG_EH(session, "Command making progress. Asking "
1921 "scsi-ml for more time to complete. " 1970 "scsi-ml for more time to complete. "
1922 "Last data recv at %lu. Last timeout was at " 1971 "Last data xfer at %lu. Last timeout was at "
1923 "%lu\n.", task->last_xfer, task->last_timeout); 1972 "%lu\n.", task->last_xfer, task->last_timeout);
1924 task->have_checked_conn = false; 1973 task->have_checked_conn = false;
1925 rc = BLK_EH_RESET_TIMER; 1974 rc = BLK_EH_RESET_TIMER;
@@ -1937,6 +1986,43 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
1937 goto done; 1986 goto done;
1938 } 1987 }
1939 1988
1989 for (i = 0; i < conn->session->cmds_max; i++) {
1990 running_task = conn->session->cmds[i];
1991 if (!running_task->sc || running_task == task ||
1992 running_task->state != ISCSI_TASK_RUNNING)
1993 continue;
1994
1995 /*
1996 * Only check if cmds started before this one have made
1997 * progress, or this could never fail
1998 */
1999 if (time_after(running_task->sc->jiffies_at_alloc,
2000 task->sc->jiffies_at_alloc))
2001 continue;
2002
2003 if (time_after(running_task->last_xfer, task->last_timeout)) {
2004 /*
2005 * This task has not made progress, but a task
2006 * started before us has transferred data since
2007 * we started/last-checked. We could be queueing
2008 * too many tasks or the LU is bad.
2009 *
2010 * If the device is bad the cmds ahead of us on
2011 * other devs will complete, and this loop will
2012 * eventually fail starting the scsi eh.
2013 */
2014 ISCSI_DBG_EH(session, "Command has not made progress "
2015 "but commands ahead of it have. "
2016 "Asking scsi-ml for more time to "
2017 "complete. Our last xfer vs running task "
2018 "last xfer %lu/%lu. Last check %lu.\n",
2019 task->last_xfer, running_task->last_xfer,
2020 task->last_timeout);
2021 rc = BLK_EH_RESET_TIMER;
2022 goto done;
2023 }
2024 }
2025
1940 /* Assumes nop timeout is shorter than scsi cmd timeout */ 2026 /* Assumes nop timeout is shorter than scsi cmd timeout */
1941 if (task->have_checked_conn) 2027 if (task->have_checked_conn)
1942 goto done; 2028 goto done;
@@ -2108,6 +2194,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
2108 spin_lock_bh(&session->lock); 2194 spin_lock_bh(&session->lock);
2109 fail_scsi_task(task, DID_ABORT); 2195 fail_scsi_task(task, DID_ABORT);
2110 conn->tmf_state = TMF_INITIAL; 2196 conn->tmf_state = TMF_INITIAL;
2197 memset(hdr, 0, sizeof(*hdr));
2111 spin_unlock_bh(&session->lock); 2198 spin_unlock_bh(&session->lock);
2112 iscsi_start_tx(conn); 2199 iscsi_start_tx(conn);
2113 goto success_unlocked; 2200 goto success_unlocked;
@@ -2118,6 +2205,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
2118 case TMF_NOT_FOUND: 2205 case TMF_NOT_FOUND:
2119 if (!sc->SCp.ptr) { 2206 if (!sc->SCp.ptr) {
2120 conn->tmf_state = TMF_INITIAL; 2207 conn->tmf_state = TMF_INITIAL;
2208 memset(hdr, 0, sizeof(*hdr));
2121 /* task completed before tmf abort response */ 2209 /* task completed before tmf abort response */
2122 ISCSI_DBG_EH(session, "sc completed while abort in " 2210 ISCSI_DBG_EH(session, "sc completed while abort in "
2123 "progress\n"); 2211 "progress\n");
@@ -2212,6 +2300,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
2212 iscsi_suspend_tx(conn); 2300 iscsi_suspend_tx(conn);
2213 2301
2214 spin_lock_bh(&session->lock); 2302 spin_lock_bh(&session->lock);
2303 memset(hdr, 0, sizeof(*hdr));
2215 fail_scsi_tasks(conn, sc->device->lun, DID_ERROR); 2304 fail_scsi_tasks(conn, sc->device->lun, DID_ERROR);
2216 conn->tmf_state = TMF_INITIAL; 2305 conn->tmf_state = TMF_INITIAL;
2217 spin_unlock_bh(&session->lock); 2306 spin_unlock_bh(&session->lock);
@@ -2229,6 +2318,187 @@ done:
2229} 2318}
2230EXPORT_SYMBOL_GPL(iscsi_eh_device_reset); 2319EXPORT_SYMBOL_GPL(iscsi_eh_device_reset);
2231 2320
2321void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session)
2322{
2323 struct iscsi_session *session = cls_session->dd_data;
2324
2325 spin_lock_bh(&session->lock);
2326 if (session->state != ISCSI_STATE_LOGGED_IN) {
2327 session->state = ISCSI_STATE_RECOVERY_FAILED;
2328 if (session->leadconn)
2329 wake_up(&session->leadconn->ehwait);
2330 }
2331 spin_unlock_bh(&session->lock);
2332}
2333EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout);
2334
2335/**
2336 * iscsi_eh_session_reset - drop session and attempt relogin
2337 * @sc: scsi command
2338 *
2339 * This function will wait for a relogin, session termination from
2340 * userspace, or a recovery/replacement timeout.
2341 */
2342int iscsi_eh_session_reset(struct scsi_cmnd *sc)
2343{
2344 struct iscsi_cls_session *cls_session;
2345 struct iscsi_session *session;
2346 struct iscsi_conn *conn;
2347
2348 cls_session = starget_to_session(scsi_target(sc->device));
2349 session = cls_session->dd_data;
2350 conn = session->leadconn;
2351
2352 mutex_lock(&session->eh_mutex);
2353 spin_lock_bh(&session->lock);
2354 if (session->state == ISCSI_STATE_TERMINATE) {
2355failed:
2356 ISCSI_DBG_EH(session,
2357 "failing session reset: Could not log back into "
2358 "%s, %s [age %d]\n", session->targetname,
2359 conn->persistent_address, session->age);
2360 spin_unlock_bh(&session->lock);
2361 mutex_unlock(&session->eh_mutex);
2362 return FAILED;
2363 }
2364
2365 spin_unlock_bh(&session->lock);
2366 mutex_unlock(&session->eh_mutex);
2367 /*
2368 * we drop the lock here but the leadconn cannot be destroyed while
2369 * we are in the scsi eh
2370 */
2371 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
2372
2373 ISCSI_DBG_EH(session, "wait for relogin\n");
2374 wait_event_interruptible(conn->ehwait,
2375 session->state == ISCSI_STATE_TERMINATE ||
2376 session->state == ISCSI_STATE_LOGGED_IN ||
2377 session->state == ISCSI_STATE_RECOVERY_FAILED);
2378 if (signal_pending(current))
2379 flush_signals(current);
2380
2381 mutex_lock(&session->eh_mutex);
2382 spin_lock_bh(&session->lock);
2383 if (session->state == ISCSI_STATE_LOGGED_IN) {
2384 ISCSI_DBG_EH(session,
2385 "session reset succeeded for %s,%s\n",
2386 session->targetname, conn->persistent_address);
2387 } else
2388 goto failed;
2389 spin_unlock_bh(&session->lock);
2390 mutex_unlock(&session->eh_mutex);
2391 return SUCCESS;
2392}
2393EXPORT_SYMBOL_GPL(iscsi_eh_session_reset);
2394
2395static void iscsi_prep_tgt_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr)
2396{
2397 memset(hdr, 0, sizeof(*hdr));
2398 hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
2399 hdr->flags = ISCSI_TM_FUNC_TARGET_WARM_RESET & ISCSI_FLAG_TM_FUNC_MASK;
2400 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
2401 hdr->rtt = RESERVED_ITT;
2402}
2403
2404/**
2405 * iscsi_eh_target_reset - reset target
2406 * @sc: scsi command
2407 *
2408 * This will attempt to send a warm target reset.
2409 */
2410int iscsi_eh_target_reset(struct scsi_cmnd *sc)
2411{
2412 struct iscsi_cls_session *cls_session;
2413 struct iscsi_session *session;
2414 struct iscsi_conn *conn;
2415 struct iscsi_tm *hdr;
2416 int rc = FAILED;
2417
2418 cls_session = starget_to_session(scsi_target(sc->device));
2419 session = cls_session->dd_data;
2420
2421 ISCSI_DBG_EH(session, "tgt Reset [sc %p tgt %s]\n", sc,
2422 session->targetname);
2423
2424 mutex_lock(&session->eh_mutex);
2425 spin_lock_bh(&session->lock);
2426 /*
2427 * Just check if we are not logged in. We cannot check for
2428 * the phase because the reset could come from an ioctl.
2429 */
2430 if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN)
2431 goto unlock;
2432 conn = session->leadconn;
2433
2434 /* only have one tmf outstanding at a time */
2435 if (conn->tmf_state != TMF_INITIAL)
2436 goto unlock;
2437 conn->tmf_state = TMF_QUEUED;
2438
2439 hdr = &conn->tmhdr;
2440 iscsi_prep_tgt_reset_pdu(sc, hdr);
2441
2442 if (iscsi_exec_task_mgmt_fn(conn, hdr, session->age,
2443 session->tgt_reset_timeout)) {
2444 rc = FAILED;
2445 goto unlock;
2446 }
2447
2448 switch (conn->tmf_state) {
2449 case TMF_SUCCESS:
2450 break;
2451 case TMF_TIMEDOUT:
2452 spin_unlock_bh(&session->lock);
2453 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
2454 goto done;
2455 default:
2456 conn->tmf_state = TMF_INITIAL;
2457 goto unlock;
2458 }
2459
2460 rc = SUCCESS;
2461 spin_unlock_bh(&session->lock);
2462
2463 iscsi_suspend_tx(conn);
2464
2465 spin_lock_bh(&session->lock);
2466 memset(hdr, 0, sizeof(*hdr));
2467 fail_scsi_tasks(conn, -1, DID_ERROR);
2468 conn->tmf_state = TMF_INITIAL;
2469 spin_unlock_bh(&session->lock);
2470
2471 iscsi_start_tx(conn);
2472 goto done;
2473
2474unlock:
2475 spin_unlock_bh(&session->lock);
2476done:
2477 ISCSI_DBG_EH(session, "tgt %s reset result = %s\n", session->targetname,
2478 rc == SUCCESS ? "SUCCESS" : "FAILED");
2479 mutex_unlock(&session->eh_mutex);
2480 return rc;
2481}
2482EXPORT_SYMBOL_GPL(iscsi_eh_target_reset);
2483
2484/**
2485 * iscsi_eh_recover_target - reset target and possibly the session
2486 * @sc: scsi command
2487 *
2488 * This will attempt to send a warm target reset. If that fails,
2489 * we will escalate to ERL0 session recovery.
2490 */
2491int iscsi_eh_recover_target(struct scsi_cmnd *sc)
2492{
2493 int rc;
2494
2495 rc = iscsi_eh_target_reset(sc);
2496 if (rc == FAILED)
2497 rc = iscsi_eh_session_reset(sc);
2498 return rc;
2499}
2500EXPORT_SYMBOL_GPL(iscsi_eh_recover_target);
2501
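Because iscsi_eh_target_reset() no longer escalates on its own, a driver that wants the old reset-then-relogin behaviour should point its error handler at the new wrapper. A sketch, assuming the LLD otherwise uses the stock libiscsi handlers:

	static struct scsi_host_template my_iscsi_sht = {
		/* ... */
		.eh_abort_handler		= iscsi_eh_abort,
		.eh_device_reset_handler	= iscsi_eh_device_reset,
		.eh_target_reset_handler	= iscsi_eh_recover_target,
		/* ... */
	};
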
2232/* 2502/*
2233 * Pre-allocate a pool of @max items of @item_size. By default, the pool 2503 * Pre-allocate a pool of @max items of @item_size. By default, the pool
2234 * should be accessed via kfifo_{get,put} on q->queue. 2504 * should be accessed via kfifo_{get,put} on q->queue.
@@ -2252,12 +2522,7 @@ iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size)
2252 if (q->pool == NULL) 2522 if (q->pool == NULL)
2253 return -ENOMEM; 2523 return -ENOMEM;
2254 2524
2255 q->queue = kfifo_init((void*)q->pool, max * sizeof(void*), 2525 kfifo_init(&q->queue, (void*)q->pool, max * sizeof(void*));
2256 GFP_KERNEL, NULL);
2257 if (IS_ERR(q->queue)) {
2258 q->queue = NULL;
2259 goto enomem;
2260 }
2261 2526
2262 for (i = 0; i < max; i++) { 2527 for (i = 0; i < max; i++) {
2263 q->pool[i] = kzalloc(item_size, GFP_KERNEL); 2528 q->pool[i] = kzalloc(item_size, GFP_KERNEL);
@@ -2265,7 +2530,7 @@ iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size)
2265 q->max = i; 2530 q->max = i;
2266 goto enomem; 2531 goto enomem;
2267 } 2532 }
2268 __kfifo_put(q->queue, (void*)&q->pool[i], sizeof(void*)); 2533 kfifo_in(&q->queue, (void*)&q->pool[i], sizeof(void*));
2269 } 2534 }
2270 2535
2271 if (items) { 2536 if (items) {
@@ -2288,7 +2553,6 @@ void iscsi_pool_free(struct iscsi_pool *q)
2288 for (i = 0; i < q->max; i++) 2553 for (i = 0; i < q->max; i++)
2289 kfree(q->pool[i]); 2554 kfree(q->pool[i]);
2290 kfree(q->pool); 2555 kfree(q->pool);
2291 kfree(q->queue);
2292} 2556}
2293EXPORT_SYMBOL_GPL(iscsi_pool_free); 2557EXPORT_SYMBOL_GPL(iscsi_pool_free);
2294 2558
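The kfifo hunks in this file and in libiscsi.c above track the 2.6.33 kfifo rework: struct kfifo is now embedded by value, kfifo_init() takes a caller-supplied buffer and returns void, and __kfifo_get()/__kfifo_put() become kfifo_out()/kfifo_in(). A condensed sketch of the pool pattern after conversion (fields as used in iscsi_pool_init() above; allocation error handling elided):

	struct iscsi_pool q;	/* embeds struct kfifo queue */
	void *item;
	int max = 64;		/* example pool size */

	q.pool = kzalloc(max * sizeof(void *), GFP_KERNEL);
	kfifo_init(&q.queue, (void *)q.pool, max * sizeof(void *));

	/* producer */
	kfifo_in(&q.queue, (void *)&item, sizeof(void *));

	/* consumer: kfifo_out() returns the number of bytes copied */
	if (kfifo_out(&q.queue, (void *)&item, sizeof(void *)) != sizeof(void *))
		item = NULL;	/* pool exhausted */
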
@@ -2495,6 +2759,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
2495 session->host = shost; 2759 session->host = shost;
2496 session->state = ISCSI_STATE_FREE; 2760 session->state = ISCSI_STATE_FREE;
2497 session->fast_abort = 1; 2761 session->fast_abort = 1;
2762 session->tgt_reset_timeout = 30;
2498 session->lu_reset_timeout = 15; 2763 session->lu_reset_timeout = 15;
2499 session->abort_timeout = 10; 2764 session->abort_timeout = 10;
2500 session->scsi_cmds_max = scsi_cmds; 2765 session->scsi_cmds_max = scsi_cmds;
@@ -2615,7 +2880,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
2615 2880
2616 /* allocate login_task used for the login/text sequences */ 2881 /* allocate login_task used for the login/text sequences */
2617 spin_lock_bh(&session->lock); 2882 spin_lock_bh(&session->lock);
2618 if (!__kfifo_get(session->cmdpool.queue, 2883 if (!kfifo_out(&session->cmdpool.queue,
2619 (void*)&conn->login_task, 2884 (void*)&conn->login_task,
2620 sizeof(void*))) { 2885 sizeof(void*))) {
2621 spin_unlock_bh(&session->lock); 2886 spin_unlock_bh(&session->lock);
@@ -2635,7 +2900,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
2635 return cls_conn; 2900 return cls_conn;
2636 2901
2637login_task_data_alloc_fail: 2902login_task_data_alloc_fail:
2638 __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task, 2903 kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task,
2639 sizeof(void*)); 2904 sizeof(void*));
2640login_task_alloc_fail: 2905login_task_alloc_fail:
2641 iscsi_destroy_conn(cls_conn); 2906 iscsi_destroy_conn(cls_conn);
@@ -2698,7 +2963,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
2698 free_pages((unsigned long) conn->data, 2963 free_pages((unsigned long) conn->data,
2699 get_order(ISCSI_DEF_MAX_RECV_SEG_LEN)); 2964 get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
2700 kfree(conn->persistent_address); 2965 kfree(conn->persistent_address);
2701 __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task, 2966 kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task,
2702 sizeof(void*)); 2967 sizeof(void*));
2703 if (session->leadconn == conn) 2968 if (session->leadconn == conn)
2704 session->leadconn = NULL; 2969 session->leadconn = NULL;
@@ -2823,14 +3088,15 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
2823 session->state = ISCSI_STATE_TERMINATE; 3088 session->state = ISCSI_STATE_TERMINATE;
2824 else if (conn->stop_stage != STOP_CONN_RECOVER) 3089 else if (conn->stop_stage != STOP_CONN_RECOVER)
2825 session->state = ISCSI_STATE_IN_RECOVERY; 3090 session->state = ISCSI_STATE_IN_RECOVERY;
3091
3092 old_stop_stage = conn->stop_stage;
3093 conn->stop_stage = flag;
2826 spin_unlock_bh(&session->lock); 3094 spin_unlock_bh(&session->lock);
2827 3095
2828 del_timer_sync(&conn->transport_timer); 3096 del_timer_sync(&conn->transport_timer);
2829 iscsi_suspend_tx(conn); 3097 iscsi_suspend_tx(conn);
2830 3098
2831 spin_lock_bh(&session->lock); 3099 spin_lock_bh(&session->lock);
2832 old_stop_stage = conn->stop_stage;
2833 conn->stop_stage = flag;
2834 conn->c_stage = ISCSI_CONN_STOPPED; 3100 conn->c_stage = ISCSI_CONN_STOPPED;
2835 spin_unlock_bh(&session->lock); 3101 spin_unlock_bh(&session->lock);
2836 3102
@@ -2856,6 +3122,7 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
2856 spin_lock_bh(&session->lock); 3122 spin_lock_bh(&session->lock);
2857 fail_scsi_tasks(conn, -1, DID_TRANSPORT_DISRUPTED); 3123 fail_scsi_tasks(conn, -1, DID_TRANSPORT_DISRUPTED);
2858 fail_mgmt_tasks(session, conn); 3124 fail_mgmt_tasks(session, conn);
3125 memset(&conn->tmhdr, 0, sizeof(conn->tmhdr));
2859 spin_unlock_bh(&session->lock); 3126 spin_unlock_bh(&session->lock);
2860 mutex_unlock(&session->eh_mutex); 3127 mutex_unlock(&session->eh_mutex);
2861} 3128}
@@ -2932,6 +3199,9 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
2932 case ISCSI_PARAM_LU_RESET_TMO: 3199 case ISCSI_PARAM_LU_RESET_TMO:
2933 sscanf(buf, "%d", &session->lu_reset_timeout); 3200 sscanf(buf, "%d", &session->lu_reset_timeout);
2934 break; 3201 break;
3202 case ISCSI_PARAM_TGT_RESET_TMO:
3203 sscanf(buf, "%d", &session->tgt_reset_timeout);
3204 break;
2935 case ISCSI_PARAM_PING_TMO: 3205 case ISCSI_PARAM_PING_TMO:
2936 sscanf(buf, "%d", &conn->ping_timeout); 3206 sscanf(buf, "%d", &conn->ping_timeout);
2937 break; 3207 break;
@@ -3031,6 +3301,9 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
3031 case ISCSI_PARAM_LU_RESET_TMO: 3301 case ISCSI_PARAM_LU_RESET_TMO:
3032 len = sprintf(buf, "%d\n", session->lu_reset_timeout); 3302 len = sprintf(buf, "%d\n", session->lu_reset_timeout);
3033 break; 3303 break;
3304 case ISCSI_PARAM_TGT_RESET_TMO:
3305 len = sprintf(buf, "%d\n", session->tgt_reset_timeout);
3306 break;
3034 case ISCSI_PARAM_INITIAL_R2T_EN: 3307 case ISCSI_PARAM_INITIAL_R2T_EN:
3035 len = sprintf(buf, "%d\n", session->initial_r2t_en); 3308 len = sprintf(buf, "%d\n", session->initial_r2t_en);
3036 break; 3309 break;
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 2e0746d70303..5c92620292fb 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -29,6 +29,7 @@
29#include <linux/types.h> 29#include <linux/types.h>
30#include <linux/list.h> 30#include <linux/list.h>
31#include <linux/inet.h> 31#include <linux/inet.h>
32#include <linux/slab.h>
32#include <linux/file.h> 33#include <linux/file.h>
33#include <linux/blkdev.h> 34#include <linux/blkdev.h>
34#include <linux/crypto.h> 35#include <linux/crypto.h>
@@ -445,15 +446,15 @@ void iscsi_tcp_cleanup_task(struct iscsi_task *task)
445 return; 446 return;
446 447
447 /* flush task's r2t queues */ 448 /* flush task's r2t queues */
448 while (__kfifo_get(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) { 449 while (kfifo_out(&tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) {
449 __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t, 450 kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t,
450 sizeof(void*)); 451 sizeof(void*));
451 ISCSI_DBG_TCP(task->conn, "pending r2t dropped\n"); 452 ISCSI_DBG_TCP(task->conn, "pending r2t dropped\n");
452 } 453 }
453 454
454 r2t = tcp_task->r2t; 455 r2t = tcp_task->r2t;
455 if (r2t != NULL) { 456 if (r2t != NULL) {
456 __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t, 457 kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t,
457 sizeof(void*)); 458 sizeof(void*));
458 tcp_task->r2t = NULL; 459 tcp_task->r2t = NULL;
459 } 460 }
@@ -541,7 +542,7 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
541 return 0; 542 return 0;
542 } 543 }
543 544
544 rc = __kfifo_get(tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*)); 545 rc = kfifo_out(&tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*));
545 if (!rc) { 546 if (!rc) {
546 iscsi_conn_printk(KERN_ERR, conn, "Could not allocate R2T. " 547 iscsi_conn_printk(KERN_ERR, conn, "Could not allocate R2T. "
547 "Target has sent more R2Ts than it " 548 "Target has sent more R2Ts than it "
@@ -554,7 +555,7 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
554 if (r2t->data_length == 0) { 555 if (r2t->data_length == 0) {
555 iscsi_conn_printk(KERN_ERR, conn, 556 iscsi_conn_printk(KERN_ERR, conn,
556 "invalid R2T with zero data len\n"); 557 "invalid R2T with zero data len\n");
557 __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t, 558 kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t,
558 sizeof(void*)); 559 sizeof(void*));
559 return ISCSI_ERR_DATALEN; 560 return ISCSI_ERR_DATALEN;
560 } 561 }
@@ -570,7 +571,7 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
570 "invalid R2T with data len %u at offset %u " 571 "invalid R2T with data len %u at offset %u "
571 "and total length %d\n", r2t->data_length, 572 "and total length %d\n", r2t->data_length,
572 r2t->data_offset, scsi_out(task->sc)->length); 573 r2t->data_offset, scsi_out(task->sc)->length);
573 __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t, 574 kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t,
574 sizeof(void*)); 575 sizeof(void*));
575 return ISCSI_ERR_DATALEN; 576 return ISCSI_ERR_DATALEN;
576 } 577 }
@@ -580,7 +581,7 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
580 r2t->sent = 0; 581 r2t->sent = 0;
581 582
582 tcp_task->exp_datasn = r2tsn + 1; 583 tcp_task->exp_datasn = r2tsn + 1;
583 __kfifo_put(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*)); 584 kfifo_in(&tcp_task->r2tqueue, (void*)&r2t, sizeof(void*));
584 conn->r2t_pdus_cnt++; 585 conn->r2t_pdus_cnt++;
585 586
586 iscsi_requeue_task(task); 587 iscsi_requeue_task(task);
@@ -951,7 +952,7 @@ int iscsi_tcp_task_init(struct iscsi_task *task)
951 return conn->session->tt->init_pdu(task, 0, task->data_count); 952 return conn->session->tt->init_pdu(task, 0, task->data_count);
952 } 953 }
953 954
954 BUG_ON(__kfifo_len(tcp_task->r2tqueue)); 955 BUG_ON(kfifo_len(&tcp_task->r2tqueue));
955 tcp_task->exp_datasn = 0; 956 tcp_task->exp_datasn = 0;
956 957
957 /* Prepare PDU, optionally w/ immediate data */ 958 /* Prepare PDU, optionally w/ immediate data */
@@ -982,7 +983,7 @@ static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task)
982 if (r2t->data_length <= r2t->sent) { 983 if (r2t->data_length <= r2t->sent) {
983 ISCSI_DBG_TCP(task->conn, 984 ISCSI_DBG_TCP(task->conn,
984 " done with r2t %p\n", r2t); 985 " done with r2t %p\n", r2t);
985 __kfifo_put(tcp_task->r2tpool.queue, 986 kfifo_in(&tcp_task->r2tpool.queue,
986 (void *)&tcp_task->r2t, 987 (void *)&tcp_task->r2t,
987 sizeof(void *)); 988 sizeof(void *));
988 tcp_task->r2t = r2t = NULL; 989 tcp_task->r2t = r2t = NULL;
@@ -990,9 +991,12 @@ static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task)
990 } 991 }
991 992
992 if (r2t == NULL) { 993 if (r2t == NULL) {
993 __kfifo_get(tcp_task->r2tqueue, 994 if (kfifo_out(&tcp_task->r2tqueue,
994 (void *)&tcp_task->r2t, sizeof(void *)); 995 (void *)&tcp_task->r2t, sizeof(void *)) !=
995 r2t = tcp_task->r2t; 996 sizeof(void *))
997 r2t = NULL;
998 else
999 r2t = tcp_task->r2t;
996 } 1000 }
997 spin_unlock_bh(&session->lock); 1001 spin_unlock_bh(&session->lock);
998 } 1002 }
@@ -1004,7 +1008,7 @@ static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task)
1004 * iscsi_tcp_task_xmit - xmit normal PDU task 1008 * iscsi_tcp_task_xmit - xmit normal PDU task
1005 * @task: iscsi command task 1009 * @task: iscsi command task
1006 * 1010 *
1007 * We're expected to return 0 when everything was transmitted succesfully, 1011 * We're expected to return 0 when everything was transmitted successfully,
1008 * -EAGAIN if there's still data in the queue, or != 0 for any other kind 1012 * -EAGAIN if there's still data in the queue, or != 0 for any other kind
1009 * of error. 1013 * of error.
1010 */ 1014 */
@@ -1127,9 +1131,8 @@ int iscsi_tcp_r2tpool_alloc(struct iscsi_session *session)
1127 } 1131 }
1128 1132
1129 /* R2T xmit queue */ 1133 /* R2T xmit queue */
1130 tcp_task->r2tqueue = kfifo_alloc( 1134 if (kfifo_alloc(&tcp_task->r2tqueue,
1131 session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL); 1135 session->max_r2t * 4 * sizeof(void*), GFP_KERNEL)) {
1132 if (tcp_task->r2tqueue == ERR_PTR(-ENOMEM)) {
1133 iscsi_pool_free(&tcp_task->r2tpool); 1136 iscsi_pool_free(&tcp_task->r2tpool);
1134 goto r2t_alloc_fail; 1137 goto r2t_alloc_fail;
1135 } 1138 }
@@ -1142,7 +1145,7 @@ r2t_alloc_fail:
1142 struct iscsi_task *task = session->cmds[i]; 1145 struct iscsi_task *task = session->cmds[i];
1143 struct iscsi_tcp_task *tcp_task = task->dd_data; 1146 struct iscsi_tcp_task *tcp_task = task->dd_data;
1144 1147
1145 kfifo_free(tcp_task->r2tqueue); 1148 kfifo_free(&tcp_task->r2tqueue);
1146 iscsi_pool_free(&tcp_task->r2tpool); 1149 iscsi_pool_free(&tcp_task->r2tpool);
1147 } 1150 }
1148 return -ENOMEM; 1151 return -ENOMEM;
@@ -1157,7 +1160,7 @@ void iscsi_tcp_r2tpool_free(struct iscsi_session *session)
1157 struct iscsi_task *task = session->cmds[i]; 1160 struct iscsi_task *task = session->cmds[i];
1158 struct iscsi_tcp_task *tcp_task = task->dd_data; 1161 struct iscsi_tcp_task *tcp_task = task->dd_data;
1159 1162
1160 kfifo_free(tcp_task->r2tqueue); 1163 kfifo_free(&tcp_task->r2tqueue);
1161 iscsi_pool_free(&tcp_task->r2tpool); 1164 iscsi_pool_free(&tcp_task->r2tpool);
1162 } 1165 }
1163} 1166}
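
All of the kfifo churn in libiscsi_tcp.c above is mechanical fallout from the kfifo rework merged around 2.6.33: the fifo is no longer a separately allocated struct kfifo * with optional built-in locking but an object embedded in its owner, and the accessors were renamed. A condensed before/after sketch using the element size from the hunks:

        /* old API: kfifo_alloc() returned a pointer (or ERR_PTR) and could
         * take a spinlock for its callers to share */
        struct kfifo *q = kfifo_alloc(n * sizeof(void *), GFP_KERNEL, NULL);
        __kfifo_put(q, (void *)&item, sizeof(void *));
        __kfifo_get(q, (void *)&item, sizeof(void *));
        kfifo_free(q);

        /* new API: the kfifo is embedded, kfifo_alloc() returns 0 or an
         * errno-style failure, and callers do their own serialization */
        struct kfifo q;
        if (kfifo_alloc(&q, n * sizeof(void *), GFP_KERNEL))
                return -ENOMEM;
        kfifo_in(&q, (void *)&item, sizeof(void *));
        if (kfifo_out(&q, (void *)&item, sizeof(void *)) != sizeof(void *))
                item = NULL;    /* short read means the fifo was empty */
        kfifo_free(&q);

kfifo_out() now reports the number of bytes actually copied, which is why iscsi_tcp_get_curr_r2t() grows the explicit != sizeof(void *) test where the old code ignored the return value.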
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index e15501170698..88f744672576 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -22,6 +22,7 @@
22 */ 22 */
23 23
24#include <linux/scatterlist.h> 24#include <linux/scatterlist.h>
25#include <linux/slab.h>
25 26
26#include <scsi/sas_ata.h> 27#include <scsi/sas_ata.h>
27#include "sas_internal.h" 28#include "sas_internal.h"
@@ -394,11 +395,15 @@ int sas_ata_init_host_and_port(struct domain_device *found_dev,
394void sas_ata_task_abort(struct sas_task *task) 395void sas_ata_task_abort(struct sas_task *task)
395{ 396{
396 struct ata_queued_cmd *qc = task->uldd_task; 397 struct ata_queued_cmd *qc = task->uldd_task;
398 struct request_queue *q = qc->scsicmd->device->request_queue;
397 struct completion *waiting; 399 struct completion *waiting;
400 unsigned long flags;
398 401
399 /* Bounce SCSI-initiated commands to the SCSI EH */ 402 /* Bounce SCSI-initiated commands to the SCSI EH */
400 if (qc->scsicmd) { 403 if (qc->scsicmd) {
404 spin_lock_irqsave(q->queue_lock, flags);
401 blk_abort_request(qc->scsicmd->request); 405 blk_abort_request(qc->scsicmd->request);
406 spin_unlock_irqrestore(q->queue_lock, flags);
402 scsi_schedule_eh(qc->scsicmd->device->host); 407 scsi_schedule_eh(qc->scsicmd->device->host);
403 return; 408 return;
404 } 409 }
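
sas_ata_task_abort() now wraps blk_abort_request() in the request queue's queue_lock; the locking requirement is inferred from this change (the block layer's timeout bookkeeping would otherwise race the abort) rather than stated in the diff. Note also that q is computed from qc->scsicmd->device before the if (qc->scsicmd) test, so an ATA task with no SCSI command would oops on the lookup itself; a safer ordering of the same pattern keeps the lookup inside the branch:

        if (qc->scsicmd) {
                struct request_queue *q =
                        qc->scsicmd->device->request_queue;
                unsigned long flags;

                /* do not race the block-layer timeout handler */
                spin_lock_irqsave(q->queue_lock, flags);
                blk_abort_request(qc->scsicmd->request);
                spin_unlock_irqrestore(q->queue_lock, flags);
                scsi_schedule_eh(qc->scsicmd->device->host);
                return;
        }

The same early dereference appears in sas_task_abort() in sas_scsi_host.c below, where q comes from sc->device ahead of the !sc escape.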
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index facc5bfcf7db..f5831930df9b 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -23,6 +23,7 @@
23 */ 23 */
24 24
25#include <linux/scatterlist.h> 25#include <linux/scatterlist.h>
26#include <linux/slab.h>
26#include <scsi/scsi_host.h> 27#include <scsi/scsi_host.h>
27#include <scsi/scsi_eh.h> 28#include <scsi/scsi_eh.h>
28#include "sas_internal.h" 29#include "sas_internal.h"
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 33cf988c8c8a..c65af02dcfe8 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -24,6 +24,7 @@
24 24
25#include <linux/scatterlist.h> 25#include <linux/scatterlist.h>
26#include <linux/blkdev.h> 26#include <linux/blkdev.h>
27#include <linux/slab.h>
27 28
28#include "sas_internal.h" 29#include "sas_internal.h"
29 30
diff --git a/drivers/scsi/libsas/sas_host_smp.c b/drivers/scsi/libsas/sas_host_smp.c
index 1bc3b7567994..04ad8dd1a74c 100644
--- a/drivers/scsi/libsas/sas_host_smp.c
+++ b/drivers/scsi/libsas/sas_host_smp.c
@@ -10,6 +10,7 @@
10 */ 10 */
11#include <linux/scatterlist.h> 11#include <linux/scatterlist.h>
12#include <linux/blkdev.h> 12#include <linux/blkdev.h>
13#include <linux/slab.h>
13 14
14#include "sas_internal.h" 15#include "sas_internal.h"
15 16
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index 9cd5abe9e714..2dc55343f671 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -24,6 +24,7 @@
24 */ 24 */
25 25
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/slab.h>
27#include <linux/init.h> 28#include <linux/init.h>
28#include <linux/device.h> 29#include <linux/device.h>
29#include <linux/spinlock.h> 30#include <linux/spinlock.h>
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 1c558d3bce18..822835055cef 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -44,6 +44,7 @@
44#include <linux/err.h> 44#include <linux/err.h>
45#include <linux/blkdev.h> 45#include <linux/blkdev.h>
46#include <linux/freezer.h> 46#include <linux/freezer.h>
47#include <linux/gfp.h>
47#include <linux/scatterlist.h> 48#include <linux/scatterlist.h>
48#include <linux/libata.h> 49#include <linux/libata.h>
49 50
@@ -820,10 +821,14 @@ void sas_slave_destroy(struct scsi_device *scsi_dev)
820 ata_port_disable(dev->sata_dev.ap); 821 ata_port_disable(dev->sata_dev.ap);
821} 822}
822 823
823int sas_change_queue_depth(struct scsi_device *scsi_dev, int new_depth) 824int sas_change_queue_depth(struct scsi_device *scsi_dev, int new_depth,
825 int reason)
824{ 826{
825 int res = min(new_depth, SAS_MAX_QD); 827 int res = min(new_depth, SAS_MAX_QD);
826 828
829 if (reason != SCSI_QDEPTH_DEFAULT)
830 return -EOPNOTSUPP;
831
827 if (scsi_dev->tagged_supported) 832 if (scsi_dev->tagged_supported)
828 scsi_adjust_queue_depth(scsi_dev, scsi_get_tag_type(scsi_dev), 833 scsi_adjust_queue_depth(scsi_dev, scsi_get_tag_type(scsi_dev),
829 res); 834 res);
@@ -1025,6 +1030,8 @@ int __sas_task_abort(struct sas_task *task)
1025void sas_task_abort(struct sas_task *task) 1030void sas_task_abort(struct sas_task *task)
1026{ 1031{
1027 struct scsi_cmnd *sc = task->uldd_task; 1032 struct scsi_cmnd *sc = task->uldd_task;
1033 struct request_queue *q = sc->device->request_queue;
1034 unsigned long flags;
1028 1035
1029 /* Escape for libsas internal commands */ 1036 /* Escape for libsas internal commands */
1030 if (!sc) { 1037 if (!sc) {
@@ -1039,7 +1046,9 @@ void sas_task_abort(struct sas_task *task)
1039 return; 1046 return;
1040 } 1047 }
1041 1048
1049 spin_lock_irqsave(q->queue_lock, flags);
1042 blk_abort_request(sc->request); 1050 blk_abort_request(sc->request);
1051 spin_unlock_irqrestore(q->queue_lock, flags);
1043 scsi_schedule_eh(sc->device->host); 1052 scsi_schedule_eh(sc->device->host);
1044} 1053}
1045 1054
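
sas_change_queue_depth() picks up the reason argument that the midlayer now passes to every ->change_queue_depth() implementation; drivers that only handle administrative changes are expected to reject everything but SCSI_QDEPTH_DEFAULT so the midlayer can fall back on its own handling. A minimal conforming implementation, with MY_MAX_QD standing in as a hypothetical per-driver cap:

        #include <scsi/scsi_device.h>
        #include <scsi/scsi_host.h>     /* SCSI_QDEPTH_DEFAULT */
        #include <scsi/scsi_tcq.h>

        #define MY_MAX_QD 32            /* hypothetical cap */

        static int my_change_queue_depth(struct scsi_device *sdev, int depth,
                                         int reason)
        {
                if (reason != SCSI_QDEPTH_DEFAULT)
                        return -EOPNOTSUPP;     /* e.g. queue-full ramping */

                depth = min(depth, MY_MAX_QD);
                if (sdev->tagged_supported)
                        scsi_adjust_queue_depth(sdev,
                                                scsi_get_tag_type(sdev),
                                                depth);
                return depth;
        }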
diff --git a/drivers/scsi/libsrp.c b/drivers/scsi/libsrp.c
index 9ad38e81e343..ff6a28ce9b69 100644
--- a/drivers/scsi/libsrp.c
+++ b/drivers/scsi/libsrp.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * SCSI RDAM Protocol lib functions 2 * SCSI RDMA Protocol lib functions
3 * 3 *
4 * Copyright (C) 2006 FUJITA Tomonori <tomof@acm.org> 4 * Copyright (C) 2006 FUJITA Tomonori <tomof@acm.org>
5 * 5 *
@@ -19,6 +19,7 @@
19 * 02110-1301 USA 19 * 02110-1301 USA
20 */ 20 */
21#include <linux/err.h> 21#include <linux/err.h>
22#include <linux/slab.h>
22#include <linux/kfifo.h> 23#include <linux/kfifo.h>
23#include <linux/scatterlist.h> 24#include <linux/scatterlist.h>
24#include <linux/dma-mapping.h> 25#include <linux/dma-mapping.h>
@@ -58,19 +59,15 @@ static int srp_iu_pool_alloc(struct srp_queue *q, size_t max,
58 goto free_pool; 59 goto free_pool;
59 60
60 spin_lock_init(&q->lock); 61 spin_lock_init(&q->lock);
61 q->queue = kfifo_init((void *) q->pool, max * sizeof(void *), 62 kfifo_init(&q->queue, (void *) q->pool, max * sizeof(void *));
62 GFP_KERNEL, &q->lock);
63 if (IS_ERR(q->queue))
64 goto free_item;
65 63
66 for (i = 0, iue = q->items; i < max; i++) { 64 for (i = 0, iue = q->items; i < max; i++) {
67 __kfifo_put(q->queue, (void *) &iue, sizeof(void *)); 65 kfifo_in(&q->queue, (void *) &iue, sizeof(void *));
68 iue->sbuf = ring[i]; 66 iue->sbuf = ring[i];
69 iue++; 67 iue++;
70 } 68 }
71 return 0; 69 return 0;
72 70
73free_item:
74 kfree(q->items); 71 kfree(q->items);
75free_pool: 72free_pool:
76 kfree(q->pool); 73 kfree(q->pool);
@@ -167,7 +164,11 @@ struct iu_entry *srp_iu_get(struct srp_target *target)
167{ 164{
168 struct iu_entry *iue = NULL; 165 struct iu_entry *iue = NULL;
169 166
170 kfifo_get(target->iu_queue.queue, (void *) &iue, sizeof(void *)); 167 if (kfifo_out_locked(&target->iu_queue.queue, (void *) &iue,
168 sizeof(void *), &target->iu_queue.lock) != sizeof(void *)) {
169 WARN_ONCE(1, "unexpected fifo state");
170 return NULL;
171 }
171 if (!iue) 172 if (!iue)
172 return iue; 173 return iue;
173 iue->target = target; 174 iue->target = target;
@@ -179,7 +180,8 @@ EXPORT_SYMBOL_GPL(srp_iu_get);
179 180
180void srp_iu_put(struct iu_entry *iue) 181void srp_iu_put(struct iu_entry *iue)
181{ 182{
182 kfifo_put(iue->target->iu_queue.queue, (void *) &iue, sizeof(void *)); 183 kfifo_in_locked(&iue->target->iu_queue.queue, (void *) &iue,
184 sizeof(void *), &iue->target->iu_queue.lock);
183} 185}
184EXPORT_SYMBOL_GPL(srp_iu_put); 186EXPORT_SYMBOL_GPL(srp_iu_put);
185 187
@@ -327,7 +329,7 @@ int srp_transfer_data(struct scsi_cmnd *sc, struct srp_cmd *cmd,
327 int offset, err = 0; 329 int offset, err = 0;
328 u8 format; 330 u8 format;
329 331
330 offset = cmd->add_cdb_len * 4; 332 offset = cmd->add_cdb_len & ~3;
331 333
332 dir = srp_cmd_direction(cmd); 334 dir = srp_cmd_direction(cmd);
333 if (dir == DMA_FROM_DEVICE) 335 if (dir == DMA_FROM_DEVICE)
@@ -365,7 +367,7 @@ static int vscsis_data_length(struct srp_cmd *cmd, enum dma_data_direction dir)
365{ 367{
366 struct srp_direct_buf *md; 368 struct srp_direct_buf *md;
367 struct srp_indirect_buf *id; 369 struct srp_indirect_buf *id;
368 int len = 0, offset = cmd->add_cdb_len * 4; 370 int len = 0, offset = cmd->add_cdb_len & ~3;
369 u8 fmt; 371 u8 fmt;
370 372
371 if (dir == DMA_TO_DEVICE) 373 if (dir == DMA_TO_DEVICE)
@@ -439,6 +441,6 @@ int srp_cmd_queue(struct Scsi_Host *shost, struct srp_cmd *cmd, void *info,
439} 441}
440EXPORT_SYMBOL_GPL(srp_cmd_queue); 442EXPORT_SYMBOL_GPL(srp_cmd_queue);
441 443
442MODULE_DESCRIPTION("SCSI RDAM Protocol lib functions"); 444MODULE_DESCRIPTION("SCSI RDMA Protocol lib functions");
443MODULE_AUTHOR("FUJITA Tomonori"); 445MODULE_AUTHOR("FUJITA Tomonori");
444MODULE_LICENSE("GPL"); 446MODULE_LICENSE("GPL");
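
Two independent fixes ride along in libsrp.c. The kfifo conversion mirrors the libiscsi_tcp.c one, except that this queue already owns a spinlock, so srp_iu_get()/srp_iu_put() move to the kfifo_out_locked()/kfifo_in_locked() wrappers, and since the embedded-fifo kfifo_init() can no longer fail the free_item error path goes away. Separately, the additional-CDB offset changes from add_cdb_len * 4 to add_cdb_len & ~3: the reading implied by the fix (not spelled out in the diff) is that the ADDITIONAL CDB LENGTH field occupies the upper bits of that byte with the two low bits reserved, so the raw byte already encodes the offset in bytes once those bits are masked, and the old multiply over-scaled it:

        #include <stdint.h>

        /* hedged sketch: byte offset of the data descriptors past the
         * fixed SRP_CMD header, derived from the raw length byte */
        static inline int srp_add_cdb_offset(uint8_t add_cdb_len)
        {
                return add_cdb_len & ~3;        /* was: add_cdb_len * 4 */
        }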
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index aa10f7951634..565e16dd74fc 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2009 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2010 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -37,6 +37,9 @@ struct lpfc_sli2_slim;
37 the NameServer before giving up. */ 37 the NameServer before giving up. */
38#define LPFC_CMD_PER_LUN 3 /* max outstanding cmds per lun */ 38#define LPFC_CMD_PER_LUN 3 /* max outstanding cmds per lun */
39#define LPFC_DEFAULT_SG_SEG_CNT 64 /* sg element count per scsi cmnd */ 39#define LPFC_DEFAULT_SG_SEG_CNT 64 /* sg element count per scsi cmnd */
40#define LPFC_DEFAULT_MENLO_SG_SEG_CNT 128 /* sg element count per scsi
 41 cmnd for menlo needs nearly twice as many as for firmware
42 downloads using bsg */
40#define LPFC_DEFAULT_PROT_SG_SEG_CNT 4096 /* sg protection elements count */ 43#define LPFC_DEFAULT_PROT_SG_SEG_CNT 4096 /* sg protection elements count */
41#define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */ 44#define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */
42#define LPFC_MAX_PROT_SG_SEG_CNT 4096 /* prot sg element count per scsi cmd*/ 45#define LPFC_MAX_PROT_SG_SEG_CNT 4096 /* prot sg element count per scsi cmd*/
@@ -109,7 +112,8 @@ struct hbq_dmabuf {
109 struct lpfc_dmabuf dbuf; 112 struct lpfc_dmabuf dbuf;
110 uint32_t size; 113 uint32_t size;
111 uint32_t tag; 114 uint32_t tag;
112 struct lpfc_rcqe rcqe; 115 struct lpfc_cq_event cq_event;
116 unsigned long time_stamp;
113}; 117};
114 118
115/* Priority bit. Set value to exceed low water mark in lpfc_mem. */ 119/* Priority bit. Set value to exceed low water mark in lpfc_mem. */
@@ -201,6 +205,7 @@ struct lpfc_stats {
201 uint32_t elsRcvLIRR; 205 uint32_t elsRcvLIRR;
202 uint32_t elsRcvRPS; 206 uint32_t elsRcvRPS;
203 uint32_t elsRcvRPL; 207 uint32_t elsRcvRPL;
208 uint32_t elsRcvRRQ;
204 uint32_t elsXmitFLOGI; 209 uint32_t elsXmitFLOGI;
205 uint32_t elsXmitFDISC; 210 uint32_t elsXmitFDISC;
206 uint32_t elsXmitPLOGI; 211 uint32_t elsXmitPLOGI;
@@ -289,8 +294,8 @@ struct lpfc_vport {
289 294
290 uint16_t vpi; 295 uint16_t vpi;
291 uint16_t vfi; 296 uint16_t vfi;
292 uint8_t vfi_state; 297 uint8_t vpi_state;
293#define LPFC_VFI_REGISTERED 0x1 298#define LPFC_VPI_REGISTERED 0x1
294 299
295 uint32_t fc_flag; /* FC flags */ 300 uint32_t fc_flag; /* FC flags */
296/* Several of these flags are HBA centric and should be moved to 301/* Several of these flags are HBA centric and should be moved to
@@ -313,6 +318,9 @@ struct lpfc_vport {
313#define FC_VPORT_NEEDS_REG_VPI 0x80000 /* Needs to have its vpi registered */ 318#define FC_VPORT_NEEDS_REG_VPI 0x80000 /* Needs to have its vpi registered */
314#define FC_RSCN_DEFERRED 0x100000 /* A deferred RSCN being processed */ 319#define FC_RSCN_DEFERRED 0x100000 /* A deferred RSCN being processed */
315#define FC_VPORT_NEEDS_INIT_VPI 0x200000 /* Need to INIT_VPI before FDISC */ 320#define FC_VPORT_NEEDS_INIT_VPI 0x200000 /* Need to INIT_VPI before FDISC */
321#define FC_VPORT_CVL_RCVD 0x400000 /* VLink failed due to CVL */
322#define FC_VFI_REGISTERED 0x800000 /* VFI is registered */
323#define FC_FDISC_COMPLETED 0x1000000/* FDISC completed */
316 324
317 uint32_t ct_flags; 325 uint32_t ct_flags;
318#define FC_CT_RFF_ID 0x1 /* RFF_ID accepted by switch */ 326#define FC_CT_RFF_ID 0x1 /* RFF_ID accepted by switch */
@@ -405,6 +413,7 @@ struct lpfc_vport {
405 uint8_t stat_data_enabled; 413 uint8_t stat_data_enabled;
406 uint8_t stat_data_blocked; 414 uint8_t stat_data_blocked;
407 struct list_head rcv_buffer_list; 415 struct list_head rcv_buffer_list;
416 unsigned long rcv_buffer_time_stamp;
408 uint32_t vport_flag; 417 uint32_t vport_flag;
409#define STATIC_VPORT 1 418#define STATIC_VPORT 1
410}; 419};
@@ -445,6 +454,8 @@ struct unsol_rcv_ct_ctx {
445 uint32_t ctxt_id; 454 uint32_t ctxt_id;
446 uint32_t SID; 455 uint32_t SID;
447 uint32_t oxid; 456 uint32_t oxid;
457 uint32_t flags;
458#define UNSOL_VALID 0x00000001
448}; 459};
449 460
450struct lpfc_hba { 461struct lpfc_hba {
@@ -496,7 +507,10 @@ struct lpfc_hba {
496 (struct lpfc_hba *); 507 (struct lpfc_hba *);
497 void (*lpfc_stop_port) 508 void (*lpfc_stop_port)
498 (struct lpfc_hba *); 509 (struct lpfc_hba *);
499 510 int (*lpfc_hba_init_link)
511 (struct lpfc_hba *);
512 int (*lpfc_hba_down_link)
513 (struct lpfc_hba *);
500 514
501 /* SLI4 specific HBA data structure */ 515 /* SLI4 specific HBA data structure */
502 struct lpfc_sli4_hba sli4_hba; 516 struct lpfc_sli4_hba sli4_hba;
@@ -527,13 +541,16 @@ struct lpfc_hba {
527#define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */ 541#define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */
528#define DEFER_ERATT 0x2 /* Deferred error attention in progress */ 542#define DEFER_ERATT 0x2 /* Deferred error attention in progress */
529#define HBA_FCOE_SUPPORT 0x4 /* HBA function supports FCOE */ 543#define HBA_FCOE_SUPPORT 0x4 /* HBA function supports FCOE */
530#define HBA_RECEIVE_BUFFER 0x8 /* Rcv buffer posted to worker thread */ 544#define HBA_SP_QUEUE_EVT 0x8 /* Slow-path qevt posted to worker thread*/
531#define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */ 545#define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */
532#define FCP_XRI_ABORT_EVENT 0x20 546#define FCP_XRI_ABORT_EVENT 0x20
533#define ELS_XRI_ABORT_EVENT 0x40 547#define ELS_XRI_ABORT_EVENT 0x40
534#define ASYNC_EVENT 0x80 548#define ASYNC_EVENT 0x80
535#define LINK_DISABLED 0x100 /* Link disabled by user */ 549#define LINK_DISABLED 0x100 /* Link disabled by user */
536#define FCF_DISC_INPROGRESS 0x200 /* FCF discovery in progress */ 550#define FCF_DISC_INPROGRESS 0x200 /* FCF discovery in progress */
551#define HBA_FIP_SUPPORT 0x400 /* FIP support in HBA */
552#define HBA_AER_ENABLED 0x800 /* AER enabled with HBA */
553 uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
537 struct lpfc_dmabuf slim2p; 554 struct lpfc_dmabuf slim2p;
538 555
539 MAILBOX_t *mbox; 556 MAILBOX_t *mbox;
@@ -551,6 +568,7 @@ struct lpfc_hba {
551 uint8_t fc_linkspeed; /* Link speed after last READ_LA */ 568 uint8_t fc_linkspeed; /* Link speed after last READ_LA */
552 569
553 uint32_t fc_eventTag; /* event tag for link attention */ 570 uint32_t fc_eventTag; /* event tag for link attention */
571 uint32_t link_events;
554 572
555 /* These fields used to be binfo */ 573 /* These fields used to be binfo */
556 uint32_t fc_pref_DID; /* preferred D_ID */ 574 uint32_t fc_pref_DID; /* preferred D_ID */
@@ -604,8 +622,12 @@ struct lpfc_hba {
604 uint32_t cfg_enable_hba_reset; 622 uint32_t cfg_enable_hba_reset;
605 uint32_t cfg_enable_hba_heartbeat; 623 uint32_t cfg_enable_hba_heartbeat;
606 uint32_t cfg_enable_bg; 624 uint32_t cfg_enable_bg;
607 uint32_t cfg_enable_fip;
608 uint32_t cfg_log_verbose; 625 uint32_t cfg_log_verbose;
626 uint32_t cfg_aer_support;
627 uint32_t cfg_suppress_link_up;
628#define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */
629#define LPFC_DELAY_INIT_LINK 1 /* layered driver hold off */
630#define LPFC_DELAY_INIT_LINK_INDEFINITELY 2 /* wait, manual intervention */
609 631
610 lpfc_vpd_t vpd; /* vital product data */ 632 lpfc_vpd_t vpd; /* vital product data */
611 633
@@ -783,10 +805,13 @@ struct lpfc_hba {
783 uint16_t vlan_id; 805 uint16_t vlan_id;
784 struct list_head fcf_conn_rec_list; 806 struct list_head fcf_conn_rec_list;
785 807
786 struct mutex ct_event_mutex; /* synchronize access to ct_ev_waiters */ 808 spinlock_t ct_ev_lock; /* synchronize access to ct_ev_waiters */
787 struct list_head ct_ev_waiters; 809 struct list_head ct_ev_waiters;
788 struct unsol_rcv_ct_ctx ct_ctx[64]; 810 struct unsol_rcv_ct_ctx ct_ctx[64];
789 uint32_t ctx_idx; 811 uint32_t ctx_idx;
812
813 uint8_t menlo_flag; /* menlo generic flags */
814#define HBA_MENLO_SUPPORT 0x1 /* HBA supports menlo commands */
790}; 815};
791 816
792static inline struct Scsi_Host * 817static inline struct Scsi_Host *
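
The new lpfc_hba_init_link/lpfc_hba_down_link members follow the struct's existing convention of per-HBA function pointers (see lpfc_stop_port just above) so the SLI-3 and SLI-4 paths can install different implementations; the link_state sysfs store added to lpfc_attr.c below dispatches through them. A hedged sketch of the wiring at probe time, with assumed helper names since the real assignments are outside this diff:

        static int lpfc_hba_init_link_s3(struct lpfc_hba *phba);  /* assumed */
        static int lpfc_hba_down_link_s3(struct lpfc_hba *phba);  /* assumed */

        static void lpfc_setup_link_ops(struct lpfc_hba *phba)
        {
                /* an SLI-4 probe path would install its own pair here */
                phba->lpfc_hba_init_link = lpfc_hba_init_link_s3;
                phba->lpfc_hba_down_link = lpfc_hba_down_link_s3;
        }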
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index e1a30a16a9fa..1849e33e68f9 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -23,12 +23,15 @@
23#include <linux/delay.h> 23#include <linux/delay.h>
24#include <linux/pci.h> 24#include <linux/pci.h>
25#include <linux/interrupt.h> 25#include <linux/interrupt.h>
26#include <linux/aer.h>
27#include <linux/gfp.h>
26 28
27#include <scsi/scsi.h> 29#include <scsi/scsi.h>
28#include <scsi/scsi_device.h> 30#include <scsi/scsi_device.h>
29#include <scsi/scsi_host.h> 31#include <scsi/scsi_host.h>
30#include <scsi/scsi_tcq.h> 32#include <scsi/scsi_tcq.h>
31#include <scsi/scsi_transport_fc.h> 33#include <scsi/scsi_transport_fc.h>
34#include <scsi/fc/fc_fs.h>
32 35
33#include "lpfc_hw4.h" 36#include "lpfc_hw4.h"
34#include "lpfc_hw.h" 37#include "lpfc_hw.h"
@@ -98,6 +101,28 @@ lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr,
98 return snprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n"); 101 return snprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n");
99} 102}
100 103
104/**
105 * lpfc_enable_fip_show - Return the fip mode of the HBA
 106 * @dev: class device that is converted into a Scsi_host.
107 * @attr: device attribute, not used.
 108 * @buf: on return contains the FIP support state, "1" or "0".
109 *
110 * Returns: size of formatted string.
111 **/
112static ssize_t
113lpfc_enable_fip_show(struct device *dev, struct device_attribute *attr,
114 char *buf)
115{
116 struct Scsi_Host *shost = class_to_shost(dev);
117 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
118 struct lpfc_hba *phba = vport->phba;
119
120 if (phba->hba_flag & HBA_FIP_SUPPORT)
121 return snprintf(buf, PAGE_SIZE, "1\n");
122 else
123 return snprintf(buf, PAGE_SIZE, "0\n");
124}
125
101static ssize_t 126static ssize_t
102lpfc_bg_info_show(struct device *dev, struct device_attribute *attr, 127lpfc_bg_info_show(struct device *dev, struct device_attribute *attr,
103 char *buf) 128 char *buf)
@@ -458,6 +483,41 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
458} 483}
459 484
460/** 485/**
486 * lpfc_link_state_store - Transition the link_state on an HBA port
487 * @dev: class device that is converted into a Scsi_host.
488 * @attr: device attribute, not used.
 489 * @buf: containing the string "up" or "down".
490 * @count: not used.
491 *
492 * Returns:
493 * -EINVAL if the buffer is not "up" or "down"
494 * return from link state change function if non-zero
495 * length of the buf on success
496 **/
497static ssize_t
498lpfc_link_state_store(struct device *dev, struct device_attribute *attr,
499 const char *buf, size_t count)
500{
501 struct Scsi_Host *shost = class_to_shost(dev);
502 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
503 struct lpfc_hba *phba = vport->phba;
504
505 int status = -EINVAL;
506
507 if ((strncmp(buf, "up", sizeof("up") - 1) == 0) &&
508 (phba->link_state == LPFC_LINK_DOWN))
509 status = phba->lpfc_hba_init_link(phba);
510 else if ((strncmp(buf, "down", sizeof("down") - 1) == 0) &&
511 (phba->link_state >= LPFC_LINK_UP))
512 status = phba->lpfc_hba_down_link(phba);
513
514 if (status == 0)
515 return strlen(buf);
516 else
517 return status;
518}
519
520/**
461 * lpfc_num_discovered_ports_show - Return sum of mapped and unmapped vports 521 * lpfc_num_discovered_ports_show - Return sum of mapped and unmapped vports
462 * @dev: class device that is converted into a Scsi_host. 522 * @dev: class device that is converted into a Scsi_host.
463 * @attr: device attribute, not used. 523 * @attr: device attribute, not used.
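
With the link_state DEVICE_ATTR later in this file gaining S_IWUSR, the store routine above makes the link administratively controllable from userspace: writing up to a down link calls lpfc_hba_init_link(), writing down to an up link calls lpfc_hba_down_link(), and any other string or mismatched transition returns -EINVAL. The write would normally land through the Scsi_Host's sysfs directory (something like a link_state attribute under the host device; the exact path is not shown in this diff).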
@@ -654,7 +714,7 @@ lpfc_selective_reset(struct lpfc_hba *phba)
654 * Notes: 714 * Notes:
655 * Assumes any error from lpfc_selective_reset() will be negative. 715 * Assumes any error from lpfc_selective_reset() will be negative.
656 * If lpfc_selective_reset() returns zero then the length of the buffer 716 * If lpfc_selective_reset() returns zero then the length of the buffer
657 * is returned which indicates succcess 717 * is returned which indicates success
658 * 718 *
659 * Returns: 719 * Returns:
660 * -EINVAL if the buffer does not contain the string "selective" 720 * -EINVAL if the buffer does not contain the string "selective"
@@ -762,9 +822,15 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
762 } else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0) 822 } else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0)
763 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); 823 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
764 else if (strncmp(buf, "warm", sizeof("warm") - 1) == 0) 824 else if (strncmp(buf, "warm", sizeof("warm") - 1) == 0)
765 status = lpfc_do_offline(phba, LPFC_EVT_WARM_START); 825 if (phba->sli_rev == LPFC_SLI_REV4)
826 return -EINVAL;
827 else
828 status = lpfc_do_offline(phba, LPFC_EVT_WARM_START);
766 else if (strncmp(buf, "error", sizeof("error") - 1) == 0) 829 else if (strncmp(buf, "error", sizeof("error") - 1) == 0)
767 status = lpfc_do_offline(phba, LPFC_EVT_KILL); 830 if (phba->sli_rev == LPFC_SLI_REV4)
831 return -EINVAL;
832 else
833 status = lpfc_do_offline(phba, LPFC_EVT_KILL);
768 else 834 else
769 return -EINVAL; 835 return -EINVAL;
770 836
@@ -1126,6 +1192,9 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr,
1126 if ((val & 0x3) != val) 1192 if ((val & 0x3) != val)
1127 return -EINVAL; 1193 return -EINVAL;
1128 1194
1195 if (phba->sli_rev == LPFC_SLI_REV4)
1196 val = 0;
1197
1129 spin_lock_irq(&phba->hbalock); 1198 spin_lock_irq(&phba->hbalock);
1130 1199
1131 old_val = phba->cfg_poll; 1200 old_val = phba->cfg_poll;
@@ -1186,7 +1255,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
1186 struct Scsi_Host *shost = class_to_shost(dev);\ 1255 struct Scsi_Host *shost = class_to_shost(dev);\
1187 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ 1256 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
1188 struct lpfc_hba *phba = vport->phba;\ 1257 struct lpfc_hba *phba = vport->phba;\
1189 int val = 0;\ 1258 uint val = 0;\
1190 val = phba->cfg_##attr;\ 1259 val = phba->cfg_##attr;\
1191 return snprintf(buf, PAGE_SIZE, "%d\n",\ 1260 return snprintf(buf, PAGE_SIZE, "%d\n",\
1192 phba->cfg_##attr);\ 1261 phba->cfg_##attr);\
@@ -1214,7 +1283,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
1214 struct Scsi_Host *shost = class_to_shost(dev);\ 1283 struct Scsi_Host *shost = class_to_shost(dev);\
1215 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ 1284 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
1216 struct lpfc_hba *phba = vport->phba;\ 1285 struct lpfc_hba *phba = vport->phba;\
1217 int val = 0;\ 1286 uint val = 0;\
1218 val = phba->cfg_##attr;\ 1287 val = phba->cfg_##attr;\
1219 return snprintf(buf, PAGE_SIZE, "%#x\n",\ 1288 return snprintf(buf, PAGE_SIZE, "%#x\n",\
1220 phba->cfg_##attr);\ 1289 phba->cfg_##attr);\
@@ -1241,7 +1310,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
1241 **/ 1310 **/
1242#define lpfc_param_init(attr, default, minval, maxval) \ 1311#define lpfc_param_init(attr, default, minval, maxval) \
1243static int \ 1312static int \
1244lpfc_##attr##_init(struct lpfc_hba *phba, int val) \ 1313lpfc_##attr##_init(struct lpfc_hba *phba, uint val) \
1245{ \ 1314{ \
1246 if (val >= minval && val <= maxval) {\ 1315 if (val >= minval && val <= maxval) {\
1247 phba->cfg_##attr = val;\ 1316 phba->cfg_##attr = val;\
@@ -1276,7 +1345,7 @@ lpfc_##attr##_init(struct lpfc_hba *phba, int val) \
1276 **/ 1345 **/
1277#define lpfc_param_set(attr, default, minval, maxval) \ 1346#define lpfc_param_set(attr, default, minval, maxval) \
1278static int \ 1347static int \
1279lpfc_##attr##_set(struct lpfc_hba *phba, int val) \ 1348lpfc_##attr##_set(struct lpfc_hba *phba, uint val) \
1280{ \ 1349{ \
1281 if (val >= minval && val <= maxval) {\ 1350 if (val >= minval && val <= maxval) {\
1282 phba->cfg_##attr = val;\ 1351 phba->cfg_##attr = val;\
@@ -1317,7 +1386,7 @@ lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
1317 struct Scsi_Host *shost = class_to_shost(dev);\ 1386 struct Scsi_Host *shost = class_to_shost(dev);\
1318 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ 1387 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
1319 struct lpfc_hba *phba = vport->phba;\ 1388 struct lpfc_hba *phba = vport->phba;\
1320 int val=0;\ 1389 uint val = 0;\
1321 if (!isdigit(buf[0]))\ 1390 if (!isdigit(buf[0]))\
1322 return -EINVAL;\ 1391 return -EINVAL;\
1323 if (sscanf(buf, "%i", &val) != 1)\ 1392 if (sscanf(buf, "%i", &val) != 1)\
@@ -1349,7 +1418,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
1349{ \ 1418{ \
1350 struct Scsi_Host *shost = class_to_shost(dev);\ 1419 struct Scsi_Host *shost = class_to_shost(dev);\
1351 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ 1420 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
1352 int val = 0;\ 1421 uint val = 0;\
1353 val = vport->cfg_##attr;\ 1422 val = vport->cfg_##attr;\
1354 return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\ 1423 return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\
1355} 1424}
@@ -1376,7 +1445,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
1376{ \ 1445{ \
1377 struct Scsi_Host *shost = class_to_shost(dev);\ 1446 struct Scsi_Host *shost = class_to_shost(dev);\
1378 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ 1447 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
1379 int val = 0;\ 1448 uint val = 0;\
1380 val = vport->cfg_##attr;\ 1449 val = vport->cfg_##attr;\
1381 return snprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\ 1450 return snprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\
1382} 1451}
@@ -1401,7 +1470,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
1401 **/ 1470 **/
1402#define lpfc_vport_param_init(attr, default, minval, maxval) \ 1471#define lpfc_vport_param_init(attr, default, minval, maxval) \
1403static int \ 1472static int \
1404lpfc_##attr##_init(struct lpfc_vport *vport, int val) \ 1473lpfc_##attr##_init(struct lpfc_vport *vport, uint val) \
1405{ \ 1474{ \
1406 if (val >= minval && val <= maxval) {\ 1475 if (val >= minval && val <= maxval) {\
1407 vport->cfg_##attr = val;\ 1476 vport->cfg_##attr = val;\
@@ -1433,7 +1502,7 @@ lpfc_##attr##_init(struct lpfc_vport *vport, int val) \
1433 **/ 1502 **/
1434#define lpfc_vport_param_set(attr, default, minval, maxval) \ 1503#define lpfc_vport_param_set(attr, default, minval, maxval) \
1435static int \ 1504static int \
1436lpfc_##attr##_set(struct lpfc_vport *vport, int val) \ 1505lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \
1437{ \ 1506{ \
1438 if (val >= minval && val <= maxval) {\ 1507 if (val >= minval && val <= maxval) {\
1439 vport->cfg_##attr = val;\ 1508 vport->cfg_##attr = val;\
@@ -1469,7 +1538,7 @@ lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
1469{ \ 1538{ \
1470 struct Scsi_Host *shost = class_to_shost(dev);\ 1539 struct Scsi_Host *shost = class_to_shost(dev);\
1471 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ 1540 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
1472 int val=0;\ 1541 uint val = 0;\
1473 if (!isdigit(buf[0]))\ 1542 if (!isdigit(buf[0]))\
1474 return -EINVAL;\ 1543 return -EINVAL;\
1475 if (sscanf(buf, "%i", &val) != 1)\ 1544 if (sscanf(buf, "%i", &val) != 1)\
@@ -1482,22 +1551,22 @@ lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
1482 1551
1483 1552
1484#define LPFC_ATTR(name, defval, minval, maxval, desc) \ 1553#define LPFC_ATTR(name, defval, minval, maxval, desc) \
1485static int lpfc_##name = defval;\ 1554static uint lpfc_##name = defval;\
1486module_param(lpfc_##name, int, 0);\ 1555module_param(lpfc_##name, uint, 0);\
1487MODULE_PARM_DESC(lpfc_##name, desc);\ 1556MODULE_PARM_DESC(lpfc_##name, desc);\
1488lpfc_param_init(name, defval, minval, maxval) 1557lpfc_param_init(name, defval, minval, maxval)
1489 1558
1490#define LPFC_ATTR_R(name, defval, minval, maxval, desc) \ 1559#define LPFC_ATTR_R(name, defval, minval, maxval, desc) \
1491static int lpfc_##name = defval;\ 1560static uint lpfc_##name = defval;\
1492module_param(lpfc_##name, int, 0);\ 1561module_param(lpfc_##name, uint, 0);\
1493MODULE_PARM_DESC(lpfc_##name, desc);\ 1562MODULE_PARM_DESC(lpfc_##name, desc);\
1494lpfc_param_show(name)\ 1563lpfc_param_show(name)\
1495lpfc_param_init(name, defval, minval, maxval)\ 1564lpfc_param_init(name, defval, minval, maxval)\
1496static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL) 1565static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
1497 1566
1498#define LPFC_ATTR_RW(name, defval, minval, maxval, desc) \ 1567#define LPFC_ATTR_RW(name, defval, minval, maxval, desc) \
1499static int lpfc_##name = defval;\ 1568static uint lpfc_##name = defval;\
1500module_param(lpfc_##name, int, 0);\ 1569module_param(lpfc_##name, uint, 0);\
1501MODULE_PARM_DESC(lpfc_##name, desc);\ 1570MODULE_PARM_DESC(lpfc_##name, desc);\
1502lpfc_param_show(name)\ 1571lpfc_param_show(name)\
1503lpfc_param_init(name, defval, minval, maxval)\ 1572lpfc_param_init(name, defval, minval, maxval)\
@@ -1507,16 +1576,16 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
1507 lpfc_##name##_show, lpfc_##name##_store) 1576 lpfc_##name##_show, lpfc_##name##_store)
1508 1577
1509#define LPFC_ATTR_HEX_R(name, defval, minval, maxval, desc) \ 1578#define LPFC_ATTR_HEX_R(name, defval, minval, maxval, desc) \
1510static int lpfc_##name = defval;\ 1579static uint lpfc_##name = defval;\
1511module_param(lpfc_##name, int, 0);\ 1580module_param(lpfc_##name, uint, 0);\
1512MODULE_PARM_DESC(lpfc_##name, desc);\ 1581MODULE_PARM_DESC(lpfc_##name, desc);\
1513lpfc_param_hex_show(name)\ 1582lpfc_param_hex_show(name)\
1514lpfc_param_init(name, defval, minval, maxval)\ 1583lpfc_param_init(name, defval, minval, maxval)\
1515static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL) 1584static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
1516 1585
1517#define LPFC_ATTR_HEX_RW(name, defval, minval, maxval, desc) \ 1586#define LPFC_ATTR_HEX_RW(name, defval, minval, maxval, desc) \
1518static int lpfc_##name = defval;\ 1587static uint lpfc_##name = defval;\
1519module_param(lpfc_##name, int, 0);\ 1588module_param(lpfc_##name, uint, 0);\
1520MODULE_PARM_DESC(lpfc_##name, desc);\ 1589MODULE_PARM_DESC(lpfc_##name, desc);\
1521lpfc_param_hex_show(name)\ 1590lpfc_param_hex_show(name)\
1522lpfc_param_init(name, defval, minval, maxval)\ 1591lpfc_param_init(name, defval, minval, maxval)\
@@ -1526,22 +1595,22 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
1526 lpfc_##name##_show, lpfc_##name##_store) 1595 lpfc_##name##_show, lpfc_##name##_store)
1527 1596
1528#define LPFC_VPORT_ATTR(name, defval, minval, maxval, desc) \ 1597#define LPFC_VPORT_ATTR(name, defval, minval, maxval, desc) \
1529static int lpfc_##name = defval;\ 1598static uint lpfc_##name = defval;\
1530module_param(lpfc_##name, int, 0);\ 1599module_param(lpfc_##name, uint, 0);\
1531MODULE_PARM_DESC(lpfc_##name, desc);\ 1600MODULE_PARM_DESC(lpfc_##name, desc);\
1532lpfc_vport_param_init(name, defval, minval, maxval) 1601lpfc_vport_param_init(name, defval, minval, maxval)
1533 1602
1534#define LPFC_VPORT_ATTR_R(name, defval, minval, maxval, desc) \ 1603#define LPFC_VPORT_ATTR_R(name, defval, minval, maxval, desc) \
1535static int lpfc_##name = defval;\ 1604static uint lpfc_##name = defval;\
1536module_param(lpfc_##name, int, 0);\ 1605module_param(lpfc_##name, uint, 0);\
1537MODULE_PARM_DESC(lpfc_##name, desc);\ 1606MODULE_PARM_DESC(lpfc_##name, desc);\
1538lpfc_vport_param_show(name)\ 1607lpfc_vport_param_show(name)\
1539lpfc_vport_param_init(name, defval, minval, maxval)\ 1608lpfc_vport_param_init(name, defval, minval, maxval)\
1540static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL) 1609static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
1541 1610
1542#define LPFC_VPORT_ATTR_RW(name, defval, minval, maxval, desc) \ 1611#define LPFC_VPORT_ATTR_RW(name, defval, minval, maxval, desc) \
1543static int lpfc_##name = defval;\ 1612static uint lpfc_##name = defval;\
1544module_param(lpfc_##name, int, 0);\ 1613module_param(lpfc_##name, uint, 0);\
1545MODULE_PARM_DESC(lpfc_##name, desc);\ 1614MODULE_PARM_DESC(lpfc_##name, desc);\
1546lpfc_vport_param_show(name)\ 1615lpfc_vport_param_show(name)\
1547lpfc_vport_param_init(name, defval, minval, maxval)\ 1616lpfc_vport_param_init(name, defval, minval, maxval)\
@@ -1551,16 +1620,16 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
1551 lpfc_##name##_show, lpfc_##name##_store) 1620 lpfc_##name##_show, lpfc_##name##_store)
1552 1621
1553#define LPFC_VPORT_ATTR_HEX_R(name, defval, minval, maxval, desc) \ 1622#define LPFC_VPORT_ATTR_HEX_R(name, defval, minval, maxval, desc) \
1554static int lpfc_##name = defval;\ 1623static uint lpfc_##name = defval;\
1555module_param(lpfc_##name, int, 0);\ 1624module_param(lpfc_##name, uint, 0);\
1556MODULE_PARM_DESC(lpfc_##name, desc);\ 1625MODULE_PARM_DESC(lpfc_##name, desc);\
1557lpfc_vport_param_hex_show(name)\ 1626lpfc_vport_param_hex_show(name)\
1558lpfc_vport_param_init(name, defval, minval, maxval)\ 1627lpfc_vport_param_init(name, defval, minval, maxval)\
1559static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL) 1628static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
1560 1629
1561#define LPFC_VPORT_ATTR_HEX_RW(name, defval, minval, maxval, desc) \ 1630#define LPFC_VPORT_ATTR_HEX_RW(name, defval, minval, maxval, desc) \
1562static int lpfc_##name = defval;\ 1631static uint lpfc_##name = defval;\
1563module_param(lpfc_##name, int, 0);\ 1632module_param(lpfc_##name, uint, 0);\
1564MODULE_PARM_DESC(lpfc_##name, desc);\ 1633MODULE_PARM_DESC(lpfc_##name, desc);\
1565lpfc_vport_param_hex_show(name)\ 1634lpfc_vport_param_hex_show(name)\
1566lpfc_vport_param_init(name, defval, minval, maxval)\ 1635lpfc_vport_param_init(name, defval, minval, maxval)\
@@ -1581,7 +1650,8 @@ static DEVICE_ATTR(programtype, S_IRUGO, lpfc_programtype_show, NULL);
1581static DEVICE_ATTR(portnum, S_IRUGO, lpfc_vportnum_show, NULL); 1650static DEVICE_ATTR(portnum, S_IRUGO, lpfc_vportnum_show, NULL);
1582static DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL); 1651static DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL);
1583static DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL); 1652static DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL);
1584static DEVICE_ATTR(link_state, S_IRUGO, lpfc_link_state_show, NULL); 1653static DEVICE_ATTR(link_state, S_IRUGO | S_IWUSR, lpfc_link_state_show,
1654 lpfc_link_state_store);
1585static DEVICE_ATTR(option_rom_version, S_IRUGO, 1655static DEVICE_ATTR(option_rom_version, S_IRUGO,
1586 lpfc_option_rom_version_show, NULL); 1656 lpfc_option_rom_version_show, NULL);
1587static DEVICE_ATTR(num_discovered_ports, S_IRUGO, 1657static DEVICE_ATTR(num_discovered_ports, S_IRUGO,
@@ -1589,6 +1659,7 @@ static DEVICE_ATTR(num_discovered_ports, S_IRUGO,
1589static DEVICE_ATTR(menlo_mgmt_mode, S_IRUGO, lpfc_mlomgmt_show, NULL); 1659static DEVICE_ATTR(menlo_mgmt_mode, S_IRUGO, lpfc_mlomgmt_show, NULL);
1590static DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL); 1660static DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL);
1591static DEVICE_ATTR(lpfc_drvr_version, S_IRUGO, lpfc_drvr_version_show, NULL); 1661static DEVICE_ATTR(lpfc_drvr_version, S_IRUGO, lpfc_drvr_version_show, NULL);
1662static DEVICE_ATTR(lpfc_enable_fip, S_IRUGO, lpfc_enable_fip_show, NULL);
1592static DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR, 1663static DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR,
1593 lpfc_board_mode_show, lpfc_board_mode_store); 1664 lpfc_board_mode_show, lpfc_board_mode_store);
1594static DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset); 1665static DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset);
@@ -1863,6 +1934,17 @@ static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO,
1863 lpfc_enable_npiv_show, NULL); 1934 lpfc_enable_npiv_show, NULL);
1864 1935
1865/* 1936/*
1937# lpfc_suppress_link_up: Bring link up at initialization
1938# 0x0 = bring link up (issue MBX_INIT_LINK)
1939# 0x1 = do NOT bring link up at initialization(MBX_INIT_LINK)
1940# 0x2 = never bring up link
1941# Default value is 0.
1942*/
1943LPFC_ATTR_R(suppress_link_up, LPFC_INITIALIZE_LINK, LPFC_INITIALIZE_LINK,
1944 LPFC_DELAY_INIT_LINK_INDEFINITELY,
1945 "Suppress Link Up at initialization");
1946
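Since suppress_link_up is declared with LPFC_ATTR_R, it is readable but not writable after module load; its three values correspond to the LPFC_INITIALIZE_LINK, LPFC_DELAY_INIT_LINK and LPFC_DELAY_INIT_LINK_INDEFINITELY constants added in lpfc.h, and pairing it with the new writable link_state attribute appears to be the intended way to bring a suppressed link up manually later (implied by the two additions, not stated in the diff).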
1947/*
1866# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear 1948# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
1867# until the timer expires. Value range is [0,255]. Default value is 30. 1949# until the timer expires. Value range is [0,255]. Default value is 30.
1868*/ 1950*/
@@ -1887,8 +1969,7 @@ lpfc_nodev_tmo_show(struct device *dev, struct device_attribute *attr,
1887{ 1969{
1888 struct Scsi_Host *shost = class_to_shost(dev); 1970 struct Scsi_Host *shost = class_to_shost(dev);
1889 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 1971 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1890 int val = 0; 1972
1891 val = vport->cfg_devloss_tmo;
1892 return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_devloss_tmo); 1973 return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_devloss_tmo);
1893} 1974}
1894 1975
@@ -2759,6 +2840,196 @@ static DEVICE_ATTR(lpfc_link_speed, S_IRUGO | S_IWUSR,
2759 lpfc_link_speed_show, lpfc_link_speed_store); 2840 lpfc_link_speed_show, lpfc_link_speed_store);
2760 2841
2761/* 2842/*
2843# lpfc_aer_support: Support PCIe device Advanced Error Reporting (AER)
2844# 0 = aer disabled or not supported
2845# 1 = aer supported and enabled (default)
2846# Value range is [0,1]. Default value is 1.
2847*/
2848
2849/**
2850 * lpfc_aer_support_store - Set the adapter for aer support
2851 *
2852 * @dev: class device that is converted into a Scsi_host.
2853 * @attr: device attribute, not used.
 2854 * @buf: containing the string "0" or "1".
2855 * @count: unused variable.
2856 *
2857 * Description:
2858 * If the val is 1 and currently the device's AER capability was not
2859 * enabled, invoke the kernel's enable AER helper routine, trying to
2860 * enable the device's AER capability. If the helper routine enabling
2861 * AER returns success, update the device's cfg_aer_support flag to
2862 * indicate AER is supported by the device; otherwise, if the device
2863 * AER capability is already enabled to support AER, then do nothing.
2864 *
2865 * If the val is 0 and currently the device's AER support was enabled,
2866 * invoke the kernel's disable AER helper routine. After that, update
2867 * the device's cfg_aer_support flag to indicate AER is not supported
2868 * by the device; otherwise, if the device AER capability is already
2869 * disabled from supporting AER, then do nothing.
2870 *
2871 * Returns:
 2872 * length of the buf on success if val is in range and the intended
 2873 * mode is supported.
2874 * -EINVAL if val out of range or intended mode is not supported.
2875 **/
2876static ssize_t
2877lpfc_aer_support_store(struct device *dev, struct device_attribute *attr,
2878 const char *buf, size_t count)
2879{
2880 struct Scsi_Host *shost = class_to_shost(dev);
2881 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
2882 struct lpfc_hba *phba = vport->phba;
2883 int val = 0, rc = -EINVAL;
2884
2885 /* AER not supported on OC devices yet */
2886 if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
2887 return -EPERM;
2888 if (!isdigit(buf[0]))
2889 return -EINVAL;
2890 if (sscanf(buf, "%i", &val) != 1)
2891 return -EINVAL;
2892
2893 switch (val) {
2894 case 0:
2895 if (phba->hba_flag & HBA_AER_ENABLED) {
2896 rc = pci_disable_pcie_error_reporting(phba->pcidev);
2897 if (!rc) {
2898 spin_lock_irq(&phba->hbalock);
2899 phba->hba_flag &= ~HBA_AER_ENABLED;
2900 spin_unlock_irq(&phba->hbalock);
2901 phba->cfg_aer_support = 0;
2902 rc = strlen(buf);
2903 } else
2904 rc = -EPERM;
2905 } else {
2906 phba->cfg_aer_support = 0;
2907 rc = strlen(buf);
2908 }
2909 break;
2910 case 1:
2911 if (!(phba->hba_flag & HBA_AER_ENABLED)) {
2912 rc = pci_enable_pcie_error_reporting(phba->pcidev);
2913 if (!rc) {
2914 spin_lock_irq(&phba->hbalock);
2915 phba->hba_flag |= HBA_AER_ENABLED;
2916 spin_unlock_irq(&phba->hbalock);
2917 phba->cfg_aer_support = 1;
2918 rc = strlen(buf);
2919 } else
2920 rc = -EPERM;
2921 } else {
2922 phba->cfg_aer_support = 1;
2923 rc = strlen(buf);
2924 }
2925 break;
2926 default:
2927 rc = -EINVAL;
2928 break;
2929 }
2930 return rc;
2931}
2932
2933static int lpfc_aer_support = 1;
 2934module_param(lpfc_aer_support, int, S_IRUGO);
2935MODULE_PARM_DESC(lpfc_aer_support, "Enable PCIe device AER support");
2936lpfc_param_show(aer_support)
2937
2938/**
2939 * lpfc_aer_support_init - Set the initial adapters aer support flag
2940 * @phba: lpfc_hba pointer.
 2941 * @val: requested AER support state, 0 or 1.
2942 *
2943 * Description:
2944 * If val is in a valid range [0,1], then set the adapter's initial
2945 * cfg_aer_support field. It will be up to the driver's probe_one
2946 * routine to determine whether the device's AER support can be set
2947 * or not.
2948 *
2949 * Notes:
2950 * If the value is not in range log a kernel error message, and
2951 * choose the default value of setting AER support and return.
2952 *
2953 * Returns:
2954 * zero if val saved.
2955 * -EINVAL val out of range
2956 **/
2957static int
2958lpfc_aer_support_init(struct lpfc_hba *phba, int val)
2959{
2960 /* AER not supported on OC devices yet */
2961 if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
2962 phba->cfg_aer_support = 0;
2963 return -EPERM;
2964 }
2965
2966 if (val == 0 || val == 1) {
2967 phba->cfg_aer_support = val;
2968 return 0;
2969 }
2970 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2971 "2712 lpfc_aer_support attribute value %d out "
2972 "of range, allowed values are 0|1, setting it "
2973 "to default value of 1\n", val);
2974 /* By default, try to enable AER on a device */
2975 phba->cfg_aer_support = 1;
2976 return -EINVAL;
2977}
2978
2979static DEVICE_ATTR(lpfc_aer_support, S_IRUGO | S_IWUSR,
2980 lpfc_aer_support_show, lpfc_aer_support_store);
2981
2982/**
2983 * lpfc_aer_cleanup_state - Clean up aer state to the aer enabled device
2984 * @dev: class device that is converted into a Scsi_host.
2985 * @attr: device attribute, not used.
 2986 * @buf: containing the string "1".
2987 * @count: unused variable.
2988 *
2989 * Description:
2990 * If the @buf contains 1 and the device currently has the AER support
2991 * enabled, then invokes the kernel AER helper routine
2992 * pci_cleanup_aer_uncorrect_error_status to clean up the uncorrectable
2993 * error status register.
2994 *
2995 * Notes:
2996 *
2997 * Returns:
2998 * -EINVAL if the buf does not contain the 1 or the device is not currently
2999 * enabled with the AER support.
3000 **/
3001static ssize_t
3002lpfc_aer_cleanup_state(struct device *dev, struct device_attribute *attr,
3003 const char *buf, size_t count)
3004{
3005 struct Scsi_Host *shost = class_to_shost(dev);
3006 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3007 struct lpfc_hba *phba = vport->phba;
3008 int val, rc = -1;
3009
3010 /* AER not supported on OC devices yet */
3011 if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
3012 return -EPERM;
3013 if (!isdigit(buf[0]))
3014 return -EINVAL;
3015 if (sscanf(buf, "%i", &val) != 1)
3016 return -EINVAL;
3017 if (val != 1)
3018 return -EINVAL;
3019
3020 if (phba->hba_flag & HBA_AER_ENABLED)
3021 rc = pci_cleanup_aer_uncorrect_error_status(phba->pcidev);
3022
3023 if (rc == 0)
3024 return strlen(buf);
3025 else
3026 return -EPERM;
3027}
3028
3029static DEVICE_ATTR(lpfc_aer_state_cleanup, S_IWUSR, NULL,
3030 lpfc_aer_cleanup_state);
3031
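
Taken together the two attributes form the runtime AER workflow: lpfc_aer_support (read-write) toggles pci_enable_pcie_error_reporting()/pci_disable_pcie_error_reporting() and records the outcome in cfg_aer_support under hbalock, while the write-only lpfc_aer_state_cleanup accepts only "1" and clears the device's uncorrectable-error status through pci_cleanup_aer_uncorrect_error_status(). Both bail out with -EPERM on LPFC_PCI_DEV_OC hardware, matching the "AER not supported on OC devices yet" comments.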
3032/*
2762# lpfc_fcp_class: Determines FC class to use for the FCP protocol. 3033# lpfc_fcp_class: Determines FC class to use for the FCP protocol.
2763# Value range is [2,3]. Default value is 3. 3034# Value range is [2,3]. Default value is 3.
2764*/ 3035*/
@@ -2846,7 +3117,7 @@ LPFC_ATTR_R(multi_ring_support, 1, 1, 2, "Determines number of primary "
2846# identifies what rctl value to configure the additional ring for. 3117# identifies what rctl value to configure the additional ring for.
 2847# Value range is [1,0xff]. Default value is 4 (Unsolicited Data). 3118# Value range is [1,0xff]. Default value is 4 (Unsolicited Data).
2848*/ 3119*/
2849LPFC_ATTR_R(multi_ring_rctl, FC_UNSOL_DATA, 1, 3120LPFC_ATTR_R(multi_ring_rctl, FC_RCTL_DD_UNSOL_DATA, 1,
2850 255, "Identifies RCTL for additional ring configuration"); 3121 255, "Identifies RCTL for additional ring configuration");
2851 3122
2852/* 3123/*
@@ -2854,7 +3125,7 @@ LPFC_ATTR_R(multi_ring_rctl, FC_UNSOL_DATA, 1,
2854# identifies what type value to configure the additional ring for. 3125# identifies what type value to configure the additional ring for.
2855# Value range is [1,0xff]. Default value is 5 (LLC/SNAP). 3126# Value range is [1,0xff]. Default value is 5 (LLC/SNAP).
2856*/ 3127*/
2857LPFC_ATTR_R(multi_ring_type, FC_LLC_SNAP, 1, 3128LPFC_ATTR_R(multi_ring_type, FC_TYPE_IP, 1,
2858 255, "Identifies TYPE for additional ring configuration"); 3129 255, "Identifies TYPE for additional ring configuration");
2859 3130
2860/* 3131/*
@@ -2890,12 +3161,12 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
2890/* 3161/*
2891# lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that 3162# lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
2892# support this feature 3163# support this feature
2893# 0 = MSI disabled (default) 3164# 0 = MSI disabled
2894# 1 = MSI enabled 3165# 1 = MSI enabled
2895# 2 = MSI-X enabled 3166# 2 = MSI-X enabled (default)
2896# Value range is [0,2]. Default value is 0. 3167# Value range is [0,2]. Default value is 2.
2897*/ 3168*/
2898LPFC_ATTR_R(use_msi, 0, 0, 2, "Use Message Signaled Interrupts (1) or " 3169LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
2899 "MSI-X (2), if possible"); 3170 "MSI-X (2), if possible");
2900 3171
2901/* 3172/*
@@ -2947,15 +3218,6 @@ LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat.");
2947LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support"); 3218LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
2948 3219
2949/* 3220/*
2950# lpfc_enable_fip: When set, FIP is required to start discovery. If not
2951# set, the driver will add an FCF record manually if the port has no
2952# FCF records available and start discovery.
2953# Value range is [0,1]. Default value is 1 (enabled)
2954*/
2955LPFC_ATTR_RW(enable_fip, 0, 0, 1, "Enable FIP Discovery");
2956
2957
2958/*
2959# lpfc_prot_mask: i 3221# lpfc_prot_mask: i
2960# - Bit mask of host protection capabilities used to register with the 3222# - Bit mask of host protection capabilities used to register with the
2961# SCSI mid-layer 3223# SCSI mid-layer
@@ -3013,6 +3275,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
3013 &dev_attr_num_discovered_ports, 3275 &dev_attr_num_discovered_ports,
3014 &dev_attr_menlo_mgmt_mode, 3276 &dev_attr_menlo_mgmt_mode,
3015 &dev_attr_lpfc_drvr_version, 3277 &dev_attr_lpfc_drvr_version,
3278 &dev_attr_lpfc_enable_fip,
3016 &dev_attr_lpfc_temp_sensor, 3279 &dev_attr_lpfc_temp_sensor,
3017 &dev_attr_lpfc_log_verbose, 3280 &dev_attr_lpfc_log_verbose,
3018 &dev_attr_lpfc_lun_queue_depth, 3281 &dev_attr_lpfc_lun_queue_depth,
@@ -3020,7 +3283,6 @@ struct device_attribute *lpfc_hba_attrs[] = {
3020 &dev_attr_lpfc_peer_port_login, 3283 &dev_attr_lpfc_peer_port_login,
3021 &dev_attr_lpfc_nodev_tmo, 3284 &dev_attr_lpfc_nodev_tmo,
3022 &dev_attr_lpfc_devloss_tmo, 3285 &dev_attr_lpfc_devloss_tmo,
3023 &dev_attr_lpfc_enable_fip,
3024 &dev_attr_lpfc_fcp_class, 3286 &dev_attr_lpfc_fcp_class,
3025 &dev_attr_lpfc_use_adisc, 3287 &dev_attr_lpfc_use_adisc,
3026 &dev_attr_lpfc_ack0, 3288 &dev_attr_lpfc_ack0,
@@ -3061,6 +3323,9 @@ struct device_attribute *lpfc_hba_attrs[] = {
3061 &dev_attr_lpfc_max_scsicmpl_time, 3323 &dev_attr_lpfc_max_scsicmpl_time,
3062 &dev_attr_lpfc_stat_data_ctrl, 3324 &dev_attr_lpfc_stat_data_ctrl,
3063 &dev_attr_lpfc_prot_sg_seg_cnt, 3325 &dev_attr_lpfc_prot_sg_seg_cnt,
3326 &dev_attr_lpfc_aer_support,
3327 &dev_attr_lpfc_aer_state_cleanup,
3328 &dev_attr_lpfc_suppress_link_up,
3064 NULL, 3329 NULL,
3065}; 3330};
3066 3331
@@ -3073,7 +3338,6 @@ struct device_attribute *lpfc_vport_attrs[] = {
3073 &dev_attr_lpfc_lun_queue_depth, 3338 &dev_attr_lpfc_lun_queue_depth,
3074 &dev_attr_lpfc_nodev_tmo, 3339 &dev_attr_lpfc_nodev_tmo,
3075 &dev_attr_lpfc_devloss_tmo, 3340 &dev_attr_lpfc_devloss_tmo,
3076 &dev_attr_lpfc_enable_fip,
3077 &dev_attr_lpfc_hba_queue_depth, 3341 &dev_attr_lpfc_hba_queue_depth,
3078 &dev_attr_lpfc_peer_port_login, 3342 &dev_attr_lpfc_peer_port_login,
3079 &dev_attr_lpfc_restrict_login, 3343 &dev_attr_lpfc_restrict_login,
@@ -3147,7 +3411,7 @@ sysfs_ctlreg_write(struct kobject *kobj, struct bin_attribute *bin_attr,
3147 * sysfs_ctlreg_read - Read method for reading from ctlreg 3411 * sysfs_ctlreg_read - Read method for reading from ctlreg
3148 * @kobj: kernel kobject that contains the kernel class device. 3412 * @kobj: kernel kobject that contains the kernel class device.
3149 * @bin_attr: kernel attributes passed to us. 3413 * @bin_attr: kernel attributes passed to us.
3150 * @buf: if succesful contains the data from the adapter IOREG space. 3414 * @buf: if successful contains the data from the adapter IOREG space.
3151 * @off: offset into buffer to beginning of data. 3415 * @off: offset into buffer to beginning of data.
3152 * @count: bytes to transfer. 3416 * @count: bytes to transfer.
3153 * 3417 *
@@ -3815,7 +4079,11 @@ lpfc_get_stats(struct Scsi_Host *shost)
3815 hs->invalid_crc_count -= lso->invalid_crc_count; 4079 hs->invalid_crc_count -= lso->invalid_crc_count;
3816 hs->error_frames -= lso->error_frames; 4080 hs->error_frames -= lso->error_frames;
3817 4081
3818 if (phba->fc_topology == TOPOLOGY_LOOP) { 4082 if (phba->hba_flag & HBA_FCOE_SUPPORT) {
4083 hs->lip_count = -1;
4084 hs->nos_count = (phba->link_events >> 1);
4085 hs->nos_count -= lso->link_events;
4086 } else if (phba->fc_topology == TOPOLOGY_LOOP) {
3819 hs->lip_count = (phba->fc_eventTag >> 1); 4087 hs->lip_count = (phba->fc_eventTag >> 1);
3820 hs->lip_count -= lso->link_events; 4088 hs->lip_count -= lso->link_events;
3821 hs->nos_count = -1; 4089 hs->nos_count = -1;
@@ -3906,7 +4174,10 @@ lpfc_reset_stats(struct Scsi_Host *shost)
3906 lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord; 4174 lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
3907 lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt; 4175 lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
3908 lso->error_frames = pmb->un.varRdLnk.crcCnt; 4176 lso->error_frames = pmb->un.varRdLnk.crcCnt;
3909 lso->link_events = (phba->fc_eventTag >> 1); 4177 if (phba->hba_flag & HBA_FCOE_SUPPORT)
4178 lso->link_events = (phba->link_events >> 1);
4179 else
4180 lso->link_events = (phba->fc_eventTag >> 1);
3910 4181
3911 psli->stats_start = get_seconds(); 4182 psli->stats_start = get_seconds();
3912 4183
@@ -4222,15 +4493,18 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
4222 lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset); 4493 lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
4223 lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat); 4494 lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
4224 lpfc_enable_bg_init(phba, lpfc_enable_bg); 4495 lpfc_enable_bg_init(phba, lpfc_enable_bg);
4496 if (phba->sli_rev == LPFC_SLI_REV4)
4497 phba->cfg_poll = 0;
4498 else
4225 phba->cfg_poll = lpfc_poll; 4499 phba->cfg_poll = lpfc_poll;
4226 phba->cfg_soft_wwnn = 0L; 4500 phba->cfg_soft_wwnn = 0L;
4227 phba->cfg_soft_wwpn = 0L; 4501 phba->cfg_soft_wwpn = 0L;
4228 lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt); 4502 lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
4229 lpfc_prot_sg_seg_cnt_init(phba, lpfc_prot_sg_seg_cnt); 4503 lpfc_prot_sg_seg_cnt_init(phba, lpfc_prot_sg_seg_cnt);
4230 lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth); 4504 lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
4231 lpfc_enable_fip_init(phba, lpfc_enable_fip);
4232 lpfc_hba_log_verbose_init(phba, lpfc_log_verbose); 4505 lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
4233 4506 lpfc_aer_support_init(phba, lpfc_aer_support);
4507 lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
4234 return; 4508 return;
4235} 4509}
4236 4510
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index da6bf5aac9dd..d62b3e467926 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2009 Emulex. All rights reserved. * 4 * Copyright (C) 2009-2010 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -21,17 +21,21 @@
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22#include <linux/mempool.h> 22#include <linux/mempool.h>
23#include <linux/pci.h> 23#include <linux/pci.h>
24#include <linux/slab.h>
25#include <linux/delay.h>
24 26
25#include <scsi/scsi.h> 27#include <scsi/scsi.h>
26#include <scsi/scsi_host.h> 28#include <scsi/scsi_host.h>
27#include <scsi/scsi_transport_fc.h> 29#include <scsi/scsi_transport_fc.h>
28#include <scsi/scsi_bsg_fc.h> 30#include <scsi/scsi_bsg_fc.h>
31#include <scsi/fc/fc_fs.h>
29 32
30#include "lpfc_hw4.h" 33#include "lpfc_hw4.h"
31#include "lpfc_hw.h" 34#include "lpfc_hw.h"
32#include "lpfc_sli.h" 35#include "lpfc_sli.h"
33#include "lpfc_sli4.h" 36#include "lpfc_sli4.h"
34#include "lpfc_nl.h" 37#include "lpfc_nl.h"
38#include "lpfc_bsg.h"
35#include "lpfc_disc.h" 39#include "lpfc_disc.h"
36#include "lpfc_scsi.h" 40#include "lpfc_scsi.h"
37#include "lpfc.h" 41#include "lpfc.h"
@@ -40,14 +44,196 @@
40#include "lpfc_vport.h" 44#include "lpfc_vport.h"
41#include "lpfc_version.h" 45#include "lpfc_version.h"
42 46
47struct lpfc_bsg_event {
48 struct list_head node;
49 struct kref kref;
50 wait_queue_head_t wq;
51
52 /* Event type and waiter identifiers */
53 uint32_t type_mask;
54 uint32_t req_id;
55 uint32_t reg_id;
56
57 /* next two flags are here for the auto-delete logic */
58 unsigned long wait_time_stamp;
59 int waiting;
60
61 /* seen and not seen events */
62 struct list_head events_to_get;
63 struct list_head events_to_see;
64
65 /* job waiting for this event to finish */
66 struct fc_bsg_job *set_job;
67};
68
69struct lpfc_bsg_iocb {
70 struct lpfc_iocbq *cmdiocbq;
71 struct lpfc_iocbq *rspiocbq;
72 struct lpfc_dmabuf *bmp;
73 struct lpfc_nodelist *ndlp;
74
75 /* job waiting for this iocb to finish */
76 struct fc_bsg_job *set_job;
77};
78
79struct lpfc_bsg_mbox {
80 LPFC_MBOXQ_t *pmboxq;
81 MAILBOX_t *mb;
82
83 /* job waiting for this mbox command to finish */
84 struct fc_bsg_job *set_job;
85};
86
87#define MENLO_DID 0x0000FC0E
88
89struct lpfc_bsg_menlo {
90 struct lpfc_iocbq *cmdiocbq;
91 struct lpfc_iocbq *rspiocbq;
92 struct lpfc_dmabuf *bmp;
93
94 /* job waiting for this iocb to finish */
95 struct fc_bsg_job *set_job;
96};
97
98#define TYPE_EVT 1
99#define TYPE_IOCB 2
100#define TYPE_MBOX 3
101#define TYPE_MENLO 4
102struct bsg_job_data {
103 uint32_t type;
104 union {
105 struct lpfc_bsg_event *evt;
106 struct lpfc_bsg_iocb iocb;
107 struct lpfc_bsg_mbox mbox;
108 struct lpfc_bsg_menlo menlo;
109 } context_un;
110};
111
112struct event_data {
113 struct list_head node;
114 uint32_t type;
115 uint32_t immed_dat;
116 void *data;
117 uint32_t len;
118};
119
120#define BUF_SZ_4K 4096
121#define SLI_CT_ELX_LOOPBACK 0x10
122
123enum ELX_LOOPBACK_CMD {
124 ELX_LOOPBACK_XRI_SETUP,
125 ELX_LOOPBACK_DATA,
126};
127
128#define ELX_LOOPBACK_HEADER_SZ \
129 (size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)
130
131struct lpfc_dmabufext {
132 struct lpfc_dmabuf dma;
133 uint32_t size;
134 uint32_t flag;
135};
136
137/**
138 * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
139 * @phba: Pointer to HBA context object.
140 * @cmdiocbq: Pointer to command iocb.
141 * @rspiocbq: Pointer to response iocb.
142 *
143 * This function is the completion handler for iocbs issued using
144 * lpfc_bsg_send_mgmt_cmd function. This function is called by the
145 * ring event handler function without any lock held. This function
146 * can be called from both worker thread context and interrupt
147 * context. This function also can be called from another thread which
148 * cleans up the SLI layer objects.
149 * This function copies the contents of the response iocb to the
150 * response iocb memory object provided by the caller of
151 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
152 * sleeps for the iocb completion.
153 **/
154static void
155lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
156 struct lpfc_iocbq *cmdiocbq,
157 struct lpfc_iocbq *rspiocbq)
158{
159 unsigned long iflags;
160 struct bsg_job_data *dd_data;
161 struct fc_bsg_job *job;
162 IOCB_t *rsp;
163 struct lpfc_dmabuf *bmp;
164 struct lpfc_nodelist *ndlp;
165 struct lpfc_bsg_iocb *iocb;
166 unsigned long flags;
167 int rc = 0;
168
169 spin_lock_irqsave(&phba->ct_ev_lock, flags);
170 dd_data = cmdiocbq->context1;
171 if (!dd_data) {
172 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
173 return;
174 }
175
176 iocb = &dd_data->context_un.iocb;
177 job = iocb->set_job;
178 job->dd_data = NULL; /* so timeout handler does not reply */
179
180 spin_lock_irqsave(&phba->hbalock, iflags);
181 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
182 if (cmdiocbq->context2 && rspiocbq)
183 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
184 &rspiocbq->iocb, sizeof(IOCB_t));
185 spin_unlock_irqrestore(&phba->hbalock, iflags);
186
187 bmp = iocb->bmp;
188 rspiocbq = iocb->rspiocbq;
189 rsp = &rspiocbq->iocb;
190 ndlp = iocb->ndlp;
191
192 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
193 job->request_payload.sg_cnt, DMA_TO_DEVICE);
194 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
195 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
196
197 if (rsp->ulpStatus) {
198 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
199 switch (rsp->un.ulpWord[4] & 0xff) {
200 case IOERR_SEQUENCE_TIMEOUT:
201 rc = -ETIMEDOUT;
202 break;
203 case IOERR_INVALID_RPI:
204 rc = -EFAULT;
205 break;
206 default:
207 rc = -EACCES;
208 break;
209 }
210 } else
211 rc = -EACCES;
212 } else
213 job->reply->reply_payload_rcv_len =
214 rsp->un.genreq64.bdl.bdeSize;
215
216 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
217 lpfc_sli_release_iocbq(phba, rspiocbq);
218 lpfc_sli_release_iocbq(phba, cmdiocbq);
219 lpfc_nlp_put(ndlp);
220 kfree(bmp);
221 kfree(dd_data);
222 /* make error code available to userspace */
223 job->reply->result = rc;
224 /* complete the job back to userspace */
225 job->job_done(job);
226 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
227 return;
228}
229
43/** 230/**
44 * lpfc_bsg_rport_ct - send a CT command from a bsg request 231 * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
45 * @job: fc_bsg_job to handle 232 * @job: fc_bsg_job to handle
46 */ 233 **/
47static int 234static int
48lpfc_bsg_rport_ct(struct fc_bsg_job *job) 235lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
49{ 236{
50 struct Scsi_Host *shost = job->shost;
51 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; 237 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
52 struct lpfc_hba *phba = vport->phba; 238 struct lpfc_hba *phba = vport->phba;
53 struct lpfc_rport_data *rdata = job->rport->dd_data; 239 struct lpfc_rport_data *rdata = job->rport->dd_data;
@@ -64,57 +250,60 @@ lpfc_bsg_rport_ct(struct fc_bsg_job *job)
64 struct scatterlist *sgel = NULL; 250 struct scatterlist *sgel = NULL;
65 int numbde; 251 int numbde;
66 dma_addr_t busaddr; 252 dma_addr_t busaddr;
253 struct bsg_job_data *dd_data;
254 uint32_t creg_val;
67 int rc = 0; 255 int rc = 0;
68 256
69 /* in case no data is transferred */ 257 /* in case no data is transferred */
70 job->reply->reply_payload_rcv_len = 0; 258 job->reply->reply_payload_rcv_len = 0;
71 259
260 /* allocate our bsg tracking structure */
261 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
262 if (!dd_data) {
263 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
264 "2733 Failed allocation of dd_data\n");
265 rc = -ENOMEM;
266 goto no_dd_data;
267 }
268
72 if (!lpfc_nlp_get(ndlp)) { 269 if (!lpfc_nlp_get(ndlp)) {
73 job->reply->result = -ENODEV; 270 rc = -ENODEV;
74 return 0; 271 goto no_ndlp;
272 }
273
274 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
275 if (!bmp) {
276 rc = -ENOMEM;
277 goto free_ndlp;
75 } 278 }
76 279
77 if (ndlp->nlp_flag & NLP_ELS_SND_MASK) { 280 if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
78 rc = -ENODEV; 281 rc = -ENODEV;
79 goto free_ndlp_exit; 282 goto free_bmp;
80 } 283 }
81 284
82 spin_lock_irq(shost->host_lock);
83 cmdiocbq = lpfc_sli_get_iocbq(phba); 285 cmdiocbq = lpfc_sli_get_iocbq(phba);
84 if (!cmdiocbq) { 286 if (!cmdiocbq) {
85 rc = -ENOMEM; 287 rc = -ENOMEM;
86 spin_unlock_irq(shost->host_lock); 288 goto free_bmp;
87 goto free_ndlp_exit;
88 } 289 }
89 cmd = &cmdiocbq->iocb;
90 290
291 cmd = &cmdiocbq->iocb;
91 rspiocbq = lpfc_sli_get_iocbq(phba); 292 rspiocbq = lpfc_sli_get_iocbq(phba);
92 if (!rspiocbq) { 293 if (!rspiocbq) {
93 rc = -ENOMEM; 294 rc = -ENOMEM;
94 goto free_cmdiocbq; 295 goto free_cmdiocbq;
95 } 296 }
96 spin_unlock_irq(shost->host_lock);
97 297
98 rsp = &rspiocbq->iocb; 298 rsp = &rspiocbq->iocb;
99
100 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
101 if (!bmp) {
102 rc = -ENOMEM;
103 spin_lock_irq(shost->host_lock);
104 goto free_rspiocbq;
105 }
106
107 spin_lock_irq(shost->host_lock);
108 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys); 299 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
109 if (!bmp->virt) { 300 if (!bmp->virt) {
110 rc = -ENOMEM; 301 rc = -ENOMEM;
111 goto free_bmp; 302 goto free_rspiocbq;
112 } 303 }
113 spin_unlock_irq(shost->host_lock);
114 304
115 INIT_LIST_HEAD(&bmp->list); 305 INIT_LIST_HEAD(&bmp->list);
116 bpl = (struct ulp_bde64 *) bmp->virt; 306 bpl = (struct ulp_bde64 *) bmp->virt;
117
118 request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list, 307 request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
119 job->request_payload.sg_cnt, DMA_TO_DEVICE); 308 job->request_payload.sg_cnt, DMA_TO_DEVICE);
120 for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) { 309 for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
@@ -148,86 +337,160 @@ lpfc_bsg_rport_ct(struct fc_bsg_job *job)
148 cmd->ulpCommand = CMD_GEN_REQUEST64_CR; 337 cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
149 cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA); 338 cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
150 cmd->un.genreq64.w5.hcsw.Dfctl = 0; 339 cmd->un.genreq64.w5.hcsw.Dfctl = 0;
151 cmd->un.genreq64.w5.hcsw.Rctl = FC_UNSOL_CTL; 340 cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
152 cmd->un.genreq64.w5.hcsw.Type = FC_COMMON_TRANSPORT_ULP; 341 cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
153 cmd->ulpBdeCount = 1; 342 cmd->ulpBdeCount = 1;
154 cmd->ulpLe = 1; 343 cmd->ulpLe = 1;
155 cmd->ulpClass = CLASS3; 344 cmd->ulpClass = CLASS3;
156 cmd->ulpContext = ndlp->nlp_rpi; 345 cmd->ulpContext = ndlp->nlp_rpi;
157 cmd->ulpOwner = OWN_CHIP; 346 cmd->ulpOwner = OWN_CHIP;
158 cmdiocbq->vport = phba->pport; 347 cmdiocbq->vport = phba->pport;
159 cmdiocbq->context1 = NULL; 348 cmdiocbq->context3 = bmp;
160 cmdiocbq->context2 = NULL;
161 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; 349 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
162
163 timeout = phba->fc_ratov * 2; 350 timeout = phba->fc_ratov * 2;
164 job->dd_data = cmdiocbq; 351 cmd->ulpTimeout = timeout;
165 352
166 rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq, 353 cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
167 timeout + LPFC_DRVR_TIMEOUT); 354 cmdiocbq->context1 = dd_data;
168 355 cmdiocbq->context2 = rspiocbq;
169 if (rc != IOCB_TIMEDOUT) { 356 dd_data->type = TYPE_IOCB;
170 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, 357 dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
171 job->request_payload.sg_cnt, DMA_TO_DEVICE); 358 dd_data->context_un.iocb.rspiocbq = rspiocbq;
172 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, 359 dd_data->context_un.iocb.set_job = job;
173 job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 360 dd_data->context_un.iocb.bmp = bmp;
361 dd_data->context_un.iocb.ndlp = ndlp;
362
363 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
364 creg_val = readl(phba->HCregaddr);
365 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
366 writel(creg_val, phba->HCregaddr);
367 readl(phba->HCregaddr); /* flush */
174 } 368 }
175 369
176 if (rc == IOCB_TIMEDOUT) { 370 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
177 lpfc_sli_release_iocbq(phba, rspiocbq);
178 rc = -EACCES;
179 goto free_ndlp_exit;
180 }
181 371
182 if (rc != IOCB_SUCCESS) { 372 if (rc == IOCB_SUCCESS)
183 rc = -EACCES; 373 return 0; /* done for now */
184 goto free_outdmp;
185 }
186 374
187 if (rsp->ulpStatus) { 375 /* iocb failed so cleanup */
188 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { 376 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
189 switch (rsp->un.ulpWord[4] & 0xff) { 377 job->request_payload.sg_cnt, DMA_TO_DEVICE);
190 case IOERR_SEQUENCE_TIMEOUT: 378 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
191 rc = -ETIMEDOUT; 379 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
192 break;
193 case IOERR_INVALID_RPI:
194 rc = -EFAULT;
195 break;
196 default:
197 rc = -EACCES;
198 break;
199 }
200 goto free_outdmp;
201 }
202 } else
203 job->reply->reply_payload_rcv_len =
204 rsp->un.genreq64.bdl.bdeSize;
205 380
206free_outdmp:
207 spin_lock_irq(shost->host_lock);
208 lpfc_mbuf_free(phba, bmp->virt, bmp->phys); 381 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
209free_bmp: 382
210 kfree(bmp);
211free_rspiocbq: 383free_rspiocbq:
212 lpfc_sli_release_iocbq(phba, rspiocbq); 384 lpfc_sli_release_iocbq(phba, rspiocbq);
213free_cmdiocbq: 385free_cmdiocbq:
214 lpfc_sli_release_iocbq(phba, cmdiocbq); 386 lpfc_sli_release_iocbq(phba, cmdiocbq);
215 spin_unlock_irq(shost->host_lock); 387free_bmp:
216free_ndlp_exit: 388 kfree(bmp);
389free_ndlp:
217 lpfc_nlp_put(ndlp); 390 lpfc_nlp_put(ndlp);
391no_ndlp:
392 kfree(dd_data);
393no_dd_data:
394 /* make error code available to userspace */
395 job->reply->result = rc;
396 job->dd_data = NULL;
397 return rc;
398}
218 399
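This function establishes the pattern used for every request type converted in this file: the blocking lpfc_sli_issue_iocb_wait() call is replaced by lpfc_sli_issue_iocb() plus an iocb_cmpl handler, with a bsg_job_data tracker hung off context1 so that normal completion and the fc transport's job timeout can arbitrate ownership under ct_ev_lock. The protocol, reduced to a sketch (illustrative fragment, not a literal quote of the driver):

    /* completion side: back out if the timeout path already took the job */
    spin_lock_irqsave(&phba->ct_ev_lock, flags);
    dd_data = cmdiocbq->context1;
    if (!dd_data) {                      /* timeout handler got there first */
            spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
            return;
    }
    job = dd_data->context_un.iocb.set_job;
    job->dd_data = NULL;                 /* stop the timeout path replying too */
    /* ... unmap DMA, translate ulpStatus into an errno, free resources ... */
    job->reply->result = rc;
    job->job_done(job);                  /* finish the bsg request */
    spin_unlock_irqrestore(&phba->ct_ev_lock, flags);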
400/**
401 * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler
402 * @phba: Pointer to HBA context object.
403 * @cmdiocbq: Pointer to command iocb.
404 * @rspiocbq: Pointer to response iocb.
405 *
406 * This function is the completion handler for iocbs issued using
407 * lpfc_bsg_rport_els function. This function is called by the
408 * ring event handler function without any lock held. This function
409 * can be called from both worker thread context and interrupt
410 * context. This function also can be called from another thread which
411 * cleans up the SLI layer objects.
412 * This function copies the contents of the response iocb to the
413 * response iocb memory object provided by the caller of
414 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
415 * sleeps for the iocb completion.
416 **/
417static void
418lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
419 struct lpfc_iocbq *cmdiocbq,
420 struct lpfc_iocbq *rspiocbq)
421{
422 struct bsg_job_data *dd_data;
423 struct fc_bsg_job *job;
424 IOCB_t *rsp;
425 struct lpfc_nodelist *ndlp;
426 struct lpfc_dmabuf *pbuflist = NULL;
427 struct fc_bsg_ctels_reply *els_reply;
428 uint8_t *rjt_data;
429 unsigned long flags;
430 int rc = 0;
431
432 spin_lock_irqsave(&phba->ct_ev_lock, flags);
433 dd_data = cmdiocbq->context1;
434 /* normal completion and timeout crossed paths, already done */
435 if (!dd_data) {
436 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
437 return;
438 }
439
440 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
441 if (cmdiocbq->context2 && rspiocbq)
442 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
443 &rspiocbq->iocb, sizeof(IOCB_t));
444
445 job = dd_data->context_un.iocb.set_job;
446 cmdiocbq = dd_data->context_un.iocb.cmdiocbq;
447 rspiocbq = dd_data->context_un.iocb.rspiocbq;
448 rsp = &rspiocbq->iocb;
449 ndlp = dd_data->context_un.iocb.ndlp;
450
451 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
452 job->request_payload.sg_cnt, DMA_TO_DEVICE);
453 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
454 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
455
456 if (job->reply->result == -EAGAIN)
457 rc = -EAGAIN;
458 else if (rsp->ulpStatus == IOSTAT_SUCCESS)
459 job->reply->reply_payload_rcv_len =
460 rsp->un.elsreq64.bdl.bdeSize;
461 else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
462 job->reply->reply_payload_rcv_len =
463 sizeof(struct fc_bsg_ctels_reply);
464 /* LS_RJT data returned in word 4 */
465 rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
466 els_reply = &job->reply->reply_data.ctels_reply;
467 els_reply->status = FC_CTELS_STATUS_REJECT;
468 els_reply->rjt_data.action = rjt_data[3];
469 els_reply->rjt_data.reason_code = rjt_data[2];
470 els_reply->rjt_data.reason_explanation = rjt_data[1];
471 els_reply->rjt_data.vendor_unique = rjt_data[0];
472 } else
473 rc = -EIO;
474
475 pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
476 lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
477 lpfc_sli_release_iocbq(phba, rspiocbq);
478 lpfc_sli_release_iocbq(phba, cmdiocbq);
479 lpfc_nlp_put(ndlp);
480 kfree(dd_data);
219 /* make error code available to userspace */ 481 /* make error code available to userspace */
220 job->reply->result = rc; 482 job->reply->result = rc;
483 job->dd_data = NULL;
221 /* complete the job back to userspace */ 484 /* complete the job back to userspace */
222 job->job_done(job); 485 job->job_done(job);
223 486 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
224 return 0; 487 return;
225} 488}
226 489
227/** 490/**
228 * lpfc_bsg_rport_els - send an ELS command from a bsg request 491 * lpfc_bsg_rport_els - send an ELS command from a bsg request
229 * @job: fc_bsg_job to handle 492 * @job: fc_bsg_job to handle
230 */ 493 **/
231static int 494static int
232lpfc_bsg_rport_els(struct fc_bsg_job *job) 495lpfc_bsg_rport_els(struct fc_bsg_job *job)
233{ 496{
@@ -235,7 +498,6 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
235 struct lpfc_hba *phba = vport->phba; 498 struct lpfc_hba *phba = vport->phba;
236 struct lpfc_rport_data *rdata = job->rport->dd_data; 499 struct lpfc_rport_data *rdata = job->rport->dd_data;
237 struct lpfc_nodelist *ndlp = rdata->pnode; 500 struct lpfc_nodelist *ndlp = rdata->pnode;
238
239 uint32_t elscmd; 501 uint32_t elscmd;
240 uint32_t cmdsize; 502 uint32_t cmdsize;
241 uint32_t rspsize; 503 uint32_t rspsize;
@@ -247,20 +509,30 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
247 struct lpfc_dmabuf *prsp; 509 struct lpfc_dmabuf *prsp;
248 struct lpfc_dmabuf *pbuflist = NULL; 510 struct lpfc_dmabuf *pbuflist = NULL;
249 struct ulp_bde64 *bpl; 511 struct ulp_bde64 *bpl;
250 int iocb_status;
251 int request_nseg; 512 int request_nseg;
252 int reply_nseg; 513 int reply_nseg;
253 struct scatterlist *sgel = NULL; 514 struct scatterlist *sgel = NULL;
254 int numbde; 515 int numbde;
255 dma_addr_t busaddr; 516 dma_addr_t busaddr;
517 struct bsg_job_data *dd_data;
518 uint32_t creg_val;
256 int rc = 0; 519 int rc = 0;
257 520
258 /* in case no data is transferred */ 521 /* in case no data is transferred */
259 job->reply->reply_payload_rcv_len = 0; 522 job->reply->reply_payload_rcv_len = 0;
260 523
524 /* allocate our bsg tracking structure */
525 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
526 if (!dd_data) {
527 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
528 "2735 Failed allocation of dd_data\n");
529 rc = -ENOMEM;
530 goto no_dd_data;
531 }
532
261 if (!lpfc_nlp_get(ndlp)) { 533 if (!lpfc_nlp_get(ndlp)) {
262 rc = -ENODEV; 534 rc = -ENODEV;
263 goto out; 535 goto free_dd_data;
264 } 536 }
265 537
266 elscmd = job->request->rqst_data.r_els.els_code; 538 elscmd = job->request->rqst_data.r_els.els_code;
@@ -270,24 +542,24 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
270 if (!rspiocbq) { 542 if (!rspiocbq) {
271 lpfc_nlp_put(ndlp); 543 lpfc_nlp_put(ndlp);
272 rc = -ENOMEM; 544 rc = -ENOMEM;
273 goto out; 545 goto free_dd_data;
274 } 546 }
275 547
276 rsp = &rspiocbq->iocb; 548 rsp = &rspiocbq->iocb;
277 rpi = ndlp->nlp_rpi; 549 rpi = ndlp->nlp_rpi;
278 550
279 cmdiocbq = lpfc_prep_els_iocb(phba->pport, 1, cmdsize, 0, ndlp, 551 cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
280 ndlp->nlp_DID, elscmd); 552 ndlp->nlp_DID, elscmd);
281
282 if (!cmdiocbq) { 553 if (!cmdiocbq) {
283 lpfc_sli_release_iocbq(phba, rspiocbq); 554 rc = -EIO;
284 return -EIO; 555 goto free_rspiocbq;
285 } 556 }
286 557
287 job->dd_data = cmdiocbq; 558 /* prep els iocb set context1 to the ndlp, context2 to the command
559 * dmabuf, context3 holds the data dmabuf
560 */
288 pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2; 561 pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2;
289 prsp = (struct lpfc_dmabuf *) pcmd->list.next; 562 prsp = (struct lpfc_dmabuf *) pcmd->list.next;
290
291 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys); 563 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
292 kfree(pcmd); 564 kfree(pcmd);
293 lpfc_mbuf_free(phba, prsp->virt, prsp->phys); 565 lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
@@ -299,7 +571,6 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
299 571
300 request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list, 572 request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
301 job->request_payload.sg_cnt, DMA_TO_DEVICE); 573 job->request_payload.sg_cnt, DMA_TO_DEVICE);
302
303 for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) { 574 for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
304 busaddr = sg_dma_address(sgel); 575 busaddr = sg_dma_address(sgel);
305 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 576 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
@@ -321,7 +592,6 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
321 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr)); 592 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
322 bpl++; 593 bpl++;
323 } 594 }
324
325 cmdiocbq->iocb.un.elsreq64.bdl.bdeSize = 595 cmdiocbq->iocb.un.elsreq64.bdl.bdeSize =
326 (request_nseg + reply_nseg) * sizeof(struct ulp_bde64); 596 (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
327 cmdiocbq->iocb.ulpContext = rpi; 597 cmdiocbq->iocb.ulpContext = rpi;
@@ -329,102 +599,62 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
329 cmdiocbq->context1 = NULL; 599 cmdiocbq->context1 = NULL;
330 cmdiocbq->context2 = NULL; 600 cmdiocbq->context2 = NULL;
331 601
332 iocb_status = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, 602 cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
333 rspiocbq, (phba->fc_ratov * 2) 603 cmdiocbq->context1 = dd_data;
334 + LPFC_DRVR_TIMEOUT); 604 cmdiocbq->context2 = rspiocbq;
335 605 dd_data->type = TYPE_IOCB;
336 /* release the new ndlp once the iocb completes */ 606 dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
337 lpfc_nlp_put(ndlp); 607 dd_data->context_un.iocb.rspiocbq = rspiocbq;
338 if (iocb_status != IOCB_TIMEDOUT) { 608 dd_data->context_un.iocb.set_job = job;
339 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, 609 dd_data->context_un.iocb.bmp = NULL;
340 job->request_payload.sg_cnt, DMA_TO_DEVICE); 610 dd_data->context_un.iocb.ndlp = ndlp;
341 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, 611
342 job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 612 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
613 creg_val = readl(phba->HCregaddr);
614 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
615 writel(creg_val, phba->HCregaddr);
616 readl(phba->HCregaddr); /* flush */
343 } 617 }
618 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
619 lpfc_nlp_put(ndlp);
620 if (rc == IOCB_SUCCESS)
621 return 0; /* done for now */
344 622
345 if (iocb_status == IOCB_SUCCESS) { 623 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
346 if (rsp->ulpStatus == IOSTAT_SUCCESS) { 624 job->request_payload.sg_cnt, DMA_TO_DEVICE);
347 job->reply->reply_payload_rcv_len = 625 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
348 rsp->un.elsreq64.bdl.bdeSize; 626 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
349 rc = 0;
350 } else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
351 struct fc_bsg_ctels_reply *els_reply;
352 /* LS_RJT data returned in word 4 */
353 uint8_t *rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
354
355 els_reply = &job->reply->reply_data.ctels_reply;
356 job->reply->result = 0;
357 els_reply->status = FC_CTELS_STATUS_REJECT;
358 els_reply->rjt_data.action = rjt_data[0];
359 els_reply->rjt_data.reason_code = rjt_data[1];
360 els_reply->rjt_data.reason_explanation = rjt_data[2];
361 els_reply->rjt_data.vendor_unique = rjt_data[3];
362 } else
363 rc = -EIO;
364 } else
365 rc = -EIO;
366 627
367 if (iocb_status != IOCB_TIMEDOUT) 628 lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
368 lpfc_els_free_iocb(phba, cmdiocbq); 629
630 lpfc_sli_release_iocbq(phba, cmdiocbq);
369 631
632free_rspiocbq:
370 lpfc_sli_release_iocbq(phba, rspiocbq); 633 lpfc_sli_release_iocbq(phba, rspiocbq);
371 634
372out: 635free_dd_data:
636 kfree(dd_data);
637
638no_dd_data:
373 /* make error code available to userspace */ 639 /* make error code available to userspace */
374 job->reply->result = rc; 640 job->reply->result = rc;
375 /* complete the job back to userspace */ 641 job->dd_data = NULL;
376 job->job_done(job); 642 return rc;
377
378 return 0;
379}
380
381struct lpfc_ct_event {
382 struct list_head node;
383 int ref;
384 wait_queue_head_t wq;
385
386 /* Event type and waiter identifiers */
387 uint32_t type_mask;
388 uint32_t req_id;
389 uint32_t reg_id;
390
391 /* next two flags are here for the auto-delete logic */
392 unsigned long wait_time_stamp;
393 int waiting;
394
395 /* seen and not seen events */
396 struct list_head events_to_get;
397 struct list_head events_to_see;
398};
399
400struct event_data {
401 struct list_head node;
402 uint32_t type;
403 uint32_t immed_dat;
404 void *data;
405 uint32_t len;
406};
407
408static struct lpfc_ct_event *
409lpfc_ct_event_new(int ev_reg_id, uint32_t ev_req_id)
410{
411 struct lpfc_ct_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);
412 if (!evt)
413 return NULL;
414
415 INIT_LIST_HEAD(&evt->events_to_get);
416 INIT_LIST_HEAD(&evt->events_to_see);
417 evt->req_id = ev_req_id;
418 evt->reg_id = ev_reg_id;
419 evt->wait_time_stamp = jiffies;
420 init_waitqueue_head(&evt->wq);
421
422 return evt;
423} 643}
424 644
645/**
646 * lpfc_bsg_event_free - frees an allocated event structure
647 * @kref: Pointer to a kref.
648 *
649 * Called from kref_put. Back cast the kref into an event structure address.
650 * Free any events to get, delete associated nodes, free any events to see,
651 * free any data then free the event itself.
652 **/
425static void 653static void
426lpfc_ct_event_free(struct lpfc_ct_event *evt) 654lpfc_bsg_event_free(struct kref *kref)
427{ 655{
656 struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
657 kref);
428 struct event_data *ed; 658 struct event_data *ed;
429 659
430 list_del(&evt->node); 660 list_del(&evt->node);
@@ -446,25 +676,82 @@ lpfc_ct_event_free(struct lpfc_ct_event *evt)
446 kfree(evt); 676 kfree(evt);
447} 677}
448 678
679/**
680 * lpfc_bsg_event_ref - increments the kref for an event
681 * @evt: Pointer to an event structure.
682 **/
449static inline void 683static inline void
450lpfc_ct_event_ref(struct lpfc_ct_event *evt) 684lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
451{ 685{
452 evt->ref++; 686 kref_get(&evt->kref);
453} 687}
454 688
689/**
690 * lpfc_bsg_event_unref - Uses kref_put to free an event structure
691 * @evt: Pointer to an event structure.
692 **/
455static inline void 693static inline void
456lpfc_ct_event_unref(struct lpfc_ct_event *evt) 694lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
457{ 695{
458 if (--evt->ref < 0) 696 kref_put(&evt->kref, lpfc_bsg_event_free);
459 lpfc_ct_event_free(evt);
460} 697}
461 698
462#define SLI_CT_ELX_LOOPBACK 0x10 699/**
700 * lpfc_bsg_event_new - allocate and initialize an event structure
701 * @ev_mask: Mask of events.
702 * @ev_reg_id: Event reg id.
703 * @ev_req_id: Event request id.
704 **/
705static struct lpfc_bsg_event *
706lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
707{
708 struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);
463 709
464enum ELX_LOOPBACK_CMD { 710 if (!evt)
465 ELX_LOOPBACK_XRI_SETUP, 711 return NULL;
466 ELX_LOOPBACK_DATA, 712
467}; 713 INIT_LIST_HEAD(&evt->events_to_get);
714 INIT_LIST_HEAD(&evt->events_to_see);
715 evt->type_mask = ev_mask;
716 evt->req_id = ev_req_id;
717 evt->reg_id = ev_reg_id;
718 evt->wait_time_stamp = jiffies;
719 init_waitqueue_head(&evt->wq);
720 kref_init(&evt->kref);
721 return evt;
722}
723
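The open-coded evt->ref counter of the old code is replaced above by the kernel's kref API; the essential shape of that pattern, as a self-contained sketch (generic example, not lpfc code):

    #include <linux/kref.h>
    #include <linux/slab.h>

    struct foo {
            struct kref kref;
            /* payload ... */
    };

    static void foo_release(struct kref *kref)
    {
            /* back-cast from the embedded kref to the containing object */
            struct foo *f = container_of(kref, struct foo, kref);
            kfree(f);
    }

    static struct foo *foo_alloc(void)
    {
            struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
            if (f)
                    kref_init(&f->kref); /* reference count starts at 1 */
            return f;
    }

    /* every kref_get() is paired with a kref_put(); the release callback
     * runs exactly once, when the count drops to zero */
    static void foo_get(struct foo *f) { kref_get(&f->kref); }
    static void foo_put(struct foo *f) { kref_put(&f->kref, foo_release); }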
724/**
725 * diag_cmd_data_free - Frees an lpfc dma buffer extension
726 * @phba: Pointer to HBA context object.
727 * @mlist: Pointer to an lpfc dma buffer extension.
728 **/
729static int
730diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
731{
732 struct lpfc_dmabufext *mlast;
733 struct pci_dev *pcidev;
734 struct list_head head, *curr, *next;
735
736 if ((!mlist) || (!lpfc_is_link_up(phba) &&
737 (phba->link_flag & LS_LOOPBACK_MODE))) {
738 return 0;
739 }
740
741 pcidev = phba->pcidev;
742 list_add_tail(&head, &mlist->dma.list);
743
744 list_for_each_safe(curr, next, &head) {
745 mlast = list_entry(curr, struct lpfc_dmabufext , dma.list);
746 if (mlast->dma.virt)
747 dma_free_coherent(&pcidev->dev,
748 mlast->size,
749 mlast->dma.virt,
750 mlast->dma.phys);
751 kfree(mlast);
752 }
753 return 0;
754}
468 755
469/** 756/**
470 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command 757 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
@@ -473,9 +760,9 @@ enum ELX_LOOPBACK_CMD {
473 * @piocbq: 760 * @piocbq:
474 * 761 *
475 * This function is called when an unsolicited CT command is received. It 762 * This function is called when an unsolicited CT command is received. It
476 * forwards the event to any processes registerd to receive CT events. 763 * forwards the event to any processes registered to receive CT events.
477 */ 764 **/
478void 765int
479lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 766lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
480 struct lpfc_iocbq *piocbq) 767 struct lpfc_iocbq *piocbq)
481{ 768{
@@ -483,7 +770,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
483 uint32_t cmd; 770 uint32_t cmd;
484 uint32_t len; 771 uint32_t len;
485 struct lpfc_dmabuf *dmabuf = NULL; 772 struct lpfc_dmabuf *dmabuf = NULL;
486 struct lpfc_ct_event *evt; 773 struct lpfc_bsg_event *evt;
487 struct event_data *evt_dat = NULL; 774 struct event_data *evt_dat = NULL;
488 struct lpfc_iocbq *iocbq; 775 struct lpfc_iocbq *iocbq;
489 size_t offset = 0; 776 size_t offset = 0;
@@ -495,6 +782,9 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
495 struct lpfc_dmabuf *bdeBuf2 = piocbq->context3; 782 struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
496 struct lpfc_hbq_entry *hbqe; 783 struct lpfc_hbq_entry *hbqe;
497 struct lpfc_sli_ct_request *ct_req; 784 struct lpfc_sli_ct_request *ct_req;
785 struct fc_bsg_job *job = NULL;
786 unsigned long flags;
787 int size = 0;
498 788
499 INIT_LIST_HEAD(&head); 789 INIT_LIST_HEAD(&head);
500 list_add_tail(&head, &piocbq->list); 790 list_add_tail(&head, &piocbq->list);
@@ -503,6 +793,10 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
503 piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0) 793 piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
504 goto error_ct_unsol_exit; 794 goto error_ct_unsol_exit;
505 795
796 if (phba->link_state == LPFC_HBA_ERROR ||
797 (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)))
798 goto error_ct_unsol_exit;
799
506 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) 800 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
507 dmabuf = bdeBuf1; 801 dmabuf = bdeBuf1;
508 else { 802 else {
@@ -510,7 +804,8 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
510 piocbq->iocb.un.cont64[0].addrLow); 804 piocbq->iocb.un.cont64[0].addrLow);
511 dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr); 805 dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
512 } 806 }
513 807 if (dmabuf == NULL)
808 goto error_ct_unsol_exit;
514 ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt; 809 ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
515 evt_req_id = ct_req->FsType; 810 evt_req_id = ct_req->FsType;
516 cmd = ct_req->CommandResponse.bits.CmdRsp; 811 cmd = ct_req->CommandResponse.bits.CmdRsp;
@@ -518,24 +813,24 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
518 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) 813 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
519 lpfc_sli_ringpostbuf_put(phba, pring, dmabuf); 814 lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);
520 815
521 mutex_lock(&phba->ct_event_mutex); 816 spin_lock_irqsave(&phba->ct_ev_lock, flags);
522 list_for_each_entry(evt, &phba->ct_ev_waiters, node) { 817 list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
523 if (evt->req_id != evt_req_id) 818 if (!(evt->type_mask & FC_REG_CT_EVENT) ||
819 evt->req_id != evt_req_id)
524 continue; 820 continue;
525 821
526 lpfc_ct_event_ref(evt); 822 lpfc_bsg_event_ref(evt);
527 823 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
528 evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL); 824 evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
529 if (!evt_dat) { 825 if (evt_dat == NULL) {
530 lpfc_ct_event_unref(evt); 826 spin_lock_irqsave(&phba->ct_ev_lock, flags);
827 lpfc_bsg_event_unref(evt);
531 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 828 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
532 "2614 Memory allocation failed for " 829 "2614 Memory allocation failed for "
533 "CT event\n"); 830 "CT event\n");
534 break; 831 break;
535 } 832 }
536 833
537 mutex_unlock(&phba->ct_event_mutex);
538
539 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 834 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
540 /* take accumulated byte count from the last iocbq */ 835 /* take accumulated byte count from the last iocbq */
541 iocbq = list_entry(head.prev, typeof(*iocbq), list); 836 iocbq = list_entry(head.prev, typeof(*iocbq), list);
@@ -549,25 +844,25 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
549 } 844 }
550 845
551 evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL); 846 evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
552 if (!evt_dat->data) { 847 if (evt_dat->data == NULL) {
553 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 848 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
554 "2615 Memory allocation failed for " 849 "2615 Memory allocation failed for "
555 "CT event data, size %d\n", 850 "CT event data, size %d\n",
556 evt_dat->len); 851 evt_dat->len);
557 kfree(evt_dat); 852 kfree(evt_dat);
558 mutex_lock(&phba->ct_event_mutex); 853 spin_lock_irqsave(&phba->ct_ev_lock, flags);
559 lpfc_ct_event_unref(evt); 854 lpfc_bsg_event_unref(evt);
560 mutex_unlock(&phba->ct_event_mutex); 855 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
561 goto error_ct_unsol_exit; 856 goto error_ct_unsol_exit;
562 } 857 }
563 858
564 list_for_each_entry(iocbq, &head, list) { 859 list_for_each_entry(iocbq, &head, list) {
860 size = 0;
565 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 861 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
566 bdeBuf1 = iocbq->context2; 862 bdeBuf1 = iocbq->context2;
567 bdeBuf2 = iocbq->context3; 863 bdeBuf2 = iocbq->context3;
568 } 864 }
569 for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) { 865 for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
570 int size = 0;
571 if (phba->sli3_options & 866 if (phba->sli3_options &
572 LPFC_SLI3_HBQ_ENABLED) { 867 LPFC_SLI3_HBQ_ENABLED) {
573 if (i == 0) { 868 if (i == 0) {
@@ -600,9 +895,11 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
600 iocbq); 895 iocbq);
601 kfree(evt_dat->data); 896 kfree(evt_dat->data);
602 kfree(evt_dat); 897 kfree(evt_dat);
603 mutex_lock(&phba->ct_event_mutex); 898 spin_lock_irqsave(&phba->ct_ev_lock,
604 lpfc_ct_event_unref(evt); 899 flags);
605 mutex_unlock(&phba->ct_event_mutex); 900 lpfc_bsg_event_unref(evt);
901 spin_unlock_irqrestore(
902 &phba->ct_ev_lock, flags);
606 goto error_ct_unsol_exit; 903 goto error_ct_unsol_exit;
607 } 904 }
608 memcpy((char *)(evt_dat->data) + offset, 905 memcpy((char *)(evt_dat->data) + offset,
@@ -615,15 +912,24 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
615 dmabuf); 912 dmabuf);
616 } else { 913 } else {
617 switch (cmd) { 914 switch (cmd) {
915 case ELX_LOOPBACK_DATA:
916 diag_cmd_data_free(phba,
917 (struct lpfc_dmabufext *)
918 dmabuf);
919 break;
618 case ELX_LOOPBACK_XRI_SETUP: 920 case ELX_LOOPBACK_XRI_SETUP:
619 if (!(phba->sli3_options & 921 if ((phba->sli_rev ==
620 LPFC_SLI3_HBQ_ENABLED)) 922 LPFC_SLI_REV2) ||
923 (phba->sli3_options &
924 LPFC_SLI3_HBQ_ENABLED
925 )) {
926 lpfc_in_buf_free(phba,
927 dmabuf);
928 } else {
621 lpfc_post_buffer(phba, 929 lpfc_post_buffer(phba,
622 pring, 930 pring,
623 1); 931 1);
624 else 932 }
625 lpfc_in_buf_free(phba,
626 dmabuf);
627 break; 933 break;
628 default: 934 default:
629 if (!(phba->sli3_options & 935 if (!(phba->sli3_options &
@@ -637,7 +943,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
637 } 943 }
638 } 944 }
639 945
640 mutex_lock(&phba->ct_event_mutex); 946 spin_lock_irqsave(&phba->ct_ev_lock, flags);
641 if (phba->sli_rev == LPFC_SLI_REV4) { 947 if (phba->sli_rev == LPFC_SLI_REV4) {
642 evt_dat->immed_dat = phba->ctx_idx; 948 evt_dat->immed_dat = phba->ctx_idx;
643 phba->ctx_idx = (phba->ctx_idx + 1) % 64; 949 phba->ctx_idx = (phba->ctx_idx + 1) % 64;
@@ -650,122 +956,144 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
650 956
651 evt_dat->type = FC_REG_CT_EVENT; 957 evt_dat->type = FC_REG_CT_EVENT;
652 list_add(&evt_dat->node, &evt->events_to_see); 958 list_add(&evt_dat->node, &evt->events_to_see);
653 wake_up_interruptible(&evt->wq); 959 if (evt_req_id == SLI_CT_ELX_LOOPBACK) {
654 lpfc_ct_event_unref(evt); 960 wake_up_interruptible(&evt->wq);
655 if (evt_req_id == SLI_CT_ELX_LOOPBACK) 961 lpfc_bsg_event_unref(evt);
656 break; 962 break;
963 }
964
965 list_move(evt->events_to_see.prev, &evt->events_to_get);
966 lpfc_bsg_event_unref(evt);
967
968 job = evt->set_job;
969 evt->set_job = NULL;
970 if (job) {
971 job->reply->reply_payload_rcv_len = size;
972 /* make error code available to userspace */
973 job->reply->result = 0;
974 job->dd_data = NULL;
975 /* complete the job back to userspace */
976 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
977 job->job_done(job);
978 spin_lock_irqsave(&phba->ct_ev_lock, flags);
979 }
657 } 980 }
658 mutex_unlock(&phba->ct_event_mutex); 981 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
659 982
660error_ct_unsol_exit: 983error_ct_unsol_exit:
661 if (!list_empty(&head)) 984 if (!list_empty(&head))
662 list_del(&head); 985 list_del(&head);
663 986 if (evt_req_id == SLI_CT_ELX_LOOPBACK)
664 return; 987 return 0;
988 return 1;
665} 989}
666 990
667/** 991/**
668 * lpfc_bsg_set_event - process a SET_EVENT bsg vendor command 992 * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
669 * @job: SET_EVENT fc_bsg_job 993 * @job: SET_EVENT fc_bsg_job
670 */ 994 **/
671static int 995static int
672lpfc_bsg_set_event(struct fc_bsg_job *job) 996lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
673{ 997{
674 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; 998 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
675 struct lpfc_hba *phba = vport->phba; 999 struct lpfc_hba *phba = vport->phba;
676 struct set_ct_event *event_req; 1000 struct set_ct_event *event_req;
677 struct lpfc_ct_event *evt; 1001 struct lpfc_bsg_event *evt;
678 int rc = 0; 1002 int rc = 0;
1003 struct bsg_job_data *dd_data = NULL;
1004 uint32_t ev_mask;
1005 unsigned long flags;
679 1006
680 if (job->request_len < 1007 if (job->request_len <
681 sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) { 1008 sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
682 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 1009 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
683 "2612 Received SET_CT_EVENT below minimum " 1010 "2612 Received SET_CT_EVENT below minimum "
684 "size\n"); 1011 "size\n");
685 return -EINVAL; 1012 rc = -EINVAL;
1013 goto job_error;
1014 }
1015
1016 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
1017 if (dd_data == NULL) {
1018 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1019 "2734 Failed allocation of dd_data\n");
1020 rc = -ENOMEM;
1021 goto job_error;
686 } 1022 }
687 1023
688 event_req = (struct set_ct_event *) 1024 event_req = (struct set_ct_event *)
689 job->request->rqst_data.h_vendor.vendor_cmd; 1025 job->request->rqst_data.h_vendor.vendor_cmd;
690 1026 ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
691 mutex_lock(&phba->ct_event_mutex); 1027 FC_REG_EVENT_MASK);
1028 spin_lock_irqsave(&phba->ct_ev_lock, flags);
692 list_for_each_entry(evt, &phba->ct_ev_waiters, node) { 1029 list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
693 if (evt->reg_id == event_req->ev_reg_id) { 1030 if (evt->reg_id == event_req->ev_reg_id) {
694 lpfc_ct_event_ref(evt); 1031 lpfc_bsg_event_ref(evt);
695 evt->wait_time_stamp = jiffies; 1032 evt->wait_time_stamp = jiffies;
696 break; 1033 break;
697 } 1034 }
698 } 1035 }
699 mutex_unlock(&phba->ct_event_mutex); 1036 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
700 1037
701 if (&evt->node == &phba->ct_ev_waiters) { 1038 if (&evt->node == &phba->ct_ev_waiters) {
702 /* no event waiting struct yet - first call */ 1039 /* no event waiting struct yet - first call */
703 evt = lpfc_ct_event_new(event_req->ev_reg_id, 1040 evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
704 event_req->ev_req_id); 1041 event_req->ev_req_id);
705 if (!evt) { 1042 if (!evt) {
706 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 1043 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
707 "2617 Failed allocation of event " 1044 "2617 Failed allocation of event "
708 "waiter\n"); 1045 "waiter\n");
709 return -ENOMEM; 1046 rc = -ENOMEM;
1047 goto job_error;
710 } 1048 }
711 1049
712 mutex_lock(&phba->ct_event_mutex); 1050 spin_lock_irqsave(&phba->ct_ev_lock, flags);
713 list_add(&evt->node, &phba->ct_ev_waiters); 1051 list_add(&evt->node, &phba->ct_ev_waiters);
714 lpfc_ct_event_ref(evt); 1052 lpfc_bsg_event_ref(evt);
715 mutex_unlock(&phba->ct_event_mutex); 1053 evt->wait_time_stamp = jiffies;
1054 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
716 } 1055 }
717 1056
1057 spin_lock_irqsave(&phba->ct_ev_lock, flags);
718 evt->waiting = 1; 1058 evt->waiting = 1;
719 if (wait_event_interruptible(evt->wq, 1059 dd_data->type = TYPE_EVT;
720 !list_empty(&evt->events_to_see))) { 1060 dd_data->context_un.evt = evt;
721 mutex_lock(&phba->ct_event_mutex); 1061 evt->set_job = job; /* for unsolicited command */
722 lpfc_ct_event_unref(evt); /* release ref */ 1062 job->dd_data = dd_data; /* for fc transport timeout callback*/
723 lpfc_ct_event_unref(evt); /* delete */ 1063 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
724 mutex_unlock(&phba->ct_event_mutex); 1064 return 0; /* call job done later */
725 rc = -EINTR; 1065
726 goto set_event_out; 1066job_error:
727 } 1067 if (dd_data != NULL)
728 1068 kfree(dd_data);
729 evt->wait_time_stamp = jiffies; 1069
730 evt->waiting = 0; 1070 job->dd_data = NULL;
731 1071 return rc;
732 mutex_lock(&phba->ct_event_mutex);
733 list_move(evt->events_to_see.prev, &evt->events_to_get);
734 lpfc_ct_event_unref(evt); /* release ref */
735 mutex_unlock(&phba->ct_event_mutex);
736
737set_event_out:
738 /* set_event carries no reply payload */
739 job->reply->reply_payload_rcv_len = 0;
740 /* make error code available to userspace */
741 job->reply->result = rc;
742 /* complete the job back to userspace */
743 job->job_done(job);
744
745 return 0;
746} 1072}
747 1073
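Note the behavioural change in SET_EVENT handling: the old code slept in wait_event_interruptible() until an event arrived, whereas the new code only registers the waiter and returns with the job still open; lpfc_bsg_ct_unsol_event() above completes it when a matching CT event shows up. In outline (illustrative fragments):

    /* lpfc_bsg_hba_set_event(): register and return, no sleeping */
    evt->set_job = job;       /* the unsolicited handler completes this job */
    job->dd_data = dd_data;   /* lets the fc transport timeout find the job */
    return 0;                 /* job_done() is called later, not here */

    /* lpfc_bsg_ct_unsol_event(): on a matching event */
    job = evt->set_job;
    evt->set_job = NULL;
    if (job)
            job->job_done(job);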
748/** 1074/**
749 * lpfc_bsg_get_event - process a GET_EVENT bsg vendor command 1075 * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
750 * @job: GET_EVENT fc_bsg_job 1076 * @job: GET_EVENT fc_bsg_job
751 */ 1077 **/
752static int 1078static int
753lpfc_bsg_get_event(struct fc_bsg_job *job) 1079lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
754{ 1080{
755 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; 1081 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
756 struct lpfc_hba *phba = vport->phba; 1082 struct lpfc_hba *phba = vport->phba;
757 struct get_ct_event *event_req; 1083 struct get_ct_event *event_req;
758 struct get_ct_event_reply *event_reply; 1084 struct get_ct_event_reply *event_reply;
759 struct lpfc_ct_event *evt; 1085 struct lpfc_bsg_event *evt;
760 struct event_data *evt_dat = NULL; 1086 struct event_data *evt_dat = NULL;
761 int rc = 0; 1087 unsigned long flags;
1088 uint32_t rc = 0;
762 1089
763 if (job->request_len < 1090 if (job->request_len <
764 sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) { 1091 sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
765 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 1092 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
766 "2613 Received GET_CT_EVENT request below " 1093 "2613 Received GET_CT_EVENT request below "
767 "minimum size\n"); 1094 "minimum size\n");
768 return -EINVAL; 1095 rc = -EINVAL;
1096 goto job_error;
769 } 1097 }
770 1098
771 event_req = (struct get_ct_event *) 1099 event_req = (struct get_ct_event *)
@@ -773,13 +1101,12 @@ lpfc_bsg_get_event(struct fc_bsg_job *job)
773 1101
774 event_reply = (struct get_ct_event_reply *) 1102 event_reply = (struct get_ct_event_reply *)
775 job->reply->reply_data.vendor_reply.vendor_rsp; 1103 job->reply->reply_data.vendor_reply.vendor_rsp;
776 1104 spin_lock_irqsave(&phba->ct_ev_lock, flags);
777 mutex_lock(&phba->ct_event_mutex);
778 list_for_each_entry(evt, &phba->ct_ev_waiters, node) { 1105 list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
779 if (evt->reg_id == event_req->ev_reg_id) { 1106 if (evt->reg_id == event_req->ev_reg_id) {
780 if (list_empty(&evt->events_to_get)) 1107 if (list_empty(&evt->events_to_get))
781 break; 1108 break;
782 lpfc_ct_event_ref(evt); 1109 lpfc_bsg_event_ref(evt);
783 evt->wait_time_stamp = jiffies; 1110 evt->wait_time_stamp = jiffies;
784 evt_dat = list_entry(evt->events_to_get.prev, 1111 evt_dat = list_entry(evt->events_to_get.prev,
785 struct event_data, node); 1112 struct event_data, node);
@@ -787,84 +1114,1904 @@ lpfc_bsg_get_event(struct fc_bsg_job *job)
787 break; 1114 break;
788 } 1115 }
789 } 1116 }
790 mutex_unlock(&phba->ct_event_mutex); 1117 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
791 1118
792 if (!evt_dat) { 1119 /* The app may continue to ask for event data until it gets
1120 * an error indicating that there isn't any more
1121 */
1122 if (evt_dat == NULL) {
793 job->reply->reply_payload_rcv_len = 0; 1123 job->reply->reply_payload_rcv_len = 0;
794 rc = -ENOENT; 1124 rc = -ENOENT;
795 goto error_get_event_exit; 1125 goto job_error;
796 } 1126 }
797 1127
798 if (evt_dat->len > job->reply_payload.payload_len) { 1128 if (evt_dat->len > job->request_payload.payload_len) {
799 evt_dat->len = job->reply_payload.payload_len; 1129 evt_dat->len = job->request_payload.payload_len;
800 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 1130 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
801 "2618 Truncated event data at %d " 1131 "2618 Truncated event data at %d "
802 "bytes\n", 1132 "bytes\n",
803 job->reply_payload.payload_len); 1133 job->request_payload.payload_len);
804 } 1134 }
805 1135
1136 event_reply->type = evt_dat->type;
806 event_reply->immed_data = evt_dat->immed_dat; 1137 event_reply->immed_data = evt_dat->immed_dat;
807
808 if (evt_dat->len > 0) 1138 if (evt_dat->len > 0)
809 job->reply->reply_payload_rcv_len = 1139 job->reply->reply_payload_rcv_len =
810 sg_copy_from_buffer(job->reply_payload.sg_list, 1140 sg_copy_from_buffer(job->request_payload.sg_list,
811 job->reply_payload.sg_cnt, 1141 job->request_payload.sg_cnt,
812 evt_dat->data, evt_dat->len); 1142 evt_dat->data, evt_dat->len);
813 else 1143 else
814 job->reply->reply_payload_rcv_len = 0; 1144 job->reply->reply_payload_rcv_len = 0;
815 rc = 0;
816 1145
817 if (evt_dat) 1146 if (evt_dat) {
818 kfree(evt_dat->data); 1147 kfree(evt_dat->data);
819 kfree(evt_dat); 1148 kfree(evt_dat);
820 mutex_lock(&phba->ct_event_mutex); 1149 }
821 lpfc_ct_event_unref(evt); 1150
822 mutex_unlock(&phba->ct_event_mutex); 1151 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1152 lpfc_bsg_event_unref(evt);
1153 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1154 job->dd_data = NULL;
1155 job->reply->result = 0;
1156 job->job_done(job);
1157 return 0;
1158
1159job_error:
1160 job->dd_data = NULL;
1161 job->reply->result = rc;
1162 return rc;
1163}
1164
1165/**
1166 * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler
1167 * @phba: Pointer to HBA context object.
1168 * @cmdiocbq: Pointer to command iocb.
1169 * @rspiocbq: Pointer to response iocb.
1170 *
1171 * This function is the completion handler for iocbs issued using
1172 * lpfc_issue_ct_rsp function. This function is called by the
1173 * ring event handler function without any lock held. This function
1174 * can be called from both worker thread context and interrupt
1175 * context. This function also can be called from another thread which
1176 * cleans up the SLI layer objects.
1177 * This function copies the contents of the response iocb to the
1178 * response iocb memory object provided by the caller of
1179 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
1180 * sleeps for the iocb completion.
1181 **/
1182static void
1183lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
1184 struct lpfc_iocbq *cmdiocbq,
1185 struct lpfc_iocbq *rspiocbq)
1186{
1187 struct bsg_job_data *dd_data;
1188 struct fc_bsg_job *job;
1189 IOCB_t *rsp;
1190 struct lpfc_dmabuf *bmp;
1191 struct lpfc_nodelist *ndlp;
1192 unsigned long flags;
1193 int rc = 0;
823 1194
824error_get_event_exit: 1195 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1196 dd_data = cmdiocbq->context1;
1197 /* normal completion and timeout crossed paths, already done */
1198 if (!dd_data) {
1199 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1200 return;
1201 }
1202
1203 job = dd_data->context_un.iocb.set_job;
1204 bmp = dd_data->context_un.iocb.bmp;
1205 rsp = &rspiocbq->iocb;
1206 ndlp = dd_data->context_un.iocb.ndlp;
1207
1208 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
1209 job->request_payload.sg_cnt, DMA_TO_DEVICE);
1210
1211 if (rsp->ulpStatus) {
1212 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
1213 switch (rsp->un.ulpWord[4] & 0xff) {
1214 case IOERR_SEQUENCE_TIMEOUT:
1215 rc = -ETIMEDOUT;
1216 break;
1217 case IOERR_INVALID_RPI:
1218 rc = -EFAULT;
1219 break;
1220 default:
1221 rc = -EACCES;
1222 break;
1223 }
1224 } else
1225 rc = -EACCES;
1226 } else
1227 job->reply->reply_payload_rcv_len =
1228 rsp->un.genreq64.bdl.bdeSize;
1229
1230 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
1231 lpfc_sli_release_iocbq(phba, cmdiocbq);
1232 lpfc_nlp_put(ndlp);
1233 kfree(bmp);
1234 kfree(dd_data);
825 /* make error code available to userspace */ 1235 /* make error code available to userspace */
826 job->reply->result = rc; 1236 job->reply->result = rc;
1237 job->dd_data = NULL;
827 /* complete the job back to userspace */ 1238 /* complete the job back to userspace */
828 job->job_done(job); 1239 job->job_done(job);
1240 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1241 return;
1242}
829 1243
1244/**
1245 * lpfc_issue_ct_rsp - issue a ct response
1246 * @phba: Pointer to HBA context object.
1247 * @job: Pointer to the job object.
1248 * @tag: tag index value into the ports context exchange array.
1249 * @bmp: Pointer to a dma buffer descriptor.
1250 * @num_entry: Number of entries in the bde.
1251 **/
1252static int
1253lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
1254 struct lpfc_dmabuf *bmp, int num_entry)
1255{
1256 IOCB_t *icmd;
1257 struct lpfc_iocbq *ctiocb = NULL;
1258 int rc = 0;
1259 struct lpfc_nodelist *ndlp = NULL;
1260 struct bsg_job_data *dd_data;
1261 uint32_t creg_val;
1262
1263 /* allocate our bsg tracking structure */
1264 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
1265 if (!dd_data) {
1266 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1267 "2736 Failed allocation of dd_data\n");
1268 rc = -ENOMEM;
1269 goto no_dd_data;
1270 }
1271
1272 /* Allocate buffer for command iocb */
1273 ctiocb = lpfc_sli_get_iocbq(phba);
1274 if (!ctiocb) {
1275		rc = -ENOMEM;
1276 goto no_ctiocb;
1277 }
1278
1279 icmd = &ctiocb->iocb;
1280 icmd->un.xseq64.bdl.ulpIoTag32 = 0;
1281 icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
1282 icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
1283 icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
1284 icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
1285 icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
1286 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
1287 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
1288 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
1289
1290 /* Fill in rest of iocb */
1291 icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
1292 icmd->ulpBdeCount = 1;
1293 icmd->ulpLe = 1;
1294 icmd->ulpClass = CLASS3;
1295 if (phba->sli_rev == LPFC_SLI_REV4) {
1296 /* Do not issue unsol response if oxid not marked as valid */
1297 if (!(phba->ct_ctx[tag].flags & UNSOL_VALID)) {
1298 rc = IOCB_ERROR;
1299 goto issue_ct_rsp_exit;
1300 }
1301 icmd->ulpContext = phba->ct_ctx[tag].oxid;
1302 ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
1303 if (!ndlp) {
1304 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
1305 "2721 ndlp null for oxid %x SID %x\n",
1306 icmd->ulpContext,
1307 phba->ct_ctx[tag].SID);
1308 rc = IOCB_ERROR;
1309 goto issue_ct_rsp_exit;
1310 }
1311 icmd->un.ulpWord[3] = ndlp->nlp_rpi;
1312 /* The exchange is done, mark the entry as invalid */
1313 phba->ct_ctx[tag].flags &= ~UNSOL_VALID;
1314 } else
1315 icmd->ulpContext = (ushort) tag;
1316
1317 icmd->ulpTimeout = phba->fc_ratov * 2;
1318
1319 /* Xmit CT response on exchange <xid> */
1320 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1321 "2722 Xmit CT response on exchange x%x Data: x%x x%x\n",
1322 icmd->ulpContext, icmd->ulpIoTag, phba->link_state);
1323
1324 ctiocb->iocb_cmpl = NULL;
1325 ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
1326 ctiocb->vport = phba->pport;
1327 ctiocb->context3 = bmp;
1328
1329 ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
1330 ctiocb->context1 = dd_data;
1331 ctiocb->context2 = NULL;
1332 dd_data->type = TYPE_IOCB;
1333 dd_data->context_un.iocb.cmdiocbq = ctiocb;
1334 dd_data->context_un.iocb.rspiocbq = NULL;
1335 dd_data->context_un.iocb.set_job = job;
1336 dd_data->context_un.iocb.bmp = bmp;
1337 dd_data->context_un.iocb.ndlp = ndlp;
1338
1339 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
1340 creg_val = readl(phba->HCregaddr);
1341 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
1342 writel(creg_val, phba->HCregaddr);
1343 readl(phba->HCregaddr); /* flush */
1344 }
1345
1346 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
1347
1348 if (rc == IOCB_SUCCESS)
1349 return 0; /* done for now */
1350
1351issue_ct_rsp_exit:
1352 lpfc_sli_release_iocbq(phba, ctiocb);
1353no_ctiocb:
1354 kfree(dd_data);
1355no_dd_data:
830 return rc; 1356 return rc;
831} 1357}
832 1358
833/** 1359/**
1360 * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command
1361 * @job: SEND_MGMT_RESP fc_bsg_job
1362 **/
1363static int
1364lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
1365{
1366 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
1367 struct lpfc_hba *phba = vport->phba;
1368 struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
1369 job->request->rqst_data.h_vendor.vendor_cmd;
1370 struct ulp_bde64 *bpl;
1371 struct lpfc_dmabuf *bmp = NULL;
1372 struct scatterlist *sgel = NULL;
1373 int request_nseg;
1374 int numbde;
1375 dma_addr_t busaddr;
1376 uint32_t tag = mgmt_resp->tag;
1377 unsigned long reqbfrcnt =
1378 (unsigned long)job->request_payload.payload_len;
1379 int rc = 0;
1380
1381 /* in case no data is transferred */
1382 job->reply->reply_payload_rcv_len = 0;
1383
1384 if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
1385 rc = -ERANGE;
1386 goto send_mgmt_rsp_exit;
1387 }
1388
1389 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1390 if (!bmp) {
1391 rc = -ENOMEM;
1392 goto send_mgmt_rsp_exit;
1393 }
1394
1395 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
1396 if (!bmp->virt) {
1397 rc = -ENOMEM;
1398 goto send_mgmt_rsp_free_bmp;
1399 }
1400
1401 INIT_LIST_HEAD(&bmp->list);
1402 bpl = (struct ulp_bde64 *) bmp->virt;
1403 request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
1404 job->request_payload.sg_cnt, DMA_TO_DEVICE);
1405 for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
1406 busaddr = sg_dma_address(sgel);
1407 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1408 bpl->tus.f.bdeSize = sg_dma_len(sgel);
1409 bpl->tus.w = cpu_to_le32(bpl->tus.w);
1410 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
1411 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
1412 bpl++;
1413 }
1414
1415 rc = lpfc_issue_ct_rsp(phba, job, tag, bmp, request_nseg);
1416
1417 if (rc == IOCB_SUCCESS)
1418 return 0; /* done for now */
1419
1420 /* TBD need to handle a timeout */
1421 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
1422 job->request_payload.sg_cnt, DMA_TO_DEVICE);
1423 rc = -EACCES;
1424 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
1425
1426send_mgmt_rsp_free_bmp:
1427 kfree(bmp);
1428send_mgmt_rsp_exit:
1429 /* make error code available to userspace */
1430 job->reply->result = rc;
1431 job->dd_data = NULL;
1432 return rc;
1433}
1434
1435/**
1436 * lpfc_bsg_diag_mode - process a LPFC_BSG_VENDOR_DIAG_MODE bsg vendor command
1437 * @job: LPFC_BSG_VENDOR_DIAG_MODE
1438 *
1439 * This function is responsible for placing a port into diagnostic loopback
1440 * mode in order to perform a diagnostic loopback test.
1441 * All new scsi requests are blocked, a small delay is used to allow
1442 * in-flight scsi requests to complete, and then the link is brought
1443 * down. Once the link is placed in loopback mode, scsi requests are
1444 * allowed again so the scsi mid-layer doesn't give up on the port.
1445 * All of this is done in-line.
1446 */
1447static int
1448lpfc_bsg_diag_mode(struct fc_bsg_job *job)
1449{
1450 struct Scsi_Host *shost = job->shost;
1451 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
1452 struct lpfc_hba *phba = vport->phba;
1453 struct diag_mode_set *loopback_mode;
1454 struct lpfc_sli *psli = &phba->sli;
1455 struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
1456 uint32_t link_flags;
1457 uint32_t timeout;
1458 struct lpfc_vport **vports;
1459 LPFC_MBOXQ_t *pmboxq;
1460 int mbxstatus;
1461 int i = 0;
1462 int rc = 0;
1463
1464 /* no data to return just the return code */
1465 job->reply->reply_payload_rcv_len = 0;
1466
1467 if (job->request_len <
1468 sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_set)) {
1469 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1470 "2738 Received DIAG MODE request below minimum "
1471 "size\n");
1472 rc = -EINVAL;
1473 goto job_error;
1474 }
1475
1476 loopback_mode = (struct diag_mode_set *)
1477 job->request->rqst_data.h_vendor.vendor_cmd;
1478 link_flags = loopback_mode->type;
1479 timeout = loopback_mode->timeout;
1480
1481 if ((phba->link_state == LPFC_HBA_ERROR) ||
1482 (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
1483 (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
1484 rc = -EACCES;
1485 goto job_error;
1486 }
1487
1488 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1489 if (!pmboxq) {
1490 rc = -ENOMEM;
1491 goto job_error;
1492 }
1493
1494 vports = lpfc_create_vport_work_array(phba);
1495 if (vports) {
1496 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1497 shost = lpfc_shost_from_vport(vports[i]);
1498 scsi_block_requests(shost);
1499 }
1500
1501 lpfc_destroy_vport_work_array(phba, vports);
1502 } else {
1503 shost = lpfc_shost_from_vport(phba->pport);
1504 scsi_block_requests(shost);
1505 }
1506
1507 while (pring->txcmplq_cnt) {
1508 if (i++ > 500) /* wait up to 5 seconds */
1509 break;
1510
1511 msleep(10);
1512 }
1513
1514 memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
1515 pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
1516 pmboxq->u.mb.mbxOwner = OWN_HOST;
1517
1518 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1519
1520 if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) {
1521 /* wait for link down before proceeding */
1522 i = 0;
1523 while (phba->link_state != LPFC_LINK_DOWN) {
1524 if (i++ > timeout) {
1525 rc = -ETIMEDOUT;
1526 goto loopback_mode_exit;
1527 }
1528
1529 msleep(10);
1530 }
1531
1532 memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
1533 if (link_flags == INTERNAL_LOOP_BACK)
1534 pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
1535 else
1536 pmboxq->u.mb.un.varInitLnk.link_flags =
1537 FLAGS_TOPOLOGY_MODE_LOOP;
1538
1539 pmboxq->u.mb.mbxCommand = MBX_INIT_LINK;
1540 pmboxq->u.mb.mbxOwner = OWN_HOST;
1541
1542 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
1543 LPFC_MBOX_TMO);
1544
1545 if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
1546 rc = -ENODEV;
1547 else {
1548 phba->link_flag |= LS_LOOPBACK_MODE;
1549 /* wait for the link attention interrupt */
1550 msleep(100);
1551
1552 i = 0;
1553 while (phba->link_state != LPFC_HBA_READY) {
1554 if (i++ > timeout) {
1555 rc = -ETIMEDOUT;
1556 break;
1557 }
1558
1559 msleep(10);
1560 }
1561 }
1562
1563 } else
1564 rc = -ENODEV;
1565
1566loopback_mode_exit:
1567 vports = lpfc_create_vport_work_array(phba);
1568 if (vports) {
1569 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1570 shost = lpfc_shost_from_vport(vports[i]);
1571 scsi_unblock_requests(shost);
1572 }
1573 lpfc_destroy_vport_work_array(phba, vports);
1574 } else {
1575 shost = lpfc_shost_from_vport(phba->pport);
1576 scsi_unblock_requests(shost);
1577 }
1578
1579 /*
1580 * Let SLI layer release mboxq if mbox command completed after timeout.
1581 */
1582 if (mbxstatus != MBX_TIMEOUT)
1583 mempool_free(pmboxq, phba->mbox_mem_pool);
1584
1585job_error:
1586 /* make error code available to userspace */
1587 job->reply->result = rc;
1588 /* complete the job back to userspace if no error */
1589 if (rc == 0)
1590 job->job_done(job);
1591 return rc;
1592}
1593
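For context, the caller side of this vendor command is an ordinary bsg SG_IO request against the host's bsg node. The sketch below is hypothetical userspace code: it assumes the scsi_bsg_fc.h definitions are visible to userspace (on older kernels they may need to be copied locally), that LPFC_BSG_VENDOR_DIAG_MODE has the value shown, and that diag_mode_set carries a command word followed by the type and timeout fields this handler reads:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/bsg.h>		/* struct sg_io_v4 */
#include <scsi/sg.h>		/* SG_IO */
#include <scsi/scsi_bsg_fc.h>	/* struct fc_bsg_request, FC_BSG_HST_VENDOR */

#define LPFC_BSG_VENDOR_DIAG_MODE 4	/* assumed value, see lpfc_bsg.h */

struct diag_mode_set {			/* assumed layout of the vendor cmd */
	uint32_t command;		/* LPFC_BSG_VENDOR_DIAG_MODE */
	uint32_t type;			/* e.g. INTERNAL_LOOP_BACK */
	uint32_t timeout;		/* seconds to wait for link changes */
};

static int lpfc_diag_mode(int bsg_fd, uint32_t loop_type, uint32_t timeout)
{
	unsigned char req[sizeof(struct fc_bsg_request) +
			  sizeof(struct diag_mode_set)];
	struct fc_bsg_request *bsg_req = (struct fc_bsg_request *)req;
	struct diag_mode_set *dms = (struct diag_mode_set *)
			bsg_req->rqst_data.h_vendor.vendor_cmd;
	struct fc_bsg_reply reply;
	struct sg_io_v4 io;

	memset(req, 0, sizeof(req));
	memset(&reply, 0, sizeof(reply));
	memset(&io, 0, sizeof(io));

	bsg_req->msgcode = FC_BSG_HST_VENDOR;
	dms->command = LPFC_BSG_VENDOR_DIAG_MODE;
	dms->type = loop_type;
	dms->timeout = timeout;

	io.guard = 'Q';
	io.protocol = BSG_PROTOCOL_SCSI;
	io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
	io.request = (uintptr_t)req;
	io.request_len = sizeof(req);
	io.response = (uintptr_t)&reply;
	io.max_response_len = sizeof(reply);

	if (ioctl(bsg_fd, SG_IO, &io) < 0)
		return -1;
	return (int)reply.result;	/* 0 on success, negative errno */
}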
1594/**
1595 * lpfcdiag_loop_self_reg - obtains a remote port login id
1596 * @phba: Pointer to HBA context object
1597 * @rpi: Pointer to a remote port login id
1598 *
1599 * This function obtains a remote port login id so the diag loopback test
1600 * can send and receive its own unsolicited CT command.
1601 **/
1602static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t * rpi)
1603{
1604 LPFC_MBOXQ_t *mbox;
1605 struct lpfc_dmabuf *dmabuff;
1606 int status;
1607
1608 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1609 if (!mbox)
1610 return ENOMEM;
1611
1612 status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
1613 (uint8_t *)&phba->pport->fc_sparam, mbox, 0);
1614 if (status) {
1615 mempool_free(mbox, phba->mbox_mem_pool);
1616 return ENOMEM;
1617 }
1618
1619 dmabuff = (struct lpfc_dmabuf *) mbox->context1;
1620 mbox->context1 = NULL;
1621 status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
1622
1623 if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
1624 lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
1625 kfree(dmabuff);
1626 if (status != MBX_TIMEOUT)
1627 mempool_free(mbox, phba->mbox_mem_pool);
1628 return ENODEV;
1629 }
1630
1631 *rpi = mbox->u.mb.un.varWords[0];
1632
1633 lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
1634 kfree(dmabuff);
1635 mempool_free(mbox, phba->mbox_mem_pool);
1636 return 0;
1637}
1638
1639/**
1640 * lpfcdiag_loop_self_unreg - unregs from the rpi
1641 * @phba: Pointer to HBA context object
1642 * @rpi: Remote port login id
1643 *
1644 * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg
1645 **/
1646static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
1647{
1648 LPFC_MBOXQ_t *mbox;
1649 int status;
1650
1651 /* Allocate mboxq structure */
1652 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1653 if (mbox == NULL)
1654 return ENOMEM;
1655
1656 lpfc_unreg_login(phba, 0, rpi, mbox);
1657 status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
1658
1659 if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
1660 if (status != MBX_TIMEOUT)
1661 mempool_free(mbox, phba->mbox_mem_pool);
1662 return EIO;
1663 }
1664
1665 mempool_free(mbox, phba->mbox_mem_pool);
1666 return 0;
1667}
1668
1669/**
1670 * lpfcdiag_loop_get_xri - obtains the transmit and receive ids
1671 * @phba: Pointer to HBA context object
1672 * @rpi: Remote port login id
1673 * @txxri: Pointer to transmit exchange id
1674 * @rxxri: Pointer to response exchange id
1675 *
1676 * This function obtains the transmit and receive ids required to send
1677 * an unsolicited ct command with a payload. A special lpfc FsType and
1678 * CmdRsp are used so that the unsolicited response handler is able to
1679 * process the ct command sent on the same port.
1680 **/
1681static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
1682 uint16_t *txxri, uint16_t * rxxri)
1683{
1684 struct lpfc_bsg_event *evt;
1685 struct lpfc_iocbq *cmdiocbq, *rspiocbq;
1686 IOCB_t *cmd, *rsp;
1687 struct lpfc_dmabuf *dmabuf;
1688 struct ulp_bde64 *bpl = NULL;
1689 struct lpfc_sli_ct_request *ctreq = NULL;
1690 int ret_val = 0;
1691 unsigned long flags;
1692
1693 *txxri = 0;
1694 *rxxri = 0;
1695 evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
1696 SLI_CT_ELX_LOOPBACK);
1697 if (!evt)
1698 return ENOMEM;
1699
1700 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1701 list_add(&evt->node, &phba->ct_ev_waiters);
1702 lpfc_bsg_event_ref(evt);
1703 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1704
1705 cmdiocbq = lpfc_sli_get_iocbq(phba);
1706 rspiocbq = lpfc_sli_get_iocbq(phba);
1707
1708 dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1709 if (dmabuf) {
1710 dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
1711 INIT_LIST_HEAD(&dmabuf->list);
1712 bpl = (struct ulp_bde64 *) dmabuf->virt;
1713 memset(bpl, 0, sizeof(*bpl));
1714 ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
1715 bpl->addrHigh =
1716 le32_to_cpu(putPaddrHigh(dmabuf->phys + sizeof(*bpl)));
1717 bpl->addrLow =
1718 le32_to_cpu(putPaddrLow(dmabuf->phys + sizeof(*bpl)));
1719 bpl->tus.f.bdeFlags = 0;
1720 bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
1721 bpl->tus.w = le32_to_cpu(bpl->tus.w);
1722 }
1723
1724 if (cmdiocbq == NULL || rspiocbq == NULL ||
1725 dmabuf == NULL || bpl == NULL || ctreq == NULL) {
1726 ret_val = ENOMEM;
1727 goto err_get_xri_exit;
1728 }
1729
1730 cmd = &cmdiocbq->iocb;
1731 rsp = &rspiocbq->iocb;
1732
1733 memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
1734
1735 ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
1736 ctreq->RevisionId.bits.InId = 0;
1737 ctreq->FsType = SLI_CT_ELX_LOOPBACK;
1738 ctreq->FsSubType = 0;
1739 ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
1740 ctreq->CommandResponse.bits.Size = 0;
1741
1742
1743 cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys);
1744 cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys);
1745 cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
1746 cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl);
1747
1748 cmd->un.xseq64.w5.hcsw.Fctl = LA;
1749 cmd->un.xseq64.w5.hcsw.Dfctl = 0;
1750 cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
1751 cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
1752
1753 cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
1754 cmd->ulpBdeCount = 1;
1755 cmd->ulpLe = 1;
1756 cmd->ulpClass = CLASS3;
1757 cmd->ulpContext = rpi;
1758
1759 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
1760 cmdiocbq->vport = phba->pport;
1761
1762 ret_val = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
1763 rspiocbq,
1764 (phba->fc_ratov * 2)
1765 + LPFC_DRVR_TIMEOUT);
1766 if (ret_val)
1767 goto err_get_xri_exit;
1768
1769 *txxri = rsp->ulpContext;
1770
1771 evt->waiting = 1;
1772 evt->wait_time_stamp = jiffies;
1773 ret_val = wait_event_interruptible_timeout(
1774 evt->wq, !list_empty(&evt->events_to_see),
1775 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
1776 if (list_empty(&evt->events_to_see))
1777 ret_val = (ret_val) ? EINTR : ETIMEDOUT;
1778 else {
1779 ret_val = IOCB_SUCCESS;
1780 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1781 list_move(evt->events_to_see.prev, &evt->events_to_get);
1782 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1783 *rxxri = (list_entry(evt->events_to_get.prev,
1784 typeof(struct event_data),
1785 node))->immed_dat;
1786 }
1787 evt->waiting = 0;
1788
1789err_get_xri_exit:
1790 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1791 lpfc_bsg_event_unref(evt); /* release ref */
1792 lpfc_bsg_event_unref(evt); /* delete */
1793 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1794
1795 if (dmabuf) {
1796 if (dmabuf->virt)
1797 lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
1798 kfree(dmabuf);
1799 }
1800
1801 if (cmdiocbq && (ret_val != IOCB_TIMEDOUT))
1802 lpfc_sli_release_iocbq(phba, cmdiocbq);
1803 if (rspiocbq)
1804 lpfc_sli_release_iocbq(phba, rspiocbq);
1805 return ret_val;
1806}
1807
1808/**
1809 * diag_cmd_data_alloc - fills in a bde struct with dma buffers
1810 * @phba: Pointer to HBA context object
1811 * @bpl: Pointer to 64 bit bde structure
1812 * @size: Number of bytes to process
1813 * @nocopydata: Flag to skip copying user data into the allocated buffers
1814 *
1815 * This function allocates page size buffers and populates an lpfc_dmabufext.
1816 * If copying is allowed, the user data is later copied into the kernel
1817 * memory. The chained list of page size buffers is returned.
1818 **/
1819static struct lpfc_dmabufext *
1820diag_cmd_data_alloc(struct lpfc_hba *phba,
1821 struct ulp_bde64 *bpl, uint32_t size,
1822 int nocopydata)
1823{
1824 struct lpfc_dmabufext *mlist = NULL;
1825 struct lpfc_dmabufext *dmp;
1826 int cnt, offset = 0, i = 0;
1827 struct pci_dev *pcidev;
1828
1829 pcidev = phba->pcidev;
1830
1831 while (size) {
1832 /* We get chunks of 4K */
1833 if (size > BUF_SZ_4K)
1834 cnt = BUF_SZ_4K;
1835 else
1836 cnt = size;
1837
1838 /* allocate struct lpfc_dmabufext buffer header */
1839 dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL);
1840 if (!dmp)
1841 goto out;
1842
1843 INIT_LIST_HEAD(&dmp->dma.list);
1844
1845 /* Queue it to a linked list */
1846 if (mlist)
1847 list_add_tail(&dmp->dma.list, &mlist->dma.list);
1848 else
1849 mlist = dmp;
1850
1851 /* allocate buffer */
1852 dmp->dma.virt = dma_alloc_coherent(&pcidev->dev,
1853 cnt,
1854 &(dmp->dma.phys),
1855 GFP_KERNEL);
1856
1857 if (!dmp->dma.virt)
1858 goto out;
1859
1860 dmp->size = cnt;
1861
1862 if (nocopydata) {
1863 bpl->tus.f.bdeFlags = 0;
1864 pci_dma_sync_single_for_device(phba->pcidev,
1865 dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
1866
1867 } else {
1868 memset((uint8_t *)dmp->dma.virt, 0, cnt);
1869 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1870 }
1871
1872 /* build buffer ptr list for IOCB */
1873 bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys));
1874 bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys));
1875 bpl->tus.f.bdeSize = (ushort) cnt;
1876 bpl->tus.w = le32_to_cpu(bpl->tus.w);
1877 bpl++;
1878
1879 i++;
1880 offset += cnt;
1881 size -= cnt;
1882 }
1883
1884 mlist->flag = i;
1885 return mlist;
1886out:
1887 diag_cmd_data_free(phba, mlist);
1888 return NULL;
1889}
1890
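The chunking loop above is easy to sanity-check in isolation: the requested size is walked in BUF_SZ_4K steps and the resulting bde count ends up in mlist->flag. A standalone arithmetic sketch (assuming BUF_SZ_4K is 4096):

#include <stdio.h>

int main(void)
{
	unsigned int size = 10000;	/* example request */
	unsigned int cnt, nbufs = 0;

	while (size) {
		cnt = size > 4096 ? 4096 : size;	/* BUF_SZ_4K chunks */
		printf("buffer %u: %u bytes\n", nbufs++, cnt);
		size -= cnt;
	}
	printf("bde entries (mlist->flag): %u\n", nbufs);	/* prints 3 */
	return 0;
}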
1891/**
1892 * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
1893 * @phba: Pointer to HBA context object
1894 * @rxxri: Receive exchange id
1895 * @len: Number of data bytes
1896 *
1897 * This function allocates and posts a data buffer of sufficient size to
1898 * receive an unsolicited CT command.
1899 **/
1900static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
1901 size_t len)
1902{
1903 struct lpfc_sli *psli = &phba->sli;
1904 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
1905 struct lpfc_iocbq *cmdiocbq;
1906 IOCB_t *cmd = NULL;
1907 struct list_head head, *curr, *next;
1908 struct lpfc_dmabuf *rxbmp;
1909 struct lpfc_dmabuf *dmp;
1910 struct lpfc_dmabuf *mp[2] = {NULL, NULL};
1911 struct ulp_bde64 *rxbpl = NULL;
1912 uint32_t num_bde;
1913 struct lpfc_dmabufext *rxbuffer = NULL;
1914 int ret_val = 0;
1915 int i = 0;
1916
1917 cmdiocbq = lpfc_sli_get_iocbq(phba);
1918 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1919 if (rxbmp != NULL) {
1920 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
1921 INIT_LIST_HEAD(&rxbmp->list);
1922 rxbpl = (struct ulp_bde64 *) rxbmp->virt;
1923 rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
1924 }
1925
1926 if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) {
1927 ret_val = ENOMEM;
1928 goto err_post_rxbufs_exit;
1929 }
1930
1931 /* Queue buffers for the receive exchange */
1932 num_bde = (uint32_t)rxbuffer->flag;
1933 dmp = &rxbuffer->dma;
1934
1935 cmd = &cmdiocbq->iocb;
1936 i = 0;
1937
1938 INIT_LIST_HEAD(&head);
1939 list_add_tail(&head, &dmp->list);
1940 list_for_each_safe(curr, next, &head) {
1941 mp[i] = list_entry(curr, struct lpfc_dmabuf, list);
1942 list_del(curr);
1943
1944 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
1945 mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba);
1946 cmd->un.quexri64cx.buff.bde.addrHigh =
1947 putPaddrHigh(mp[i]->phys);
1948 cmd->un.quexri64cx.buff.bde.addrLow =
1949 putPaddrLow(mp[i]->phys);
1950 cmd->un.quexri64cx.buff.bde.tus.f.bdeSize =
1951 ((struct lpfc_dmabufext *)mp[i])->size;
1952 cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag;
1953 cmd->ulpCommand = CMD_QUE_XRI64_CX;
1954 cmd->ulpPU = 0;
1955 cmd->ulpLe = 1;
1956 cmd->ulpBdeCount = 1;
1957 cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0;
1958
1959 } else {
1960 cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys);
1961 cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys);
1962 cmd->un.cont64[i].tus.f.bdeSize =
1963 ((struct lpfc_dmabufext *)mp[i])->size;
1964 cmd->ulpBdeCount = ++i;
1965
1966 if ((--num_bde > 0) && (i < 2))
1967 continue;
1968
1969 cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX;
1970 cmd->ulpLe = 1;
1971 }
1972
1973 cmd->ulpClass = CLASS3;
1974 cmd->ulpContext = rxxri;
1975
1976 ret_val = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
1977
1978 if (ret_val == IOCB_ERROR) {
1979 diag_cmd_data_free(phba,
1980 (struct lpfc_dmabufext *)mp[0]);
1981 if (mp[1])
1982 diag_cmd_data_free(phba,
1983 (struct lpfc_dmabufext *)mp[1]);
1984 dmp = list_entry(next, struct lpfc_dmabuf, list);
1985 ret_val = EIO;
1986 goto err_post_rxbufs_exit;
1987 }
1988
1989 lpfc_sli_ringpostbuf_put(phba, pring, mp[0]);
1990 if (mp[1]) {
1991 lpfc_sli_ringpostbuf_put(phba, pring, mp[1]);
1992 mp[1] = NULL;
1993 }
1994
1995 /* The iocb was freed by lpfc_sli_issue_iocb */
1996 cmdiocbq = lpfc_sli_get_iocbq(phba);
1997 if (!cmdiocbq) {
1998 dmp = list_entry(next, struct lpfc_dmabuf, list);
1999 ret_val = EIO;
2000 goto err_post_rxbufs_exit;
2001 }
2002
2003 cmd = &cmdiocbq->iocb;
2004 i = 0;
2005 }
2006 list_del(&head);
2007
2008err_post_rxbufs_exit:
2009
2010 if (rxbmp) {
2011 if (rxbmp->virt)
2012 lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
2013 kfree(rxbmp);
2014 }
2015
2016 if (cmdiocbq)
2017 lpfc_sli_release_iocbq(phba, cmdiocbq);
2018 return ret_val;
2019}
2020
2021/**
2022 * lpfc_bsg_diag_test - with a port in loopback issues a Ct cmd to itself
2023 * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
2024 *
2025 * This function receives a user data buffer to be transmitted and received
2026 * on the same port. The link must be up and in loopback mode before this
2027 * function is called.
2028 * 1. A kernel buffer is allocated to copy the user data into.
2029 * 2. The port registers with "itself".
2030 * 3. The transmit and receive exchange ids are obtained.
2031 * 4. The receive exchange id is posted.
2032 * 5. A new els loopback event is created.
2033 * 6. The command and response iocbs are allocated.
2034 * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to loopback.
2035 *
2036 * This function is meant to be called n times while the port is in loopback,
2037 * so it is the app's responsibility to issue a reset to take the port out
2038 * of loopback mode.
2039 **/
2040static int
2041lpfc_bsg_diag_test(struct fc_bsg_job *job)
2042{
2043 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
2044 struct lpfc_hba *phba = vport->phba;
2045 struct diag_mode_test *diag_mode;
2046 struct lpfc_bsg_event *evt;
2047 struct event_data *evdat;
2048 struct lpfc_sli *psli = &phba->sli;
2049 uint32_t size;
2050 uint32_t full_size;
2051 size_t segment_len = 0, segment_offset = 0, current_offset = 0;
2052 uint16_t rpi;
2053 struct lpfc_iocbq *cmdiocbq, *rspiocbq;
2054 IOCB_t *cmd, *rsp;
2055 struct lpfc_sli_ct_request *ctreq;
2056 struct lpfc_dmabuf *txbmp;
2057 struct ulp_bde64 *txbpl = NULL;
2058 struct lpfc_dmabufext *txbuffer = NULL;
2059 struct list_head head;
2060 struct lpfc_dmabuf *curr;
2061 uint16_t txxri, rxxri;
2062 uint32_t num_bde;
2063 uint8_t *ptr = NULL, *rx_databuf = NULL;
2064 int rc = 0;
2065 unsigned long flags;
2066 void *dataout = NULL;
2067 uint32_t total_mem;
2068
2069 /* in case no data is returned return just the return code */
2070 job->reply->reply_payload_rcv_len = 0;
2071
2072 if (job->request_len <
2073 sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
2074 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2075 "2739 Received DIAG TEST request below minimum "
2076 "size\n");
2077 rc = -EINVAL;
2078 goto loopback_test_exit;
2079 }
2080
2081 if (job->request_payload.payload_len !=
2082 job->reply_payload.payload_len) {
2083 rc = -EINVAL;
2084 goto loopback_test_exit;
2085 }
2086
2087 diag_mode = (struct diag_mode_test *)
2088 job->request->rqst_data.h_vendor.vendor_cmd;
2089
2090 if ((phba->link_state == LPFC_HBA_ERROR) ||
2091 (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
2092 (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
2093 rc = -EACCES;
2094 goto loopback_test_exit;
2095 }
2096
2097 if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) {
2098 rc = -EACCES;
2099 goto loopback_test_exit;
2100 }
2101
2102 size = job->request_payload.payload_len;
2103 full_size = size + ELX_LOOPBACK_HEADER_SZ; /* plus the header */
2104
2105 if ((size == 0) || (size > 80 * BUF_SZ_4K)) {
2106 rc = -ERANGE;
2107 goto loopback_test_exit;
2108 }
2109
2110 if (size >= BUF_SZ_4K) {
2111 /*
2112 * Allocate memory for ioctl data. If buffer is bigger than 64k,
2113 * then we allocate 64k and re-use that buffer over and over to
2114 * xfer the whole block. This is because Linux kernel has a
2115 * problem allocating more than 120k of kernel space memory. Saw
2116 * problem with GET_FCPTARGETMAPPING...
2117 */
2118 if (size <= (64 * 1024))
2119 total_mem = size;
2120 else
2121 total_mem = 64 * 1024;
2122 } else
2123 /* Allocate memory for ioctl data */
2124 total_mem = BUF_SZ_4K;
2125
2126 dataout = kmalloc(total_mem, GFP_KERNEL);
2127 if (dataout == NULL) {
2128 rc = -ENOMEM;
2129 goto loopback_test_exit;
2130 }
2131
2132 ptr = dataout;
2133 ptr += ELX_LOOPBACK_HEADER_SZ;
2134 sg_copy_to_buffer(job->request_payload.sg_list,
2135 job->request_payload.sg_cnt,
2136 ptr, size);
2137
2138 rc = lpfcdiag_loop_self_reg(phba, &rpi);
2139 if (rc) {
2140 rc = -ENOMEM;
2141 goto loopback_test_exit;
2142 }
2143
2144 rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
2145 if (rc) {
2146 lpfcdiag_loop_self_unreg(phba, rpi);
2147 rc = -ENOMEM;
2148 goto loopback_test_exit;
2149 }
2150
2151 rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
2152 if (rc) {
2153 lpfcdiag_loop_self_unreg(phba, rpi);
2154 rc = -ENOMEM;
2155 goto loopback_test_exit;
2156 }
2157
2158 evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
2159 SLI_CT_ELX_LOOPBACK);
2160 if (!evt) {
2161 lpfcdiag_loop_self_unreg(phba, rpi);
2162 rc = -ENOMEM;
2163 goto loopback_test_exit;
2164 }
2165
2166 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2167 list_add(&evt->node, &phba->ct_ev_waiters);
2168 lpfc_bsg_event_ref(evt);
2169 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2170
2171 cmdiocbq = lpfc_sli_get_iocbq(phba);
2172 rspiocbq = lpfc_sli_get_iocbq(phba);
2173 txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2174
2175 if (txbmp) {
2176 txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys);
2177 INIT_LIST_HEAD(&txbmp->list);
2178 txbpl = (struct ulp_bde64 *) txbmp->virt;
2179 if (txbpl)
2180 txbuffer = diag_cmd_data_alloc(phba,
2181 txbpl, full_size, 0);
2182 }
2183
2184 if (!cmdiocbq || !rspiocbq || !txbmp || !txbpl || !txbuffer) {
2185 rc = -ENOMEM;
2186 goto err_loopback_test_exit;
2187 }
2188
2189 cmd = &cmdiocbq->iocb;
2190 rsp = &rspiocbq->iocb;
2191
2192 INIT_LIST_HEAD(&head);
2193 list_add_tail(&head, &txbuffer->dma.list);
2194 list_for_each_entry(curr, &head, list) {
2195 segment_len = ((struct lpfc_dmabufext *)curr)->size;
2196 if (current_offset == 0) {
2197 ctreq = curr->virt;
2198 memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
2199 ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
2200 ctreq->RevisionId.bits.InId = 0;
2201 ctreq->FsType = SLI_CT_ELX_LOOPBACK;
2202 ctreq->FsSubType = 0;
2203 ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA;
2204 ctreq->CommandResponse.bits.Size = size;
2205 segment_offset = ELX_LOOPBACK_HEADER_SZ;
2206 } else
2207 segment_offset = 0;
2208
2209 BUG_ON(segment_offset >= segment_len);
2210 memcpy(curr->virt + segment_offset,
2211 ptr + current_offset,
2212 segment_len - segment_offset);
2213
2214 current_offset += segment_len - segment_offset;
2215 BUG_ON(current_offset > size);
2216 }
2217 list_del(&head);
2218
2219 /* Build the XMIT_SEQUENCE iocb */
2220
2221 num_bde = (uint32_t)txbuffer->flag;
2222
2223 cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
2224 cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys);
2225 cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
2226 cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64));
2227
2228 cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
2229 cmd->un.xseq64.w5.hcsw.Dfctl = 0;
2230 cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
2231 cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
2232
2233 cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
2234 cmd->ulpBdeCount = 1;
2235 cmd->ulpLe = 1;
2236 cmd->ulpClass = CLASS3;
2237 cmd->ulpContext = txxri;
2238
2239 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
2240 cmdiocbq->vport = phba->pport;
2241
2242 rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq,
2243 (phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT);
2244
2245 if ((rc != IOCB_SUCCESS) || (rsp->ulpStatus != IOCB_SUCCESS)) {
2246 rc = -EIO;
2247 goto err_loopback_test_exit;
2248 }
2249
2250 evt->waiting = 1;
2251 rc = wait_event_interruptible_timeout(
2252 evt->wq, !list_empty(&evt->events_to_see),
2253 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
2254 evt->waiting = 0;
2255 if (list_empty(&evt->events_to_see))
2256 rc = (rc) ? -EINTR : -ETIMEDOUT;
2257 else {
2258 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2259 list_move(evt->events_to_see.prev, &evt->events_to_get);
2260 evdat = list_entry(evt->events_to_get.prev,
2261 typeof(*evdat), node);
2262 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2263 rx_databuf = evdat->data;
2264 if (evdat->len != full_size) {
2265 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
2266 "1603 Loopback test did not receive expected "
2267 "data length. actual length 0x%x expected "
2268 "length 0x%x\n",
2269 evdat->len, full_size);
2270 rc = -EIO;
2271 } else if (rx_databuf == NULL)
2272 rc = -EIO;
2273 else {
2274 rc = IOCB_SUCCESS;
2275 /* skip over elx loopback header */
2276 rx_databuf += ELX_LOOPBACK_HEADER_SZ;
2277 job->reply->reply_payload_rcv_len =
2278 sg_copy_from_buffer(job->reply_payload.sg_list,
2279 job->reply_payload.sg_cnt,
2280 rx_databuf, size);
2281 job->reply->reply_payload_rcv_len = size;
2282 }
2283 }
2284
2285err_loopback_test_exit:
2286 lpfcdiag_loop_self_unreg(phba, rpi);
2287
2288 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2289 lpfc_bsg_event_unref(evt); /* release ref */
2290 lpfc_bsg_event_unref(evt); /* delete */
2291 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2292
2293 if (cmdiocbq != NULL)
2294 lpfc_sli_release_iocbq(phba, cmdiocbq);
2295
2296 if (rspiocbq != NULL)
2297 lpfc_sli_release_iocbq(phba, rspiocbq);
2298
2299 if (txbmp != NULL) {
2300 if (txbpl != NULL) {
2301 if (txbuffer != NULL)
2302 diag_cmd_data_free(phba, txbuffer);
2303 lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys);
2304 }
2305 kfree(txbmp);
2306 }
2307
2308loopback_test_exit:
2309 kfree(dataout);
2310 /* make error code available to userspace */
2311 job->reply->result = rc;
2312 job->dd_data = NULL;
2313 /* complete the job back to userspace if no error */
2314 if (rc == 0)
2315 job->job_done(job);
2316 return rc;
2317}
2318
2319/**
2320 * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command
2321 * @job: GET_DFC_REV fc_bsg_job
2322 **/
2323static int
2324lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
2325{
2326 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
2327 struct lpfc_hba *phba = vport->phba;
2328 struct get_mgmt_rev *event_req;
2329 struct get_mgmt_rev_reply *event_reply;
2330 int rc = 0;
2331
2332 if (job->request_len <
2333 sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) {
2334 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2335 "2740 Received GET_DFC_REV request below "
2336 "minimum size\n");
2337 rc = -EINVAL;
2338 goto job_error;
2339 }
2340
2341 event_req = (struct get_mgmt_rev *)
2342 job->request->rqst_data.h_vendor.vendor_cmd;
2343
2344 event_reply = (struct get_mgmt_rev_reply *)
2345 job->reply->reply_data.vendor_reply.vendor_rsp;
2346
2347 if (job->reply_len <
2348 sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) {
2349 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2350 "2741 Received GET_DFC_REV reply below "
2351 "minimum size\n");
2352 rc = -EINVAL;
2353 goto job_error;
2354 }
2355
2356 event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
2357 event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;
2358job_error:
2359 job->reply->result = rc;
2360 if (rc == 0)
2361 job->job_done(job);
2362 return rc;
2363}
2364
2365/**
2366 * lpfc_bsg_wake_mbox_wait - lpfc_bsg_issue_mbox mbox completion handler
2367 * @phba: Pointer to HBA context object.
2368 * @pmboxq: Pointer to mailbox command.
2369 *
2370 * This is the completion handler function for mailbox commands issued
2371 * from the lpfc_bsg_issue_mbox function. This function is called by the
2372 * mailbox event handler function with no lock held. This function copies
2373 * the mailbox completion into the bsg job's reply and then completes the
2374 * job tracked by the context1 field of the mailbox.
2375 **/
2376void
2377lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2378{
2379 struct bsg_job_data *dd_data;
2380 MAILBOX_t *pmb;
2381 MAILBOX_t *mb;
2382 struct fc_bsg_job *job;
2383 uint32_t size;
2384 unsigned long flags;
2385
2386 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2387 dd_data = pmboxq->context1;
2388 if (!dd_data) {
2389 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2390 return;
2391 }
2392
2393 pmb = &dd_data->context_un.mbox.pmboxq->u.mb;
2394 mb = dd_data->context_un.mbox.mb;
2395 job = dd_data->context_un.mbox.set_job;
2396 memcpy(mb, pmb, sizeof(*pmb));
2397 size = job->request_payload.payload_len;
2398 job->reply->reply_payload_rcv_len =
2399 sg_copy_from_buffer(job->reply_payload.sg_list,
2400 job->reply_payload.sg_cnt,
2401 mb, size);
2402 job->reply->result = 0;
2403 dd_data->context_un.mbox.set_job = NULL;
2404 job->dd_data = NULL;
2405 job->job_done(job);
2406 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2407 mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
2408 kfree(mb);
2409 kfree(dd_data);
2410 return;
2411}
2412
2413/**
2414 * lpfc_bsg_check_cmd_access - test for a supported mailbox command
2415 * @phba: Pointer to HBA context object.
2416 * @mb: Pointer to a mailbox object.
2417 * @vport: Pointer to a vport object.
2418 *
2419 * Some commands require the port to be offline; some may not be issued
2420 * from the application at all.
2421 **/
2422static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
2423 MAILBOX_t *mb, struct lpfc_vport *vport)
2424{
2425 /* return negative error values for bsg job */
2426 switch (mb->mbxCommand) {
2427 /* Offline only */
2428 case MBX_INIT_LINK:
2429 case MBX_DOWN_LINK:
2430 case MBX_CONFIG_LINK:
2431 case MBX_CONFIG_RING:
2432 case MBX_RESET_RING:
2433 case MBX_UNREG_LOGIN:
2434 case MBX_CLEAR_LA:
2435 case MBX_DUMP_CONTEXT:
2436 case MBX_RUN_DIAGS:
2437 case MBX_RESTART:
2438 case MBX_SET_MASK:
2439 if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
2440 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2441 "2743 Command 0x%x is illegal in on-line "
2442 "state\n",
2443 mb->mbxCommand);
2444 return -EPERM;
2445 }
2446 case MBX_WRITE_NV:
2447 case MBX_WRITE_VPARMS:
2448 case MBX_LOAD_SM:
2449 case MBX_READ_NV:
2450 case MBX_READ_CONFIG:
2451 case MBX_READ_RCONFIG:
2452 case MBX_READ_STATUS:
2453 case MBX_READ_XRI:
2454 case MBX_READ_REV:
2455 case MBX_READ_LNK_STAT:
2456 case MBX_DUMP_MEMORY:
2457 case MBX_DOWN_LOAD:
2458 case MBX_UPDATE_CFG:
2459 case MBX_KILL_BOARD:
2460 case MBX_LOAD_AREA:
2461 case MBX_LOAD_EXP_ROM:
2462 case MBX_BEACON:
2463 case MBX_DEL_LD_ENTRY:
2464 case MBX_SET_DEBUG:
2465 case MBX_WRITE_WWN:
2466 case MBX_SLI4_CONFIG:
2467 case MBX_READ_EVENT_LOG_STATUS:
2468 case MBX_WRITE_EVENT_LOG:
2469 case MBX_PORT_CAPABILITIES:
2470 case MBX_PORT_IOV_CONTROL:
2471 break;
2472 case MBX_SET_VARIABLE:
2473 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2474 "1226 mbox: set_variable 0x%x, 0x%x\n",
2475 mb->un.varWords[0],
2476 mb->un.varWords[1]);
2477 if ((mb->un.varWords[0] == SETVAR_MLOMNT)
2478 && (mb->un.varWords[1] == 1)) {
2479 phba->wait_4_mlo_maint_flg = 1;
2480 } else if (mb->un.varWords[0] == SETVAR_MLORST) {
2481 phba->link_flag &= ~LS_LOOPBACK_MODE;
2482 phba->fc_topology = TOPOLOGY_PT_PT;
2483 }
2484 break;
2485 case MBX_RUN_BIU_DIAG64:
2486 case MBX_READ_EVENT_LOG:
2487 case MBX_READ_SPARM64:
2488 case MBX_READ_LA:
2489 case MBX_READ_LA64:
2490 case MBX_REG_LOGIN:
2491 case MBX_REG_LOGIN64:
2492 case MBX_CONFIG_PORT:
2493 case MBX_RUN_BIU_DIAG:
2494 default:
2495 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2496 "2742 Unknown Command 0x%x\n",
2497 mb->mbxCommand);
2498 return -EPERM;
2499 }
2500
2501 return 0; /* ok */
2502}
2503
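Note the deliberate fall-through in the switch above: the offline-only cases are rejected while the port is on-line and otherwise drop straight into the always-allowed group beneath them. Stripped to its shape (illustrative names, not driver symbols):

#include <errno.h>

enum demo_cmd { NEEDS_OFFLINE, ALWAYS_ALLOWED };

static int demo_check_access(enum demo_cmd cmd, int online)
{
	switch (cmd) {
	case NEEDS_OFFLINE:
		if (online)
			return -EPERM;	/* illegal in on-line state */
		/* fall through: permitted once the port is offline */
	case ALWAYS_ALLOWED:
		break;
	default:
		return -EPERM;		/* unknown commands are rejected */
	}
	return 0;
}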
2504/**
2505 * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
2506 * @phba: Pointer to HBA context object.
2507 * @job: Pointer to the fc_bsg_job object.
2508 * @vport: Pointer to a vport object.
2509 *
2510 * Allocate a tracking object, mailbox command memory, get a mailbox
2511 * from the mailbox pool, copy the caller mailbox command.
2512 *
2513 * If the port is offline or the SLI is not active, we need to poll for
2514 * the command (the port is being reset) and complete the job; otherwise
2515 * issue the mailbox command and let our completion handler finish it.
2516 **/
2517static uint32_t
2518lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2519 struct lpfc_vport *vport)
2520{
2521 LPFC_MBOXQ_t *pmboxq;
2522 MAILBOX_t *pmb;
2523 MAILBOX_t *mb;
2524 struct bsg_job_data *dd_data;
2525 uint32_t size;
2526 int rc = 0;
2527
2528 /* allocate our bsg tracking structure */
2529 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
2530 if (!dd_data) {
2531 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2532 "2727 Failed allocation of dd_data\n");
2533 return -ENOMEM;
2534 }
2535
2536 mb = kzalloc(PAGE_SIZE, GFP_KERNEL);
2537 if (!mb) {
2538 kfree(dd_data);
2539 return -ENOMEM;
2540 }
2541
2542 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2543 if (!pmboxq) {
2544 kfree(dd_data);
2545 kfree(mb);
2546 return -ENOMEM;
2547 }
2548
2549 size = job->request_payload.payload_len;
2550 job->reply->reply_payload_rcv_len =
2551 sg_copy_to_buffer(job->request_payload.sg_list,
2552 job->request_payload.sg_cnt,
2553 mb, size);
2554
2555 rc = lpfc_bsg_check_cmd_access(phba, mb, vport);
2556 if (rc != 0) {
2557 kfree(dd_data);
2558 kfree(mb);
2559 mempool_free(pmboxq, phba->mbox_mem_pool);
2560 return rc; /* must be negative */
2561 }
2562
2563 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
2564 pmb = &pmboxq->u.mb;
2565 memcpy(pmb, mb, sizeof(*pmb));
2566 pmb->mbxOwner = OWN_HOST;
2567 pmboxq->context1 = NULL;
2568 pmboxq->vport = vport;
2569
2570 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
2571 (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
2572 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
2573 if (rc != MBX_SUCCESS) {
2574 if (rc != MBX_TIMEOUT) {
2575 kfree(dd_data);
2576 kfree(mb);
2577 mempool_free(pmboxq, phba->mbox_mem_pool);
2578 }
2579 return (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
2580 }
2581
2582 memcpy(mb, pmb, sizeof(*pmb));
2583 job->reply->reply_payload_rcv_len =
2584 sg_copy_from_buffer(job->reply_payload.sg_list,
2585 job->reply_payload.sg_cnt,
2586 mb, size);
2587 kfree(dd_data);
2588 kfree(mb);
2589 mempool_free(pmboxq, phba->mbox_mem_pool);
2590 /* not waiting mbox already done */
2591 return 0;
2592 }
2593
2594	/* setup wake call as the mailbox callback */
2595	pmboxq->mbox_cmpl = lpfc_bsg_wake_mbox_wait;
2596	/* setup context field to pass dd_data to the completion handler */
2597 pmboxq->context1 = dd_data;
2598 dd_data->type = TYPE_MBOX;
2599 dd_data->context_un.mbox.pmboxq = pmboxq;
2600 dd_data->context_un.mbox.mb = mb;
2601 dd_data->context_un.mbox.set_job = job;
2602 job->dd_data = dd_data;
2603 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
2604 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
2605 kfree(dd_data);
2606 kfree(mb);
2607 mempool_free(pmboxq, phba->mbox_mem_pool);
2608 return -EIO;
2609 }
2610
2611 return 1;
2612}
2613
2614/**
2615 * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command
2616 * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
2617 **/
2618static int
2619lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
2620{
2621 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
2622 struct lpfc_hba *phba = vport->phba;
2623 int rc = 0;
2624
2625 /* in case no data is transferred */
2626 job->reply->reply_payload_rcv_len = 0;
2627 if (job->request_len <
2628 sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
2629 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2630 "2737 Received MBOX_REQ request below "
2631 "minimum size\n");
2632 rc = -EINVAL;
2633 goto job_error;
2634 }
2635
2636 if (job->request_payload.payload_len != PAGE_SIZE) {
2637 rc = -EINVAL;
2638 goto job_error;
2639 }
2640
2641 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
2642 rc = -EAGAIN;
2643 goto job_error;
2644 }
2645
2646 rc = lpfc_bsg_issue_mbox(phba, job, vport);
2647
2648job_error:
2649 if (rc == 0) {
2650 /* job done */
2651 job->reply->result = 0;
2652 job->dd_data = NULL;
2653 job->job_done(job);
2654 } else if (rc == 1)
2655 /* job submitted, will complete later*/
2656 rc = 0; /* return zero, no error */
2657 else {
2658 /* some error occurred */
2659 job->reply->result = rc;
2660 job->dd_data = NULL;
2661 }
2662
2663 return rc;
2664}
2665
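The tri-state return from lpfc_bsg_issue_mbox drives the three branches above: 0 means the polled path already finished the mailbox, 1 means the command is in flight and the completion handler will call job_done later, and a negative value is an immediate error. A minimal sketch of this submit/complete-later convention (placeholder names, not driver symbols):

#include <stdio.h>

struct demo_job { int result; };

static void demo_job_done(struct demo_job *job)
{
	printf("job completed, result %d\n", job->result);
}

/* pretend submit: 0 = done now, 1 = pending, <0 = error */
static int demo_submit(void) { return 1; }

static int demo_handle(struct demo_job *job)
{
	int rc = demo_submit();

	if (rc == 0) {			/* finished synchronously */
		job->result = 0;
		demo_job_done(job);
	} else if (rc == 1) {		/* handler completes it later */
		rc = 0;			/* report success to the transport */
	} else {			/* error: report, no job_done here */
		job->result = rc;
	}
	return rc;
}

int main(void)
{
	struct demo_job job = { 0 };
	return demo_handle(&job);
}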
2666/**
2667 * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler
2668 * @phba: Pointer to HBA context object.
2669 * @cmdiocbq: Pointer to command iocb.
2670 * @rspiocbq: Pointer to response iocb.
2671 *
2672 * This function is the completion handler for iocbs issued using
2673 * lpfc_menlo_cmd function. This function is called by the
2674 * ring event handler function without any lock held. This function
2675 * can be called from both worker thread context and interrupt
2676 * context. This function also can be called from another thread which
2677 * cleans up the SLI layer objects.
2678 * This function copies the completion status of the response iocb
2679 * into the bsg job's reply, releases the resources tracked by the
2680 * dd_data object and then completes the job back to the userspace
2681 * caller.
2682 **/
2683static void
2684lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
2685 struct lpfc_iocbq *cmdiocbq,
2686 struct lpfc_iocbq *rspiocbq)
2687{
2688 struct bsg_job_data *dd_data;
2689 struct fc_bsg_job *job;
2690 IOCB_t *rsp;
2691 struct lpfc_dmabuf *bmp;
2692 struct lpfc_bsg_menlo *menlo;
2693 unsigned long flags;
2694 struct menlo_response *menlo_resp;
2695 int rc = 0;
2696
2697 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2698 dd_data = cmdiocbq->context1;
2699 if (!dd_data) {
2700 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2701 return;
2702 }
2703
2704 menlo = &dd_data->context_un.menlo;
2705 job = menlo->set_job;
2706 job->dd_data = NULL; /* so timeout handler does not reply */
2707
2708 spin_lock_irqsave(&phba->hbalock, flags);
2709 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
2710 if (cmdiocbq->context2 && rspiocbq)
2711 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
2712 &rspiocbq->iocb, sizeof(IOCB_t));
2713 spin_unlock_irqrestore(&phba->hbalock, flags);
2714
2715 bmp = menlo->bmp;
2716 rspiocbq = menlo->rspiocbq;
2717 rsp = &rspiocbq->iocb;
2718
2719 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
2720 job->request_payload.sg_cnt, DMA_TO_DEVICE);
2721 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
2722 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2723
2724	/* Always return the xri; it is used in the case of a menlo
2725	 * download to allow the data to be sent as a continuation
2726	 * of the exchange.
2727 */
2728 menlo_resp = (struct menlo_response *)
2729 job->reply->reply_data.vendor_reply.vendor_rsp;
2730 menlo_resp->xri = rsp->ulpContext;
2731 if (rsp->ulpStatus) {
2732 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
2733 switch (rsp->un.ulpWord[4] & 0xff) {
2734 case IOERR_SEQUENCE_TIMEOUT:
2735 rc = -ETIMEDOUT;
2736 break;
2737 case IOERR_INVALID_RPI:
2738 rc = -EFAULT;
2739 break;
2740 default:
2741 rc = -EACCES;
2742 break;
2743 }
2744 } else
2745 rc = -EACCES;
2746 } else
2747 job->reply->reply_payload_rcv_len =
2748 rsp->un.genreq64.bdl.bdeSize;
2749
2750 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
2751 lpfc_sli_release_iocbq(phba, rspiocbq);
2752 lpfc_sli_release_iocbq(phba, cmdiocbq);
2753 kfree(bmp);
2754 kfree(dd_data);
2755 /* make error code available to userspace */
2756 job->reply->result = rc;
2757 /* complete the job back to userspace */
2758 job->job_done(job);
2759 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2760 return;
2761}
2762
2763/**
2764 * lpfc_menlo_cmd - send an ioctl for menlo hardware
2765 * @job: fc_bsg_job to handle
2766 *
2767 * This function issues a gen request 64 CR ioctl for all menlo cmd requests;
2768 * all the command completions will return the xri for the command.
2769 * For menlo data requests a gen request 64 CX is used to continue the exchange
2770 * supplied in the menlo request header xri field.
2771 **/
2772static int
2773lpfc_menlo_cmd(struct fc_bsg_job *job)
2774{
2775 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
2776 struct lpfc_hba *phba = vport->phba;
2777 struct lpfc_iocbq *cmdiocbq, *rspiocbq;
2778 IOCB_t *cmd, *rsp;
2779 int rc = 0;
2780 struct menlo_command *menlo_cmd;
2781 struct menlo_response *menlo_resp;
2782 struct lpfc_dmabuf *bmp = NULL;
2783 int request_nseg;
2784 int reply_nseg;
2785 struct scatterlist *sgel = NULL;
2786 int numbde;
2787 dma_addr_t busaddr;
2788 struct bsg_job_data *dd_data;
2789 struct ulp_bde64 *bpl = NULL;
2790
2791 /* in case no data is returned return just the return code */
2792 job->reply->reply_payload_rcv_len = 0;
2793
2794 if (job->request_len <
2795 sizeof(struct fc_bsg_request) +
2796 sizeof(struct menlo_command)) {
2797 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2798 "2784 Received MENLO_CMD request below "
2799 "minimum size\n");
2800 rc = -ERANGE;
2801 goto no_dd_data;
2802 }
2803
2804 if (job->reply_len <
2805 sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) {
2806 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2807 "2785 Received MENLO_CMD reply below "
2808 "minimum size\n");
2809 rc = -ERANGE;
2810 goto no_dd_data;
2811 }
2812
2813 if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) {
2814 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2815 "2786 Adapter does not support menlo "
2816 "commands\n");
2817 rc = -EPERM;
2818 goto no_dd_data;
2819 }
2820
2821 menlo_cmd = (struct menlo_command *)
2822 job->request->rqst_data.h_vendor.vendor_cmd;
2823
2824 menlo_resp = (struct menlo_response *)
2825 job->reply->reply_data.vendor_reply.vendor_rsp;
2826
2827 /* allocate our bsg tracking structure */
2828 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
2829 if (!dd_data) {
2830 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2831 "2787 Failed allocation of dd_data\n");
2832 rc = -ENOMEM;
2833 goto no_dd_data;
2834 }
2835
2836 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2837 if (!bmp) {
2838 rc = -ENOMEM;
2839 goto free_dd;
2840 }
2841
2842 cmdiocbq = lpfc_sli_get_iocbq(phba);
2843 if (!cmdiocbq) {
2844 rc = -ENOMEM;
2845 goto free_bmp;
2846 }
2847
2848 rspiocbq = lpfc_sli_get_iocbq(phba);
2849 if (!rspiocbq) {
2850 rc = -ENOMEM;
2851 goto free_cmdiocbq;
2852 }
2853
2854 rsp = &rspiocbq->iocb;
2855
2856 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
2857 if (!bmp->virt) {
2858 rc = -ENOMEM;
2859 goto free_rspiocbq;
2860 }
2861
2862 INIT_LIST_HEAD(&bmp->list);
2863 bpl = (struct ulp_bde64 *) bmp->virt;
2864 request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
2865 job->request_payload.sg_cnt, DMA_TO_DEVICE);
2866 for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
2867 busaddr = sg_dma_address(sgel);
2868 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2869 bpl->tus.f.bdeSize = sg_dma_len(sgel);
2870 bpl->tus.w = cpu_to_le32(bpl->tus.w);
2871 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
2872 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
2873 bpl++;
2874 }
2875
2876 reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
2877 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2878 for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
2879 busaddr = sg_dma_address(sgel);
2880 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
2881 bpl->tus.f.bdeSize = sg_dma_len(sgel);
2882 bpl->tus.w = cpu_to_le32(bpl->tus.w);
2883 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
2884 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
2885 bpl++;
2886 }
2887
2888 cmd = &cmdiocbq->iocb;
2889 cmd->un.genreq64.bdl.ulpIoTag32 = 0;
2890 cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
2891 cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
2892 cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
2893 cmd->un.genreq64.bdl.bdeSize =
2894 (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
2895 cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
2896 cmd->un.genreq64.w5.hcsw.Dfctl = 0;
2897 cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD;
2898 cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
2899 cmd->ulpBdeCount = 1;
2900 cmd->ulpClass = CLASS3;
2901 cmd->ulpOwner = OWN_CHIP;
2902 cmd->ulpLe = 1; /* Limited Edition */
2903 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
2904 cmdiocbq->vport = phba->pport;
2905 /* We want the firmware to timeout before we do */
2906 cmd->ulpTimeout = MENLO_TIMEOUT - 5;
2907 cmdiocbq->context3 = bmp;
2908 cmdiocbq->context2 = rspiocbq;
2909 cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
2910 cmdiocbq->context1 = dd_data;
2911 cmdiocbq->context2 = rspiocbq;
2912 if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
2913 cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
2914 cmd->ulpPU = MENLO_PU; /* 3 */
2915 cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
2916 cmd->ulpContext = MENLO_CONTEXT; /* 0 */
2917 } else {
2918 cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
2919 cmd->ulpPU = 1;
2920 cmd->un.ulpWord[4] = 0;
2921 cmd->ulpContext = menlo_cmd->xri;
2922 }
2923
2924 dd_data->type = TYPE_MENLO;
2925 dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
2926 dd_data->context_un.menlo.rspiocbq = rspiocbq;
2927 dd_data->context_un.menlo.set_job = job;
2928 dd_data->context_un.menlo.bmp = bmp;
2929
2930 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
2931 MENLO_TIMEOUT - 5);
2932 if (rc == IOCB_SUCCESS)
2933 return 0; /* done for now */
2934
2935 /* iocb failed so cleanup */
2936 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
2937 job->request_payload.sg_cnt, DMA_TO_DEVICE);
2938 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
2939 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2940
2941 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
2942
2943free_rspiocbq:
2944 lpfc_sli_release_iocbq(phba, rspiocbq);
2945free_cmdiocbq:
2946 lpfc_sli_release_iocbq(phba, cmdiocbq);
2947free_bmp:
2948 kfree(bmp);
2949free_dd:
2950 kfree(dd_data);
2951no_dd_data:
2952 /* make error code available to userspace */
2953 job->reply->result = rc;
2954 job->dd_data = NULL;
2955 return rc;
2956}
2957/**
834 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job 2958 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
835 * @job: fc_bsg_job to handle 2959 * @job: fc_bsg_job to handle
836 */ 2960 **/
837static int 2961static int
838lpfc_bsg_hst_vendor(struct fc_bsg_job *job) 2962lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
839{ 2963{
840 int command = job->request->rqst_data.h_vendor.vendor_cmd[0]; 2964 int command = job->request->rqst_data.h_vendor.vendor_cmd[0];
2965 int rc;
841 2966
842 switch (command) { 2967 switch (command) {
843 case LPFC_BSG_VENDOR_SET_CT_EVENT: 2968 case LPFC_BSG_VENDOR_SET_CT_EVENT:
844 return lpfc_bsg_set_event(job); 2969 rc = lpfc_bsg_hba_set_event(job);
845 break; 2970 break;
846
847 case LPFC_BSG_VENDOR_GET_CT_EVENT: 2971 case LPFC_BSG_VENDOR_GET_CT_EVENT:
848 return lpfc_bsg_get_event(job); 2972 rc = lpfc_bsg_hba_get_event(job);
2973 break;
2974 case LPFC_BSG_VENDOR_SEND_MGMT_RESP:
2975 rc = lpfc_bsg_send_mgmt_rsp(job);
2976 break;
2977 case LPFC_BSG_VENDOR_DIAG_MODE:
2978 rc = lpfc_bsg_diag_mode(job);
2979 break;
2980 case LPFC_BSG_VENDOR_DIAG_TEST:
2981 rc = lpfc_bsg_diag_test(job);
2982 break;
2983 case LPFC_BSG_VENDOR_GET_MGMT_REV:
2984 rc = lpfc_bsg_get_dfc_rev(job);
2985 break;
2986 case LPFC_BSG_VENDOR_MBOX:
2987 rc = lpfc_bsg_mbox_cmd(job);
2988 break;
2989 case LPFC_BSG_VENDOR_MENLO_CMD:
2990 case LPFC_BSG_VENDOR_MENLO_DATA:
2991 rc = lpfc_menlo_cmd(job);
849 break; 2992 break;
850
851 default: 2993 default:
852 return -EINVAL; 2994 rc = -EINVAL;
2995 job->reply->reply_payload_rcv_len = 0;
2996 /* make error code available to userspace */
2997 job->reply->result = rc;
2998 break;
853 } 2999 }
3000
3001 return rc;
854} 3002}
855 3003
856/** 3004/**
857 * lpfc_bsg_request - handle a bsg request from the FC transport 3005 * lpfc_bsg_request - handle a bsg request from the FC transport
858 * @job: fc_bsg_job to handle 3006 * @job: fc_bsg_job to handle
859 */ 3007 **/
860int 3008int
861lpfc_bsg_request(struct fc_bsg_job *job) 3009lpfc_bsg_request(struct fc_bsg_job *job)
862{ 3010{
863 uint32_t msgcode; 3011 uint32_t msgcode;
864 int rc = -EINVAL; 3012 int rc;
865 3013
866 msgcode = job->request->msgcode; 3014 msgcode = job->request->msgcode;
867
868 switch (msgcode) { 3015 switch (msgcode) {
869 case FC_BSG_HST_VENDOR: 3016 case FC_BSG_HST_VENDOR:
870 rc = lpfc_bsg_hst_vendor(job); 3017 rc = lpfc_bsg_hst_vendor(job);
@@ -873,9 +3020,13 @@ lpfc_bsg_request(struct fc_bsg_job *job)
873 rc = lpfc_bsg_rport_els(job); 3020 rc = lpfc_bsg_rport_els(job);
874 break; 3021 break;
875 case FC_BSG_RPT_CT: 3022 case FC_BSG_RPT_CT:
876 rc = lpfc_bsg_rport_ct(job); 3023 rc = lpfc_bsg_send_mgmt_cmd(job);
877 break; 3024 break;
878 default: 3025 default:
3026 rc = -EINVAL;
3027 job->reply->reply_payload_rcv_len = 0;
3028 /* make error code available to userspace */
3029 job->reply->result = rc;
879 break; 3030 break;
880 } 3031 }
881 3032
@@ -888,17 +3039,83 @@ lpfc_bsg_request(struct fc_bsg_job *job)
888 * 3039 *
889 * This function just aborts the job's IOCB. The aborted IOCB will return to 3040 * This function just aborts the job's IOCB. The aborted IOCB will return to
890 * the waiting function which will handle passing the error back to userspace 3041 * the waiting function which will handle passing the error back to userspace
891 */ 3042 **/
892int 3043int
893lpfc_bsg_timeout(struct fc_bsg_job *job) 3044lpfc_bsg_timeout(struct fc_bsg_job *job)
894{ 3045{
895 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; 3046 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
896 struct lpfc_hba *phba = vport->phba; 3047 struct lpfc_hba *phba = vport->phba;
897 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)job->dd_data; 3048 struct lpfc_iocbq *cmdiocb;
3049 struct lpfc_bsg_event *evt;
3050 struct lpfc_bsg_iocb *iocb;
3051 struct lpfc_bsg_mbox *mbox;
3052 struct lpfc_bsg_menlo *menlo;
898 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 3053 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
3054 struct bsg_job_data *dd_data;
3055 unsigned long flags;
3056
3057 spin_lock_irqsave(&phba->ct_ev_lock, flags);
3058 dd_data = (struct bsg_job_data *)job->dd_data;
3059 /* timeout and completion crossed paths if no dd_data */
3060 if (!dd_data) {
3061 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3062 return 0;
3063 }
899 3064
900 if (cmdiocb) 3065 switch (dd_data->type) {
3066 case TYPE_IOCB:
3067 iocb = &dd_data->context_un.iocb;
3068 cmdiocb = iocb->cmdiocbq;
3069 /* hint to completion handler that the job timed out */
3070 job->reply->result = -EAGAIN;
3071 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3072 /* this will call our completion handler */
3073 spin_lock_irq(&phba->hbalock);
3074 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
3075 spin_unlock_irq(&phba->hbalock);
3076 break;
3077 case TYPE_EVT:
3078 evt = dd_data->context_un.evt;
3079 /* this event has no job anymore */
3080 evt->set_job = NULL;
3081 job->dd_data = NULL;
3082 job->reply->reply_payload_rcv_len = 0;
                                                                        3083	 /* Return -EAGAIN which is our way of signalling the
3084 * app to retry.
3085 */
3086 job->reply->result = -EAGAIN;
3087 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3088 job->job_done(job);
3089 break;
3090 case TYPE_MBOX:
3091 mbox = &dd_data->context_un.mbox;
3092 /* this mbox has no job anymore */
3093 mbox->set_job = NULL;
3094 job->dd_data = NULL;
3095 job->reply->reply_payload_rcv_len = 0;
3096 job->reply->result = -EAGAIN;
3097 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3098 job->job_done(job);
3099 break;
3100 case TYPE_MENLO:
3101 menlo = &dd_data->context_un.menlo;
3102 cmdiocb = menlo->cmdiocbq;
3103 /* hint to completion handler that the job timed out */
3104 job->reply->result = -EAGAIN;
3105 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3106 /* this will call our completion handler */
3107 spin_lock_irq(&phba->hbalock);
901 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb); 3108 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
3109 spin_unlock_irq(&phba->hbalock);
3110 break;
3111 default:
3112 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3113 break;
3114 }
902 3115
3116 /* scsi transport fc fc_bsg_job_timeout expects a zero return code,
                                                                        3117	 * otherwise an error message will be displayed on the console,
                                                                        3118	 * so always return success (zero).
3119 */
903 return 0; 3120 return 0;
904} 3121}
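
Note: the struct bsg_job_data that lpfc_bsg_timeout() switches on is defined in lpfc_bsg.c and is not part of this diff. From the accesses in the hunk above (embedded contexts for the iocb/mbox/menlo cases, a pointer for the event case), its shape can be reconstructed roughly as the sketch below; the member names come straight from the hunk, everything else is an assumption rather than the driver's authoritative definition:

	/* hypothetical reconstruction -- the real definition lives in lpfc_bsg.c */
	struct bsg_job_data {
		uint32_t type;                        /* TYPE_EVT, TYPE_IOCB, TYPE_MBOX or TYPE_MENLO */
		union {
			struct lpfc_bsg_event *evt;   /* by pointer: "evt = dd_data->context_un.evt" */
			struct lpfc_bsg_iocb iocb;    /* embedded; carries cmdiocbq for the abort */
			struct lpfc_bsg_mbox mbox;    /* embedded; set_job is cleared on timeout */
			struct lpfc_bsg_menlo menlo;  /* embedded; carries cmdiocbq like TYPE_IOCB */
		} context_un;
	};

The key design point is that dd_data is sampled under phba->ct_ev_lock, so a timeout that races with a normal completion sees dd_data == NULL and returns without touching a job the completion path has already finished.
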
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
new file mode 100644
index 000000000000..5bc630819b9e
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -0,0 +1,110 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2010 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * *
8 * This program is free software; you can redistribute it and/or *
9 * modify it under the terms of version 2 of the GNU General *
10 * Public License as published by the Free Software Foundation. *
11 * This program is distributed in the hope that it will be useful. *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for *
17 * more details, a copy of which can be found in the file COPYING *
18 * included with this package. *
19 *******************************************************************/
20/* bsg definitions
  21 * No pointers to user data are allowed; all application buffers and sizes will
  22 * be derived through the bsg interface.
23 *
24 * These are the vendor unique structures passed in using the bsg
25 * FC_BSG_HST_VENDOR message code type.
26 */
27#define LPFC_BSG_VENDOR_SET_CT_EVENT 1
28#define LPFC_BSG_VENDOR_GET_CT_EVENT 2
29#define LPFC_BSG_VENDOR_SEND_MGMT_RESP 3
30#define LPFC_BSG_VENDOR_DIAG_MODE 4
31#define LPFC_BSG_VENDOR_DIAG_TEST 5
32#define LPFC_BSG_VENDOR_GET_MGMT_REV 6
33#define LPFC_BSG_VENDOR_MBOX 7
34#define LPFC_BSG_VENDOR_MENLO_CMD 8
35#define LPFC_BSG_VENDOR_MENLO_DATA 9
36
37struct set_ct_event {
38 uint32_t command;
39 uint32_t type_mask;
40 uint32_t ev_req_id;
41 uint32_t ev_reg_id;
42};
43
44struct get_ct_event {
45 uint32_t command;
46 uint32_t ev_reg_id;
47 uint32_t ev_req_id;
48};
49
50struct get_ct_event_reply {
51 uint32_t immed_data;
52 uint32_t type;
53};
54
55struct send_mgmt_resp {
56 uint32_t command;
57 uint32_t tag;
58};
59
60
  61#define INTERNAL_LOOP_BACK 0x1 /* adapter short-circuits the loop internally */
62#define EXTERNAL_LOOP_BACK 0x2 /* requires an external loopback plug */
63
64struct diag_mode_set {
65 uint32_t command;
66 uint32_t type;
67 uint32_t timeout;
68};
69
70struct diag_mode_test {
71 uint32_t command;
72};
73
74#define LPFC_WWNN_TYPE 0
75#define LPFC_WWPN_TYPE 1
76
77struct get_mgmt_rev {
78 uint32_t command;
79};
80
81#define MANAGEMENT_MAJOR_REV 1
82#define MANAGEMENT_MINOR_REV 0
83
84/* the MgmtRevInfo structure */
85struct MgmtRevInfo {
86 uint32_t a_Major;
87 uint32_t a_Minor;
88};
89
90struct get_mgmt_rev_reply {
91 struct MgmtRevInfo info;
92};
93
94struct dfc_mbox_req {
95 uint32_t command;
96 uint32_t inExtWLen;
97 uint32_t outExtWLen;
98 uint8_t mbOffset;
99};
100
101/* Used for menlo command or menlo data. The xri is only used for menlo data */
102struct menlo_command {
103 uint32_t cmd;
104 uint32_t xri;
105};
106
107struct menlo_response {
108 uint32_t xri; /* return the xri of the iocb exchange */
109};
110
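
The new header only declares the on-the-wire structures; it does not show how an application delivers them. For orientation, here is a minimal userspace sketch of issuing LPFC_BSG_VENDOR_GET_MGMT_REV through the FC host's SGIOv4 bsg node, as the header comment describes. The device node name, the vendor_id value, and the payload placement behind struct fc_bsg_request are assumptions for illustration, not guarantees made by this patch:

	#include <fcntl.h>
	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/bsg.h>            /* struct sg_io_v4, BSG_PROTOCOL_SCSI */
	#include <scsi/sg.h>              /* SG_IO */
	#include <scsi/scsi_bsg_fc.h>     /* struct fc_bsg_request, FC_BSG_HST_VENDOR */
	#include "lpfc_bsg.h"             /* the structures added by this patch */

	static int lpfc_get_mgmt_rev(int bsg_fd, struct get_mgmt_rev_reply *reply)
	{
		struct {
			struct fc_bsg_request hdr;    /* transport header */
			struct get_mgmt_rev req;      /* vendor payload, behind vendor_cmd[] */
		} cmd;
		struct fc_bsg_reply bsg_reply;
		struct sg_io_v4 io;

		memset(&cmd, 0, sizeof(cmd));
		cmd.hdr.msgcode = FC_BSG_HST_VENDOR;
		cmd.hdr.rqst_data.h_vendor.vendor_id = 0x10df;  /* Emulex PCI vendor id (assumed) */
		cmd.req.command = LPFC_BSG_VENDOR_GET_MGMT_REV;

		memset(&io, 0, sizeof(io));
		io.guard = 'Q';
		io.protocol = BSG_PROTOCOL_SCSI;
		io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
		io.request = (uintptr_t)&cmd;
		io.request_len = sizeof(cmd);
		io.response = (uintptr_t)&bsg_reply;
		io.max_response_len = sizeof(bsg_reply);
		io.din_xferp = (uintptr_t)reply;        /* MgmtRevInfo is returned here */
		io.din_xfer_len = sizeof(*reply);

		/* bsg_fd: e.g. open("/dev/bsg/fc_host0", O_RDWR) -- node name assumed */
		return ioctl(bsg_fd, SG_IO, &io);
	}

On success the driver presumably fills reply->info.a_Major/a_Minor, which the application can check against MANAGEMENT_MAJOR_REV / MANAGEMENT_MINOR_REV above.
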
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 0830f37409a3..5087c4211b43 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2010 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -44,16 +44,27 @@ int lpfc_reg_rpi(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *,
44void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); 44void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
45void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); 45void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
46void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *); 46void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *);
47void lpfc_register_new_vport(struct lpfc_hba *, struct lpfc_vport *,
48 struct lpfc_nodelist *);
47void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *); 49void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
48void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t); 50void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
49void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *); 51void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
52void lpfc_supported_pages(struct lpfcMboxq *);
53void lpfc_sli4_params(struct lpfcMboxq *);
54int lpfc_pc_sli4_params_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
50 55
51struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t); 56struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
57void lpfc_cleanup_rcv_buffers(struct lpfc_vport *);
58void lpfc_rcv_seq_check_edtov(struct lpfc_vport *);
52void lpfc_cleanup_rpis(struct lpfc_vport *, int); 59void lpfc_cleanup_rpis(struct lpfc_vport *, int);
60void lpfc_cleanup_pending_mbox(struct lpfc_vport *);
53int lpfc_linkdown(struct lpfc_hba *); 61int lpfc_linkdown(struct lpfc_hba *);
54void lpfc_linkdown_port(struct lpfc_vport *); 62void lpfc_linkdown_port(struct lpfc_vport *);
55void lpfc_port_link_failure(struct lpfc_vport *); 63void lpfc_port_link_failure(struct lpfc_vport *);
56void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *); 64void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
65void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
66void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *);
67void lpfc_retry_pport_discovery(struct lpfc_hba *);
57 68
58void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); 69void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
59void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *); 70void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -71,6 +82,7 @@ void lpfc_set_disctmo(struct lpfc_vport *);
71int lpfc_can_disctmo(struct lpfc_vport *); 82int lpfc_can_disctmo(struct lpfc_vport *);
72int lpfc_unreg_rpi(struct lpfc_vport *, struct lpfc_nodelist *); 83int lpfc_unreg_rpi(struct lpfc_vport *, struct lpfc_nodelist *);
73void lpfc_unreg_all_rpis(struct lpfc_vport *); 84void lpfc_unreg_all_rpis(struct lpfc_vport *);
85void lpfc_unreg_hba_rpis(struct lpfc_hba *);
74void lpfc_unreg_default_rpis(struct lpfc_vport *); 86void lpfc_unreg_default_rpis(struct lpfc_vport *);
75void lpfc_issue_reg_vpi(struct lpfc_hba *, struct lpfc_vport *); 87void lpfc_issue_reg_vpi(struct lpfc_hba *, struct lpfc_vport *);
76 88
@@ -97,7 +109,7 @@ int lpfc_disc_state_machine(struct lpfc_vport *, struct lpfc_nodelist *, void *,
97 109
98void lpfc_do_scr_ns_plogi(struct lpfc_hba *, struct lpfc_vport *); 110void lpfc_do_scr_ns_plogi(struct lpfc_hba *, struct lpfc_vport *);
99int lpfc_check_sparm(struct lpfc_vport *, struct lpfc_nodelist *, 111int lpfc_check_sparm(struct lpfc_vport *, struct lpfc_nodelist *,
100 struct serv_parm *, uint32_t); 112 struct serv_parm *, uint32_t, int);
101int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist *); 113int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist *);
102void lpfc_more_plogi(struct lpfc_vport *); 114void lpfc_more_plogi(struct lpfc_vport *);
103void lpfc_more_adisc(struct lpfc_vport *); 115void lpfc_more_adisc(struct lpfc_vport *);
@@ -144,6 +156,8 @@ void lpfc_hb_timeout_handler(struct lpfc_hba *);
144 156
145void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, 157void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
146 struct lpfc_iocbq *); 158 struct lpfc_iocbq *);
159void lpfc_sli4_ct_abort_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
160 struct lpfc_iocbq *);
147int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t); 161int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t);
148int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int); 162int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int);
149void lpfc_fdmi_tmo(unsigned long); 163void lpfc_fdmi_tmo(unsigned long);
@@ -188,11 +202,12 @@ int lpfc_mbox_tmo_val(struct lpfc_hba *, int);
188void lpfc_init_vfi(struct lpfcMboxq *, struct lpfc_vport *); 202void lpfc_init_vfi(struct lpfcMboxq *, struct lpfc_vport *);
189void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t); 203void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t);
190void lpfc_init_vpi(struct lpfc_hba *, struct lpfcMboxq *, uint16_t); 204void lpfc_init_vpi(struct lpfc_hba *, struct lpfcMboxq *, uint16_t);
191void lpfc_unreg_vfi(struct lpfcMboxq *, uint16_t); 205void lpfc_unreg_vfi(struct lpfcMboxq *, struct lpfc_vport *);
192void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *); 206void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *);
193void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t); 207void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t);
194void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *); 208void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *);
195int lpfc_check_pending_fcoe_event(struct lpfc_hba *, uint8_t); 209int lpfc_check_pending_fcoe_event(struct lpfc_hba *, uint8_t);
210void lpfc_issue_init_vpi(struct lpfc_vport *);
196 211
197void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *, 212void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *,
198 uint32_t , LPFC_MBOXQ_t *); 213 uint32_t , LPFC_MBOXQ_t *);
@@ -202,7 +217,15 @@ struct hbq_dmabuf *lpfc_sli4_rb_alloc(struct lpfc_hba *);
202void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *); 217void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *);
203void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *, 218void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *,
204 uint16_t); 219 uint16_t);
220void lpfc_unregister_fcf(struct lpfc_hba *);
221void lpfc_unregister_fcf_rescan(struct lpfc_hba *);
205void lpfc_unregister_unused_fcf(struct lpfc_hba *); 222void lpfc_unregister_unused_fcf(struct lpfc_hba *);
223int lpfc_sli4_redisc_fcf_table(struct lpfc_hba *);
224void lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *);
225void lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *);
226uint16_t lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *);
227int lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *, uint16_t);
228void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *, uint16_t);
206 229
207int lpfc_mem_alloc(struct lpfc_hba *, int align); 230int lpfc_mem_alloc(struct lpfc_hba *, int align);
208void lpfc_mem_free(struct lpfc_hba *); 231void lpfc_mem_free(struct lpfc_hba *);
@@ -212,7 +235,10 @@ void lpfc_stop_vport_timers(struct lpfc_vport *);
212void lpfc_poll_timeout(unsigned long ptr); 235void lpfc_poll_timeout(unsigned long ptr);
213void lpfc_poll_start_timer(struct lpfc_hba *); 236void lpfc_poll_start_timer(struct lpfc_hba *);
214void lpfc_poll_eratt(unsigned long); 237void lpfc_poll_eratt(unsigned long);
215void lpfc_sli_poll_fcp_ring(struct lpfc_hba *); 238int
239lpfc_sli_handle_fast_ring_event(struct lpfc_hba *,
240 struct lpfc_sli_ring *, uint32_t);
241
216struct lpfc_iocbq * lpfc_sli_get_iocbq(struct lpfc_hba *); 242struct lpfc_iocbq * lpfc_sli_get_iocbq(struct lpfc_hba *);
217void lpfc_sli_release_iocbq(struct lpfc_hba *, struct lpfc_iocbq *); 243void lpfc_sli_release_iocbq(struct lpfc_hba *, struct lpfc_iocbq *);
218uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *); 244uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *);
@@ -235,7 +261,7 @@ void lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *);
235int lpfc_sli_check_eratt(struct lpfc_hba *); 261int lpfc_sli_check_eratt(struct lpfc_hba *);
236void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *, 262void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
237 struct lpfc_sli_ring *, uint32_t); 263 struct lpfc_sli_ring *, uint32_t);
238int lpfc_sli4_handle_received_buffer(struct lpfc_hba *); 264void lpfc_sli4_handle_received_buffer(struct lpfc_hba *, struct hbq_dmabuf *);
239void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); 265void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
240int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t, 266int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
241 struct lpfc_iocbq *, uint32_t); 267 struct lpfc_iocbq *, uint32_t);
@@ -358,10 +384,13 @@ void lpfc_free_fast_evt(struct lpfc_hba *, struct lpfc_fast_path_event *);
358void lpfc_create_static_vport(struct lpfc_hba *); 384void lpfc_create_static_vport(struct lpfc_hba *);
359void lpfc_stop_hba_timers(struct lpfc_hba *); 385void lpfc_stop_hba_timers(struct lpfc_hba *);
360void lpfc_stop_port(struct lpfc_hba *); 386void lpfc_stop_port(struct lpfc_hba *);
387void __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *);
388void lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *);
361void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t); 389void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t);
362int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int); 390int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
363void lpfc_start_fdiscs(struct lpfc_hba *phba); 391void lpfc_start_fdiscs(struct lpfc_hba *phba);
364 392struct lpfc_vport *lpfc_find_vport_by_vpid(struct lpfc_hba *, uint16_t);
393struct lpfc_sglq *__lpfc_get_active_sglq(struct lpfc_hba *, uint16_t);
365#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code) 394#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
366#define HBA_EVENT_RSCN 5 395#define HBA_EVENT_RSCN 5
367#define HBA_EVENT_LINK_UP 2 396#define HBA_EVENT_LINK_UP 2
@@ -370,5 +399,5 @@ void lpfc_start_fdiscs(struct lpfc_hba *phba);
370/* functions to support SGIOv4/bsg interface */ 399/* functions to support SGIOv4/bsg interface */
371int lpfc_bsg_request(struct fc_bsg_job *); 400int lpfc_bsg_request(struct fc_bsg_job *);
372int lpfc_bsg_timeout(struct fc_bsg_job *); 401int lpfc_bsg_timeout(struct fc_bsg_job *);
373void lpfc_bsg_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, 402int lpfc_bsg_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
374 struct lpfc_iocbq *); 403 struct lpfc_iocbq *);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 9a1bd9534d74..463b74902ac4 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2009 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2010 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -25,12 +25,14 @@
25#include <linux/blkdev.h> 25#include <linux/blkdev.h>
26#include <linux/pci.h> 26#include <linux/pci.h>
27#include <linux/interrupt.h> 27#include <linux/interrupt.h>
28#include <linux/slab.h>
28#include <linux/utsname.h> 29#include <linux/utsname.h>
29 30
30#include <scsi/scsi.h> 31#include <scsi/scsi.h>
31#include <scsi/scsi_device.h> 32#include <scsi/scsi_device.h>
32#include <scsi/scsi_host.h> 33#include <scsi/scsi_host.h>
33#include <scsi/scsi_transport_fc.h> 34#include <scsi/scsi_transport_fc.h>
35#include <scsi/fc/fc_fs.h>
34 36
35#include "lpfc_hw4.h" 37#include "lpfc_hw4.h"
36#include "lpfc_hw.h" 38#include "lpfc_hw.h"
@@ -87,7 +89,6 @@ void
87lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 89lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
88 struct lpfc_iocbq *piocbq) 90 struct lpfc_iocbq *piocbq)
89{ 91{
90
91 struct lpfc_dmabuf *mp = NULL; 92 struct lpfc_dmabuf *mp = NULL;
92 IOCB_t *icmd = &piocbq->iocb; 93 IOCB_t *icmd = &piocbq->iocb;
93 int i; 94 int i;
@@ -97,7 +98,8 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
97 struct list_head head; 98 struct list_head head;
98 struct lpfc_dmabuf *bdeBuf; 99 struct lpfc_dmabuf *bdeBuf;
99 100
100 lpfc_bsg_ct_unsol_event(phba, pring, piocbq); 101 if (lpfc_bsg_ct_unsol_event(phba, pring, piocbq) == 0)
102 return;
101 103
102 if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) { 104 if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) {
103 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); 105 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
@@ -160,6 +162,40 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
160 } 162 }
161} 163}
162 164
165/**
166 * lpfc_sli4_ct_abort_unsol_event - Default handle for sli4 unsol abort
167 * @phba: Pointer to HBA context object.
168 * @pring: Pointer to the driver internal I/O ring.
169 * @piocbq: Pointer to the IOCBQ.
170 *
171 * This function serves as the default handler for the sli4 unsolicited
 172 * abort event. It is invoked when no application interface has
 173 * registered an unsolicited abort handler. This handler simply
 174 * releases the DMA buffer used by the unsol abort event.
175 **/
176void
177lpfc_sli4_ct_abort_unsol_event(struct lpfc_hba *phba,
178 struct lpfc_sli_ring *pring,
179 struct lpfc_iocbq *piocbq)
180{
181 IOCB_t *icmd = &piocbq->iocb;
182 struct lpfc_dmabuf *bdeBuf;
183 uint32_t size;
184
185 /* Forward abort event to any process registered to receive ct event */
186 if (lpfc_bsg_ct_unsol_event(phba, pring, piocbq) == 0)
187 return;
188
189 /* If there is no BDE associated with IOCB, there is nothing to do */
190 if (icmd->ulpBdeCount == 0)
191 return;
192 bdeBuf = piocbq->context2;
193 piocbq->context2 = NULL;
194 size = icmd->un.cont64[0].tus.f.bdeSize;
195 lpfc_ct_unsol_buffer(phba, piocbq, bdeBuf, size);
196 lpfc_in_buf_free(phba, bdeBuf);
197}
198
163static void 199static void
164lpfc_free_ct_rsp(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist) 200lpfc_free_ct_rsp(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist)
165{ 201{
@@ -304,8 +340,8 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
304 /* Fill in rest of iocb */ 340 /* Fill in rest of iocb */
305 icmd->un.genreq64.w5.hcsw.Fctl = (SI | LA); 341 icmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
306 icmd->un.genreq64.w5.hcsw.Dfctl = 0; 342 icmd->un.genreq64.w5.hcsw.Dfctl = 0;
307 icmd->un.genreq64.w5.hcsw.Rctl = FC_UNSOL_CTL; 343 icmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
308 icmd->un.genreq64.w5.hcsw.Type = FC_COMMON_TRANSPORT_ULP; 344 icmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
309 345
310 if (!tmo) { 346 if (!tmo) {
311 /* FC spec states we need 3 * ratov for CT requests */ 347 /* FC spec states we need 3 * ratov for CT requests */
@@ -363,9 +399,14 @@ lpfc_ct_cmd(struct lpfc_vport *vport, struct lpfc_dmabuf *inmp,
363 outmp = lpfc_alloc_ct_rsp(phba, cmdcode, bpl, rsp_size, &cnt); 399 outmp = lpfc_alloc_ct_rsp(phba, cmdcode, bpl, rsp_size, &cnt);
364 if (!outmp) 400 if (!outmp)
365 return -ENOMEM; 401 return -ENOMEM;
366 402 /*
403 * Form the CT IOCB. The total number of BDEs in this IOCB
404 * is the single command plus response count from
405 * lpfc_alloc_ct_rsp.
406 */
407 cnt += 1;
367 status = lpfc_gen_req(vport, bmp, inmp, outmp, cmpl, ndlp, 0, 408 status = lpfc_gen_req(vport, bmp, inmp, outmp, cmpl, ndlp, 0,
368 cnt+1, 0, retry); 409 cnt, 0, retry);
369 if (status) { 410 if (status) {
370 lpfc_free_ct_rsp(phba, outmp); 411 lpfc_free_ct_rsp(phba, outmp);
371 return -ENOMEM; 412 return -ENOMEM;
@@ -501,6 +542,9 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size)
501 SLI_CTNS_GFF_ID, 542 SLI_CTNS_GFF_ID,
502 0, Did) == 0) 543 0, Did) == 0)
503 vport->num_disc_nodes++; 544 vport->num_disc_nodes++;
545 else
546 lpfc_setup_disc_node
547 (vport, Did);
504 } 548 }
505 else { 549 else {
506 lpfc_debugfs_disc_trc(vport, 550 lpfc_debugfs_disc_trc(vport,
@@ -1209,7 +1253,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
1209 be16_to_cpu(SLI_CTNS_RFF_ID); 1253 be16_to_cpu(SLI_CTNS_RFF_ID);
1210 CtReq->un.rff.PortId = cpu_to_be32(vport->fc_myDID); 1254 CtReq->un.rff.PortId = cpu_to_be32(vport->fc_myDID);
1211 CtReq->un.rff.fbits = FC4_FEATURE_INIT; 1255 CtReq->un.rff.fbits = FC4_FEATURE_INIT;
1212 CtReq->un.rff.type_code = FC_FCP_DATA; 1256 CtReq->un.rff.type_code = FC_TYPE_FCP;
1213 cmpl = lpfc_cmpl_ct_cmd_rff_id; 1257 cmpl = lpfc_cmpl_ct_cmd_rff_id;
1214 break; 1258 break;
1215 } 1259 }
@@ -1802,12 +1846,7 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
1802 c = (rev & 0x0000ff00) >> 8; 1846 c = (rev & 0x0000ff00) >> 8;
1803 b4 = (rev & 0x000000ff); 1847 b4 = (rev & 0x000000ff);
1804 1848
1805 if (flag) 1849 sprintf(fwrevision, "%d.%d%d%c%d", b1, b2, b3, c, b4);
1806 sprintf(fwrevision, "%d.%d%d%c%d ", b1,
1807 b2, b3, c, b4);
1808 else
1809 sprintf(fwrevision, "%d.%d%d%c%d ", b1,
1810 b2, b3, c, b4);
1811 } 1850 }
1812 return; 1851 return;
1813} 1852}
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 8d0f0de76b63..a80d938fafc9 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -24,6 +24,7 @@
24#include <linux/idr.h> 24#include <linux/idr.h>
25#include <linux/interrupt.h> 25#include <linux/interrupt.h>
26#include <linux/kthread.h> 26#include <linux/kthread.h>
27#include <linux/slab.h>
27#include <linux/pci.h> 28#include <linux/pci.h>
28#include <linux/spinlock.h> 29#include <linux/spinlock.h>
29#include <linux/ctype.h> 30#include <linux/ctype.h>
@@ -926,7 +927,7 @@ lpfc_debugfs_dumpData_open(struct inode *inode, struct file *file)
926 goto out; 927 goto out;
927 928
928 /* Round to page boundry */ 929 /* Round to page boundry */
929 printk(KERN_ERR "BLKGRD %s: _dump_buf_data=0x%p\n", 930 printk(KERN_ERR "9059 BLKGRD: %s: _dump_buf_data=0x%p\n",
930 __func__, _dump_buf_data); 931 __func__, _dump_buf_data);
931 debug->buffer = _dump_buf_data; 932 debug->buffer = _dump_buf_data;
932 if (!debug->buffer) { 933 if (!debug->buffer) {
@@ -956,8 +957,8 @@ lpfc_debugfs_dumpDif_open(struct inode *inode, struct file *file)
956 goto out; 957 goto out;
957 958
958 /* Round to page boundry */ 959 /* Round to page boundry */
959 printk(KERN_ERR "BLKGRD %s: _dump_buf_dif=0x%p file=%s\n", __func__, 960 printk(KERN_ERR "9060 BLKGRD: %s: _dump_buf_dif=0x%p file=%s\n",
960 _dump_buf_dif, file->f_dentry->d_name.name); 961 __func__, _dump_buf_dif, file->f_dentry->d_name.name);
961 debug->buffer = _dump_buf_dif; 962 debug->buffer = _dump_buf_dif;
962 if (!debug->buffer) { 963 if (!debug->buffer) {
963 kfree(debug); 964 kfree(debug);
@@ -1377,7 +1378,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
1377 debugfs_create_dir(name, phba->hba_debugfs_root); 1378 debugfs_create_dir(name, phba->hba_debugfs_root);
1378 if (!vport->vport_debugfs_root) { 1379 if (!vport->vport_debugfs_root) {
1379 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 1380 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
1380 "0417 Cant create debugfs"); 1381 "0417 Cant create debugfs\n");
1381 goto debug_failed; 1382 goto debug_failed;
1382 } 1383 }
1383 atomic_inc(&phba->debugfs_vport_count); 1384 atomic_inc(&phba->debugfs_vport_count);
@@ -1430,7 +1431,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
1430 vport, &lpfc_debugfs_op_nodelist); 1431 vport, &lpfc_debugfs_op_nodelist);
1431 if (!vport->debug_nodelist) { 1432 if (!vport->debug_nodelist) {
1432 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 1433 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
1433 "0409 Cant create debugfs nodelist"); 1434 "0409 Cant create debugfs nodelist\n");
1434 goto debug_failed; 1435 goto debug_failed;
1435 } 1436 }
1436debug_failed: 1437debug_failed:
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 1142070e9484..2851d75ffc6f 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -19,7 +19,7 @@
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define FC_MAX_HOLD_RSCN 32 /* max number of deferred RSCNs */ 21#define FC_MAX_HOLD_RSCN 32 /* max number of deferred RSCNs */
22#define FC_MAX_NS_RSP 65536 /* max size NameServer rsp */ 22#define FC_MAX_NS_RSP 64512 /* max size NameServer rsp */
23#define FC_MAXLOOP 126 /* max devices supported on a fc loop */ 23#define FC_MAXLOOP 126 /* max devices supported on a fc loop */
24#define LPFC_DISC_FLOGI_TMO 10 /* Discovery FLOGI ratov */ 24#define LPFC_DISC_FLOGI_TMO 10 /* Discovery FLOGI ratov */
25 25
@@ -105,8 +105,6 @@ struct lpfc_nodelist {
105 struct lpfc_vport *vport; 105 struct lpfc_vport *vport;
106 struct lpfc_work_evt els_retry_evt; 106 struct lpfc_work_evt els_retry_evt;
107 struct lpfc_work_evt dev_loss_evt; 107 struct lpfc_work_evt dev_loss_evt;
108 unsigned long last_ramp_up_time; /* jiffy of last ramp up */
109 unsigned long last_q_full_time; /* jiffy of last queue full */
110 struct kref kref; 108 struct kref kref;
111 atomic_t cmd_pending; 109 atomic_t cmd_pending;
112 uint32_t cmd_qdepth; 110 uint32_t cmd_qdepth;
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 45337cd23feb..5fbdb22c1899 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -21,6 +21,7 @@
21/* See Fibre Channel protocol T11 FC-LS for details */ 21/* See Fibre Channel protocol T11 FC-LS for details */
22#include <linux/blkdev.h> 22#include <linux/blkdev.h>
23#include <linux/pci.h> 23#include <linux/pci.h>
24#include <linux/slab.h>
24#include <linux/interrupt.h> 25#include <linux/interrupt.h>
25 26
26#include <scsi/scsi.h> 27#include <scsi/scsi.h>
@@ -50,9 +51,6 @@ static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
50 struct lpfc_nodelist *ndlp, uint8_t retry); 51 struct lpfc_nodelist *ndlp, uint8_t retry);
51static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba, 52static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
52 struct lpfc_iocbq *iocb); 53 struct lpfc_iocbq *iocb);
53static void lpfc_register_new_vport(struct lpfc_hba *phba,
54 struct lpfc_vport *vport,
55 struct lpfc_nodelist *ndlp);
56 54
57static int lpfc_max_els_tries = 3; 55static int lpfc_max_els_tries = 3;
58 56
@@ -173,13 +171,26 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
173 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames. 171 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
174 */ 172 */
175 if ((did == Fabric_DID) && 173 if ((did == Fabric_DID) &&
176 bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags) && 174 (phba->hba_flag & HBA_FIP_SUPPORT) &&
177 ((elscmd == ELS_CMD_FLOGI) || 175 ((elscmd == ELS_CMD_FLOGI) ||
178 (elscmd == ELS_CMD_FDISC) || 176 (elscmd == ELS_CMD_FDISC) ||
179 (elscmd == ELS_CMD_LOGO))) 177 (elscmd == ELS_CMD_LOGO)))
180 elsiocb->iocb_flag |= LPFC_FIP_ELS; 178 switch (elscmd) {
179 case ELS_CMD_FLOGI:
180 elsiocb->iocb_flag |= ((ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
181 & LPFC_FIP_ELS_ID_MASK);
182 break;
183 case ELS_CMD_FDISC:
184 elsiocb->iocb_flag |= ((ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
185 & LPFC_FIP_ELS_ID_MASK);
186 break;
187 case ELS_CMD_LOGO:
188 elsiocb->iocb_flag |= ((ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
189 & LPFC_FIP_ELS_ID_MASK);
190 break;
191 }
181 else 192 else
182 elsiocb->iocb_flag &= ~LPFC_FIP_ELS; 193 elsiocb->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
183 194
184 icmd = &elsiocb->iocb; 195 icmd = &elsiocb->iocb;
185 196
@@ -579,6 +590,15 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
579 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 590 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
580 spin_unlock_irq(shost->host_lock); 591 spin_unlock_irq(shost->host_lock);
581 } 592 }
593 /*
  594	 * If the VPI is unregistered, the driver needs to issue INIT_VPI
  595	 * before re-registering
596 */
597 if (phba->sli_rev == LPFC_SLI_REV4) {
598 spin_lock_irq(shost->host_lock);
599 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
600 spin_unlock_irq(shost->host_lock);
601 }
582 } 602 }
583 603
584 if (phba->sli_rev < LPFC_SLI_REV4) { 604 if (phba->sli_rev < LPFC_SLI_REV4) {
@@ -591,10 +611,13 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
591 } else { 611 } else {
592 ndlp->nlp_type |= NLP_FABRIC; 612 ndlp->nlp_type |= NLP_FABRIC;
593 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 613 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
594 if (vport->vfi_state & LPFC_VFI_REGISTERED) { 614 if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
615 (vport->vpi_state & LPFC_VPI_REGISTERED)) {
595 lpfc_start_fdiscs(phba); 616 lpfc_start_fdiscs(phba);
596 lpfc_do_scr_ns_plogi(phba, vport); 617 lpfc_do_scr_ns_plogi(phba, vport);
597 } else 618 } else if (vport->fc_flag & FC_VFI_REGISTERED)
619 lpfc_issue_init_vpi(vport);
620 else
598 lpfc_issue_reg_vfi(vport); 621 lpfc_issue_reg_vfi(vport);
599 } 622 }
600 return 0; 623 return 0;
@@ -749,6 +772,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
749 struct lpfc_nodelist *ndlp = cmdiocb->context1; 772 struct lpfc_nodelist *ndlp = cmdiocb->context1;
750 struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp; 773 struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
751 struct serv_parm *sp; 774 struct serv_parm *sp;
775 uint16_t fcf_index;
752 int rc; 776 int rc;
753 777
754 /* Check to see if link went down during discovery */ 778 /* Check to see if link went down during discovery */
@@ -766,6 +790,54 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
766 vport->port_state); 790 vport->port_state);
767 791
768 if (irsp->ulpStatus) { 792 if (irsp->ulpStatus) {
793 /*
794 * In case of FIP mode, perform round robin FCF failover
795 * due to new FCF discovery
796 */
797 if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
798 (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
799 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
800 "2611 FLOGI failed on registered "
801 "FCF record fcf_index:%d, trying "
802 "to perform round robin failover\n",
803 phba->fcf.current_rec.fcf_indx);
804 fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
805 if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
806 /*
807 * Exhausted the eligible FCF record list,
808 * fail through to retry FLOGI on current
809 * FCF record.
810 */
811 lpfc_printf_log(phba, KERN_WARNING,
812 LOG_FIP | LOG_ELS,
813 "2760 FLOGI exhausted FCF "
814 "round robin failover list, "
815 "retry FLOGI on the current "
816 "registered FCF index:%d\n",
817 phba->fcf.current_rec.fcf_indx);
818 spin_lock_irq(&phba->hbalock);
819 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
820 spin_unlock_irq(&phba->hbalock);
821 } else {
822 rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba,
823 fcf_index);
824 if (rc) {
825 lpfc_printf_log(phba, KERN_WARNING,
826 LOG_FIP | LOG_ELS,
827 "2761 FLOGI round "
828 "robin FCF failover "
829 "read FCF failed "
830 "rc:x%x, fcf_index:"
831 "%d\n", rc,
832 phba->fcf.current_rec.fcf_indx);
833 spin_lock_irq(&phba->hbalock);
834 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
835 spin_unlock_irq(&phba->hbalock);
836 } else
837 goto out;
838 }
839 }
840
769 /* Check for retry */ 841 /* Check for retry */
770 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) 842 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
771 goto out; 843 goto out;
@@ -784,13 +856,15 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
784 } 856 }
785 857
786 /* FLOGI failure */ 858 /* FLOGI failure */
787 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 859 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
788 "0100 FLOGI failure Data: x%x x%x " 860 "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n",
789 "x%x\n",
790 irsp->ulpStatus, irsp->un.ulpWord[4], 861 irsp->ulpStatus, irsp->un.ulpWord[4],
791 irsp->ulpTimeout); 862 irsp->ulpTimeout);
792 goto flogifail; 863 goto flogifail;
793 } 864 }
865 spin_lock_irq(shost->host_lock);
866 vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
867 spin_unlock_irq(shost->host_lock);
794 868
795 /* 869 /*
796 * The FLogI succeeded. Sync the data for the CPU before 870 * The FLogI succeeded. Sync the data for the CPU before
@@ -802,7 +876,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
802 876
803 /* FLOGI completes successfully */ 877 /* FLOGI completes successfully */
804 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 878 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
805 "0101 FLOGI completes sucessfully " 879 "0101 FLOGI completes successfully "
806 "Data: x%x x%x x%x x%x\n", 880 "Data: x%x x%x x%x x%x\n",
807 irsp->un.ulpWord[4], sp->cmn.e_d_tov, 881 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
808 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution); 882 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution);
@@ -817,8 +891,18 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
817 else 891 else
818 rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp); 892 rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
819 893
820 if (!rc) 894 if (!rc) {
895 /* Mark the FCF discovery process done */
896 lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP | LOG_ELS,
897 "2769 FLOGI successful on FCF record: "
898 "current_fcf_index:x%x, terminate FCF "
899 "round robin failover process\n",
900 phba->fcf.current_rec.fcf_indx);
901 spin_lock_irq(&phba->hbalock);
902 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
903 spin_unlock_irq(&phba->hbalock);
821 goto out; 904 goto out;
905 }
822 } 906 }
823 907
824flogifail: 908flogifail:
@@ -956,7 +1040,7 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
956 * function returns, it does not guarantee all the IOCBs are actually aborted. 1040 * function returns, it does not guarantee all the IOCBs are actually aborted.
957 * 1041 *
958 * Return code 1042 * Return code
959 * 0 - Sucessfully issued abort iocb on all outstanding flogis (Always 0) 1043 * 0 - Successfully issued abort iocb on all outstanding flogis (Always 0)
960 **/ 1044 **/
961int 1045int
962lpfc_els_abort_flogi(struct lpfc_hba *phba) 1046lpfc_els_abort_flogi(struct lpfc_hba *phba)
@@ -1384,6 +1468,10 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1384 goto out; 1468 goto out;
1385 } 1469 }
1386 /* PLOGI failed */ 1470 /* PLOGI failed */
1471 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1472 "2753 PLOGI failure DID:%06X Status:x%x/x%x\n",
1473 ndlp->nlp_DID, irsp->ulpStatus,
1474 irsp->un.ulpWord[4]);
1387 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 1475 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1388 if (lpfc_error_lost_link(irsp)) 1476 if (lpfc_error_lost_link(irsp))
1389 rc = NLP_STE_FREED_NODE; 1477 rc = NLP_STE_FREED_NODE;
@@ -1552,6 +1640,10 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1552 goto out; 1640 goto out;
1553 } 1641 }
1554 /* PRLI failed */ 1642 /* PRLI failed */
1643 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1644 "2754 PRLI failure DID:%06X Status:x%x/x%x\n",
1645 ndlp->nlp_DID, irsp->ulpStatus,
1646 irsp->un.ulpWord[4]);
1555 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 1647 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1556 if (lpfc_error_lost_link(irsp)) 1648 if (lpfc_error_lost_link(irsp))
1557 goto out; 1649 goto out;
@@ -1835,6 +1927,10 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1835 goto out; 1927 goto out;
1836 } 1928 }
1837 /* ADISC failed */ 1929 /* ADISC failed */
1930 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1931 "2755 ADISC failure DID:%06X Status:x%x/x%x\n",
1932 ndlp->nlp_DID, irsp->ulpStatus,
1933 irsp->un.ulpWord[4]);
1838 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 1934 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1839 if (!lpfc_error_lost_link(irsp)) 1935 if (!lpfc_error_lost_link(irsp))
1840 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1936 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
@@ -1984,6 +2080,10 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1984 /* ELS command is being retried */ 2080 /* ELS command is being retried */
1985 goto out; 2081 goto out;
1986 /* LOGO failed */ 2082 /* LOGO failed */
2083 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2084 "2756 LOGO failure DID:%06X Status:x%x/x%x\n",
2085 ndlp->nlp_DID, irsp->ulpStatus,
2086 irsp->un.ulpWord[4]);
1987 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2087 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1988 if (lpfc_error_lost_link(irsp)) 2088 if (lpfc_error_lost_link(irsp))
1989 goto out; 2089 goto out;
@@ -2452,6 +2552,7 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
2452 */ 2552 */
2453 del_timer_sync(&ndlp->nlp_delayfunc); 2553 del_timer_sync(&ndlp->nlp_delayfunc);
2454 retry = ndlp->nlp_retry; 2554 retry = ndlp->nlp_retry;
2555 ndlp->nlp_retry = 0;
2455 2556
2456 switch (cmd) { 2557 switch (cmd) {
2457 case ELS_CMD_FLOGI: 2558 case ELS_CMD_FLOGI:
@@ -2706,17 +2807,21 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2706 if (did == FDMI_DID) 2807 if (did == FDMI_DID)
2707 retry = 1; 2808 retry = 1;
2708 2809
2709 if ((cmd == ELS_CMD_FLOGI) && 2810 if (((cmd == ELS_CMD_FLOGI) || (cmd == ELS_CMD_FDISC)) &&
2710 (phba->fc_topology != TOPOLOGY_LOOP) && 2811 (phba->fc_topology != TOPOLOGY_LOOP) &&
2711 !lpfc_error_lost_link(irsp)) { 2812 !lpfc_error_lost_link(irsp)) {
2712 /* FLOGI retry policy */ 2813 /* FLOGI retry policy */
2713 retry = 1; 2814 retry = 1;
2714 maxretry = 48; 2815 /* retry forever */
2715 if (cmdiocb->retry >= 32) 2816 maxretry = 0;
2817 if (cmdiocb->retry >= 100)
2818 delay = 5000;
2819 else if (cmdiocb->retry >= 32)
2716 delay = 1000; 2820 delay = 1000;
2717 } 2821 }
2718 2822
2719 if ((++cmdiocb->retry) >= maxretry) { 2823 cmdiocb->retry++;
2824 if (maxretry && (cmdiocb->retry >= maxretry)) {
2720 phba->fc_stat.elsRetryExceeded++; 2825 phba->fc_stat.elsRetryExceeded++;
2721 retry = 0; 2826 retry = 0;
2722 } 2827 }
@@ -3099,7 +3204,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3099 if (ndlp && NLP_CHK_NODE_ACT(ndlp) && 3204 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
3100 (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) { 3205 (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
3101 /* A LS_RJT associated with Default RPI cleanup has its own 3206 /* A LS_RJT associated with Default RPI cleanup has its own
3102 * seperate code path. 3207 * separate code path.
3103 */ 3208 */
3104 if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI)) 3209 if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI))
3105 ls_rjt = 1; 3210 ls_rjt = 1;
@@ -4124,8 +4229,8 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4124 spin_lock_irq(shost->host_lock); 4229 spin_lock_irq(shost->host_lock);
4125 if (vport->fc_rscn_flush) { 4230 if (vport->fc_rscn_flush) {
4126 /* Another thread is walking fc_rscn_id_list on this vport */ 4231 /* Another thread is walking fc_rscn_id_list on this vport */
4127 spin_unlock_irq(shost->host_lock);
4128 vport->fc_flag |= FC_RSCN_DISCOVERY; 4232 vport->fc_flag |= FC_RSCN_DISCOVERY;
4233 spin_unlock_irq(shost->host_lock);
4129 /* Send back ACC */ 4234 /* Send back ACC */
4130 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 4235 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
4131 return 0; 4236 return 0;
@@ -4133,7 +4238,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4133 /* Indicate we are walking fc_rscn_id_list on this vport */ 4238 /* Indicate we are walking fc_rscn_id_list on this vport */
4134 vport->fc_rscn_flush = 1; 4239 vport->fc_rscn_flush = 1;
4135 spin_unlock_irq(shost->host_lock); 4240 spin_unlock_irq(shost->host_lock);
4136 /* Get the array count after sucessfully have the token */ 4241 /* Get the array count after successfully have the token */
4137 rscn_cnt = vport->fc_rscn_id_cnt; 4242 rscn_cnt = vport->fc_rscn_id_cnt;
4138 /* If we are already processing an RSCN, save the received 4243 /* If we are already processing an RSCN, save the received
4139 * RSCN payload buffer, cmdiocb->context2 to process later. 4244 * RSCN payload buffer, cmdiocb->context2 to process later.
@@ -4367,7 +4472,7 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4367 4472
4368 did = Fabric_DID; 4473 did = Fabric_DID;
4369 4474
4370 if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3))) { 4475 if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1))) {
4371 /* For a FLOGI we accept, then if our portname is greater 4476 /* For a FLOGI we accept, then if our portname is greater
4372 * then the remote portname we initiate Nport login. 4477 * then the remote portname we initiate Nport login.
4373 */ 4478 */
@@ -4503,6 +4608,29 @@ lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4503} 4608}
4504 4609
4505/** 4610/**
4611 * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb
4612 * @vport: pointer to a host virtual N_Port data structure.
4613 * @cmdiocb: pointer to lpfc command iocb data structure.
4614 * @ndlp: pointer to a node-list data structure.
4615 *
4616 * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB
4617 * received as an ELS unsolicited event. A request to RRQ shall only
4618 * be accepted if the Originator Nx_Port N_Port_ID or the Responder
4619 * Nx_Port N_Port_ID of the target Exchange is the same as the
4620 * N_Port_ID of the Nx_Port that makes the request. If the RRQ is
4621 * not accepted, an LS_RJT with reason code "Unable to perform
4622 * command request" and reason code explanation "Invalid Originator
4623 * S_ID" shall be returned. For now, we just unconditionally accept
4624 * RRQ from the target.
4625 **/
4626static void
4627lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4628 struct lpfc_nodelist *ndlp)
4629{
4630 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
4631}
4632
4633/**
4506 * lpfc_els_rsp_rps_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd 4634 * lpfc_els_rsp_rps_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
4507 * @phba: pointer to lpfc hba data structure. 4635 * @phba: pointer to lpfc hba data structure.
4508 * @pmb: pointer to the driver internal queue element for mailbox command. 4636 * @pmb: pointer to the driver internal queue element for mailbox command.
@@ -5396,7 +5524,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
5396 if (lpfc_els_chk_latt(vport)) 5524 if (lpfc_els_chk_latt(vport))
5397 goto dropit; 5525 goto dropit;
5398 5526
5399 /* Ignore traffic recevied during vport shutdown. */ 5527 /* Ignore traffic received during vport shutdown. */
5400 if (vport->load_flag & FC_UNLOADING) 5528 if (vport->load_flag & FC_UNLOADING)
5401 goto dropit; 5529 goto dropit;
5402 5530
@@ -5618,6 +5746,16 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
5618 if (newnode) 5746 if (newnode)
5619 lpfc_nlp_put(ndlp); 5747 lpfc_nlp_put(ndlp);
5620 break; 5748 break;
5749 case ELS_CMD_RRQ:
5750 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5751 "RCV RRQ: did:x%x/ste:x%x flg:x%x",
5752 did, vport->port_state, ndlp->nlp_flag);
5753
5754 phba->fc_stat.elsRcvRRQ++;
5755 lpfc_els_rcv_rrq(vport, elsiocb, ndlp);
5756 if (newnode)
5757 lpfc_nlp_put(ndlp);
5758 break;
5621 default: 5759 default:
5622 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 5760 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5623 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x", 5761 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x",
@@ -5670,7 +5808,7 @@ dropit:
5670 * NULL - No vport with the matching @vpi found 5808 * NULL - No vport with the matching @vpi found
5671 * Otherwise - Address to the vport with the matching @vpi. 5809 * Otherwise - Address to the vport with the matching @vpi.
5672 **/ 5810 **/
5673static struct lpfc_vport * 5811struct lpfc_vport *
5674lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi) 5812lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
5675{ 5813{
5676 struct lpfc_vport *vport; 5814 struct lpfc_vport *vport;
@@ -5864,6 +6002,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5864 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6002 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5865 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 6003 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
5866 MAILBOX_t *mb = &pmb->u.mb; 6004 MAILBOX_t *mb = &pmb->u.mb;
6005 int rc;
5867 6006
5868 spin_lock_irq(shost->host_lock); 6007 spin_lock_irq(shost->host_lock);
5869 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 6008 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
@@ -5885,6 +6024,26 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5885 spin_unlock_irq(shost->host_lock); 6024 spin_unlock_irq(shost->host_lock);
5886 lpfc_can_disctmo(vport); 6025 lpfc_can_disctmo(vport);
5887 break; 6026 break;
 6027	 /* If reg_vpi fails with invalid VPI status, re-init VPI */
6028 case 0x20:
6029 spin_lock_irq(shost->host_lock);
6030 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
6031 spin_unlock_irq(shost->host_lock);
6032 lpfc_init_vpi(phba, pmb, vport->vpi);
6033 pmb->vport = vport;
6034 pmb->mbox_cmpl = lpfc_init_vpi_cmpl;
6035 rc = lpfc_sli_issue_mbox(phba, pmb,
6036 MBX_NOWAIT);
6037 if (rc == MBX_NOT_FINISHED) {
6038 lpfc_printf_vlog(vport,
6039 KERN_ERR, LOG_MBOX,
6040 "2732 Failed to issue INIT_VPI"
6041 " mailbox command\n");
6042 } else {
6043 lpfc_nlp_put(ndlp);
6044 return;
6045 }
6046
5888 default: 6047 default:
5889 /* Try to recover from this error */ 6048 /* Try to recover from this error */
5890 lpfc_mbx_unreg_vpi(vport); 6049 lpfc_mbx_unreg_vpi(vport);
@@ -5897,14 +6056,23 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5897 lpfc_initial_fdisc(vport); 6056 lpfc_initial_fdisc(vport);
5898 break; 6057 break;
5899 } 6058 }
5900
5901 } else { 6059 } else {
5902 if (vport == phba->pport) 6060 spin_lock_irq(shost->host_lock);
6061 vport->vpi_state |= LPFC_VPI_REGISTERED;
6062 spin_unlock_irq(shost->host_lock);
6063 if (vport == phba->pport) {
5903 if (phba->sli_rev < LPFC_SLI_REV4) 6064 if (phba->sli_rev < LPFC_SLI_REV4)
5904 lpfc_issue_fabric_reglogin(vport); 6065 lpfc_issue_fabric_reglogin(vport);
5905 else 6066 else {
5906 lpfc_issue_reg_vfi(vport); 6067 /*
5907 else 6068 * If the physical port is instantiated using
6069 * FDISC, do not start vport discovery.
6070 */
6071 if (vport->port_state != LPFC_FDISC)
6072 lpfc_start_fdiscs(phba);
6073 lpfc_do_scr_ns_plogi(phba, vport);
6074 }
6075 } else
5908 lpfc_do_scr_ns_plogi(phba, vport); 6076 lpfc_do_scr_ns_plogi(phba, vport);
5909 } 6077 }
5910 6078
@@ -5926,7 +6094,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5926 * This routine registers the @vport as a new virtual port with a HBA. 6094 * This routine registers the @vport as a new virtual port with a HBA.
5927 * It is done through a registering vpi mailbox command. 6095 * It is done through a registering vpi mailbox command.
5928 **/ 6096 **/
5929static void 6097void
5930lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport, 6098lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
5931 struct lpfc_nodelist *ndlp) 6099 struct lpfc_nodelist *ndlp)
5932{ 6100{
@@ -5967,6 +6135,92 @@ mbox_err_exit:
5967} 6135}
5968 6136
5969/** 6137/**
6138 * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer
6139 * @phba: pointer to lpfc hba data structure.
6140 *
 6142 * This routine cancels the retry delay timers for all the vports.
6142 **/
6143void
6144lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba)
6145{
6146 struct lpfc_vport **vports;
6147 struct lpfc_nodelist *ndlp;
6148 uint32_t link_state;
6149 int i;
6150
6151 /* Treat this failure as linkdown for all vports */
6152 link_state = phba->link_state;
6153 lpfc_linkdown(phba);
6154 phba->link_state = link_state;
6155
6156 vports = lpfc_create_vport_work_array(phba);
6157
6158 if (vports) {
6159 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
6160 ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
6161 if (ndlp)
6162 lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
6163 lpfc_els_flush_cmd(vports[i]);
6164 }
6165 lpfc_destroy_vport_work_array(phba, vports);
6166 }
6167}
6168
6169/**
6170 * lpfc_retry_pport_discovery - Start timer to retry FLOGI.
6171 * @phba: pointer to lpfc hba data structure.
6172 *
 6173 * This routine aborts all pending discovery commands and
 6174 * starts a timer to retry FLOGI for the physical port
6175 * discovery.
6176 **/
6177void
6178lpfc_retry_pport_discovery(struct lpfc_hba *phba)
6179{
6180 struct lpfc_nodelist *ndlp;
6181 struct Scsi_Host *shost;
6182
 6183	 /* Cancel all the vports' retry delay timers */
6184 lpfc_cancel_all_vport_retry_delay_timer(phba);
6185
 6186	 /* If the fabric requires FLOGI, re-instantiate the physical login */
6187 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
6188 if (!ndlp)
6189 return;
6190
6191 shost = lpfc_shost_from_vport(phba->pport);
6192 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
6193 spin_lock_irq(shost->host_lock);
6194 ndlp->nlp_flag |= NLP_DELAY_TMO;
6195 spin_unlock_irq(shost->host_lock);
6196 ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
6197 phba->pport->port_state = LPFC_FLOGI;
6198 return;
6199}
6200
6201/**
6202 * lpfc_fabric_login_reqd - Check if FLOGI required.
6203 * @phba: pointer to lpfc hba data structure.
6204 * @cmdiocb: pointer to FDISC command iocb.
6205 * @rspiocb: pointer to FDISC response iocb.
6206 *
 6207 * This routine checks if a FLOGI is required for FDISC
6208 * to succeed.
6209 **/
6210static int
6211lpfc_fabric_login_reqd(struct lpfc_hba *phba,
6212 struct lpfc_iocbq *cmdiocb,
6213 struct lpfc_iocbq *rspiocb)
6214{
6215
6216 if ((rspiocb->iocb.ulpStatus != IOSTAT_FABRIC_RJT) ||
6217 (rspiocb->iocb.un.ulpWord[4] != RJT_LOGIN_REQUIRED))
6218 return 0;
6219 else
6220 return 1;
6221}
6222
6223/**
5970 * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command 6224 * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command
5971 * @phba: pointer to lpfc hba data structure. 6225 * @phba: pointer to lpfc hba data structure.
5972 * @cmdiocb: pointer to lpfc command iocb data structure. 6226 * @cmdiocb: pointer to lpfc command iocb data structure.
@@ -6015,6 +6269,12 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
6015 irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID); 6269 irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID);
6016 6270
6017 if (irsp->ulpStatus) { 6271 if (irsp->ulpStatus) {
6272
6273 if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) {
6274 lpfc_retry_pport_discovery(phba);
6275 goto out;
6276 }
6277
6018 /* Check for retry */ 6278 /* Check for retry */
6019 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) 6279 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
6020 goto out; 6280 goto out;
@@ -6024,12 +6284,8 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
6024 irsp->ulpStatus, irsp->un.ulpWord[4]); 6284 irsp->ulpStatus, irsp->un.ulpWord[4]);
6025 goto fdisc_failed; 6285 goto fdisc_failed;
6026 } 6286 }
6027 if (vport->fc_vport->vport_state == FC_VPORT_INITIALIZING)
6028 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
6029 lpfc_nlp_put(ndlp);
6030 /* giving up on FDISC. Cancel discovery timer */
6031 lpfc_can_disctmo(vport);
6032 spin_lock_irq(shost->host_lock); 6287 spin_lock_irq(shost->host_lock);
6288 vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
6033 vport->fc_flag |= FC_FABRIC; 6289 vport->fc_flag |= FC_FABRIC;
6034 if (vport->phba->fc_topology == TOPOLOGY_LOOP) 6290 if (vport->phba->fc_topology == TOPOLOGY_LOOP)
6035 vport->fc_flag |= FC_PUBLIC_LOOP; 6291 vport->fc_flag |= FC_PUBLIC_LOOP;
@@ -6057,10 +6313,14 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
6057 lpfc_mbx_unreg_vpi(vport); 6313 lpfc_mbx_unreg_vpi(vport);
6058 spin_lock_irq(shost->host_lock); 6314 spin_lock_irq(shost->host_lock);
6059 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 6315 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
6316 if (phba->sli_rev == LPFC_SLI_REV4)
6317 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
6060 spin_unlock_irq(shost->host_lock); 6318 spin_unlock_irq(shost->host_lock);
6061 } 6319 }
6062 6320
6063 if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) 6321 if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)
6322 lpfc_issue_init_vpi(vport);
6323 else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
6064 lpfc_register_new_vport(phba, vport, ndlp); 6324 lpfc_register_new_vport(phba, vport, ndlp);
6065 else 6325 else
6066 lpfc_do_scr_ns_plogi(phba, vport); 6326 lpfc_do_scr_ns_plogi(phba, vport);
@@ -6107,6 +6367,7 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
6107 int did = ndlp->nlp_DID; 6367 int did = ndlp->nlp_DID;
6108 int rc; 6368 int rc;
6109 6369
6370 vport->port_state = LPFC_FDISC;
6110 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 6371 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
6111 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 6372 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
6112 ELS_CMD_FDISC); 6373 ELS_CMD_FDISC);
@@ -6172,7 +6433,6 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
6172 return 1; 6433 return 1;
6173 } 6434 }
6174 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING); 6435 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING);
6175 vport->port_state = LPFC_FDISC;
6176 return 0; 6436 return 0;
6177} 6437}
6178 6438
@@ -6632,21 +6892,27 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
6632 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 6892 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
6633 unsigned long iflag = 0; 6893 unsigned long iflag = 0;
6634 6894
6635 spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, iflag); 6895 spin_lock_irqsave(&phba->hbalock, iflag);
6896 spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
6636 list_for_each_entry_safe(sglq_entry, sglq_next, 6897 list_for_each_entry_safe(sglq_entry, sglq_next,
6637 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { 6898 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
6638 if (sglq_entry->sli4_xritag == xri) { 6899 if (sglq_entry->sli4_xritag == xri) {
6639 list_del(&sglq_entry->list); 6900 list_del(&sglq_entry->list);
6640 spin_unlock_irqrestore(
6641 &phba->sli4_hba.abts_sgl_list_lock,
6642 iflag);
6643 spin_lock_irqsave(&phba->hbalock, iflag);
6644
6645 list_add_tail(&sglq_entry->list, 6901 list_add_tail(&sglq_entry->list,
6646 &phba->sli4_hba.lpfc_sgl_list); 6902 &phba->sli4_hba.lpfc_sgl_list);
6903 sglq_entry->state = SGL_FREED;
6904 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
6647 spin_unlock_irqrestore(&phba->hbalock, iflag); 6905 spin_unlock_irqrestore(&phba->hbalock, iflag);
6648 return; 6906 return;
6649 } 6907 }
6650 } 6908 }
6651 spin_unlock_irqrestore(&phba->sli4_hba.abts_sgl_list_lock, iflag); 6909 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
6910 sglq_entry = __lpfc_get_active_sglq(phba, xri);
6911 if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
6912 spin_unlock_irqrestore(&phba->hbalock, iflag);
6913 return;
6914 }
6915 sglq_entry->state = SGL_XRI_ABORTED;
6916 spin_unlock_irqrestore(&phba->hbalock, iflag);
6917 return;
6652} 6918}
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index e6a47e25b218..e1466eec56b7 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -20,6 +20,7 @@
20 *******************************************************************/ 20 *******************************************************************/
21 21
22#include <linux/blkdev.h> 22#include <linux/blkdev.h>
23#include <linux/slab.h>
23#include <linux/pci.h> 24#include <linux/pci.h>
24#include <linux/kthread.h> 25#include <linux/kthread.h>
25#include <linux/interrupt.h> 26#include <linux/interrupt.h>
@@ -525,8 +526,8 @@ lpfc_work_done(struct lpfc_hba *phba)
525 spin_unlock_irq(&phba->hbalock); 526 spin_unlock_irq(&phba->hbalock);
526 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); 527 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
527 } 528 }
528 if (phba->hba_flag & HBA_RECEIVE_BUFFER) 529 if (phba->fcf.fcf_flag & FCF_REDISC_EVT)
529 lpfc_sli4_handle_received_buffer(phba); 530 lpfc_sli4_fcf_redisc_event_proc(phba);
530 } 531 }
531 532
532 vports = lpfc_create_vport_work_array(phba); 533 vports = lpfc_create_vport_work_array(phba);
@@ -568,8 +569,9 @@ lpfc_work_done(struct lpfc_hba *phba)
568 pring = &phba->sli.ring[LPFC_ELS_RING]; 569 pring = &phba->sli.ring[LPFC_ELS_RING];
569 status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); 570 status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
570 status >>= (4*LPFC_ELS_RING); 571 status >>= (4*LPFC_ELS_RING);
571 if ((status & HA_RXMASK) 572 if ((status & HA_RXMASK) ||
572 || (pring->flag & LPFC_DEFERRED_RING_EVENT)) { 573 (pring->flag & LPFC_DEFERRED_RING_EVENT) ||
574 (phba->hba_flag & HBA_SP_QUEUE_EVT)) {
573 if (pring->flag & LPFC_STOP_IOCB_EVENT) { 575 if (pring->flag & LPFC_STOP_IOCB_EVENT) {
574 pring->flag |= LPFC_DEFERRED_RING_EVENT; 576 pring->flag |= LPFC_DEFERRED_RING_EVENT;
575 /* Set the lpfc data pending flag */ 577 /* Set the lpfc data pending flag */
@@ -688,7 +690,8 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
688 lpfc_unreg_rpi(vport, ndlp); 690 lpfc_unreg_rpi(vport, ndlp);
689 691
690 /* Leave Fabric nodes alone on link down */ 692 /* Leave Fabric nodes alone on link down */
691 if (!remove && ndlp->nlp_type & NLP_FABRIC) 693 if ((phba->sli_rev < LPFC_SLI_REV4) &&
694 (!remove && ndlp->nlp_type & NLP_FABRIC))
692 continue; 695 continue;
693 rc = lpfc_disc_state_machine(vport, ndlp, NULL, 696 rc = lpfc_disc_state_machine(vport, ndlp, NULL,
694 remove 697 remove
@@ -706,6 +709,11 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
706void 709void
707lpfc_port_link_failure(struct lpfc_vport *vport) 710lpfc_port_link_failure(struct lpfc_vport *vport)
708{ 711{
712 lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
713
714 /* Cleanup any outstanding received buffers */
715 lpfc_cleanup_rcv_buffers(vport);
716
709 /* Cleanup any outstanding RSCN activity */ 717 /* Cleanup any outstanding RSCN activity */
710 lpfc_els_flush_rscn(vport); 718 lpfc_els_flush_rscn(vport);
711 719
@@ -744,13 +752,19 @@ lpfc_linkdown(struct lpfc_hba *phba)
744 752
745 if (phba->link_state == LPFC_LINK_DOWN) 753 if (phba->link_state == LPFC_LINK_DOWN)
746 return 0; 754 return 0;
755
756 /* Block all SCSI stack I/Os */
757 lpfc_scsi_dev_block(phba);
758
747 spin_lock_irq(&phba->hbalock); 759 spin_lock_irq(&phba->hbalock);
748 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_DISCOVERED); 760 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
761 spin_unlock_irq(&phba->hbalock);
749 if (phba->link_state > LPFC_LINK_DOWN) { 762 if (phba->link_state > LPFC_LINK_DOWN) {
750 phba->link_state = LPFC_LINK_DOWN; 763 phba->link_state = LPFC_LINK_DOWN;
764 spin_lock_irq(shost->host_lock);
751 phba->pport->fc_flag &= ~FC_LBIT; 765 phba->pport->fc_flag &= ~FC_LBIT;
766 spin_unlock_irq(shost->host_lock);
752 } 767 }
753 spin_unlock_irq(&phba->hbalock);
754 vports = lpfc_create_vport_work_array(phba); 768 vports = lpfc_create_vport_work_array(phba);
755 if (vports != NULL) 769 if (vports != NULL)
756 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 770 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
@@ -1015,13 +1029,12 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1015 mempool_free(mboxq, phba->mbox_mem_pool); 1029 mempool_free(mboxq, phba->mbox_mem_pool);
1016 return; 1030 return;
1017 } 1031 }
1018 if (vport->port_state != LPFC_FLOGI) { 1032 spin_lock_irqsave(&phba->hbalock, flags);
1019 spin_lock_irqsave(&phba->hbalock, flags); 1033 phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
1020 phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE); 1034 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
1021 phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1035 spin_unlock_irqrestore(&phba->hbalock, flags);
1022 spin_unlock_irqrestore(&phba->hbalock, flags); 1036 if (vport->port_state != LPFC_FLOGI)
1023 lpfc_initial_flogi(vport); 1037 lpfc_initial_flogi(vport);
1024 }
1025 1038
1026 mempool_free(mboxq, phba->mbox_mem_pool); 1039 mempool_free(mboxq, phba->mbox_mem_pool);
1027 return; 1040 return;
@@ -1039,25 +1052,23 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1039static uint32_t 1052static uint32_t
1040lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record) 1053lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
1041{ 1054{
1042 if ((fab_name[0] == 1055 if (fab_name[0] != bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record))
1043 bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record)) && 1056 return 0;
1044 (fab_name[1] == 1057 if (fab_name[1] != bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record))
1045 bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record)) && 1058 return 0;
1046 (fab_name[2] == 1059 if (fab_name[2] != bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record))
1047 bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record)) && 1060 return 0;
1048 (fab_name[3] == 1061 if (fab_name[3] != bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record))
1049 bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record)) && 1062 return 0;
1050 (fab_name[4] == 1063 if (fab_name[4] != bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record))
1051 bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record)) && 1064 return 0;
1052 (fab_name[5] == 1065 if (fab_name[5] != bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record))
1053 bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record)) && 1066 return 0;
1054 (fab_name[6] == 1067 if (fab_name[6] != bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record))
1055 bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record)) && 1068 return 0;
1056 (fab_name[7] == 1069 if (fab_name[7] != bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record))
1057 bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record)))
1058 return 1;
1059 else
1060 return 0; 1070 return 0;
1071 return 1;
1061} 1072}
1062 1073
1063/** 1074/**
@@ -1072,30 +1083,28 @@ lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
1072static uint32_t 1083static uint32_t
1073lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record) 1084lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
1074{ 1085{
1075 if ((sw_name[0] == 1086 if (sw_name[0] != bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record))
1076 bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record)) && 1087 return 0;
1077 (sw_name[1] == 1088 if (sw_name[1] != bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record))
1078 bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record)) && 1089 return 0;
1079 (sw_name[2] == 1090 if (sw_name[2] != bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record))
1080 bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record)) && 1091 return 0;
1081 (sw_name[3] == 1092 if (sw_name[3] != bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record))
1082 bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record)) && 1093 return 0;
1083 (sw_name[4] == 1094 if (sw_name[4] != bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record))
1084 bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record)) &&
1085 (sw_name[5] ==
1086 bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record)) &&
1087 (sw_name[6] ==
1088 bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record)) &&
1089 (sw_name[7] ==
1090 bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record)))
1091 return 1;
1092 else
1093 return 0; 1095 return 0;
1096 if (sw_name[5] != bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record))
1097 return 0;
1098 if (sw_name[6] != bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record))
1099 return 0;
1100 if (sw_name[7] != bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record))
1101 return 0;
1102 return 1;
1094} 1103}
1095 1104
1096/** 1105/**
1097 * lpfc_mac_addr_match - Check if the fcf mac address match. 1106 * lpfc_mac_addr_match - Check if the fcf mac address match.
1098 * @phba: pointer to lpfc hba data structure. 1107 * @mac_addr: pointer to mac address.
1099 * @new_fcf_record: pointer to fcf record. 1108 * @new_fcf_record: pointer to fcf record.
1100 * 1109 *
1101 * This routine compare the fcf record's mac address with HBA's 1110 * This routine compare the fcf record's mac address with HBA's
@@ -1103,85 +1112,115 @@ lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
1103 * returns 1 else return 0. 1112 * returns 1 else return 0.
1104 **/ 1113 **/
1105static uint32_t 1114static uint32_t
1106lpfc_mac_addr_match(struct lpfc_hba *phba, struct fcf_record *new_fcf_record) 1115lpfc_mac_addr_match(uint8_t *mac_addr, struct fcf_record *new_fcf_record)
1107{ 1116{
1108 if ((phba->fcf.mac_addr[0] == 1117 if (mac_addr[0] != bf_get(lpfc_fcf_record_mac_0, new_fcf_record))
1109 bf_get(lpfc_fcf_record_mac_0, new_fcf_record)) && 1118 return 0;
1110 (phba->fcf.mac_addr[1] == 1119 if (mac_addr[1] != bf_get(lpfc_fcf_record_mac_1, new_fcf_record))
1111 bf_get(lpfc_fcf_record_mac_1, new_fcf_record)) &&
1112 (phba->fcf.mac_addr[2] ==
1113 bf_get(lpfc_fcf_record_mac_2, new_fcf_record)) &&
1114 (phba->fcf.mac_addr[3] ==
1115 bf_get(lpfc_fcf_record_mac_3, new_fcf_record)) &&
1116 (phba->fcf.mac_addr[4] ==
1117 bf_get(lpfc_fcf_record_mac_4, new_fcf_record)) &&
1118 (phba->fcf.mac_addr[5] ==
1119 bf_get(lpfc_fcf_record_mac_5, new_fcf_record)))
1120 return 1;
1121 else
1122 return 0; 1120 return 0;
1121 if (mac_addr[2] != bf_get(lpfc_fcf_record_mac_2, new_fcf_record))
1122 return 0;
1123 if (mac_addr[3] != bf_get(lpfc_fcf_record_mac_3, new_fcf_record))
1124 return 0;
1125 if (mac_addr[4] != bf_get(lpfc_fcf_record_mac_4, new_fcf_record))
1126 return 0;
1127 if (mac_addr[5] != bf_get(lpfc_fcf_record_mac_5, new_fcf_record))
1128 return 0;
1129 return 1;
1130}
1131
1132static bool
1133lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
1134{
1135 return (curr_vlan_id == new_vlan_id);
1123} 1136}
1124 1137
1125/** 1138/**
1126 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba. 1139 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
1127 * @phba: pointer to lpfc hba data structure. 1140 * @fcf: pointer to driver fcf record.
1128 * @new_fcf_record: pointer to fcf record. 1141 * @new_fcf_record: pointer to fcf record.
1129 * 1142 *
1130 * This routine copies the FCF information from the FCF 1143 * This routine copies the FCF information from the FCF
1131 * record to lpfc_hba data structure. 1144 * record to lpfc_hba data structure.
1132 **/ 1145 **/
1133static void 1146static void
1134lpfc_copy_fcf_record(struct lpfc_hba *phba, struct fcf_record *new_fcf_record) 1147lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
1148 struct fcf_record *new_fcf_record)
1135{ 1149{
1136 phba->fcf.fabric_name[0] = 1150 /* Fabric name */
1151 fcf_rec->fabric_name[0] =
1137 bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record); 1152 bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
1138 phba->fcf.fabric_name[1] = 1153 fcf_rec->fabric_name[1] =
1139 bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record); 1154 bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
1140 phba->fcf.fabric_name[2] = 1155 fcf_rec->fabric_name[2] =
1141 bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record); 1156 bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
1142 phba->fcf.fabric_name[3] = 1157 fcf_rec->fabric_name[3] =
1143 bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record); 1158 bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
1144 phba->fcf.fabric_name[4] = 1159 fcf_rec->fabric_name[4] =
1145 bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record); 1160 bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
1146 phba->fcf.fabric_name[5] = 1161 fcf_rec->fabric_name[5] =
1147 bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record); 1162 bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
1148 phba->fcf.fabric_name[6] = 1163 fcf_rec->fabric_name[6] =
1149 bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record); 1164 bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
1150 phba->fcf.fabric_name[7] = 1165 fcf_rec->fabric_name[7] =
1151 bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record); 1166 bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
1152 phba->fcf.mac_addr[0] = 1167 /* Mac address */
1153 bf_get(lpfc_fcf_record_mac_0, new_fcf_record); 1168 fcf_rec->mac_addr[0] = bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
1154 phba->fcf.mac_addr[1] = 1169 fcf_rec->mac_addr[1] = bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
1155 bf_get(lpfc_fcf_record_mac_1, new_fcf_record); 1170 fcf_rec->mac_addr[2] = bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
1156 phba->fcf.mac_addr[2] = 1171 fcf_rec->mac_addr[3] = bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
1157 bf_get(lpfc_fcf_record_mac_2, new_fcf_record); 1172 fcf_rec->mac_addr[4] = bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
1158 phba->fcf.mac_addr[3] = 1173 fcf_rec->mac_addr[5] = bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
1159 bf_get(lpfc_fcf_record_mac_3, new_fcf_record); 1174 /* FCF record index */
1160 phba->fcf.mac_addr[4] = 1175 fcf_rec->fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
1161 bf_get(lpfc_fcf_record_mac_4, new_fcf_record); 1176 /* FCF record priority */
1162 phba->fcf.mac_addr[5] = 1177 fcf_rec->priority = new_fcf_record->fip_priority;
1163 bf_get(lpfc_fcf_record_mac_5, new_fcf_record); 1178 /* Switch name */
1164 phba->fcf.fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); 1179 fcf_rec->switch_name[0] =
1165 phba->fcf.priority = new_fcf_record->fip_priority;
1166 phba->fcf.switch_name[0] =
1167 bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record); 1180 bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record);
1168 phba->fcf.switch_name[1] = 1181 fcf_rec->switch_name[1] =
1169 bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record); 1182 bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record);
1170 phba->fcf.switch_name[2] = 1183 fcf_rec->switch_name[2] =
1171 bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record); 1184 bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record);
1172 phba->fcf.switch_name[3] = 1185 fcf_rec->switch_name[3] =
1173 bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record); 1186 bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record);
1174 phba->fcf.switch_name[4] = 1187 fcf_rec->switch_name[4] =
1175 bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record); 1188 bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record);
1176 phba->fcf.switch_name[5] = 1189 fcf_rec->switch_name[5] =
1177 bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record); 1190 bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record);
1178 phba->fcf.switch_name[6] = 1191 fcf_rec->switch_name[6] =
1179 bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record); 1192 bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record);
1180 phba->fcf.switch_name[7] = 1193 fcf_rec->switch_name[7] =
1181 bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record); 1194 bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record);
1182} 1195}
1183 1196
1184/** 1197/**
1198 * lpfc_update_fcf_record - Update driver fcf record
1199 * @phba: pointer to lpfc hba data structure.
1200 * @fcf_rec: pointer to driver fcf record.
1201 * @new_fcf_record: pointer to hba fcf record.
1202 * @addr_mode: address mode to be set to the driver fcf record.
1203 * @vlan_id: vlan tag to be set to the driver fcf record.
1204 * @flag: flag bits to be set to the driver fcf record.
1205 *
1206 * This routine updates the driver FCF record from the new HBA FCF record
1207 * together with the address mode, vlan_id, and other informations. This
1208 * routine is called with the host lock held.
1209 **/
1210static void
1211__lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
1212 struct fcf_record *new_fcf_record, uint32_t addr_mode,
1213 uint16_t vlan_id, uint32_t flag)
1214{
1215 /* Copy the fields from the HBA's FCF record */
1216 lpfc_copy_fcf_record(fcf_rec, new_fcf_record);
1217 /* Update other fields of driver FCF record */
1218 fcf_rec->addr_mode = addr_mode;
1219 fcf_rec->vlan_id = vlan_id;
1220 fcf_rec->flag |= (flag | RECORD_VALID);
1221}
1222
1223/**
1185 * lpfc_register_fcf - Register the FCF with hba. 1224 * lpfc_register_fcf - Register the FCF with hba.
1186 * @phba: pointer to lpfc hba data structure. 1225 * @phba: pointer to lpfc hba data structure.
1187 * 1226 *
@@ -1199,13 +1238,14 @@ lpfc_register_fcf(struct lpfc_hba *phba)
1199 1238
1200 /* If the FCF is not availabe do nothing. */ 1239 /* If the FCF is not availabe do nothing. */
1201 if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) { 1240 if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
1241 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
1202 spin_unlock_irqrestore(&phba->hbalock, flags); 1242 spin_unlock_irqrestore(&phba->hbalock, flags);
1203 return; 1243 return;
1204 } 1244 }
1205 1245
1206 /* The FCF is already registered, start discovery */ 1246 /* The FCF is already registered, start discovery */
1207 if (phba->fcf.fcf_flag & FCF_REGISTERED) { 1247 if (phba->fcf.fcf_flag & FCF_REGISTERED) {
1208 phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE); 1248 phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
1209 phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1249 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
1210 spin_unlock_irqrestore(&phba->hbalock, flags); 1250 spin_unlock_irqrestore(&phba->hbalock, flags);
1211 if (phba->pport->port_state != LPFC_FLOGI) 1251 if (phba->pport->port_state != LPFC_FLOGI)
@@ -1216,15 +1256,23 @@ lpfc_register_fcf(struct lpfc_hba *phba)
1216 1256
1217 fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, 1257 fcf_mbxq = mempool_alloc(phba->mbox_mem_pool,
1218 GFP_KERNEL); 1258 GFP_KERNEL);
1219 if (!fcf_mbxq) 1259 if (!fcf_mbxq) {
1260 spin_lock_irqsave(&phba->hbalock, flags);
1261 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
1262 spin_unlock_irqrestore(&phba->hbalock, flags);
1220 return; 1263 return;
1264 }
1221 1265
1222 lpfc_reg_fcfi(phba, fcf_mbxq); 1266 lpfc_reg_fcfi(phba, fcf_mbxq);
1223 fcf_mbxq->vport = phba->pport; 1267 fcf_mbxq->vport = phba->pport;
1224 fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi; 1268 fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
1225 rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT); 1269 rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
1226 if (rc == MBX_NOT_FINISHED) 1270 if (rc == MBX_NOT_FINISHED) {
1271 spin_lock_irqsave(&phba->hbalock, flags);
1272 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
1273 spin_unlock_irqrestore(&phba->hbalock, flags);
1227 mempool_free(fcf_mbxq, phba->mbox_mem_pool); 1274 mempool_free(fcf_mbxq, phba->mbox_mem_pool);
1275 }
1228 1276
1229 return; 1277 return;
1230} 1278}
@@ -1235,6 +1283,7 @@ lpfc_register_fcf(struct lpfc_hba *phba)
1235 * @new_fcf_record: pointer to fcf record. 1283 * @new_fcf_record: pointer to fcf record.
1236 * @boot_flag: Indicates if this record used by boot bios. 1284 * @boot_flag: Indicates if this record used by boot bios.
1237 * @addr_mode: The address mode to be used by this FCF 1285 * @addr_mode: The address mode to be used by this FCF
1286 * @vlan_id: The vlan id to be used as vlan tagging by this FCF.
1238 * 1287 *
1239 * This routine compare the fcf record with connect list obtained from the 1288 * This routine compare the fcf record with connect list obtained from the
1240 * config region to decide if this FCF can be used for SAN discovery. It returns 1289 * config region to decide if this FCF can be used for SAN discovery. It returns
@@ -1253,13 +1302,27 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1253 uint16_t *vlan_id) 1302 uint16_t *vlan_id)
1254{ 1303{
1255 struct lpfc_fcf_conn_entry *conn_entry; 1304 struct lpfc_fcf_conn_entry *conn_entry;
1305 int i, j, fcf_vlan_id = 0;
1306
1307 /* Find the lowest VLAN id in the FCF record */
1308 for (i = 0; i < 512; i++) {
1309 if (new_fcf_record->vlan_bitmap[i]) {
1310 fcf_vlan_id = i * 8;
1311 j = 0;
1312 while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) {
1313 j++;
1314 fcf_vlan_id++;
1315 }
1316 break;
1317 }
1318 }
1256 1319
1257 /* If FCF not available return 0 */ 1320 /* If FCF not available return 0 */
1258 if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) || 1321 if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
1259 !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record)) 1322 !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record))
1260 return 0; 1323 return 0;
1261 1324
1262 if (!phba->cfg_enable_fip) { 1325 if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
1263 *boot_flag = 0; 1326 *boot_flag = 0;
1264 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov, 1327 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
1265 new_fcf_record); 1328 new_fcf_record);
@@ -1286,11 +1349,16 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1286 if (*addr_mode & LPFC_FCF_FPMA) 1349 if (*addr_mode & LPFC_FCF_FPMA)
1287 *addr_mode = LPFC_FCF_FPMA; 1350 *addr_mode = LPFC_FCF_FPMA;
1288 1351
1289 *vlan_id = 0xFFFF; 1352 /* If FCF record report a vlan id use that vlan id */
1353 if (fcf_vlan_id)
1354 *vlan_id = fcf_vlan_id;
1355 else
1356 *vlan_id = 0xFFFF;
1290 return 1; 1357 return 1;
1291 } 1358 }
1292 1359
1293 list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list, list) { 1360 list_for_each_entry(conn_entry,
1361 &phba->fcf_conn_rec_list, list) {
1294 if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID)) 1362 if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
1295 continue; 1363 continue;
1296 1364
@@ -1384,8 +1452,15 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1384 (*addr_mode & LPFC_FCF_FPMA)) 1452 (*addr_mode & LPFC_FCF_FPMA))
1385 *addr_mode = LPFC_FCF_FPMA; 1453 *addr_mode = LPFC_FCF_FPMA;
1386 1454
1455 /* If matching connect list has a vlan id, use it */
1387 if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) 1456 if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
1388 *vlan_id = conn_entry->conn_rec.vlan_tag; 1457 *vlan_id = conn_entry->conn_rec.vlan_tag;
1458 /*
1459 * If no vlan id is specified in connect list, use the vlan id
1460 * in the FCF record
1461 */
1462 else if (fcf_vlan_id)
1463 *vlan_id = fcf_vlan_id;
1389 else 1464 else
1390 *vlan_id = 0xFFFF; 1465 *vlan_id = 0xFFFF;
1391 1466
@@ -1407,8 +1482,6 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1407int 1482int
1408lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf) 1483lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
1409{ 1484{
1410 LPFC_MBOXQ_t *mbox;
1411 int rc;
1412 /* 1485 /*
1413 * If the Link is up and no FCoE events while in the 1486 * If the Link is up and no FCoE events while in the
1414 * FCF discovery, no need to restart FCF discovery. 1487 * FCF discovery, no need to restart FCF discovery.
@@ -1417,75 +1490,70 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
1417 (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan)) 1490 (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
1418 return 0; 1491 return 0;
1419 1492
1493 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1494 "2768 Pending link or FCF event during current "
1495 "handling of the previous event: link_state:x%x, "
1496 "evt_tag_at_scan:x%x, evt_tag_current:x%x\n",
1497 phba->link_state, phba->fcoe_eventtag_at_fcf_scan,
1498 phba->fcoe_eventtag);
1499
1420 spin_lock_irq(&phba->hbalock); 1500 spin_lock_irq(&phba->hbalock);
1421 phba->fcf.fcf_flag &= ~FCF_AVAILABLE; 1501 phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
1422 spin_unlock_irq(&phba->hbalock); 1502 spin_unlock_irq(&phba->hbalock);
1423 1503
1424 if (phba->link_state >= LPFC_LINK_UP) 1504 if (phba->link_state >= LPFC_LINK_UP) {
1425 lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); 1505 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
1506 "2780 Restart FCF table scan due to "
1507 "pending FCF event:evt_tag_at_scan:x%x, "
1508 "evt_tag_current:x%x\n",
1509 phba->fcoe_eventtag_at_fcf_scan,
1510 phba->fcoe_eventtag);
1511 lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
1512 } else {
1513 /*
1514 * Do not continue FCF discovery and clear FCF_DISC_INPROGRESS
1515 * flag
1516 */
1517 spin_lock_irq(&phba->hbalock);
1518 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
1519 phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
1520 spin_unlock_irq(&phba->hbalock);
1521 }
1426 1522
1523 /* Unregister the currently registered FCF if required */
1427 if (unreg_fcf) { 1524 if (unreg_fcf) {
1428 spin_lock_irq(&phba->hbalock); 1525 spin_lock_irq(&phba->hbalock);
1429 phba->fcf.fcf_flag &= ~FCF_REGISTERED; 1526 phba->fcf.fcf_flag &= ~FCF_REGISTERED;
1430 spin_unlock_irq(&phba->hbalock); 1527 spin_unlock_irq(&phba->hbalock);
1431 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1528 lpfc_sli4_unregister_fcf(phba);
1432 if (!mbox) {
1433 lpfc_printf_log(phba, KERN_ERR,
1434 LOG_DISCOVERY|LOG_MBOX,
1435 "2610 UNREG_FCFI mbox allocation failed\n");
1436 return 1;
1437 }
1438 lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
1439 mbox->vport = phba->pport;
1440 mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
1441 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
1442 if (rc == MBX_NOT_FINISHED) {
1443 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
1444 "2611 UNREG_FCFI issue mbox failed\n");
1445 mempool_free(mbox, phba->mbox_mem_pool);
1446 }
1447 } 1529 }
1448
1449 return 1; 1530 return 1;
1450} 1531}
1451 1532
1452/** 1533/**
1453 * lpfc_mbx_cmpl_read_fcf_record - Completion handler for read_fcf mbox. 1534 * lpfc_sli4_fcf_rec_mbox_parse - parse non-embedded fcf record mailbox command
1454 * @phba: pointer to lpfc hba data structure. 1535 * @phba: pointer to lpfc hba data structure.
1455 * @mboxq: pointer to mailbox object. 1536 * @mboxq: pointer to mailbox object.
1537 * @next_fcf_index: pointer to holder of next fcf index.
1456 * 1538 *
1457 * This function iterate through all the fcf records available in 1539 * This routine parses the non-embedded fcf mailbox command by performing the
1458 * HBA and choose the optimal FCF record for discovery. After finding 1540 * necessarily error checking, non-embedded read FCF record mailbox command
1459 * the FCF for discovery it register the FCF record and kick start 1541 * SGE parsing, and endianness swapping.
1460 * discovery. 1542 *
1461 * If FCF_IN_USE flag is set in currently used FCF, the routine try to 1543 * Returns the pointer to the new FCF record in the non-embedded mailbox
1462 * use a FCF record which match fabric name and mac address of the 1544 * command DMA memory if successfully, other NULL.
1463 * currently used FCF record.
1464 * If the driver support only one FCF, it will try to use the FCF record
1465 * used by BOOT_BIOS.
1466 */ 1545 */
1467void 1546static struct fcf_record *
1468lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 1547lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
1548 uint16_t *next_fcf_index)
1469{ 1549{
1470 void *virt_addr; 1550 void *virt_addr;
1471 dma_addr_t phys_addr; 1551 dma_addr_t phys_addr;
1472 uint8_t *bytep;
1473 struct lpfc_mbx_sge sge; 1552 struct lpfc_mbx_sge sge;
1474 struct lpfc_mbx_read_fcf_tbl *read_fcf; 1553 struct lpfc_mbx_read_fcf_tbl *read_fcf;
1475 uint32_t shdr_status, shdr_add_status; 1554 uint32_t shdr_status, shdr_add_status;
1476 union lpfc_sli4_cfg_shdr *shdr; 1555 union lpfc_sli4_cfg_shdr *shdr;
1477 struct fcf_record *new_fcf_record; 1556 struct fcf_record *new_fcf_record;
1478 int rc;
1479 uint32_t boot_flag, addr_mode;
1480 uint32_t next_fcf_index;
1481 unsigned long flags;
1482 uint16_t vlan_id;
1483
1484 /* If there is pending FCoE event restart FCF table scan */
1485 if (lpfc_check_pending_fcoe_event(phba, 0)) {
1486 lpfc_sli4_mbox_cmd_free(phba, mboxq);
1487 return;
1488 }
1489 1557
1490 /* Get the first SGE entry from the non-embedded DMA memory. This 1558 /* Get the first SGE entry from the non-embedded DMA memory. This
1491 * routine only uses a single SGE. 1559 * routine only uses a single SGE.
@@ -1496,134 +1564,359 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1496 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 1564 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1497 "2524 Failed to get the non-embedded SGE " 1565 "2524 Failed to get the non-embedded SGE "
1498 "virtual address\n"); 1566 "virtual address\n");
1499 goto out; 1567 return NULL;
1500 } 1568 }
1501 virt_addr = mboxq->sge_array->addr[0]; 1569 virt_addr = mboxq->sge_array->addr[0];
1502 1570
1503 shdr = (union lpfc_sli4_cfg_shdr *)virt_addr; 1571 shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
1504 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 1572 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
1505 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 1573 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
1506 &shdr->response);
1507 /*
1508 * The FCF Record was read and there is no reason for the driver
1509 * to maintain the FCF record data or memory. Instead, just need
1510 * to book keeping the FCFIs can be used.
1511 */
1512 if (shdr_status || shdr_add_status) { 1574 if (shdr_status || shdr_add_status) {
1513 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1575 if (shdr_status == STATUS_FCF_TABLE_EMPTY)
1514 "2521 READ_FCF_RECORD mailbox failed " 1576 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
1515 "with status x%x add_status x%x, mbx\n", 1577 "2726 READ_FCF_RECORD Indicates empty "
1516 shdr_status, shdr_add_status); 1578 "FCF table.\n");
1517 goto out; 1579 else
1580 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
1581 "2521 READ_FCF_RECORD mailbox failed "
1582 "with status x%x add_status x%x, "
1583 "mbx\n", shdr_status, shdr_add_status);
1584 return NULL;
1518 } 1585 }
1519 /* Interpreting the returned information of FCF records */ 1586
1587 /* Interpreting the returned information of the FCF record */
1520 read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr; 1588 read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
1521 lpfc_sli_pcimem_bcopy(read_fcf, read_fcf, 1589 lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
1522 sizeof(struct lpfc_mbx_read_fcf_tbl)); 1590 sizeof(struct lpfc_mbx_read_fcf_tbl));
1523 next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf); 1591 *next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
1524
1525 new_fcf_record = (struct fcf_record *)(virt_addr + 1592 new_fcf_record = (struct fcf_record *)(virt_addr +
1526 sizeof(struct lpfc_mbx_read_fcf_tbl)); 1593 sizeof(struct lpfc_mbx_read_fcf_tbl));
1527 lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record, 1594 lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
1528 sizeof(struct fcf_record)); 1595 sizeof(struct fcf_record));
1529 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
1530 1596
1531 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, 1597 return new_fcf_record;
1532 &boot_flag, &addr_mode, 1598}
1533 &vlan_id); 1599
1600/**
1601 * lpfc_sli4_log_fcf_record_info - Log the information of a fcf record
1602 * @phba: pointer to lpfc hba data structure.
1603 * @fcf_record: pointer to the fcf record.
1604 * @vlan_id: the lowest vlan identifier associated to this fcf record.
1605 * @next_fcf_index: the index to the next fcf record in hba's fcf table.
1606 *
1607 * This routine logs the detailed FCF record if the LOG_FIP loggin is
1608 * enabled.
1609 **/
1610static void
1611lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
1612 struct fcf_record *fcf_record,
1613 uint16_t vlan_id,
1614 uint16_t next_fcf_index)
1615{
1616 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1617 "2764 READ_FCF_RECORD:\n"
1618 "\tFCF_Index : x%x\n"
1619 "\tFCF_Avail : x%x\n"
1620 "\tFCF_Valid : x%x\n"
1621 "\tFIP_Priority : x%x\n"
1622 "\tMAC_Provider : x%x\n"
1623 "\tLowest VLANID : x%x\n"
1624 "\tFCF_MAC Addr : x%x:%x:%x:%x:%x:%x\n"
1625 "\tFabric_Name : x%x:%x:%x:%x:%x:%x:%x:%x\n"
1626 "\tSwitch_Name : x%x:%x:%x:%x:%x:%x:%x:%x\n"
1627 "\tNext_FCF_Index: x%x\n",
1628 bf_get(lpfc_fcf_record_fcf_index, fcf_record),
1629 bf_get(lpfc_fcf_record_fcf_avail, fcf_record),
1630 bf_get(lpfc_fcf_record_fcf_valid, fcf_record),
1631 fcf_record->fip_priority,
1632 bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record),
1633 vlan_id,
1634 bf_get(lpfc_fcf_record_mac_0, fcf_record),
1635 bf_get(lpfc_fcf_record_mac_1, fcf_record),
1636 bf_get(lpfc_fcf_record_mac_2, fcf_record),
1637 bf_get(lpfc_fcf_record_mac_3, fcf_record),
1638 bf_get(lpfc_fcf_record_mac_4, fcf_record),
1639 bf_get(lpfc_fcf_record_mac_5, fcf_record),
1640 bf_get(lpfc_fcf_record_fab_name_0, fcf_record),
1641 bf_get(lpfc_fcf_record_fab_name_1, fcf_record),
1642 bf_get(lpfc_fcf_record_fab_name_2, fcf_record),
1643 bf_get(lpfc_fcf_record_fab_name_3, fcf_record),
1644 bf_get(lpfc_fcf_record_fab_name_4, fcf_record),
1645 bf_get(lpfc_fcf_record_fab_name_5, fcf_record),
1646 bf_get(lpfc_fcf_record_fab_name_6, fcf_record),
1647 bf_get(lpfc_fcf_record_fab_name_7, fcf_record),
1648 bf_get(lpfc_fcf_record_switch_name_0, fcf_record),
1649 bf_get(lpfc_fcf_record_switch_name_1, fcf_record),
1650 bf_get(lpfc_fcf_record_switch_name_2, fcf_record),
1651 bf_get(lpfc_fcf_record_switch_name_3, fcf_record),
1652 bf_get(lpfc_fcf_record_switch_name_4, fcf_record),
1653 bf_get(lpfc_fcf_record_switch_name_5, fcf_record),
1654 bf_get(lpfc_fcf_record_switch_name_6, fcf_record),
1655 bf_get(lpfc_fcf_record_switch_name_7, fcf_record),
1656 next_fcf_index);
1657}
1658
1659/**
1660 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
1661 * @phba: pointer to lpfc hba data structure.
1662 * @mboxq: pointer to mailbox object.
1663 *
1664 * This function iterates through all the fcf records available in
1665 * HBA and chooses the optimal FCF record for discovery. After finding
1666 * the FCF for discovery it registers the FCF record and kicks start
1667 * discovery.
1668 * If FCF_IN_USE flag is set in currently used FCF, the routine tries to
1669 * use an FCF record which matches fabric name and mac address of the
1670 * currently used FCF record.
1671 * If the driver supports only one FCF, it will try to use the FCF record
1672 * used by BOOT_BIOS.
1673 */
1674void
1675lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1676{
1677 struct fcf_record *new_fcf_record;
1678 uint32_t boot_flag, addr_mode;
1679 uint16_t fcf_index, next_fcf_index;
1680 struct lpfc_fcf_rec *fcf_rec = NULL;
1681 uint16_t vlan_id;
1682 int rc;
1683
1684 /* If there is pending FCoE event restart FCF table scan */
1685 if (lpfc_check_pending_fcoe_event(phba, 0)) {
1686 lpfc_sli4_mbox_cmd_free(phba, mboxq);
1687 return;
1688 }
1689
1690 /* Parse the FCF record from the non-embedded mailbox command */
1691 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
1692 &next_fcf_index);
1693 if (!new_fcf_record) {
1694 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
1695 "2765 Mailbox command READ_FCF_RECORD "
1696 "failed to retrieve a FCF record.\n");
1697 /* Let next new FCF event trigger fast failover */
1698 spin_lock_irq(&phba->hbalock);
1699 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
1700 spin_unlock_irq(&phba->hbalock);
1701 lpfc_sli4_mbox_cmd_free(phba, mboxq);
1702 return;
1703 }
1704
1705 /* Check the FCF record against the connection list */
1706 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
1707 &addr_mode, &vlan_id);
1708
1709 /* Log the FCF record information if turned on */
1710 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
1711 next_fcf_index);
1712
1534 /* 1713 /*
1535 * If the fcf record does not match with connect list entries 1714 * If the fcf record does not match with connect list entries
1536 * read the next entry. 1715 * read the next entry; otherwise, this is an eligible FCF
1716 * record for round robin FCF failover.
1537 */ 1717 */
1538 if (!rc) 1718 if (!rc) {
1719 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
1720 "2781 FCF record fcf_index:x%x failed FCF "
1721 "connection list check, fcf_avail:x%x, "
1722 "fcf_valid:x%x\n",
1723 bf_get(lpfc_fcf_record_fcf_index,
1724 new_fcf_record),
1725 bf_get(lpfc_fcf_record_fcf_avail,
1726 new_fcf_record),
1727 bf_get(lpfc_fcf_record_fcf_valid,
1728 new_fcf_record));
1539 goto read_next_fcf; 1729 goto read_next_fcf;
1730 } else {
1731 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
1732 rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index);
1733 if (rc)
1734 goto read_next_fcf;
1735 }
1736
1540 /* 1737 /*
1541 * If this is not the first FCF discovery of the HBA, use last 1738 * If this is not the first FCF discovery of the HBA, use last
1542 * FCF record for the discovery. 1739 * FCF record for the discovery. The condition that a rescan
1740 * matches the in-use FCF record: fabric name, switch name, mac
1741 * address, and vlan_id.
1543 */ 1742 */
1544 spin_lock_irqsave(&phba->hbalock, flags); 1743 spin_lock_irq(&phba->hbalock);
1545 if (phba->fcf.fcf_flag & FCF_IN_USE) { 1744 if (phba->fcf.fcf_flag & FCF_IN_USE) {
1546 if (lpfc_fab_name_match(phba->fcf.fabric_name, 1745 if (lpfc_fab_name_match(phba->fcf.current_rec.fabric_name,
1746 new_fcf_record) &&
1747 lpfc_sw_name_match(phba->fcf.current_rec.switch_name,
1547 new_fcf_record) && 1748 new_fcf_record) &&
1548 lpfc_sw_name_match(phba->fcf.switch_name, 1749 lpfc_mac_addr_match(phba->fcf.current_rec.mac_addr,
1549 new_fcf_record) && 1750 new_fcf_record) &&
1550 lpfc_mac_addr_match(phba, new_fcf_record)) { 1751 lpfc_vlan_id_match(phba->fcf.current_rec.vlan_id,
1752 vlan_id)) {
1551 phba->fcf.fcf_flag |= FCF_AVAILABLE; 1753 phba->fcf.fcf_flag |= FCF_AVAILABLE;
1552 spin_unlock_irqrestore(&phba->hbalock, flags); 1754 if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
1755 /* Stop FCF redisc wait timer if pending */
1756 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
1757 else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
1758 /* If in fast failover, mark it's completed */
1759 phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV |
1760 FCF_DISCOVERY);
1761 spin_unlock_irq(&phba->hbalock);
1553 goto out; 1762 goto out;
1554 } 1763 }
1555 spin_unlock_irqrestore(&phba->hbalock, flags); 1764 /*
1556 goto read_next_fcf; 1765 * Read next FCF record from HBA searching for the matching
1766 * with in-use record only if not during the fast failover
1767 * period. In case of fast failover period, it shall try to
1768 * determine whether the FCF record just read should be the
1769 * next candidate.
1770 */
1771 if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
1772 spin_unlock_irq(&phba->hbalock);
1773 goto read_next_fcf;
1774 }
1557 } 1775 }
1776 /*
1777 * Update on failover FCF record only if it's in FCF fast-failover
1778 * period; otherwise, update on current FCF record.
1779 */
1780 if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
1781 fcf_rec = &phba->fcf.failover_rec;
1782 else
1783 fcf_rec = &phba->fcf.current_rec;
1784
1558 if (phba->fcf.fcf_flag & FCF_AVAILABLE) { 1785 if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
1559 /* 1786 /*
1560 * If the current FCF record does not have boot flag 1787 * If the driver FCF record does not have boot flag
1561 * set and new fcf record has boot flag set, use the 1788 * set and new hba fcf record has boot flag set, use
1562 * new fcf record. 1789 * the new hba fcf record.
1563 */ 1790 */
1564 if (boot_flag && !(phba->fcf.fcf_flag & FCF_BOOT_ENABLE)) { 1791 if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) {
1565 /* Use this FCF record */ 1792 /* Choose this FCF record */
1566 lpfc_copy_fcf_record(phba, new_fcf_record); 1793 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
1567 phba->fcf.addr_mode = addr_mode; 1794 addr_mode, vlan_id, BOOT_ENABLE);
1568 phba->fcf.fcf_flag |= FCF_BOOT_ENABLE; 1795 spin_unlock_irq(&phba->hbalock);
1569 if (vlan_id != 0xFFFF) {
1570 phba->fcf.fcf_flag |= FCF_VALID_VLAN;
1571 phba->fcf.vlan_id = vlan_id;
1572 }
1573 spin_unlock_irqrestore(&phba->hbalock, flags);
1574 goto read_next_fcf; 1796 goto read_next_fcf;
1575 } 1797 }
1576 /* 1798 /*
1577 * If the current FCF record has boot flag set and the 1799 * If the driver FCF record has boot flag set and the
1578 * new FCF record does not have boot flag, read the next 1800 * new hba FCF record does not have boot flag, read
1579 * FCF record. 1801 * the next FCF record.
1580 */ 1802 */
1581 if (!boot_flag && (phba->fcf.fcf_flag & FCF_BOOT_ENABLE)) { 1803 if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) {
1582 spin_unlock_irqrestore(&phba->hbalock, flags); 1804 spin_unlock_irq(&phba->hbalock);
1583 goto read_next_fcf; 1805 goto read_next_fcf;
1584 } 1806 }
1585 /* 1807 /*
1586 * If there is a record with lower priority value for 1808 * If the new hba FCF record has lower priority value
1587 * the current FCF, use that record. 1809 * than the driver FCF record, use the new record.
1588 */ 1810 */
1589 if (lpfc_fab_name_match(phba->fcf.fabric_name, 1811 if (new_fcf_record->fip_priority < fcf_rec->priority) {
1590 new_fcf_record) && 1812 /* Choose this FCF record */
1591 (new_fcf_record->fip_priority < phba->fcf.priority)) { 1813 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
1592 /* Use this FCF record */ 1814 addr_mode, vlan_id, 0);
1593 lpfc_copy_fcf_record(phba, new_fcf_record);
1594 phba->fcf.addr_mode = addr_mode;
1595 if (vlan_id != 0xFFFF) {
1596 phba->fcf.fcf_flag |= FCF_VALID_VLAN;
1597 phba->fcf.vlan_id = vlan_id;
1598 }
1599 spin_unlock_irqrestore(&phba->hbalock, flags);
1600 goto read_next_fcf;
1601 } 1815 }
1602 spin_unlock_irqrestore(&phba->hbalock, flags); 1816 spin_unlock_irq(&phba->hbalock);
1603 goto read_next_fcf; 1817 goto read_next_fcf;
1604 } 1818 }
1605 /* 1819 /*
1606 * This is the first available FCF record, use this 1820 * This is the first suitable FCF record, choose this record for
1607 * record. 1821 * initial best-fit FCF.
1608 */ 1822 */
1609 lpfc_copy_fcf_record(phba, new_fcf_record); 1823 if (fcf_rec) {
1610 phba->fcf.addr_mode = addr_mode; 1824 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
1611 if (boot_flag) 1825 addr_mode, vlan_id, (boot_flag ?
1612 phba->fcf.fcf_flag |= FCF_BOOT_ENABLE; 1826 BOOT_ENABLE : 0));
1613 phba->fcf.fcf_flag |= FCF_AVAILABLE; 1827 phba->fcf.fcf_flag |= FCF_AVAILABLE;
1614 if (vlan_id != 0xFFFF) {
1615 phba->fcf.fcf_flag |= FCF_VALID_VLAN;
1616 phba->fcf.vlan_id = vlan_id;
1617 } 1828 }
1618 spin_unlock_irqrestore(&phba->hbalock, flags); 1829 spin_unlock_irq(&phba->hbalock);
1619 goto read_next_fcf; 1830 goto read_next_fcf;
1620 1831
1621read_next_fcf: 1832read_next_fcf:
1622 lpfc_sli4_mbox_cmd_free(phba, mboxq); 1833 lpfc_sli4_mbox_cmd_free(phba, mboxq);
1623 if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) 1834 if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) {
1624 lpfc_register_fcf(phba); 1835 if (phba->fcf.fcf_flag & FCF_REDISC_FOV) {
1625 else 1836 /*
1626 lpfc_sli4_read_fcf_record(phba, next_fcf_index); 1837 * Case of FCF fast failover scan
1838 */
1839
1840 /*
1841 * It has not found any suitable FCF record, cancel
1842 * FCF scan inprogress, and do nothing
1843 */
1844 if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) {
1845 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
1846 "2782 No suitable FCF record "
1847 "found during this round of "
1848 "post FCF rediscovery scan: "
1849 "fcf_evt_tag:x%x, fcf_index: "
1850 "x%x\n",
1851 phba->fcoe_eventtag_at_fcf_scan,
1852 bf_get(lpfc_fcf_record_fcf_index,
1853 new_fcf_record));
1854 /*
1855 * Let next new FCF event trigger fast
1856 * failover
1857 */
1858 spin_lock_irq(&phba->hbalock);
1859 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
1860 spin_unlock_irq(&phba->hbalock);
1861 return;
1862 }
1863 /*
1864 * It has found a suitable FCF record that is not
1865 * the same as in-use FCF record, unregister the
1866 * in-use FCF record, replace the in-use FCF record
1867 * with the new FCF record, mark FCF fast failover
1868 * completed, and then start register the new FCF
1869 * record.
1870 */
1871
1872 /* Unregister the current in-use FCF record */
1873 lpfc_unregister_fcf(phba);
1874
1875 /* Replace in-use record with the new record */
1876 memcpy(&phba->fcf.current_rec,
1877 &phba->fcf.failover_rec,
1878 sizeof(struct lpfc_fcf_rec));
1879 /* mark the FCF fast failover completed */
1880 spin_lock_irq(&phba->hbalock);
1881 phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
1882 spin_unlock_irq(&phba->hbalock);
1883 /*
1884 * Set up the initial registered FCF index for FLOGI
1885 * round robin FCF failover.
1886 */
1887 phba->fcf.fcf_rr_init_indx =
1888 phba->fcf.failover_rec.fcf_indx;
1889 /* Register to the new FCF record */
1890 lpfc_register_fcf(phba);
1891 } else {
1892 /*
1893 * In case of transaction period to fast FCF failover,
1894 * do nothing when search to the end of the FCF table.
1895 */
1896 if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) ||
1897 (phba->fcf.fcf_flag & FCF_REDISC_PEND))
1898 return;
1899 /*
1900 * Otherwise, initial scan or post linkdown rescan,
1901 * register with the best FCF record found so far
1902 * through the FCF scanning process.
1903 */
1904
1905 /* mark the initial FCF discovery completed */
1906 spin_lock_irq(&phba->hbalock);
1907 phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
1908 spin_unlock_irq(&phba->hbalock);
1909 /*
1910 * Set up the initial registered FCF index for FLOGI
1911 * round robin FCF failover
1912 */
1913 phba->fcf.fcf_rr_init_indx =
1914 phba->fcf.current_rec.fcf_indx;
1915 /* Register to the new FCF record */
1916 lpfc_register_fcf(phba);
1917 }
1918 } else
1919 lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index);
1627 return; 1920 return;
1628 1921
1629out: 1922out:
@@ -1634,16 +1927,154 @@ out:
1634} 1927}
1635 1928
1636/** 1929/**
1930 * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf round robin read_fcf mbox cmpl hdler
1931 * @phba: pointer to lpfc hba data structure.
1932 * @mboxq: pointer to mailbox object.
1933 *
1934 * This is the callback function for FLOGI failure round robin FCF failover
1935 * read FCF record mailbox command from the eligible FCF record bmask for
1936 * performing the failover. If the FCF read back is not valid/available, it
1937 * fails through to retrying FLOGI to the currently registered FCF again.
1938 * Otherwise, if the FCF read back is valid and available, it will set the
1939 * newly read FCF record to the failover FCF record, unregister currently
1940 * registered FCF record, copy the failover FCF record to the current
1941 * FCF record, and then register the current FCF record before proceeding
1942 * to trying FLOGI on the new failover FCF.
1943 */
1944void
1945lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1946{
1947 struct fcf_record *new_fcf_record;
1948 uint32_t boot_flag, addr_mode;
1949 uint16_t next_fcf_index;
1950 uint16_t current_fcf_index;
1951 uint16_t vlan_id;
1952
1953 /* If link state is not up, stop the round robin failover process */
1954 if (phba->link_state < LPFC_LINK_UP) {
1955 spin_lock_irq(&phba->hbalock);
1956 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
1957 spin_unlock_irq(&phba->hbalock);
1958 lpfc_sli4_mbox_cmd_free(phba, mboxq);
1959 return;
1960 }
1961
1962 /* Parse the FCF record from the non-embedded mailbox command */
1963 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
1964 &next_fcf_index);
1965 if (!new_fcf_record) {
1966 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
1967 "2766 Mailbox command READ_FCF_RECORD "
1968 "failed to retrieve a FCF record.\n");
1969 goto out;
1970 }
1971
1972 /* Get the needed parameters from FCF record */
1973 lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
1974 &addr_mode, &vlan_id);
1975
1976 /* Log the FCF record information if turned on */
1977 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
1978 next_fcf_index);
1979
1980 /* Upload new FCF record to the failover FCF record */
1981 spin_lock_irq(&phba->hbalock);
1982 __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec,
1983 new_fcf_record, addr_mode, vlan_id,
1984 (boot_flag ? BOOT_ENABLE : 0));
1985 spin_unlock_irq(&phba->hbalock);
1986
1987 current_fcf_index = phba->fcf.current_rec.fcf_indx;
1988
1989 /* Unregister the current in-use FCF record */
1990 lpfc_unregister_fcf(phba);
1991
1992 /* Replace in-use record with the new record */
1993 memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec,
1994 sizeof(struct lpfc_fcf_rec));
1995
1996 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1997 "2783 FLOGI round robin FCF failover from FCF "
1998 "(index:x%x) to FCF (index:x%x).\n",
1999 current_fcf_index,
2000 bf_get(lpfc_fcf_record_fcf_index, new_fcf_record));
2001
2002out:
2003 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2004 lpfc_register_fcf(phba);
2005}
2006
2007/**
2008 * lpfc_mbx_cmpl_read_fcf_rec - read fcf completion handler.
2009 * @phba: pointer to lpfc hba data structure.
2010 * @mboxq: pointer to mailbox object.
2011 *
2012 * This is the callback function of read FCF record mailbox command for
2013 * updating the eligible FCF bmask for FLOGI failure round robin FCF
2014 * failover when a new FCF event happened. If the FCF read back is
2015 * valid/available and it passes the connection list check, it updates
2016 * the bmask for the eligible FCF record for round robin failover.
2017 */
2018void
2019lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2020{
2021 struct fcf_record *new_fcf_record;
2022 uint32_t boot_flag, addr_mode;
2023 uint16_t fcf_index, next_fcf_index;
2024 uint16_t vlan_id;
2025 int rc;
2026
2027 /* If link state is not up, no need to proceed */
2028 if (phba->link_state < LPFC_LINK_UP)
2029 goto out;
2030
2031 /* If FCF discovery period is over, no need to proceed */
2032 if (phba->fcf.fcf_flag & FCF_DISCOVERY)
2033 goto out;
2034
2035 /* Parse the FCF record from the non-embedded mailbox command */
2036 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
2037 &next_fcf_index);
2038 if (!new_fcf_record) {
2039 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2040 "2767 Mailbox command READ_FCF_RECORD "
2041 "failed to retrieve a FCF record.\n");
2042 goto out;
2043 }
2044
2045 /* Check the connection list for eligibility */
2046 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
2047 &addr_mode, &vlan_id);
2048
2049 /* Log the FCF record information if turned on */
2050 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
2051 next_fcf_index);
2052
2053 if (!rc)
2054 goto out;
2055
2056 /* Update the eligible FCF record index bmask */
2057 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
2058 rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index);
2059
2060out:
2061 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2062}
2063
2064/**
1637 * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command. 2065 * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command.
1638 * @phba: pointer to lpfc hba data structure. 2066 * @phba: pointer to lpfc hba data structure.
1639 * @mboxq: pointer to mailbox data structure. 2067 * @mboxq: pointer to mailbox data structure.
1640 * 2068 *
1641 * This function handles completion of init vpi mailbox command. 2069 * This function handles completion of init vpi mailbox command.
1642 */ 2070 */
1643static void 2071void
1644lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 2072lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1645{ 2073{
1646 struct lpfc_vport *vport = mboxq->vport; 2074 struct lpfc_vport *vport = mboxq->vport;
2075 struct lpfc_nodelist *ndlp;
2076 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2077
1647 if (mboxq->u.mb.mbxStatus) { 2078 if (mboxq->u.mb.mbxStatus) {
1648 lpfc_printf_vlog(vport, KERN_ERR, 2079 lpfc_printf_vlog(vport, KERN_ERR,
1649 LOG_MBOX, 2080 LOG_MBOX,
@@ -1653,20 +2084,67 @@ lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1653 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 2084 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1654 return; 2085 return;
1655 } 2086 }
2087 spin_lock_irq(shost->host_lock);
1656 vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI; 2088 vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
2089 spin_unlock_irq(shost->host_lock);
2090
2091 /* If this port is physical port or FDISC is done, do reg_vpi */
2092 if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) {
2093 ndlp = lpfc_findnode_did(vport, Fabric_DID);
2094 if (!ndlp)
2095 lpfc_printf_vlog(vport, KERN_ERR,
2096 LOG_DISCOVERY,
2097 "2731 Cannot find fabric "
2098 "controller node\n");
2099 else
2100 lpfc_register_new_vport(phba, vport, ndlp);
2101 mempool_free(mboxq, phba->mbox_mem_pool);
2102 return;
2103 }
1657 2104
1658 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) 2105 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
1659 lpfc_initial_fdisc(vport); 2106 lpfc_initial_fdisc(vport);
1660 else { 2107 else {
1661 lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP); 2108 lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
1662 lpfc_printf_vlog(vport, KERN_ERR, 2109 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1663 LOG_ELS, 2110 "2606 No NPIV Fabric support\n");
1664 "2606 No NPIV Fabric support\n");
1665 } 2111 }
2112 mempool_free(mboxq, phba->mbox_mem_pool);
1666 return; 2113 return;
1667} 2114}
1668 2115
1669/** 2116/**
2117 * lpfc_issue_init_vpi - Issue init_vpi mailbox command.
2118 * @vport: pointer to lpfc_vport data structure.
2119 *
2120 * This function issue a init_vpi mailbox command to initialize
2121 * VPI for the vport.
2122 */
2123void
2124lpfc_issue_init_vpi(struct lpfc_vport *vport)
2125{
2126 LPFC_MBOXQ_t *mboxq;
2127 int rc;
2128
2129 mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
2130 if (!mboxq) {
2131 lpfc_printf_vlog(vport, KERN_ERR,
2132 LOG_MBOX, "2607 Failed to allocate "
2133 "init_vpi mailbox\n");
2134 return;
2135 }
2136 lpfc_init_vpi(vport->phba, mboxq, vport->vpi);
2137 mboxq->vport = vport;
2138 mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
2139 rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT);
2140 if (rc == MBX_NOT_FINISHED) {
2141 lpfc_printf_vlog(vport, KERN_ERR,
2142 LOG_MBOX, "2608 Failed to issue init_vpi mailbox\n");
2143 mempool_free(mboxq, vport->phba->mbox_mem_pool);
2144 }
2145}
2146
2147/**
1670 * lpfc_start_fdiscs - send fdiscs for each vports on this port. 2148 * lpfc_start_fdiscs - send fdiscs for each vports on this port.
1671 * @phba: pointer to lpfc hba data structure. 2149 * @phba: pointer to lpfc hba data structure.
1672 * 2150 *
@@ -1678,8 +2156,6 @@ lpfc_start_fdiscs(struct lpfc_hba *phba)
1678{ 2156{
1679 struct lpfc_vport **vports; 2157 struct lpfc_vport **vports;
1680 int i; 2158 int i;
1681 LPFC_MBOXQ_t *mboxq;
1682 int rc;
1683 2159
1684 vports = lpfc_create_vport_work_array(phba); 2160 vports = lpfc_create_vport_work_array(phba);
1685 if (vports != NULL) { 2161 if (vports != NULL) {
@@ -1698,26 +2174,7 @@ lpfc_start_fdiscs(struct lpfc_hba *phba)
1698 continue; 2174 continue;
1699 } 2175 }
1700 if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) { 2176 if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
1701 mboxq = mempool_alloc(phba->mbox_mem_pool, 2177 lpfc_issue_init_vpi(vports[i]);
1702 GFP_KERNEL);
1703 if (!mboxq) {
1704 lpfc_printf_vlog(vports[i], KERN_ERR,
1705 LOG_MBOX, "2607 Failed to allocate "
1706 "init_vpi mailbox\n");
1707 continue;
1708 }
1709 lpfc_init_vpi(phba, mboxq, vports[i]->vpi);
1710 mboxq->vport = vports[i];
1711 mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
1712 rc = lpfc_sli_issue_mbox(phba, mboxq,
1713 MBX_NOWAIT);
1714 if (rc == MBX_NOT_FINISHED) {
1715 lpfc_printf_vlog(vports[i], KERN_ERR,
1716 LOG_MBOX, "2608 Failed to issue "
1717 "init_vpi mailbox\n");
1718 mempool_free(mboxq,
1719 phba->mbox_mem_pool);
1720 }
1721 continue; 2178 continue;
1722 } 2179 }
1723 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) 2180 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
@@ -1740,6 +2197,7 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1740{ 2197{
1741 struct lpfc_dmabuf *dmabuf = mboxq->context1; 2198 struct lpfc_dmabuf *dmabuf = mboxq->context1;
1742 struct lpfc_vport *vport = mboxq->vport; 2199 struct lpfc_vport *vport = mboxq->vport;
2200 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1743 2201
1744 if (mboxq->u.mb.mbxStatus) { 2202 if (mboxq->u.mb.mbxStatus) {
1745 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 2203 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
@@ -1756,8 +2214,12 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1756 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 2214 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1757 goto fail_free_mem; 2215 goto fail_free_mem;
1758 } 2216 }
1759 /* Mark the vport has registered with its VFI */ 2217 /* The VPI is implicitly registered when the VFI is registered */
1760 vport->vfi_state |= LPFC_VFI_REGISTERED; 2218 spin_lock_irq(shost->host_lock);
2219 vport->vpi_state |= LPFC_VPI_REGISTERED;
2220 vport->fc_flag |= FC_VFI_REGISTERED;
2221 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2222 spin_unlock_irq(shost->host_lock);
1761 2223
1762 if (vport->port_state == LPFC_FABRIC_CFG_LINK) { 2224 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
1763 lpfc_start_fdiscs(phba); 2225 lpfc_start_fdiscs(phba);
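
Several hunks in this patch wrap vport flag updates (fc_flag, vpi_state) in the SCSI host lock, as above. A minimal sketch of the idiom, assuming only what the surrounding code shows (the helper name is hypothetical):

	static void example_set_vport_flag(struct lpfc_vport *vport, uint32_t flag)
	{
		struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

		/* serialize the read-modify-write of the flag word */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= flag;
		spin_unlock_irq(shost->host_lock);
	}
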
@@ -1831,8 +2293,6 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
1831 int rc; 2293 int rc;
1832 struct fcf_record *fcf_record; 2294 struct fcf_record *fcf_record;
1833 2295
1834 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1835
1836 spin_lock_irq(&phba->hbalock); 2296 spin_lock_irq(&phba->hbalock);
1837 switch (la->UlnkSpeed) { 2297 switch (la->UlnkSpeed) {
1838 case LA_1GHZ_LINK: 2298 case LA_1GHZ_LINK:
@@ -1861,7 +2321,10 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
1861 if (phba->fc_topology == TOPOLOGY_LOOP) { 2321 if (phba->fc_topology == TOPOLOGY_LOOP) {
1862 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; 2322 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
1863 2323
1864 if (phba->cfg_enable_npiv) 2324 /* if npiv is enabled and this adapter supports npiv, log
2325 * a message that npiv is not supported in this topology
2326 */
2327 if (phba->cfg_enable_npiv && phba->max_vpi)
1865 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 2328 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
1866 "1309 Link Up Event npiv not supported in loop " 2329 "1309 Link Up Event npiv not supported in loop "
1867 "topology\n"); 2330 "topology\n");
@@ -1921,18 +2384,24 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
1921 spin_unlock_irq(&phba->hbalock); 2384 spin_unlock_irq(&phba->hbalock);
1922 2385
1923 lpfc_linkup(phba); 2386 lpfc_linkup(phba);
1924 if (sparam_mbox) { 2387 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1925 lpfc_read_sparam(phba, sparam_mbox, 0); 2388 if (!sparam_mbox)
1926 sparam_mbox->vport = vport; 2389 goto out;
1927 sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam; 2390
1928 rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT); 2391 rc = lpfc_read_sparam(phba, sparam_mbox, 0);
1929 if (rc == MBX_NOT_FINISHED) { 2392 if (rc) {
1930 mp = (struct lpfc_dmabuf *) sparam_mbox->context1; 2393 mempool_free(sparam_mbox, phba->mbox_mem_pool);
1931 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2394 goto out;
1932 kfree(mp); 2395 }
1933 mempool_free(sparam_mbox, phba->mbox_mem_pool); 2396 sparam_mbox->vport = vport;
1934 goto out; 2397 sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
1935 } 2398 rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
2399 if (rc == MBX_NOT_FINISHED) {
2400 mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
2401 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2402 kfree(mp);
2403 mempool_free(sparam_mbox, phba->mbox_mem_pool);
2404 goto out;
1936 } 2405 }
1937 2406
1938 if (!(phba->hba_flag & HBA_FCOE_SUPPORT)) { 2407 if (!(phba->hba_flag & HBA_FCOE_SUPPORT)) {
@@ -1955,7 +2424,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
1955 * is phase 1 implementation that support FCF index 0 and driver 2424 * is phase 1 implementation that support FCF index 0 and driver
1956 * defaults. 2425 * defaults.
1957 */ 2426 */
1958 if (phba->cfg_enable_fip == 0) { 2427 if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
1959 fcf_record = kzalloc(sizeof(struct fcf_record), 2428 fcf_record = kzalloc(sizeof(struct fcf_record),
1960 GFP_KERNEL); 2429 GFP_KERNEL);
1961 if (unlikely(!fcf_record)) { 2430 if (unlikely(!fcf_record)) {
@@ -1990,11 +2459,20 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
1990 spin_unlock_irq(&phba->hbalock); 2459 spin_unlock_irq(&phba->hbalock);
1991 return; 2460 return;
1992 } 2461 }
2462 /* This is the initial FCF discovery scan */
2463 phba->fcf.fcf_flag |= FCF_INIT_DISC;
1993 spin_unlock_irq(&phba->hbalock); 2464 spin_unlock_irq(&phba->hbalock);
1994 rc = lpfc_sli4_read_fcf_record(phba, 2465 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
1995 LPFC_FCOE_FCF_GET_FIRST); 2466 "2778 Start FCF table scan at linkup\n");
1996 if (rc) 2467
2468 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
2469 LPFC_FCOE_FCF_GET_FIRST);
2470 if (rc) {
2471 spin_lock_irq(&phba->hbalock);
2472 phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
2473 spin_unlock_irq(&phba->hbalock);
1997 goto out; 2474 goto out;
2475 }
1998 } 2476 }
1999 2477
2000 return; 2478 return;
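
The FCF_INIT_DISC handling above follows a set-then-undo pattern: mark the state before kicking off the asynchronous scan, and clear it again if the kickoff itself fails. A condensed sketch (hypothetical helper name):

	static int example_start_fcf_scan(struct lpfc_hba *phba)
	{
		int rc;

		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag |= FCF_INIT_DISC;	/* seen by completion path */
		spin_unlock_irq(&phba->hbalock);

		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
		if (rc) {
			/* the scan never started, so undo the state change */
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
			spin_unlock_irq(&phba->hbalock);
		}
		return rc;
	}
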
@@ -2080,11 +2558,14 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2080 } 2558 }
2081 2559
2082 phba->fc_eventTag = la->eventTag; 2560 phba->fc_eventTag = la->eventTag;
2561 spin_lock_irq(&phba->hbalock);
2083 if (la->mm) 2562 if (la->mm)
2084 phba->sli.sli_flag |= LPFC_MENLO_MAINT; 2563 phba->sli.sli_flag |= LPFC_MENLO_MAINT;
2085 else 2564 else
2086 phba->sli.sli_flag &= ~LPFC_MENLO_MAINT; 2565 phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
2566 spin_unlock_irq(&phba->hbalock);
2087 2567
2568 phba->link_events++;
2088 if (la->attType == AT_LINK_UP && (!la->mm)) { 2569 if (la->attType == AT_LINK_UP && (!la->mm)) {
2089 phba->fc_stat.LinkUp++; 2570 phba->fc_stat.LinkUp++;
2090 if (phba->link_flag & LS_LOOPBACK_MODE) { 2571 if (phba->link_flag & LS_LOOPBACK_MODE) {
@@ -2211,13 +2692,17 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2211 mb->mbxStatus); 2692 mb->mbxStatus);
2212 break; 2693 break;
2213 } 2694 }
2695 spin_lock_irq(shost->host_lock);
2696 vport->vpi_state &= ~LPFC_VPI_REGISTERED;
2697 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2698 spin_unlock_irq(shost->host_lock);
2214 vport->unreg_vpi_cmpl = VPORT_OK; 2699 vport->unreg_vpi_cmpl = VPORT_OK;
2215 mempool_free(pmb, phba->mbox_mem_pool); 2700 mempool_free(pmb, phba->mbox_mem_pool);
2216 /* 2701 /*
2217 * This shost reference might have been taken at the beginning of 2702 * This shost reference might have been taken at the beginning of
2218 * lpfc_vport_delete() 2703 * lpfc_vport_delete()
2219 */ 2704 */
2220 if (vport->load_flag & FC_UNLOADING) 2705 if ((vport->load_flag & FC_UNLOADING) && (vport != phba->pport))
2221 scsi_host_put(shost); 2706 scsi_host_put(shost);
2222} 2707}
2223 2708
@@ -2268,6 +2753,10 @@ lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2268 goto out; 2753 goto out;
2269 } 2754 }
2270 2755
2756 spin_lock_irq(shost->host_lock);
2757 vport->vpi_state |= LPFC_VPI_REGISTERED;
2758 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2759 spin_unlock_irq(shost->host_lock);
2271 vport->num_disc_nodes = 0; 2760 vport->num_disc_nodes = 0;
2272 /* go thru NPR list and issue ELS PLOGIs */ 2761 /* go thru NPR list and issue ELS PLOGIs */
2273 if (vport->fc_npr_cnt) 2762 if (vport->fc_npr_cnt)
@@ -3077,7 +3566,7 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
3077 struct lpfc_sli *psli; 3566 struct lpfc_sli *psli;
3078 struct lpfc_sli_ring *pring; 3567 struct lpfc_sli_ring *pring;
3079 struct lpfc_iocbq *iocb, *next_iocb; 3568 struct lpfc_iocbq *iocb, *next_iocb;
3080 uint32_t rpi, i; 3569 uint32_t i;
3081 3570
3082 lpfc_fabric_abort_nport(ndlp); 3571 lpfc_fabric_abort_nport(ndlp);
3083 3572
@@ -3086,7 +3575,6 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
3086 * by firmware with a no rpi error. 3575 * by firmware with a no rpi error.
3087 */ 3576 */
3088 psli = &phba->sli; 3577 psli = &phba->sli;
3089 rpi = ndlp->nlp_rpi;
3090 if (ndlp->nlp_flag & NLP_RPI_VALID) { 3578 if (ndlp->nlp_flag & NLP_RPI_VALID) {
3091 /* Now process each ring */ 3579 /* Now process each ring */
3092 for (i = 0; i < psli->num_rings; i++) { 3580 for (i = 0; i < psli->num_rings; i++) {
@@ -3154,6 +3642,38 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
3154 return 0; 3642 return 0;
3155} 3643}
3156 3644
3645/**
3646 * lpfc_unreg_hba_rpis - Unregister rpis registered to the hba.
3647 * @phba: pointer to lpfc hba data structure.
3648 *
3649 * This routine is invoked to unregister all the currently registered RPIs
3650 * with the HBA.
3651 **/
3652void
3653lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
3654{
3655 struct lpfc_vport **vports;
3656 struct lpfc_nodelist *ndlp;
3657 struct Scsi_Host *shost;
3658 int i;
3659
3660 vports = lpfc_create_vport_work_array(phba);
3661 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3662 shost = lpfc_shost_from_vport(vports[i]);
3663 spin_lock_irq(shost->host_lock);
3664 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
3665 if (ndlp->nlp_flag & NLP_RPI_VALID) {
3666 /* The mempool_alloc might sleep */
3667 spin_unlock_irq(shost->host_lock);
3668 lpfc_unreg_rpi(vports[i], ndlp);
3669 spin_lock_irq(shost->host_lock);
3670 }
3671 }
3672 spin_unlock_irq(shost->host_lock);
3673 }
3674 lpfc_destroy_vport_work_array(phba, vports);
3675}
3676
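
The unlock/relock pair inside lpfc_unreg_hba_rpis() exists because lpfc_unreg_rpi() allocates a mailbox with GFP_KERNEL, which may sleep, and sleeping while holding a spinlock with interrupts disabled is a bug. Reduced to its essentials the pattern is (sketch only; it assumes the walk tolerates the list changing while the lock is dropped):

	spin_lock_irq(shost->host_lock);
	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (!(ndlp->nlp_flag & NLP_RPI_VALID))
			continue;
		spin_unlock_irq(shost->host_lock);	/* about to sleep */
		lpfc_unreg_rpi(vport, ndlp);		/* GFP_KERNEL inside */
		spin_lock_irq(shost->host_lock);
	}
	spin_unlock_irq(shost->host_lock);
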
3157void 3677void
3158lpfc_unreg_all_rpis(struct lpfc_vport *vport) 3678lpfc_unreg_all_rpis(struct lpfc_vport *vport)
3159{ 3679{
@@ -4322,6 +4842,14 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
4322 ret = 1; 4842 ret = 1;
4323 spin_unlock_irq(shost->host_lock); 4843 spin_unlock_irq(shost->host_lock);
4324 goto out; 4844 goto out;
4845 } else {
4846 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
4847 "2624 RPI %x DID %x flg %x still "
4848 "logged in\n",
4849 ndlp->nlp_rpi, ndlp->nlp_DID,
4850 ndlp->nlp_flag);
4851 if (ndlp->nlp_flag & NLP_RPI_VALID)
4852 ret = 1;
4325 } 4853 }
4326 } 4854 }
4327 spin_unlock_irq(shost->host_lock); 4855 spin_unlock_irq(shost->host_lock);
@@ -4376,120 +4904,231 @@ lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
4376} 4904}
4377 4905
4378/** 4906/**
4379 * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected. 4907 * lpfc_unregister_fcf_prep - Unregister fcf record preparation
4380 * @phba: Pointer to hba context object. 4908 * @phba: Pointer to hba context object.
4381 * 4909 *
 4382 * This function check if there are any connected remote port for the FCF and 4910 * This function prepares the HBA for unregistering the currently registered
 4383 * if all the devices are disconnected, this function unregister FCFI. 4911 * FCF from the HBA. It unregisters, in order, the RPIs, the VPIs, and
 4384 * This function also tries to use another FCF for discovery. 4912 * the VFI.
4385 */ 4913 */
4386void 4914int
4387lpfc_unregister_unused_fcf(struct lpfc_hba *phba) 4915lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
4388{ 4916{
4389 LPFC_MBOXQ_t *mbox; 4917 LPFC_MBOXQ_t *mbox;
4390 int rc;
4391 struct lpfc_vport **vports; 4918 struct lpfc_vport **vports;
4392 int i; 4919 struct lpfc_nodelist *ndlp;
4393 4920 struct Scsi_Host *shost;
4394 spin_lock_irq(&phba->hbalock); 4921 int i, rc;
4395 /*
4396 * If HBA is not running in FIP mode or
4397 * If HBA does not support FCoE or
4398 * If FCF is not registered.
4399 * do nothing.
4400 */
4401 if (!(phba->hba_flag & HBA_FCOE_SUPPORT) ||
4402 !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
4403 (phba->cfg_enable_fip == 0)) {
4404 spin_unlock_irq(&phba->hbalock);
4405 return;
4406 }
4407 spin_unlock_irq(&phba->hbalock);
4408 4922
4923 /* Unregister RPIs */
4409 if (lpfc_fcf_inuse(phba)) 4924 if (lpfc_fcf_inuse(phba))
4410 return; 4925 lpfc_unreg_hba_rpis(phba);
4411 4926
4927 /* At this point, all discovery is aborted */
4928 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4412 4929
4413 /* Unregister VPIs */ 4930 /* Unregister VPIs */
4414 vports = lpfc_create_vport_work_array(phba); 4931 vports = lpfc_create_vport_work_array(phba);
4415 if (vports && 4932 if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
4416 (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
4417 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 4933 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4934 /* Stop FLOGI/FDISC retries */
4935 ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
4936 if (ndlp)
4937 lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
4418 lpfc_mbx_unreg_vpi(vports[i]); 4938 lpfc_mbx_unreg_vpi(vports[i]);
4419 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 4939 shost = lpfc_shost_from_vport(vports[i]);
4420 vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED; 4940 spin_lock_irq(shost->host_lock);
4941 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
4942 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
4943 spin_unlock_irq(shost->host_lock);
4421 } 4944 }
4422 lpfc_destroy_vport_work_array(phba, vports); 4945 lpfc_destroy_vport_work_array(phba, vports);
4423 4946
4947 /* Cleanup any outstanding ELS commands */
4948 lpfc_els_flush_all_cmd(phba);
4949
4424 /* Unregister VFI */ 4950 /* Unregister VFI */
4425 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4951 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4426 if (!mbox) { 4952 if (!mbox) {
4427 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, 4953 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4428 "2556 UNREG_VFI mbox allocation failed" 4954 "2556 UNREG_VFI mbox allocation failed"
4429 "HBA state x%x\n", 4955 "HBA state x%x\n", phba->pport->port_state);
4430 phba->pport->port_state); 4956 return -ENOMEM;
4431 return;
4432 } 4957 }
4433 4958
4434 lpfc_unreg_vfi(mbox, phba->pport->vfi); 4959 lpfc_unreg_vfi(mbox, phba->pport);
4435 mbox->vport = phba->pport; 4960 mbox->vport = phba->pport;
4436 mbox->mbox_cmpl = lpfc_unregister_vfi_cmpl; 4961 mbox->mbox_cmpl = lpfc_unregister_vfi_cmpl;
4437 4962
4438 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 4963 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
4439 if (rc == MBX_NOT_FINISHED) { 4964 if (rc == MBX_NOT_FINISHED) {
4440 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, 4965 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4441 "2557 UNREG_VFI issue mbox failed rc x%x " 4966 "2557 UNREG_VFI issue mbox failed rc x%x "
4442 "HBA state x%x\n", 4967 "HBA state x%x\n",
4443 rc, phba->pport->port_state); 4968 rc, phba->pport->port_state);
4444 mempool_free(mbox, phba->mbox_mem_pool); 4969 mempool_free(mbox, phba->mbox_mem_pool);
4445 return; 4970 return -EIO;
4446 } 4971 }
4447 4972
4448 /* Unregister FCF */ 4973 shost = lpfc_shost_from_vport(phba->pport);
4974 spin_lock_irq(shost->host_lock);
4975 phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
4976 spin_unlock_irq(shost->host_lock);
4977
4978 return 0;
4979}
4980
4981/**
4982 * lpfc_sli4_unregister_fcf - Unregister currently registered FCF record
4983 * @phba: Pointer to hba context object.
4984 *
 4985 * This function issues a synchronous unregister FCF mailbox command to the HBA to
4986 * unregister the currently registered FCF record. The driver does not reset
4987 * the driver FCF usage state flags.
4988 *
 4989 * Return 0 if successfully issued, non-zero otherwise.
4990 */
4991int
4992lpfc_sli4_unregister_fcf(struct lpfc_hba *phba)
4993{
4994 LPFC_MBOXQ_t *mbox;
4995 int rc;
4996
4449 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4997 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4450 if (!mbox) { 4998 if (!mbox) {
4451 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, 4999 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4452 "2551 UNREG_FCFI mbox allocation failed" 5000 "2551 UNREG_FCFI mbox allocation failed"
4453 "HBA state x%x\n", 5001 "HBA state x%x\n", phba->pport->port_state);
4454 phba->pport->port_state); 5002 return -ENOMEM;
4455 return;
4456 } 5003 }
4457
4458 lpfc_unreg_fcfi(mbox, phba->fcf.fcfi); 5004 lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
4459 mbox->vport = phba->pport; 5005 mbox->vport = phba->pport;
4460 mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl; 5006 mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
4461 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 5007 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
4462 5008
4463 if (rc == MBX_NOT_FINISHED) { 5009 if (rc == MBX_NOT_FINISHED) {
4464 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, 5010 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4465 "2552 UNREG_FCFI issue mbox failed rc x%x " 5011 "2552 Unregister FCFI command failed rc x%x "
4466 "HBA state x%x\n", 5012 "HBA state x%x\n",
4467 rc, phba->pport->port_state); 5013 rc, phba->pport->port_state);
4468 mempool_free(mbox, phba->mbox_mem_pool); 5014 return -EINVAL;
5015 }
5016 return 0;
5017}
5018
5019/**
5020 * lpfc_unregister_fcf_rescan - Unregister currently registered fcf and rescan
5021 * @phba: Pointer to hba context object.
5022 *
 5023 * This function unregisters the currently registered FCF. It also
 5024 * tries to find another FCF for discovery by rescanning the HBA FCF table.
5025 */
5026void
5027lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
5028{
5029 int rc;
5030
5031 /* Preparation for unregistering fcf */
5032 rc = lpfc_unregister_fcf_prep(phba);
5033 if (rc) {
5034 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
5035 "2748 Failed to prepare for unregistering "
5036 "HBA's FCF record: rc=%d\n", rc);
4469 return; 5037 return;
4470 } 5038 }
4471 5039
4472 spin_lock_irq(&phba->hbalock); 5040 /* Now, unregister FCF record and reset HBA FCF state */
4473 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_REGISTERED | 5041 rc = lpfc_sli4_unregister_fcf(phba);
4474 FCF_DISCOVERED | FCF_BOOT_ENABLE | FCF_IN_USE | 5042 if (rc)
4475 FCF_VALID_VLAN); 5043 return;
4476 spin_unlock_irq(&phba->hbalock); 5044 /* Reset HBA FCF states after successful unregister FCF */
5045 phba->fcf.fcf_flag = 0;
5046 phba->fcf.current_rec.flag = 0;
4477 5047
4478 /* 5048 /*
4479 * If driver is not unloading, check if there is any other 5049 * If driver is not unloading, check if there is any other
4480 * FCF record that can be used for discovery. 5050 * FCF record that can be used for discovery.
4481 */ 5051 */
4482 if ((phba->pport->load_flag & FC_UNLOADING) || 5052 if ((phba->pport->load_flag & FC_UNLOADING) ||
4483 (phba->link_state < LPFC_LINK_UP)) 5053 (phba->link_state < LPFC_LINK_UP))
4484 return; 5054 return;
4485 5055
4486 rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); 5056 /* This is considered the initial FCF discovery scan */
5057 spin_lock_irq(&phba->hbalock);
5058 phba->fcf.fcf_flag |= FCF_INIT_DISC;
5059 spin_unlock_irq(&phba->hbalock);
5060 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
4487 5061
4488 if (rc) 5062 if (rc) {
5063 spin_lock_irq(&phba->hbalock);
5064 phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
5065 spin_unlock_irq(&phba->hbalock);
4489 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, 5066 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4490 "2553 lpfc_unregister_unused_fcf failed to read FCF" 5067 "2553 lpfc_unregister_unused_fcf failed "
4491 " record HBA state x%x\n", 5068 "to read FCF record HBA state x%x\n",
4492 phba->pport->port_state); 5069 phba->pport->port_state);
5070 }
5071}
5072
5073/**
5074 * lpfc_unregister_fcf - Unregister the currently registered fcf record
5075 * @phba: Pointer to hba context object.
5076 *
 5077 * This function just unregisters the currently registered FCF. It does not
5078 * try to find another FCF for discovery.
5079 */
5080void
5081lpfc_unregister_fcf(struct lpfc_hba *phba)
5082{
5083 int rc;
5084
5085 /* Preparation for unregistering fcf */
5086 rc = lpfc_unregister_fcf_prep(phba);
5087 if (rc) {
5088 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
5089 "2749 Failed to prepare for unregistering "
5090 "HBA's FCF record: rc=%d\n", rc);
5091 return;
5092 }
5093
5094 /* Now, unregister FCF record and reset HBA FCF state */
5095 rc = lpfc_sli4_unregister_fcf(phba);
5096 if (rc)
5097 return;
5098 /* Set proper HBA FCF states after successful unregister FCF */
5099 spin_lock_irq(&phba->hbalock);
5100 phba->fcf.fcf_flag &= ~FCF_REGISTERED;
5101 spin_unlock_irq(&phba->hbalock);
5102}
5103
5104/**
5105 * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
5106 * @phba: Pointer to hba context object.
5107 *
 5108 * This function checks if there are any connected remote ports for the FCF
 5109 * and, if all the devices are disconnected, unregisters the FCFI.
 5110 * It also tries to use another FCF for discovery.
5111 */
5112void
5113lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
5114{
5115 /*
5116 * If HBA is not running in FIP mode or if HBA does not support
5117 * FCoE or if FCF is not registered, do nothing.
5118 */
5119 spin_lock_irq(&phba->hbalock);
5120 if (!(phba->hba_flag & HBA_FCOE_SUPPORT) ||
5121 !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
5122 !(phba->hba_flag & HBA_FIP_SUPPORT)) {
5123 spin_unlock_irq(&phba->hbalock);
5124 return;
5125 }
5126 spin_unlock_irq(&phba->hbalock);
5127
5128 if (lpfc_fcf_inuse(phba))
5129 return;
5130
5131 lpfc_unregister_fcf_rescan(phba);
4493} 5132}
4494 5133
4495/** 5134/**
@@ -4512,8 +5151,10 @@ lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
4512 5151
4513 /* Free the current connect table */ 5152 /* Free the current connect table */
4514 list_for_each_entry_safe(conn_entry, next_conn_entry, 5153 list_for_each_entry_safe(conn_entry, next_conn_entry,
4515 &phba->fcf_conn_rec_list, list) 5154 &phba->fcf_conn_rec_list, list) {
5155 list_del_init(&conn_entry->list);
4516 kfree(conn_entry); 5156 kfree(conn_entry);
5157 }
4517 5158
4518 conn_hdr = (struct lpfc_fcf_conn_hdr *) buff; 5159 conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
4519 record_count = conn_hdr->length * sizeof(uint32_t)/ 5160 record_count = conn_hdr->length * sizeof(uint32_t)/
@@ -4569,14 +5210,6 @@ lpfc_read_fcoe_param(struct lpfc_hba *phba,
4569 (fcoe_param_hdr->length != FCOE_PARAM_LENGTH)) 5210 (fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
4570 return; 5211 return;
4571 5212
4572 if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) ==
4573 FIPP_MODE_ON)
4574 phba->cfg_enable_fip = 1;
4575
4576 if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) ==
4577 FIPP_MODE_OFF)
4578 phba->cfg_enable_fip = 0;
4579
4580 if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) { 5213 if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
4581 phba->valid_vlan = 1; 5214 phba->valid_vlan = 1;
4582 phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) & 5215 phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index ccb26724dc53..89ff7c09e298 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2009 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2010 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -1124,21 +1124,6 @@ typedef struct {
1124/* Number of 4-byte words in an IOCB. */ 1124/* Number of 4-byte words in an IOCB. */
1125#define IOCB_WORD_SZ 8 1125#define IOCB_WORD_SZ 8
1126 1126
1127/* defines for type field in fc header */
1128#define FC_ELS_DATA 0x1
1129#define FC_LLC_SNAP 0x5
1130#define FC_FCP_DATA 0x8
1131#define FC_COMMON_TRANSPORT_ULP 0x20
1132
1133/* defines for rctl field in fc header */
1134#define FC_DEV_DATA 0x0
1135#define FC_UNSOL_CTL 0x2
1136#define FC_SOL_CTL 0x3
1137#define FC_UNSOL_DATA 0x4
1138#define FC_FCP_CMND 0x6
1139#define FC_ELS_REQ 0x22
1140#define FC_ELS_RSP 0x23
1141
1142/* network headers for Dfctl field */ 1127/* network headers for Dfctl field */
1143#define FC_NET_HDR 0x20 1128#define FC_NET_HDR 0x20
1144 1129
@@ -1183,6 +1168,8 @@ typedef struct {
1183#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12 1168#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12
1184#define PCI_VENDOR_ID_SERVERENGINE 0x19a2 1169#define PCI_VENDOR_ID_SERVERENGINE 0x19a2
1185#define PCI_DEVICE_ID_TIGERSHARK 0x0704 1170#define PCI_DEVICE_ID_TIGERSHARK 0x0704
1171#define PCI_DEVICE_ID_TOMCAT 0x0714
1172#define PCI_DEVICE_ID_FALCON 0xf180
1186 1173
1187#define JEDEC_ID_ADDRESS 0x0080001c 1174#define JEDEC_ID_ADDRESS 0x0080001c
1188#define FIREFLY_JEDEC_ID 0x1ACC 1175#define FIREFLY_JEDEC_ID 0x1ACC
@@ -1359,6 +1346,9 @@ typedef struct { /* FireFly BIU registers */
1359#define MBX_HEARTBEAT 0x31 1346#define MBX_HEARTBEAT 0x31
1360#define MBX_WRITE_VPARMS 0x32 1347#define MBX_WRITE_VPARMS 0x32
1361#define MBX_ASYNCEVT_ENABLE 0x33 1348#define MBX_ASYNCEVT_ENABLE 0x33
1349#define MBX_READ_EVENT_LOG_STATUS 0x37
1350#define MBX_READ_EVENT_LOG 0x38
1351#define MBX_WRITE_EVENT_LOG 0x39
1362 1352
1363#define MBX_PORT_CAPABILITIES 0x3B 1353#define MBX_PORT_CAPABILITIES 0x3B
1364#define MBX_PORT_IOV_CONTROL 0x3C 1354#define MBX_PORT_IOV_CONTROL 0x3C
@@ -1444,6 +1434,7 @@ typedef struct { /* FireFly BIU registers */
1444#define CMD_ABORT_MXRI64_CN 0x8C 1434#define CMD_ABORT_MXRI64_CN 0x8C
1445#define CMD_RCV_ELS_REQ64_CX 0x8D 1435#define CMD_RCV_ELS_REQ64_CX 0x8D
1446#define CMD_XMIT_ELS_RSP64_CX 0x95 1436#define CMD_XMIT_ELS_RSP64_CX 0x95
1437#define CMD_XMIT_BLS_RSP64_CX 0x97
1447#define CMD_FCP_IWRITE64_CR 0x98 1438#define CMD_FCP_IWRITE64_CR 0x98
1448#define CMD_FCP_IWRITE64_CX 0x99 1439#define CMD_FCP_IWRITE64_CX 0x99
1449#define CMD_FCP_IREAD64_CR 0x9A 1440#define CMD_FCP_IREAD64_CR 0x9A
@@ -1477,17 +1468,13 @@ typedef struct { /* FireFly BIU registers */
1477#define CMD_IOCB_LOGENTRY_CN 0x94 1468#define CMD_IOCB_LOGENTRY_CN 0x94
1478#define CMD_IOCB_LOGENTRY_ASYNC_CN 0x96 1469#define CMD_IOCB_LOGENTRY_ASYNC_CN 0x96
1479 1470
1480/* Unhandled Data Security SLI Commands */ 1471/* Data Security SLI Commands */
1481#define DSSCMD_IWRITE64_CR 0xD8 1472#define DSSCMD_IWRITE64_CR 0xF8
1482#define DSSCMD_IWRITE64_CX 0xD9 1473#define DSSCMD_IWRITE64_CX 0xF9
1483#define DSSCMD_IREAD64_CR 0xDA 1474#define DSSCMD_IREAD64_CR 0xFA
1484#define DSSCMD_IREAD64_CX 0xDB 1475#define DSSCMD_IREAD64_CX 0xFB
1485#define DSSCMD_INVALIDATE_DEK 0xDC 1476
1486#define DSSCMD_SET_KEK 0xDD 1477#define CMD_MAX_IOCB_CMD 0xFB
1487#define DSSCMD_GET_KEK_ID 0xDE
1488#define DSSCMD_GEN_XFER 0xDF
1489
1490#define CMD_MAX_IOCB_CMD 0xE6
1491#define CMD_IOCB_MASK 0xff 1478#define CMD_IOCB_MASK 0xff
1492 1479
1493#define MAX_MSG_DATA 28 /* max msg data in CMD_ADAPTER_MSG 1480#define MAX_MSG_DATA 28 /* max msg data in CMD_ADAPTER_MSG
@@ -2306,8 +2293,7 @@ typedef struct {
2306 uint32_t rsvd1; 2293 uint32_t rsvd1;
2307 uint32_t rsvd2:8; 2294 uint32_t rsvd2:8;
2308 uint32_t sid:24; 2295 uint32_t sid:24;
2309 uint32_t rsvd3; 2296 uint32_t wwn[2];
2310 uint32_t rsvd4;
2311 uint32_t rsvd5; 2297 uint32_t rsvd5;
2312 uint16_t vfi; 2298 uint16_t vfi;
2313 uint16_t vpi; 2299 uint16_t vpi;
@@ -2315,8 +2301,7 @@ typedef struct {
2315 uint32_t rsvd1; 2301 uint32_t rsvd1;
2316 uint32_t sid:24; 2302 uint32_t sid:24;
2317 uint32_t rsvd2:8; 2303 uint32_t rsvd2:8;
2318 uint32_t rsvd3; 2304 uint32_t wwn[2];
2319 uint32_t rsvd4;
2320 uint32_t rsvd5; 2305 uint32_t rsvd5;
2321 uint16_t vpi; 2306 uint16_t vpi;
2322 uint16_t vfi; 2307 uint16_t vfi;
@@ -2326,7 +2311,13 @@ typedef struct {
2326/* Structure for MB Command UNREG_VPI (0x97) */ 2311/* Structure for MB Command UNREG_VPI (0x97) */
2327typedef struct { 2312typedef struct {
2328 uint32_t rsvd1; 2313 uint32_t rsvd1;
2329 uint32_t rsvd2; 2314#ifdef __BIG_ENDIAN_BITFIELD
2315 uint16_t rsvd2;
2316 uint16_t sli4_vpi;
2317#else /* __LITTLE_ENDIAN */
2318 uint16_t sli4_vpi;
2319 uint16_t rsvd2;
2320#endif
2330 uint32_t rsvd3; 2321 uint32_t rsvd3;
2331 uint32_t rsvd4; 2322 uint32_t rsvd4;
2332 uint32_t rsvd5; 2323 uint32_t rsvd5;
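
The new #ifdef in the UNREG_VPI structure mirrors the two 16-bit halves so that sli4_vpi presumably lands in the same bit positions of the 32-bit mailbox word on either host endianness. Annotated sketch of the same layout (the bit positions are an assumption inferred from the mirrored ordering, not stated in the patch):

	#ifdef __BIG_ENDIAN_BITFIELD
		uint16_t rsvd2;		/* bits 31..16 of the word */
		uint16_t sli4_vpi;	/* bits 15..0  of the word */
	#else	/* __LITTLE_ENDIAN */
		uint16_t sli4_vpi;	/* bits 15..0  of the word */
		uint16_t rsvd2;		/* bits 31..16 of the word */
	#endif
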
@@ -3547,7 +3538,7 @@ typedef struct _IOCB { /* IOCB structure */
3547 ASYNCSTAT_FIELDS asyncstat; /* async_status iocb */ 3538 ASYNCSTAT_FIELDS asyncstat; /* async_status iocb */
3548 QUE_XRI64_CX_FIELDS quexri64cx; /* que_xri64_cx fields */ 3539 QUE_XRI64_CX_FIELDS quexri64cx; /* que_xri64_cx fields */
3549 struct rcv_seq64 rcvseq64; /* RCV_SEQ64 and RCV_CONT64 */ 3540 struct rcv_seq64 rcvseq64; /* RCV_SEQ64 and RCV_CONT64 */
3550 3541 struct sli4_bls_acc bls_acc; /* UNSOL ABTS BLS_ACC params */
3551 uint32_t ulpWord[IOCB_WORD_SZ - 2]; /* generic 6 'words' */ 3542 uint32_t ulpWord[IOCB_WORD_SZ - 2]; /* generic 6 'words' */
3552 } un; 3543 } un;
3553 union { 3544 union {
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 3689eee04535..820015fbc4d6 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -52,35 +52,37 @@ struct dma_address {
52 uint32_t addr_hi; 52 uint32_t addr_hi;
53}; 53};
54 54
55#define LPFC_SLIREV_CONF_WORD 0x58
56struct lpfc_sli_intf { 55struct lpfc_sli_intf {
57 uint32_t word0; 56 uint32_t word0;
58#define lpfc_sli_intf_iftype_MASK 0x00000007 57#define lpfc_sli_intf_valid_SHIFT 29
59#define lpfc_sli_intf_iftype_SHIFT 0 58#define lpfc_sli_intf_valid_MASK 0x00000007
60#define lpfc_sli_intf_iftype_WORD word0 59#define lpfc_sli_intf_valid_WORD word0
61#define lpfc_sli_intf_rev_MASK 0x0000000f
62#define lpfc_sli_intf_rev_SHIFT 4
63#define lpfc_sli_intf_rev_WORD word0
64#define LPFC_SLIREV_CONF_SLI4 4
65#define lpfc_sli_intf_family_MASK 0x000000ff
66#define lpfc_sli_intf_family_SHIFT 8
67#define lpfc_sli_intf_family_WORD word0
68#define lpfc_sli_intf_feat1_MASK 0x000000ff
69#define lpfc_sli_intf_feat1_SHIFT 16
70#define lpfc_sli_intf_feat1_WORD word0
71#define lpfc_sli_intf_feat2_MASK 0x0000001f
72#define lpfc_sli_intf_feat2_SHIFT 24
73#define lpfc_sli_intf_feat2_WORD word0
74#define lpfc_sli_intf_valid_MASK 0x00000007
75#define lpfc_sli_intf_valid_SHIFT 29
76#define lpfc_sli_intf_valid_WORD word0
77#define LPFC_SLI_INTF_VALID 6 60#define LPFC_SLI_INTF_VALID 6
61#define lpfc_sli_intf_featurelevel2_SHIFT 24
62#define lpfc_sli_intf_featurelevel2_MASK 0x0000001F
63#define lpfc_sli_intf_featurelevel2_WORD word0
64#define lpfc_sli_intf_featurelevel1_SHIFT 16
65#define lpfc_sli_intf_featurelevel1_MASK 0x000000FF
66#define lpfc_sli_intf_featurelevel1_WORD word0
67#define LPFC_SLI_INTF_FEATURELEVEL1_1 1
68#define LPFC_SLI_INTF_FEATURELEVEL1_2 2
69#define lpfc_sli_intf_sli_family_SHIFT 8
70#define lpfc_sli_intf_sli_family_MASK 0x000000FF
71#define lpfc_sli_intf_sli_family_WORD word0
72#define LPFC_SLI_INTF_FAMILY_BE2 0
73#define LPFC_SLI_INTF_FAMILY_BE3 1
74#define lpfc_sli_intf_slirev_SHIFT 4
75#define lpfc_sli_intf_slirev_MASK 0x0000000F
76#define lpfc_sli_intf_slirev_WORD word0
77#define LPFC_SLI_INTF_REV_SLI3 3
78#define LPFC_SLI_INTF_REV_SLI4 4
79#define lpfc_sli_intf_if_type_SHIFT 0
80#define lpfc_sli_intf_if_type_MASK 0x00000007
81#define lpfc_sli_intf_if_type_WORD word0
82#define LPFC_SLI_INTF_IF_TYPE_0 0
83#define LPFC_SLI_INTF_IF_TYPE_1 1
78}; 84};
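
All the _SHIFT/_MASK/_WORD triplets in this header are consumed by the driver's bf_get()/bf_set() accessor macros. A sketch of decoding the reworked SLI_INTF word with them (the helper name is illustrative only):

	static bool example_sli_intf_is_sli4(struct lpfc_sli_intf *intf)
	{
		/* contents are only meaningful when the valid code matches */
		if (bf_get(lpfc_sli_intf_valid, intf) != LPFC_SLI_INTF_VALID)
			return false;
		return bf_get(lpfc_sli_intf_slirev, intf) ==
						LPFC_SLI_INTF_REV_SLI4;
	}
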
79 85
80#define LPFC_SLI4_BAR0 1
81#define LPFC_SLI4_BAR1 2
82#define LPFC_SLI4_BAR2 4
83
84#define LPFC_SLI4_MBX_EMBED true 86#define LPFC_SLI4_MBX_EMBED true
85#define LPFC_SLI4_MBX_NEMBED false 87#define LPFC_SLI4_MBX_NEMBED false
86 88
@@ -161,6 +163,9 @@ struct lpfc_sli_intf {
161#define LPFC_FP_DEF_IMAX 10000 163#define LPFC_FP_DEF_IMAX 10000
162#define LPFC_SP_DEF_IMAX 10000 164#define LPFC_SP_DEF_IMAX 10000
163 165
166/* PORT_CAPABILITIES constants. */
167#define LPFC_MAX_SUPPORTED_PAGES 8
168
164struct ulp_bde64 { 169struct ulp_bde64 {
165 union ULP_BDE_TUS { 170 union ULP_BDE_TUS {
166 uint32_t w; 171 uint32_t w;
@@ -194,6 +199,26 @@ struct lpfc_sli4_flags {
194#define lpfc_fip_flag_WORD word0 199#define lpfc_fip_flag_WORD word0
195}; 200};
196 201
202struct sli4_bls_acc {
203 uint32_t word0_rsvd; /* Word0 must be reserved */
204 uint32_t word1;
205#define lpfc_abts_orig_SHIFT 0
206#define lpfc_abts_orig_MASK 0x00000001
207#define lpfc_abts_orig_WORD word1
208#define LPFC_ABTS_UNSOL_RSP 1
209#define LPFC_ABTS_UNSOL_INT 0
210 uint32_t word2;
211#define lpfc_abts_rxid_SHIFT 0
212#define lpfc_abts_rxid_MASK 0x0000FFFF
213#define lpfc_abts_rxid_WORD word2
214#define lpfc_abts_oxid_SHIFT 16
215#define lpfc_abts_oxid_MASK 0x0000FFFF
216#define lpfc_abts_oxid_WORD word2
217 uint32_t word3;
218 uint32_t word4;
219 uint32_t word5_rsvd; /* Word5 must be reserved */
220};
221
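
A sketch of filling the new sli4_bls_acc parameters for an unsolicited ABTS response via bf_set(); the helper name is hypothetical, and oxid/rxid would come from the aborted exchange:

	static void example_fill_bls_acc(struct sli4_bls_acc *acc,
					 uint16_t oxid, uint16_t rxid)
	{
		memset(acc, 0, sizeof(*acc));	/* words 0 and 5 stay reserved */
		bf_set(lpfc_abts_oxid, acc, oxid);
		bf_set(lpfc_abts_rxid, acc, rxid);
		bf_set(lpfc_abts_orig, acc, LPFC_ABTS_UNSOL_RSP);
	}
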
197/* event queue entry structure */ 222/* event queue entry structure */
198struct lpfc_eqe { 223struct lpfc_eqe {
199 uint32_t word0; 224 uint32_t word0;
@@ -425,7 +450,7 @@ struct lpfc_wqe_generic{
425#define lpfc_wqe_gen_status_MASK 0x0000000F 450#define lpfc_wqe_gen_status_MASK 0x0000000F
426#define lpfc_wqe_gen_status_WORD word7 451#define lpfc_wqe_gen_status_WORD word7
427#define lpfc_wqe_gen_ct_SHIFT 2 452#define lpfc_wqe_gen_ct_SHIFT 2
428#define lpfc_wqe_gen_ct_MASK 0x00000007 453#define lpfc_wqe_gen_ct_MASK 0x00000003
429#define lpfc_wqe_gen_ct_WORD word7 454#define lpfc_wqe_gen_ct_WORD word7
430 uint32_t abort_tag; 455 uint32_t abort_tag;
431 uint32_t word9; 456 uint32_t word9;
@@ -453,6 +478,13 @@ struct lpfc_wqe_generic{
453#define lpfc_wqe_gen_wqec_SHIFT 7 478#define lpfc_wqe_gen_wqec_SHIFT 7
454#define lpfc_wqe_gen_wqec_MASK 0x00000001 479#define lpfc_wqe_gen_wqec_MASK 0x00000001
455#define lpfc_wqe_gen_wqec_WORD word11 480#define lpfc_wqe_gen_wqec_WORD word11
481#define ELS_ID_FLOGI 3
482#define ELS_ID_FDISC 2
483#define ELS_ID_LOGO 1
484#define ELS_ID_DEFAULT 0
485#define lpfc_wqe_gen_els_id_SHIFT 4
486#define lpfc_wqe_gen_els_id_MASK 0x00000003
487#define lpfc_wqe_gen_els_id_WORD word11
456#define lpfc_wqe_gen_cmd_type_SHIFT 0 488#define lpfc_wqe_gen_cmd_type_SHIFT 0
457#define lpfc_wqe_gen_cmd_type_MASK 0x0000000F 489#define lpfc_wqe_gen_cmd_type_MASK 0x0000000F
458#define lpfc_wqe_gen_cmd_type_WORD word11 490#define lpfc_wqe_gen_cmd_type_WORD word11
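
The new els_id encoding in word11 above lets a WQE identify FLOGI/FDISC/LOGO to the port. A fragment of how it would be set, assuming the union lpfc_wqe/generic naming used elsewhere in this header:

	static void example_tag_els_wqe(union lpfc_wqe *wqe, uint32_t els_id)
	{
		/* els_id is one of ELS_ID_FLOGI, ELS_ID_FDISC,
		 * ELS_ID_LOGO, or ELS_ID_DEFAULT */
		bf_set(lpfc_wqe_gen_els_id, &wqe->generic, els_id);
	}
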
@@ -487,9 +519,9 @@ struct lpfc_register {
487 519
488#define LPFC_UERR_STATUS_HI 0x00A4 520#define LPFC_UERR_STATUS_HI 0x00A4
489#define LPFC_UERR_STATUS_LO 0x00A0 521#define LPFC_UERR_STATUS_LO 0x00A0
490#define LPFC_ONLINE0 0x00B0 522#define LPFC_UE_MASK_HI 0x00AC
491#define LPFC_ONLINE1 0x00B4 523#define LPFC_UE_MASK_LO 0x00A8
492#define LPFC_SCRATCHPAD 0x0058 524#define LPFC_SLI_INTF 0x0058
493 525
494/* BAR0 Registers */ 526/* BAR0 Registers */
495#define LPFC_HST_STATE 0x00AC 527#define LPFC_HST_STATE 0x00AC
@@ -549,19 +581,6 @@ struct lpfc_register {
549#define LPFC_POST_STAGE_ARMFW_READY 0xC000 581#define LPFC_POST_STAGE_ARMFW_READY 0xC000
550#define LPFC_POST_STAGE_ARMFW_UE 0xF000 582#define LPFC_POST_STAGE_ARMFW_UE 0xF000
551 583
552#define lpfc_scratchpad_slirev_SHIFT 4
553#define lpfc_scratchpad_slirev_MASK 0xF
554#define lpfc_scratchpad_slirev_WORD word0
555#define lpfc_scratchpad_chiptype_SHIFT 8
556#define lpfc_scratchpad_chiptype_MASK 0xFF
557#define lpfc_scratchpad_chiptype_WORD word0
558#define lpfc_scratchpad_featurelevel1_SHIFT 16
559#define lpfc_scratchpad_featurelevel1_MASK 0xFF
560#define lpfc_scratchpad_featurelevel1_WORD word0
561#define lpfc_scratchpad_featurelevel2_SHIFT 24
562#define lpfc_scratchpad_featurelevel2_MASK 0xFF
563#define lpfc_scratchpad_featurelevel2_WORD word0
564
565/* BAR1 Registers */ 584/* BAR1 Registers */
566#define LPFC_IMR_MASK_ALL 0xFFFFFFFF 585#define LPFC_IMR_MASK_ALL 0xFFFFFFFF
567#define LPFC_ISCR_CLEAR_ALL 0xFFFFFFFF 586#define LPFC_ISCR_CLEAR_ALL 0xFFFFFFFF
@@ -760,6 +779,7 @@ struct mbox_header {
760#define LPFC_MBOX_OPCODE_MQ_DESTROY 0x35 779#define LPFC_MBOX_OPCODE_MQ_DESTROY 0x35
761#define LPFC_MBOX_OPCODE_CQ_DESTROY 0x36 780#define LPFC_MBOX_OPCODE_CQ_DESTROY 0x36
762#define LPFC_MBOX_OPCODE_EQ_DESTROY 0x37 781#define LPFC_MBOX_OPCODE_EQ_DESTROY 0x37
782#define LPFC_MBOX_OPCODE_QUERY_FW_CFG 0x3A
763#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D 783#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D
764 784
765/* FCoE Opcodes */ 785/* FCoE Opcodes */
@@ -773,6 +793,7 @@ struct mbox_header {
773#define LPFC_MBOX_OPCODE_FCOE_ADD_FCF 0x09 793#define LPFC_MBOX_OPCODE_FCOE_ADD_FCF 0x09
774#define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF 0x0A 794#define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF 0x0A
775#define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE 0x0B 795#define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE 0x0B
796#define LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF 0x10
776 797
777/* Mailbox command structures */ 798/* Mailbox command structures */
778struct eq_context { 799struct eq_context {
@@ -985,7 +1006,7 @@ struct lpfc_mbx_wq_destroy {
985}; 1006};
986 1007
987#define LPFC_HDR_BUF_SIZE 128 1008#define LPFC_HDR_BUF_SIZE 128
988#define LPFC_DATA_BUF_SIZE 4096 1009#define LPFC_DATA_BUF_SIZE 2048
989struct rq_context { 1010struct rq_context {
990 uint32_t word0; 1011 uint32_t word0;
991#define lpfc_rq_context_rq_size_SHIFT 16 1012#define lpfc_rq_context_rq_size_SHIFT 16
@@ -1121,10 +1142,7 @@ struct sli4_sge { /* SLI-4 */
1121 this flag !! */ 1142 this flag !! */
1122#define lpfc_sli4_sge_last_MASK 0x00000001 1143#define lpfc_sli4_sge_last_MASK 0x00000001
1123#define lpfc_sli4_sge_last_WORD word2 1144#define lpfc_sli4_sge_last_WORD word2
1124 uint32_t word3; 1145 uint32_t sge_len;
1125#define lpfc_sli4_sge_len_SHIFT 0
1126#define lpfc_sli4_sge_len_MASK 0x0001FFFF
1127#define lpfc_sli4_sge_len_WORD word3
1128}; 1146};
1129 1147
1130struct fcf_record { 1148struct fcf_record {
@@ -1273,6 +1291,64 @@ struct lpfc_mbx_del_fcf_tbl_entry {
1273#define lpfc_mbx_del_fcf_tbl_index_WORD word10 1291#define lpfc_mbx_del_fcf_tbl_index_WORD word10
1274}; 1292};
1275 1293
1294struct lpfc_mbx_redisc_fcf_tbl {
1295 struct mbox_header header;
1296 uint32_t word10;
1297#define lpfc_mbx_redisc_fcf_count_SHIFT 0
1298#define lpfc_mbx_redisc_fcf_count_MASK 0x0000FFFF
1299#define lpfc_mbx_redisc_fcf_count_WORD word10
1300 uint32_t resvd;
1301 uint32_t word12;
1302#define lpfc_mbx_redisc_fcf_index_SHIFT 0
1303#define lpfc_mbx_redisc_fcf_index_MASK 0x0000FFFF
1304#define lpfc_mbx_redisc_fcf_index_WORD word12
1305};
1306
1307struct lpfc_mbx_query_fw_cfg {
1308 struct mbox_header header;
1309 uint32_t config_number;
1310 uint32_t asic_rev;
1311 uint32_t phys_port;
1312 uint32_t function_mode;
1313/* firmware Function Mode */
1314#define lpfc_function_mode_toe_SHIFT 0
1315#define lpfc_function_mode_toe_MASK 0x00000001
1316#define lpfc_function_mode_toe_WORD function_mode
1317#define lpfc_function_mode_nic_SHIFT 1
1318#define lpfc_function_mode_nic_MASK 0x00000001
1319#define lpfc_function_mode_nic_WORD function_mode
1320#define lpfc_function_mode_rdma_SHIFT 2
1321#define lpfc_function_mode_rdma_MASK 0x00000001
1322#define lpfc_function_mode_rdma_WORD function_mode
1323#define lpfc_function_mode_vm_SHIFT 3
1324#define lpfc_function_mode_vm_MASK 0x00000001
1325#define lpfc_function_mode_vm_WORD function_mode
1326#define lpfc_function_mode_iscsi_i_SHIFT 4
1327#define lpfc_function_mode_iscsi_i_MASK 0x00000001
1328#define lpfc_function_mode_iscsi_i_WORD function_mode
1329#define lpfc_function_mode_iscsi_t_SHIFT 5
1330#define lpfc_function_mode_iscsi_t_MASK 0x00000001
1331#define lpfc_function_mode_iscsi_t_WORD function_mode
1332#define lpfc_function_mode_fcoe_i_SHIFT 6
1333#define lpfc_function_mode_fcoe_i_MASK 0x00000001
1334#define lpfc_function_mode_fcoe_i_WORD function_mode
1335#define lpfc_function_mode_fcoe_t_SHIFT 7
1336#define lpfc_function_mode_fcoe_t_MASK 0x00000001
1337#define lpfc_function_mode_fcoe_t_WORD function_mode
1338#define lpfc_function_mode_dal_SHIFT 8
1339#define lpfc_function_mode_dal_MASK 0x00000001
1340#define lpfc_function_mode_dal_WORD function_mode
1341#define lpfc_function_mode_lro_SHIFT 9
1342#define lpfc_function_mode_lro_MASK 0x00000001
1343#define lpfc_function_mode_lro_WORD function_mode
1344#define lpfc_function_mode_flex10_SHIFT 10
1345#define lpfc_function_mode_flex10_MASK 0x00000001
1346#define lpfc_function_mode_flex10_WORD function_mode
1347#define lpfc_function_mode_ncsi_SHIFT 11
1348#define lpfc_function_mode_ncsi_MASK 0x00000001
1349#define lpfc_function_mode_ncsi_WORD function_mode
1350};
1351
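
A sketch of testing the QUERY_FW_CFG function-mode bits with bf_get() (hypothetical helper; cfg would point at the completed mailbox payload):

	static bool example_is_fcoe_initiator(struct lpfc_mbx_query_fw_cfg *cfg)
	{
		return bf_get(lpfc_function_mode_fcoe_i, cfg);
	}
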
1276/* Status field for embedded SLI_CONFIG mailbox command */ 1352/* Status field for embedded SLI_CONFIG mailbox command */
1277#define STATUS_SUCCESS 0x0 1353#define STATUS_SUCCESS 0x0
1278#define STATUS_FAILED 0x1 1354#define STATUS_FAILED 0x1
@@ -1298,6 +1374,7 @@ struct lpfc_mbx_del_fcf_tbl_entry {
1298#define STATUS_ERROR_ACITMAIN 0x2a 1374#define STATUS_ERROR_ACITMAIN 0x2a
1299#define STATUS_REBOOT_REQUIRED 0x2c 1375#define STATUS_REBOOT_REQUIRED 0x2c
1300#define STATUS_FCF_IN_USE 0x3a 1376#define STATUS_FCF_IN_USE 0x3a
1377#define STATUS_FCF_TABLE_EMPTY 0x43
1301 1378
1302struct lpfc_mbx_sli4_config { 1379struct lpfc_mbx_sli4_config {
1303 struct mbox_header header; 1380 struct mbox_header header;
@@ -1349,8 +1426,7 @@ struct lpfc_mbx_reg_vfi {
1349#define lpfc_reg_vfi_fcfi_SHIFT 0 1426#define lpfc_reg_vfi_fcfi_SHIFT 0
1350#define lpfc_reg_vfi_fcfi_MASK 0x0000FFFF 1427#define lpfc_reg_vfi_fcfi_MASK 0x0000FFFF
1351#define lpfc_reg_vfi_fcfi_WORD word2 1428#define lpfc_reg_vfi_fcfi_WORD word2
1352 uint32_t word3_rsvd; 1429 uint32_t wwn[2];
1353 uint32_t word4_rsvd;
1354 struct ulp_bde64 bde; 1430 struct ulp_bde64 bde;
1355 uint32_t word8_rsvd; 1431 uint32_t word8_rsvd;
1356 uint32_t word9_rsvd; 1432 uint32_t word9_rsvd;
@@ -1555,6 +1631,11 @@ struct lpfc_mbx_read_rev {
1555#define lpfc_mbx_rd_rev_fcoe_SHIFT 20 1631#define lpfc_mbx_rd_rev_fcoe_SHIFT 20
1556#define lpfc_mbx_rd_rev_fcoe_MASK 0x00000001 1632#define lpfc_mbx_rd_rev_fcoe_MASK 0x00000001
1557#define lpfc_mbx_rd_rev_fcoe_WORD word1 1633#define lpfc_mbx_rd_rev_fcoe_WORD word1
1634#define lpfc_mbx_rd_rev_cee_ver_SHIFT 21
1635#define lpfc_mbx_rd_rev_cee_ver_MASK 0x00000003
1636#define lpfc_mbx_rd_rev_cee_ver_WORD word1
1637#define LPFC_PREDCBX_CEE_MODE 0
1638#define LPFC_DCBX_CEE_MODE 1
1558#define lpfc_mbx_rd_rev_vpd_SHIFT 29 1639#define lpfc_mbx_rd_rev_vpd_SHIFT 29
1559#define lpfc_mbx_rd_rev_vpd_MASK 0x00000001 1640#define lpfc_mbx_rd_rev_vpd_MASK 0x00000001
1560#define lpfc_mbx_rd_rev_vpd_WORD word1 1641#define lpfc_mbx_rd_rev_vpd_WORD word1
@@ -1756,6 +1837,177 @@ struct lpfc_mbx_request_features {
1756#define lpfc_mbx_rq_ftr_rsp_ifip_WORD word3 1837#define lpfc_mbx_rq_ftr_rsp_ifip_WORD word3
1757}; 1838};
1758 1839
1840struct lpfc_mbx_supp_pages {
1841 uint32_t word1;
1842#define qs_SHIFT 0
1843#define qs_MASK 0x00000001
1844#define qs_WORD word1
1845#define wr_SHIFT 1
1846#define wr_MASK 0x00000001
1847#define wr_WORD word1
1848#define pf_SHIFT 8
1849#define pf_MASK 0x000000ff
1850#define pf_WORD word1
1851#define cpn_SHIFT 16
1852#define cpn_MASK 0x000000ff
1853#define cpn_WORD word1
1854 uint32_t word2;
1855#define list_offset_SHIFT 0
1856#define list_offset_MASK 0x000000ff
1857#define list_offset_WORD word2
1858#define next_offset_SHIFT 8
1859#define next_offset_MASK 0x000000ff
1860#define next_offset_WORD word2
1861#define elem_cnt_SHIFT 16
1862#define elem_cnt_MASK 0x000000ff
1863#define elem_cnt_WORD word2
1864 uint32_t word3;
1865#define pn_0_SHIFT 24
1866#define pn_0_MASK 0x000000ff
1867#define pn_0_WORD word3
1868#define pn_1_SHIFT 16
1869#define pn_1_MASK 0x000000ff
1870#define pn_1_WORD word3
1871#define pn_2_SHIFT 8
1872#define pn_2_MASK 0x000000ff
1873#define pn_2_WORD word3
1874#define pn_3_SHIFT 0
1875#define pn_3_MASK 0x000000ff
1876#define pn_3_WORD word3
1877 uint32_t word4;
1878#define pn_4_SHIFT 24
1879#define pn_4_MASK 0x000000ff
1880#define pn_4_WORD word4
1881#define pn_5_SHIFT 16
1882#define pn_5_MASK 0x000000ff
1883#define pn_5_WORD word4
1884#define pn_6_SHIFT 8
1885#define pn_6_MASK 0x000000ff
1886#define pn_6_WORD word4
1887#define pn_7_SHIFT 0
1888#define pn_7_MASK 0x000000ff
1889#define pn_7_WORD word4
1890 uint32_t rsvd[27];
1891#define LPFC_SUPP_PAGES 0
1892#define LPFC_BLOCK_GUARD_PROFILES 1
1893#define LPFC_SLI4_PARAMETERS 2
1894};
1895
1896struct lpfc_mbx_sli4_params {
1897 uint32_t word1;
1898#define qs_SHIFT 0
1899#define qs_MASK 0x00000001
1900#define qs_WORD word1
1901#define wr_SHIFT 1
1902#define wr_MASK 0x00000001
1903#define wr_WORD word1
1904#define pf_SHIFT 8
1905#define pf_MASK 0x000000ff
1906#define pf_WORD word1
1907#define cpn_SHIFT 16
1908#define cpn_MASK 0x000000ff
1909#define cpn_WORD word1
1910 uint32_t word2;
1911#define if_type_SHIFT 0
1912#define if_type_MASK 0x00000007
1913#define if_type_WORD word2
1914#define sli_rev_SHIFT 4
1915#define sli_rev_MASK 0x0000000f
1916#define sli_rev_WORD word2
1917#define sli_family_SHIFT 8
1918#define sli_family_MASK 0x000000ff
1919#define sli_family_WORD word2
1920#define featurelevel_1_SHIFT 16
1921#define featurelevel_1_MASK 0x000000ff
1922#define featurelevel_1_WORD word2
1923#define featurelevel_2_SHIFT 24
1924#define featurelevel_2_MASK 0x0000001f
1925#define featurelevel_2_WORD word2
1926 uint32_t word3;
1927#define fcoe_SHIFT 0
1928#define fcoe_MASK 0x00000001
1929#define fcoe_WORD word3
1930#define fc_SHIFT 1
1931#define fc_MASK 0x00000001
1932#define fc_WORD word3
1933#define nic_SHIFT 2
1934#define nic_MASK 0x00000001
1935#define nic_WORD word3
1936#define iscsi_SHIFT 3
1937#define iscsi_MASK 0x00000001
1938#define iscsi_WORD word3
1939#define rdma_SHIFT 4
1940#define rdma_MASK 0x00000001
1941#define rdma_WORD word3
1942 uint32_t sge_supp_len;
1943 uint32_t word5;
1944#define if_page_sz_SHIFT 0
1945#define if_page_sz_MASK 0x0000ffff
1946#define if_page_sz_WORD word5
1947#define loopbk_scope_SHIFT 24
1948#define loopbk_scope_MASK 0x0000000f
1949#define loopbk_scope_WORD word5
1950#define rq_db_window_SHIFT 28
1951#define rq_db_window_MASK 0x0000000f
1952#define rq_db_window_WORD word5
1953 uint32_t word6;
1954#define eq_pages_SHIFT 0
1955#define eq_pages_MASK 0x0000000f
1956#define eq_pages_WORD word6
1957#define eqe_size_SHIFT 8
1958#define eqe_size_MASK 0x000000ff
1959#define eqe_size_WORD word6
1960 uint32_t word7;
1961#define cq_pages_SHIFT 0
1962#define cq_pages_MASK 0x0000000f
1963#define cq_pages_WORD word7
1964#define cqe_size_SHIFT 8
1965#define cqe_size_MASK 0x000000ff
1966#define cqe_size_WORD word7
1967 uint32_t word8;
1968#define mq_pages_SHIFT 0
1969#define mq_pages_MASK 0x0000000f
1970#define mq_pages_WORD word8
1971#define mqe_size_SHIFT 8
1972#define mqe_size_MASK 0x000000ff
1973#define mqe_size_WORD word8
1974#define mq_elem_cnt_SHIFT 16
1975#define mq_elem_cnt_MASK 0x000000ff
1976#define mq_elem_cnt_WORD word8
1977 uint32_t word9;
1978#define wq_pages_SHIFT 0
1979#define wq_pages_MASK 0x0000ffff
1980#define wq_pages_WORD word9
1981#define wqe_size_SHIFT 8
1982#define wqe_size_MASK 0x000000ff
1983#define wqe_size_WORD word9
1984 uint32_t word10;
1985#define rq_pages_SHIFT 0
1986#define rq_pages_MASK 0x0000ffff
1987#define rq_pages_WORD word10
1988#define rqe_size_SHIFT 8
1989#define rqe_size_MASK 0x000000ff
1990#define rqe_size_WORD word10
1991 uint32_t word11;
1992#define hdr_pages_SHIFT 0
1993#define hdr_pages_MASK 0x0000000f
1994#define hdr_pages_WORD word11
1995#define hdr_size_SHIFT 8
1996#define hdr_size_MASK 0x0000000f
1997#define hdr_size_WORD word11
1998#define hdr_pp_align_SHIFT 16
1999#define hdr_pp_align_MASK 0x0000ffff
2000#define hdr_pp_align_WORD word11
2001 uint32_t word12;
2002#define sgl_pages_SHIFT 0
2003#define sgl_pages_MASK 0x0000000f
2004#define sgl_pages_WORD word12
2005#define sgl_pp_align_SHIFT 16
2006#define sgl_pp_align_MASK 0x0000ffff
2007#define sgl_pp_align_WORD word12
2008 uint32_t rsvd_13_63[51];
2009};
2010
1759/* Mailbox Completion Queue Error Messages */ 2011/* Mailbox Completion Queue Error Messages */
1760#define MB_CQE_STATUS_SUCCESS 0x0 2012#define MB_CQE_STATUS_SUCCESS 0x0
1761#define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES 0x1 2013#define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES 0x1
@@ -1785,6 +2037,7 @@ struct lpfc_mqe {
1785 struct lpfc_mbx_read_fcf_tbl read_fcf_tbl; 2037 struct lpfc_mbx_read_fcf_tbl read_fcf_tbl;
1786 struct lpfc_mbx_add_fcf_tbl_entry add_fcf_entry; 2038 struct lpfc_mbx_add_fcf_tbl_entry add_fcf_entry;
1787 struct lpfc_mbx_del_fcf_tbl_entry del_fcf_entry; 2039 struct lpfc_mbx_del_fcf_tbl_entry del_fcf_entry;
2040 struct lpfc_mbx_redisc_fcf_tbl redisc_fcf_tbl;
1788 struct lpfc_mbx_reg_fcfi reg_fcfi; 2041 struct lpfc_mbx_reg_fcfi reg_fcfi;
1789 struct lpfc_mbx_unreg_fcfi unreg_fcfi; 2042 struct lpfc_mbx_unreg_fcfi unreg_fcfi;
1790 struct lpfc_mbx_mq_create mq_create; 2043 struct lpfc_mbx_mq_create mq_create;
@@ -1804,6 +2057,9 @@ struct lpfc_mqe {
1804 struct lpfc_mbx_read_config rd_config; 2057 struct lpfc_mbx_read_config rd_config;
1805 struct lpfc_mbx_request_features req_ftrs; 2058 struct lpfc_mbx_request_features req_ftrs;
1806 struct lpfc_mbx_post_hdr_tmpl hdr_tmpl; 2059 struct lpfc_mbx_post_hdr_tmpl hdr_tmpl;
2060 struct lpfc_mbx_query_fw_cfg query_fw_cfg;
2061 struct lpfc_mbx_supp_pages supp_pages;
2062 struct lpfc_mbx_sli4_params sli4_params;
1807 struct lpfc_mbx_nop nop; 2063 struct lpfc_mbx_nop nop;
1808 } un; 2064 } un;
1809}; 2065};
@@ -1880,12 +2136,15 @@ struct lpfc_acqe_link {
1880#define LPFC_ASYNC_LINK_FAULT_NONE 0x0 2136#define LPFC_ASYNC_LINK_FAULT_NONE 0x0
1881#define LPFC_ASYNC_LINK_FAULT_LOCAL 0x1 2137#define LPFC_ASYNC_LINK_FAULT_LOCAL 0x1
1882#define LPFC_ASYNC_LINK_FAULT_REMOTE 0x2 2138#define LPFC_ASYNC_LINK_FAULT_REMOTE 0x2
2139#define lpfc_acqe_qos_link_speed_SHIFT 16
2140#define lpfc_acqe_qos_link_speed_MASK 0x0000FFFF
2141#define lpfc_acqe_qos_link_speed_WORD word1
1883 uint32_t event_tag; 2142 uint32_t event_tag;
1884 uint32_t trailer; 2143 uint32_t trailer;
1885}; 2144};
1886 2145
1887struct lpfc_acqe_fcoe { 2146struct lpfc_acqe_fcoe {
1888 uint32_t fcf_index; 2147 uint32_t index;
1889 uint32_t word1; 2148 uint32_t word1;
1890#define lpfc_acqe_fcoe_fcf_count_SHIFT 0 2149#define lpfc_acqe_fcoe_fcf_count_SHIFT 0
1891#define lpfc_acqe_fcoe_fcf_count_MASK 0x0000FFFF 2150#define lpfc_acqe_fcoe_fcf_count_MASK 0x0000FFFF
@@ -1896,6 +2155,8 @@ struct lpfc_acqe_fcoe {
1896#define LPFC_FCOE_EVENT_TYPE_NEW_FCF 0x1 2155#define LPFC_FCOE_EVENT_TYPE_NEW_FCF 0x1
1897#define LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL 0x2 2156#define LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL 0x2
1898#define LPFC_FCOE_EVENT_TYPE_FCF_DEAD 0x3 2157#define LPFC_FCOE_EVENT_TYPE_FCF_DEAD 0x3
2158#define LPFC_FCOE_EVENT_TYPE_CVL 0x4
2159#define LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD 0x5
1899 uint32_t event_tag; 2160 uint32_t event_tag;
1900 uint32_t trailer; 2161 uint32_t trailer;
1901}; 2162};
@@ -1921,12 +2182,13 @@ struct lpfc_bmbx_create {
1921#define SGL_ALIGN_SZ 64 2182#define SGL_ALIGN_SZ 64
1922#define SGL_PAGE_SIZE 4096 2183#define SGL_PAGE_SIZE 4096
1923/* align SGL addr on a size boundary - adjust address up */ 2184/* align SGL addr on a size boundary - adjust address up */
1924#define NO_XRI ((uint16_t)-1) 2185#define NO_XRI ((uint16_t)-1)
2186
1925struct wqe_common { 2187struct wqe_common {
1926 uint32_t word6; 2188 uint32_t word6;
1927#define wqe_xri_SHIFT 0 2189#define wqe_xri_tag_SHIFT 0
1928#define wqe_xri_MASK 0x0000FFFF 2190#define wqe_xri_tag_MASK 0x0000FFFF
1929#define wqe_xri_WORD word6 2191#define wqe_xri_tag_WORD word6
1930#define wqe_ctxt_tag_SHIFT 16 2192#define wqe_ctxt_tag_SHIFT 16
1931#define wqe_ctxt_tag_MASK 0x0000FFFF 2193#define wqe_ctxt_tag_MASK 0x0000FFFF
1932#define wqe_ctxt_tag_WORD word6 2194#define wqe_ctxt_tag_WORD word6
@@ -1987,7 +2249,7 @@ struct wqe_common {
1987#define wqe_wqec_MASK 0x00000001 2249#define wqe_wqec_MASK 0x00000001
1988#define wqe_wqec_WORD word11 2250#define wqe_wqec_WORD word11
1989#define wqe_cqid_SHIFT 16 2251#define wqe_cqid_SHIFT 16
1990#define wqe_cqid_MASK 0x000003ff 2252#define wqe_cqid_MASK 0x0000ffff
1991#define wqe_cqid_WORD word11 2253#define wqe_cqid_WORD word11
1992}; 2254};
1993 2255
@@ -1996,6 +2258,9 @@ struct wqe_did {
1996#define wqe_els_did_SHIFT 0 2258#define wqe_els_did_SHIFT 0
1997#define wqe_els_did_MASK 0x00FFFFFF 2259#define wqe_els_did_MASK 0x00FFFFFF
1998#define wqe_els_did_WORD word5 2260#define wqe_els_did_WORD word5
2261#define wqe_xmit_bls_pt_SHIFT 28
2262#define wqe_xmit_bls_pt_MASK 0x00000003
2263#define wqe_xmit_bls_pt_WORD word5
1999#define wqe_xmit_bls_ar_SHIFT 30 2264#define wqe_xmit_bls_ar_SHIFT 30
2000#define wqe_xmit_bls_ar_MASK 0x00000001 2265#define wqe_xmit_bls_ar_MASK 0x00000001
2001#define wqe_xmit_bls_ar_WORD word5 2266#define wqe_xmit_bls_ar_WORD word5
@@ -2044,6 +2309,23 @@ struct xmit_els_rsp64_wqe {
2044 2309
2045struct xmit_bls_rsp64_wqe { 2310struct xmit_bls_rsp64_wqe {
2046 uint32_t payload0; 2311 uint32_t payload0;
2312/* Payload0 for BA_ACC */
2313#define xmit_bls_rsp64_acc_seq_id_SHIFT 16
2314#define xmit_bls_rsp64_acc_seq_id_MASK 0x000000ff
2315#define xmit_bls_rsp64_acc_seq_id_WORD payload0
2316#define xmit_bls_rsp64_acc_seq_id_vald_SHIFT 24
2317#define xmit_bls_rsp64_acc_seq_id_vald_MASK 0x000000ff
2318#define xmit_bls_rsp64_acc_seq_id_vald_WORD payload0
2319/* Payload0 for BA_RJT */
2320#define xmit_bls_rsp64_rjt_vspec_SHIFT 0
2321#define xmit_bls_rsp64_rjt_vspec_MASK 0x000000ff
2322#define xmit_bls_rsp64_rjt_vspec_WORD payload0
2323#define xmit_bls_rsp64_rjt_expc_SHIFT 8
2324#define xmit_bls_rsp64_rjt_expc_MASK 0x000000ff
2325#define xmit_bls_rsp64_rjt_expc_WORD payload0
2326#define xmit_bls_rsp64_rjt_rsnc_SHIFT 16
2327#define xmit_bls_rsp64_rjt_rsnc_MASK 0x000000ff
2328#define xmit_bls_rsp64_rjt_rsnc_WORD payload0
2047 uint32_t word1; 2329 uint32_t word1;
2048#define xmit_bls_rsp64_rxid_SHIFT 0 2330#define xmit_bls_rsp64_rxid_SHIFT 0
2049#define xmit_bls_rsp64_rxid_MASK 0x0000ffff 2331#define xmit_bls_rsp64_rxid_MASK 0x0000ffff
@@ -2052,18 +2334,19 @@ struct xmit_bls_rsp64_wqe {
2052#define xmit_bls_rsp64_oxid_MASK 0x0000ffff 2334#define xmit_bls_rsp64_oxid_MASK 0x0000ffff
2053#define xmit_bls_rsp64_oxid_WORD word1 2335#define xmit_bls_rsp64_oxid_WORD word1
2054 uint32_t word2; 2336 uint32_t word2;
2055#define xmit_bls_rsp64_seqcntlo_SHIFT 0 2337#define xmit_bls_rsp64_seqcnthi_SHIFT 0
2056#define xmit_bls_rsp64_seqcntlo_MASK 0x0000ffff
2057#define xmit_bls_rsp64_seqcntlo_WORD word2
2058#define xmit_bls_rsp64_seqcnthi_SHIFT 16
2059#define xmit_bls_rsp64_seqcnthi_MASK 0x0000ffff 2338#define xmit_bls_rsp64_seqcnthi_MASK 0x0000ffff
2060#define xmit_bls_rsp64_seqcnthi_WORD word2 2339#define xmit_bls_rsp64_seqcnthi_WORD word2
2340#define xmit_bls_rsp64_seqcntlo_SHIFT 16
2341#define xmit_bls_rsp64_seqcntlo_MASK 0x0000ffff
2342#define xmit_bls_rsp64_seqcntlo_WORD word2
2061 uint32_t rsrvd3; 2343 uint32_t rsrvd3;
2062 uint32_t rsrvd4; 2344 uint32_t rsrvd4;
2063 struct wqe_did wqe_dest; 2345 struct wqe_did wqe_dest;
2064 struct wqe_common wqe_com; /* words 6-11 */ 2346 struct wqe_common wqe_com; /* words 6-11 */
2065 uint32_t rsvd_12_15[4]; 2347 uint32_t rsvd_12_15[4];
2066}; 2348};
2349
2067struct wqe_rctl_dfctl { 2350struct wqe_rctl_dfctl {
2068 uint32_t word5; 2351 uint32_t word5;
2069#define wqe_si_SHIFT 2 2352#define wqe_si_SHIFT 2
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 562d8cee874b..774663e8e1fe 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2009 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2010 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -28,6 +28,8 @@
28#include <linux/pci.h> 28#include <linux/pci.h>
29#include <linux/spinlock.h> 29#include <linux/spinlock.h>
30#include <linux/ctype.h> 30#include <linux/ctype.h>
31#include <linux/aer.h>
32#include <linux/slab.h>
31 33
32#include <scsi/scsi.h> 34#include <scsi/scsi.h>
33#include <scsi/scsi_device.h> 35#include <scsi/scsi_device.h>
@@ -349,7 +351,12 @@ lpfc_config_port_post(struct lpfc_hba *phba)
349 mb = &pmb->u.mb; 351 mb = &pmb->u.mb;
350 352
351 /* Get login parameters for NID. */ 353 /* Get login parameters for NID. */
352 lpfc_read_sparam(phba, pmb, 0); 354 rc = lpfc_read_sparam(phba, pmb, 0);
355 if (rc) {
356 mempool_free(pmb, phba->mbox_mem_pool);
357 return -ENOMEM;
358 }
359
353 pmb->vport = vport; 360 pmb->vport = vport;
354 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 361 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
355 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 362 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -358,7 +365,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
358 mb->mbxCommand, mb->mbxStatus); 365 mb->mbxCommand, mb->mbxStatus);
359 phba->link_state = LPFC_HBA_ERROR; 366 phba->link_state = LPFC_HBA_ERROR;
360 mp = (struct lpfc_dmabuf *) pmb->context1; 367 mp = (struct lpfc_dmabuf *) pmb->context1;
361 mempool_free( pmb, phba->mbox_mem_pool); 368 mempool_free(pmb, phba->mbox_mem_pool);
362 lpfc_mbuf_free(phba, mp->virt, mp->phys); 369 lpfc_mbuf_free(phba, mp->virt, mp->phys);
363 kfree(mp); 370 kfree(mp);
364 return -EIO; 371 return -EIO;
@@ -543,7 +550,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
543 mempool_free(pmb, phba->mbox_mem_pool); 550 mempool_free(pmb, phba->mbox_mem_pool);
544 return -EIO; 551 return -EIO;
545 } 552 }
546 } else { 553 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
547 lpfc_init_link(phba, pmb, phba->cfg_topology, 554 lpfc_init_link(phba, pmb, phba->cfg_topology,
548 phba->cfg_link_speed); 555 phba->cfg_link_speed);
549 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 556 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@@ -570,6 +577,11 @@ lpfc_config_port_post(struct lpfc_hba *phba)
570 } 577 }
571 /* MBOX buffer will be freed in mbox compl */ 578 /* MBOX buffer will be freed in mbox compl */
572 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 579 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
580 if (!pmb) {
581 phba->link_state = LPFC_HBA_ERROR;
582 return -ENOMEM;
583 }
584
573 lpfc_config_async(phba, pmb, LPFC_ELS_RING); 585 lpfc_config_async(phba, pmb, LPFC_ELS_RING);
574 pmb->mbox_cmpl = lpfc_config_async_cmpl; 586 pmb->mbox_cmpl = lpfc_config_async_cmpl;
575 pmb->vport = phba->pport; 587 pmb->vport = phba->pport;
@@ -587,6 +599,11 @@ lpfc_config_port_post(struct lpfc_hba *phba)
587 599
588 /* Get Option rom version */ 600 /* Get Option rom version */
589 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 601 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
602 if (!pmb) {
603 phba->link_state = LPFC_HBA_ERROR;
604 return -ENOMEM;
605 }
606
590 lpfc_dump_wakeup_param(phba, pmb); 607 lpfc_dump_wakeup_param(phba, pmb);
591 pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl; 608 pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
592 pmb->vport = phba->pport; 609 pmb->vport = phba->pport;
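
Both NULL checks added above harden the same allocate-and-issue pattern;
condensed (a sketch using only names from this function):

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		/* without the new guard, the pmb dereferences below
		 * would oops on allocation failure */
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	lpfc_config_async(phba, pmb, LPFC_ELS_RING);	/* or lpfc_dump_wakeup_param() */
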
@@ -602,6 +619,102 @@ lpfc_config_port_post(struct lpfc_hba *phba)
602} 619}
603 620
604/** 621/**
622 * lpfc_hba_init_link - Initialize the FC link
623 * @phba: pointer to lpfc hba data structure.
624 *
625 * This routine will issue the INIT_LINK mailbox command call.
626 * It is available to other drivers through the lpfc_hba data
627 * structure for use as a delayed link up mechanism with the
628 * module parameter lpfc_suppress_link_up.
629 *
630 * Return code
631 * 0 - success
632 * Any other value - error
633 **/
634int
635lpfc_hba_init_link(struct lpfc_hba *phba)
636{
637 struct lpfc_vport *vport = phba->pport;
638 LPFC_MBOXQ_t *pmb;
639 MAILBOX_t *mb;
640 int rc;
641
642 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
643 if (!pmb) {
644 phba->link_state = LPFC_HBA_ERROR;
645 return -ENOMEM;
646 }
647 mb = &pmb->u.mb;
648 pmb->vport = vport;
649
650 lpfc_init_link(phba, pmb, phba->cfg_topology,
651 phba->cfg_link_speed);
652 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
653 lpfc_set_loopback_flag(phba);
654 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
655 if (rc != MBX_SUCCESS) {
656 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
657 "0498 Adapter failed to init, mbxCmd x%x "
658 "INIT_LINK, mbxStatus x%x\n",
659 mb->mbxCommand, mb->mbxStatus);
660 /* Clear all interrupt enable conditions */
661 writel(0, phba->HCregaddr);
662 readl(phba->HCregaddr); /* flush */
663 /* Clear all pending interrupts */
664 writel(0xffffffff, phba->HAregaddr);
665 readl(phba->HAregaddr); /* flush */
666 phba->link_state = LPFC_HBA_ERROR;
667 if (rc != MBX_BUSY)
668 mempool_free(pmb, phba->mbox_mem_pool);
669 return -EIO;
670 }
671 phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
672
673 return 0;
674}
675
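
The error path above leans on the standard MMIO posted-write idiom: each
writel() is followed by a readl() of the same register so the write is
flushed to the adapter before execution continues. In isolation:

	writel(0, phba->HCregaddr);		/* disable interrupt conditions */
	readl(phba->HCregaddr);			/* flush the posted PCI write */
	writel(0xffffffff, phba->HAregaddr);	/* clear pending attentions */
	readl(phba->HAregaddr);			/* flush */
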
676/**
677 * lpfc_hba_down_link - this routine downs the FC link
678 *
679 * This routine will issue the DOWN_LINK mailbox command call.
680 * It is available to other drivers through the lpfc_hba data
681 * structure for use to stop the link.
682 *
683 * Return code
684 * 0 - success
685 * Any other value - error
686 **/
687int
688lpfc_hba_down_link(struct lpfc_hba *phba)
689{
690 LPFC_MBOXQ_t *pmb;
691 int rc;
692
693 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
694 if (!pmb) {
695 phba->link_state = LPFC_HBA_ERROR;
696 return -ENOMEM;
697 }
698
699 lpfc_printf_log(phba,
700 KERN_ERR, LOG_INIT,
701 "0491 Adapter Link is disabled.\n");
702 lpfc_down_link(phba, pmb);
703 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
704 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
705 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
706 lpfc_printf_log(phba,
707 KERN_ERR, LOG_INIT,
708 "2522 Adapter failed to issue DOWN_LINK"
709 " mbox command rc 0x%x\n", rc);
710
711 mempool_free(pmb, phba->mbox_mem_pool);
712 return -EIO;
713 }
714 return 0;
715}
716
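
Both helpers are published through the per-HBA jump table (the
lpfc_hba_init_link/lpfc_hba_down_link assignments appear in
lpfc_init_api_table_setup() later in this patch), so callers stay
SLI-revision agnostic. An illustrative, assumed call site:

	/* e.g. from a delayed link-up path; surrounding logic assumed */
	if (phba->lpfc_hba_init_link)
		rc = phba->lpfc_hba_init_link(phba);
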
717/**
605 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset 718 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
606 * @phba: pointer to lpfc HBA data structure. 719 * @phba: pointer to lpfc HBA data structure.
607 * 720 *
@@ -645,7 +758,7 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)
645 * down the SLI Layer. 758 * down the SLI Layer.
646 * 759 *
647 * Return codes 760 * Return codes
648 * 0 - sucess. 761 * 0 - success.
649 * Any other value - error. 762 * Any other value - error.
650 **/ 763 **/
651static int 764static int
@@ -700,7 +813,7 @@ lpfc_hba_down_post_s3(struct lpfc_hba *phba)
700 * down the SLI Layer. 813 * down the SLI Layer.
701 * 814 *
702 * Return codes 815 * Return codes
703 * 0 - sucess. 816 * 0 - success.
704 * Any other value - error. 817 * Any other value - error.
705 **/ 818 **/
706static int 819static int
@@ -710,6 +823,8 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
710 LIST_HEAD(aborts); 823 LIST_HEAD(aborts);
711 int ret; 824 int ret;
712 unsigned long iflag = 0; 825 unsigned long iflag = 0;
826 struct lpfc_sglq *sglq_entry = NULL;
827
713 ret = lpfc_hba_down_post_s3(phba); 828 ret = lpfc_hba_down_post_s3(phba);
714 if (ret) 829 if (ret)
715 return ret; 830 return ret;
@@ -725,6 +840,10 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
725 * list. 840 * list.
726 */ 841 */
727 spin_lock(&phba->sli4_hba.abts_sgl_list_lock); 842 spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
843 list_for_each_entry(sglq_entry,
844 &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
845 sglq_entry->state = SGL_FREED;
846
728 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list, 847 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
729 &phba->sli4_hba.lpfc_sgl_list); 848 &phba->sli4_hba.lpfc_sgl_list);
730 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); 849 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
@@ -755,7 +874,7 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
755 * uninitialization after the HBA is reset when bring down the SLI Layer. 874 * uninitialization after the HBA is reset when bring down the SLI Layer.
756 * 875 *
757 * Return codes 876 * Return codes
758 * 0 - sucess. 877 * 0 - success.
759 * Any other value - error. 878 * Any other value - error.
760 **/ 879 **/
761int 880int
@@ -852,12 +971,19 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
852void 971void
853lpfc_hb_timeout_handler(struct lpfc_hba *phba) 972lpfc_hb_timeout_handler(struct lpfc_hba *phba)
854{ 973{
974 struct lpfc_vport **vports;
855 LPFC_MBOXQ_t *pmboxq; 975 LPFC_MBOXQ_t *pmboxq;
856 struct lpfc_dmabuf *buf_ptr; 976 struct lpfc_dmabuf *buf_ptr;
857 int retval; 977 int retval, i;
858 struct lpfc_sli *psli = &phba->sli; 978 struct lpfc_sli *psli = &phba->sli;
859 LIST_HEAD(completions); 979 LIST_HEAD(completions);
860 980
981 vports = lpfc_create_vport_work_array(phba);
982 if (vports != NULL)
983 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
984 lpfc_rcv_seq_check_edtov(vports[i]);
985 lpfc_destroy_vport_work_array(phba, vports);
986
861 if ((phba->link_state == LPFC_HBA_ERROR) || 987 if ((phba->link_state == LPFC_HBA_ERROR) ||
862 (phba->pport->load_flag & FC_UNLOADING) || 988 (phba->pport->load_flag & FC_UNLOADING) ||
863 (phba->pport->fc_flag & FC_OFFLINE_MODE)) 989 (phba->pport->fc_flag & FC_OFFLINE_MODE))
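
The hunk above fans the heartbeat handler out across all vports using
the driver's work-array pattern: lpfc_create_vport_work_array() returns
a NULL-terminated snapshot (hence the paired i <= phba->max_vports and
vports[i] != NULL guards), and every user must release it with the
matching destroy call. The pattern in isolation:

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_rcv_seq_check_edtov(vports[i]);	/* per-vport work */
	lpfc_destroy_vport_work_array(phba, vports);
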
@@ -1254,7 +1380,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1254 * routine from the API jump table function pointer from the lpfc_hba struct. 1380 * routine from the API jump table function pointer from the lpfc_hba struct.
1255 * 1381 *
1256 * Return codes 1382 * Return codes
1257 * 0 - sucess. 1383 * 0 - success.
1258 * Any other value - error. 1384 * Any other value - error.
1259 **/ 1385 **/
1260void 1386void
@@ -1521,10 +1647,10 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1521 int GE = 0; 1647 int GE = 0;
1522 int oneConnect = 0; /* default is not a oneConnect */ 1648 int oneConnect = 0; /* default is not a oneConnect */
1523 struct { 1649 struct {
1524 char * name; 1650 char *name;
1525 int max_speed; 1651 char *bus;
1526 char * bus; 1652 char *function;
1527 } m = {"<Unknown>", 0, ""}; 1653 } m = {"<Unknown>", "", ""};
1528 1654
1529 if (mdp && mdp[0] != '\0' 1655 if (mdp && mdp[0] != '\0'
1530 && descp && descp[0] != '\0') 1656 && descp && descp[0] != '\0')
@@ -1545,132 +1671,155 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1545 1671
1546 switch (dev_id) { 1672 switch (dev_id) {
1547 case PCI_DEVICE_ID_FIREFLY: 1673 case PCI_DEVICE_ID_FIREFLY:
1548 m = (typeof(m)){"LP6000", max_speed, "PCI"}; 1674 m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"};
1549 break; 1675 break;
1550 case PCI_DEVICE_ID_SUPERFLY: 1676 case PCI_DEVICE_ID_SUPERFLY:
1551 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) 1677 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
1552 m = (typeof(m)){"LP7000", max_speed, "PCI"}; 1678 m = (typeof(m)){"LP7000", "PCI",
1679 "Fibre Channel Adapter"};
1553 else 1680 else
1554 m = (typeof(m)){"LP7000E", max_speed, "PCI"}; 1681 m = (typeof(m)){"LP7000E", "PCI",
1682 "Fibre Channel Adapter"};
1555 break; 1683 break;
1556 case PCI_DEVICE_ID_DRAGONFLY: 1684 case PCI_DEVICE_ID_DRAGONFLY:
1557 m = (typeof(m)){"LP8000", max_speed, "PCI"}; 1685 m = (typeof(m)){"LP8000", "PCI",
1686 "Fibre Channel Adapter"};
1558 break; 1687 break;
1559 case PCI_DEVICE_ID_CENTAUR: 1688 case PCI_DEVICE_ID_CENTAUR:
1560 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) 1689 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
1561 m = (typeof(m)){"LP9002", max_speed, "PCI"}; 1690 m = (typeof(m)){"LP9002", "PCI",
1691 "Fibre Channel Adapter"};
1562 else 1692 else
1563 m = (typeof(m)){"LP9000", max_speed, "PCI"}; 1693 m = (typeof(m)){"LP9000", "PCI",
1694 "Fibre Channel Adapter"};
1564 break; 1695 break;
1565 case PCI_DEVICE_ID_RFLY: 1696 case PCI_DEVICE_ID_RFLY:
1566 m = (typeof(m)){"LP952", max_speed, "PCI"}; 1697 m = (typeof(m)){"LP952", "PCI",
1698 "Fibre Channel Adapter"};
1567 break; 1699 break;
1568 case PCI_DEVICE_ID_PEGASUS: 1700 case PCI_DEVICE_ID_PEGASUS:
1569 m = (typeof(m)){"LP9802", max_speed, "PCI-X"}; 1701 m = (typeof(m)){"LP9802", "PCI-X",
1702 "Fibre Channel Adapter"};
1570 break; 1703 break;
1571 case PCI_DEVICE_ID_THOR: 1704 case PCI_DEVICE_ID_THOR:
1572 m = (typeof(m)){"LP10000", max_speed, "PCI-X"}; 1705 m = (typeof(m)){"LP10000", "PCI-X",
1706 "Fibre Channel Adapter"};
1573 break; 1707 break;
1574 case PCI_DEVICE_ID_VIPER: 1708 case PCI_DEVICE_ID_VIPER:
1575 m = (typeof(m)){"LPX1000", max_speed, "PCI-X"}; 1709 m = (typeof(m)){"LPX1000", "PCI-X",
1710 "Fibre Channel Adapter"};
1576 break; 1711 break;
1577 case PCI_DEVICE_ID_PFLY: 1712 case PCI_DEVICE_ID_PFLY:
1578 m = (typeof(m)){"LP982", max_speed, "PCI-X"}; 1713 m = (typeof(m)){"LP982", "PCI-X",
1714 "Fibre Channel Adapter"};
1579 break; 1715 break;
1580 case PCI_DEVICE_ID_TFLY: 1716 case PCI_DEVICE_ID_TFLY:
1581 m = (typeof(m)){"LP1050", max_speed, "PCI-X"}; 1717 m = (typeof(m)){"LP1050", "PCI-X",
1718 "Fibre Channel Adapter"};
1582 break; 1719 break;
1583 case PCI_DEVICE_ID_HELIOS: 1720 case PCI_DEVICE_ID_HELIOS:
1584 m = (typeof(m)){"LP11000", max_speed, "PCI-X2"}; 1721 m = (typeof(m)){"LP11000", "PCI-X2",
1722 "Fibre Channel Adapter"};
1585 break; 1723 break;
1586 case PCI_DEVICE_ID_HELIOS_SCSP: 1724 case PCI_DEVICE_ID_HELIOS_SCSP:
1587 m = (typeof(m)){"LP11000-SP", max_speed, "PCI-X2"}; 1725 m = (typeof(m)){"LP11000-SP", "PCI-X2",
1726 "Fibre Channel Adapter"};
1588 break; 1727 break;
1589 case PCI_DEVICE_ID_HELIOS_DCSP: 1728 case PCI_DEVICE_ID_HELIOS_DCSP:
1590 m = (typeof(m)){"LP11002-SP", max_speed, "PCI-X2"}; 1729 m = (typeof(m)){"LP11002-SP", "PCI-X2",
1730 "Fibre Channel Adapter"};
1591 break; 1731 break;
1592 case PCI_DEVICE_ID_NEPTUNE: 1732 case PCI_DEVICE_ID_NEPTUNE:
1593 m = (typeof(m)){"LPe1000", max_speed, "PCIe"}; 1733 m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"};
1594 break; 1734 break;
1595 case PCI_DEVICE_ID_NEPTUNE_SCSP: 1735 case PCI_DEVICE_ID_NEPTUNE_SCSP:
1596 m = (typeof(m)){"LPe1000-SP", max_speed, "PCIe"}; 1736 m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"};
1597 break; 1737 break;
1598 case PCI_DEVICE_ID_NEPTUNE_DCSP: 1738 case PCI_DEVICE_ID_NEPTUNE_DCSP:
1599 m = (typeof(m)){"LPe1002-SP", max_speed, "PCIe"}; 1739 m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"};
1600 break; 1740 break;
1601 case PCI_DEVICE_ID_BMID: 1741 case PCI_DEVICE_ID_BMID:
1602 m = (typeof(m)){"LP1150", max_speed, "PCI-X2"}; 1742 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
1603 break; 1743 break;
1604 case PCI_DEVICE_ID_BSMB: 1744 case PCI_DEVICE_ID_BSMB:
1605 m = (typeof(m)){"LP111", max_speed, "PCI-X2"}; 1745 m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"};
1606 break; 1746 break;
1607 case PCI_DEVICE_ID_ZEPHYR: 1747 case PCI_DEVICE_ID_ZEPHYR:
1608 m = (typeof(m)){"LPe11000", max_speed, "PCIe"}; 1748 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
1609 break; 1749 break;
1610 case PCI_DEVICE_ID_ZEPHYR_SCSP: 1750 case PCI_DEVICE_ID_ZEPHYR_SCSP:
1611 m = (typeof(m)){"LPe11000", max_speed, "PCIe"}; 1751 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
1612 break; 1752 break;
1613 case PCI_DEVICE_ID_ZEPHYR_DCSP: 1753 case PCI_DEVICE_ID_ZEPHYR_DCSP:
1614 m = (typeof(m)){"LP2105", max_speed, "PCIe"}; 1754 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
1615 GE = 1; 1755 GE = 1;
1616 break; 1756 break;
1617 case PCI_DEVICE_ID_ZMID: 1757 case PCI_DEVICE_ID_ZMID:
1618 m = (typeof(m)){"LPe1150", max_speed, "PCIe"}; 1758 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
1619 break; 1759 break;
1620 case PCI_DEVICE_ID_ZSMB: 1760 case PCI_DEVICE_ID_ZSMB:
1621 m = (typeof(m)){"LPe111", max_speed, "PCIe"}; 1761 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
1622 break; 1762 break;
1623 case PCI_DEVICE_ID_LP101: 1763 case PCI_DEVICE_ID_LP101:
1624 m = (typeof(m)){"LP101", max_speed, "PCI-X"}; 1764 m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"};
1625 break; 1765 break;
1626 case PCI_DEVICE_ID_LP10000S: 1766 case PCI_DEVICE_ID_LP10000S:
1627 m = (typeof(m)){"LP10000-S", max_speed, "PCI"}; 1767 m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"};
1628 break; 1768 break;
1629 case PCI_DEVICE_ID_LP11000S: 1769 case PCI_DEVICE_ID_LP11000S:
1630 m = (typeof(m)){"LP11000-S", max_speed, 1770 m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"};
1631 "PCI-X2"};
1632 break; 1771 break;
1633 case PCI_DEVICE_ID_LPE11000S: 1772 case PCI_DEVICE_ID_LPE11000S:
1634 m = (typeof(m)){"LPe11000-S", max_speed, 1773 m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"};
1635 "PCIe"};
1636 break; 1774 break;
1637 case PCI_DEVICE_ID_SAT: 1775 case PCI_DEVICE_ID_SAT:
1638 m = (typeof(m)){"LPe12000", max_speed, "PCIe"}; 1776 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
1639 break; 1777 break;
1640 case PCI_DEVICE_ID_SAT_MID: 1778 case PCI_DEVICE_ID_SAT_MID:
1641 m = (typeof(m)){"LPe1250", max_speed, "PCIe"}; 1779 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
1642 break; 1780 break;
1643 case PCI_DEVICE_ID_SAT_SMB: 1781 case PCI_DEVICE_ID_SAT_SMB:
1644 m = (typeof(m)){"LPe121", max_speed, "PCIe"}; 1782 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
1645 break; 1783 break;
1646 case PCI_DEVICE_ID_SAT_DCSP: 1784 case PCI_DEVICE_ID_SAT_DCSP:
1647 m = (typeof(m)){"LPe12002-SP", max_speed, "PCIe"}; 1785 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
1648 break; 1786 break;
1649 case PCI_DEVICE_ID_SAT_SCSP: 1787 case PCI_DEVICE_ID_SAT_SCSP:
1650 m = (typeof(m)){"LPe12000-SP", max_speed, "PCIe"}; 1788 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
1651 break; 1789 break;
1652 case PCI_DEVICE_ID_SAT_S: 1790 case PCI_DEVICE_ID_SAT_S:
1653 m = (typeof(m)){"LPe12000-S", max_speed, "PCIe"}; 1791 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
1654 break; 1792 break;
1655 case PCI_DEVICE_ID_HORNET: 1793 case PCI_DEVICE_ID_HORNET:
1656 m = (typeof(m)){"LP21000", max_speed, "PCIe"}; 1794 m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"};
1657 GE = 1; 1795 GE = 1;
1658 break; 1796 break;
1659 case PCI_DEVICE_ID_PROTEUS_VF: 1797 case PCI_DEVICE_ID_PROTEUS_VF:
1660 m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"}; 1798 m = (typeof(m)){"LPev12000", "PCIe IOV",
1799 "Fibre Channel Adapter"};
1661 break; 1800 break;
1662 case PCI_DEVICE_ID_PROTEUS_PF: 1801 case PCI_DEVICE_ID_PROTEUS_PF:
1663 m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"}; 1802 m = (typeof(m)){"LPev12000", "PCIe IOV",
1803 "Fibre Channel Adapter"};
1664 break; 1804 break;
1665 case PCI_DEVICE_ID_PROTEUS_S: 1805 case PCI_DEVICE_ID_PROTEUS_S:
1666 m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"}; 1806 m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
1807 "Fibre Channel Adapter"};
1667 break; 1808 break;
1668 case PCI_DEVICE_ID_TIGERSHARK: 1809 case PCI_DEVICE_ID_TIGERSHARK:
1669 oneConnect = 1; 1810 oneConnect = 1;
1670 m = (typeof(m)) {"OCe10100-F", max_speed, "PCIe"}; 1811 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
1812 break;
1813 case PCI_DEVICE_ID_TOMCAT:
1814 oneConnect = 1;
1815 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
1816 break;
1817 case PCI_DEVICE_ID_FALCON:
1818 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
1819 "EmulexSecure Fibre"};
1671 break; 1820 break;
1672 default: 1821 default:
1673 m = (typeof(m)){ NULL }; 1822 m = (typeof(m)){"Unknown", "", ""};
1674 break; 1823 break;
1675 } 1824 }
1676 1825
@@ -1682,17 +1831,14 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1682 if (descp && descp[0] == '\0') { 1831 if (descp && descp[0] == '\0') {
1683 if (oneConnect) 1832 if (oneConnect)
1684 snprintf(descp, 255, 1833 snprintf(descp, 255,
1685 "Emulex OneConnect %s, FCoE Initiator, Port %s", 1834 "Emulex OneConnect %s, %s Initiator, Port %s",
1686 m.name, 1835 m.name, m.function,
1687 phba->Port); 1836 phba->Port);
1688 else 1837 else
1689 snprintf(descp, 255, 1838 snprintf(descp, 255,
1690 "Emulex %s %d%s %s %s", 1839 "Emulex %s %d%s %s %s",
1691 m.name, m.max_speed, 1840 m.name, max_speed, (GE) ? "GE" : "Gb",
1692 (GE) ? "GE" : "Gb", 1841 m.bus, m.function);
1693 m.bus,
1694 (GE) ? "FCoE Adapter" :
1695 "Fibre Channel Adapter");
1696 } 1842 }
1697} 1843}
1698 1844
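
The long switch above relies on a GNU C idiom the kernel permits:
assigning a compound literal of an anonymous struct type through
typeof, which replaces all members in one statement. A minimal
standalone sketch:

	struct {
		char *name;
		char *bus;
		char *function;
	} m = {"<Unknown>", "", ""};

	/* one assignment overwrites name, bus and function together */
	m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
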
@@ -2045,6 +2191,46 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport)
2045} 2191}
2046 2192
2047/** 2193/**
2194 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2195 * @phba: pointer to lpfc hba data structure.
2196 *
2197 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
2198 * caller of this routine should already hold the host lock.
2199 **/
2200void
2201__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2202{
2203 /* Clear pending FCF rediscovery wait and failover in progress flags */
2204 phba->fcf.fcf_flag &= ~(FCF_REDISC_PEND |
2205 FCF_DEAD_DISC |
2206 FCF_ACVL_DISC);
2207 /* Now, try to stop the timer */
2208 del_timer(&phba->fcf.redisc_wait);
2209}
2210
2211/**
2212 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2213 * @phba: pointer to lpfc hba data structure.
2214 *
2215 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
2216 * checks whether the FCF rediscovery wait timer is pending with the host
2217 * lock held before proceeding with disabling the timer and clearing the
2218 * wait timer pending flag.
2219 **/
2220void
2221lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2222{
2223 spin_lock_irq(&phba->hbalock);
2224 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2225 /* FCF rediscovery timer already fired or stopped */
2226 spin_unlock_irq(&phba->hbalock);
2227 return;
2228 }
2229 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2230 spin_unlock_irq(&phba->hbalock);
2231}
2232
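
The pair of routines above follows the usual kernel naming split: the
double-underscore variant assumes the caller already holds hbalock,
while the plain-named wrapper takes the lock, rechecks FCF_REDISC_PEND
and delegates. Sketch of a caller on each side:

	/* caller already inside a hbalock critical section */
	spin_lock_irq(&phba->hbalock);
	__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
	spin_unlock_irq(&phba->hbalock);

	/* lock-free caller: the wrapper handles locking itself */
	lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
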
2233/**
2048 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA 2234 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
2049 * @phba: pointer to lpfc hba data structure. 2235 * @phba: pointer to lpfc hba data structure.
2050 * 2236 *
@@ -2068,6 +2254,7 @@ lpfc_stop_hba_timers(struct lpfc_hba *phba)
2068 break; 2254 break;
2069 case LPFC_PCI_DEV_OC: 2255 case LPFC_PCI_DEV_OC:
2070 /* Stop any OneConnect device specific driver timers */ 2256 /* Stop any OneConnect device specific driver timers */
2257 lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2071 break; 2258 break;
2072 default: 2259 default:
2073 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2260 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -2200,6 +2387,7 @@ lpfc_offline_prep(struct lpfc_hba * phba)
2200 struct lpfc_vport *vport = phba->pport; 2387 struct lpfc_vport *vport = phba->pport;
2201 struct lpfc_nodelist *ndlp, *next_ndlp; 2388 struct lpfc_nodelist *ndlp, *next_ndlp;
2202 struct lpfc_vport **vports; 2389 struct lpfc_vport **vports;
2390 struct Scsi_Host *shost;
2203 int i; 2391 int i;
2204 2392
2205 if (vport->fc_flag & FC_OFFLINE_MODE) 2393 if (vport->fc_flag & FC_OFFLINE_MODE)
@@ -2213,11 +2401,15 @@ lpfc_offline_prep(struct lpfc_hba * phba)
2213 vports = lpfc_create_vport_work_array(phba); 2401 vports = lpfc_create_vport_work_array(phba);
2214 if (vports != NULL) { 2402 if (vports != NULL) {
2215 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2403 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2216 struct Scsi_Host *shost;
2217
2218 if (vports[i]->load_flag & FC_UNLOADING) 2404 if (vports[i]->load_flag & FC_UNLOADING)
2219 continue; 2405 continue;
2220 vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED; 2406 shost = lpfc_shost_from_vport(vports[i]);
2407 spin_lock_irq(shost->host_lock);
2408 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
2409 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2410 vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
2411 spin_unlock_irq(shost->host_lock);
2412
2221 shost = lpfc_shost_from_vport(vports[i]); 2413 shost = lpfc_shost_from_vport(vports[i]);
2222 list_for_each_entry_safe(ndlp, next_ndlp, 2414 list_for_each_entry_safe(ndlp, next_ndlp,
2223 &vports[i]->fc_nodes, 2415 &vports[i]->fc_nodes,
@@ -2308,6 +2500,7 @@ lpfc_scsi_free(struct lpfc_hba *phba)
2308 2500
2309 spin_lock_irq(&phba->hbalock); 2501 spin_lock_irq(&phba->hbalock);
2310 /* Release all the lpfc_scsi_bufs maintained by this host. */ 2502 /* Release all the lpfc_scsi_bufs maintained by this host. */
2503 spin_lock(&phba->scsi_buf_list_lock);
2311 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { 2504 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
2312 list_del(&sb->list); 2505 list_del(&sb->list);
2313 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, 2506 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
@@ -2315,6 +2508,7 @@ lpfc_scsi_free(struct lpfc_hba *phba)
2315 kfree(sb); 2508 kfree(sb);
2316 phba->total_scsi_bufs--; 2509 phba->total_scsi_bufs--;
2317 } 2510 }
2511 spin_unlock(&phba->scsi_buf_list_lock);
2318 2512
2319 /* Release all the lpfc_iocbq entries maintained by this host. */ 2513 /* Release all the lpfc_iocbq entries maintained by this host. */
2320 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) { 2514 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
@@ -2322,9 +2516,7 @@ lpfc_scsi_free(struct lpfc_hba *phba)
2322 kfree(io); 2516 kfree(io);
2323 phba->total_iocbq_bufs--; 2517 phba->total_iocbq_bufs--;
2324 } 2518 }
2325
2326 spin_unlock_irq(&phba->hbalock); 2519 spin_unlock_irq(&phba->hbalock);
2327
2328 return 0; 2520 return 0;
2329} 2521}
2330 2522
@@ -2373,7 +2565,8 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2373 shost->this_id = -1; 2565 shost->this_id = -1;
2374 shost->max_cmd_len = 16; 2566 shost->max_cmd_len = 16;
2375 if (phba->sli_rev == LPFC_SLI_REV4) { 2567 if (phba->sli_rev == LPFC_SLI_REV4) {
2376 shost->dma_boundary = LPFC_SLI4_MAX_SEGMENT_SIZE; 2568 shost->dma_boundary =
2569 phba->sli4_hba.pc_sli4_params.sge_supp_len;
2377 shost->sg_tablesize = phba->cfg_sg_seg_cnt; 2570 shost->sg_tablesize = phba->cfg_sg_seg_cnt;
2378 } 2571 }
2379 2572
@@ -2407,8 +2600,16 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2407 init_timer(&vport->els_tmofunc); 2600 init_timer(&vport->els_tmofunc);
2408 vport->els_tmofunc.function = lpfc_els_timeout; 2601 vport->els_tmofunc.function = lpfc_els_timeout;
2409 vport->els_tmofunc.data = (unsigned long)vport; 2602 vport->els_tmofunc.data = (unsigned long)vport;
2603 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
2604 phba->menlo_flag |= HBA_MENLO_SUPPORT;
2605 /* check for menlo minimum sg count */
2606 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) {
2607 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
2608 shost->sg_tablesize = phba->cfg_sg_seg_cnt;
2609 }
2610 }
2410 2611
2411 error = scsi_add_host(shost, dev); 2612 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
2412 if (error) 2613 if (error)
2413 goto out_put_shost; 2614 goto out_put_shost;
2414 2615
@@ -2622,8 +2823,6 @@ lpfc_stop_port_s4(struct lpfc_hba *phba)
2622 lpfc_stop_hba_timers(phba); 2823 lpfc_stop_hba_timers(phba);
2623 phba->pport->work_port_events = 0; 2824 phba->pport->work_port_events = 0;
2624 phba->sli4_hba.intr_enable = 0; 2825 phba->sli4_hba.intr_enable = 0;
2625 /* Hard clear it for now, shall have more graceful way to wait later */
2626 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2627} 2826}
2628 2827
2629/** 2828/**
@@ -2675,7 +2874,7 @@ lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
2675 del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry; 2874 del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry;
2676 bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1); 2875 bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1);
2677 bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record, 2876 bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record,
2678 phba->fcf.fcf_indx); 2877 phba->fcf.current_rec.fcf_indx);
2679 2878
2680 if (!phba->sli4_hba.intr_enable) 2879 if (!phba->sli4_hba.intr_enable)
2681 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 2880 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
@@ -2699,6 +2898,117 @@ lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
2699} 2898}
2700 2899
2701/** 2900/**
2901 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
2902 * @phba: Pointer to hba for which this call is being executed.
2903 *
2904 * This routine starts the timer waiting for the FCF rediscovery to complete.
2905 **/
2906void
2907lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
2908{
2909 unsigned long fcf_redisc_wait_tmo =
2910 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
2911 /* Start fcf rediscovery wait period timer */
2912 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
2913 spin_lock_irq(&phba->hbalock);
2914 /* Allow action to new fcf asynchronous event */
2915 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
2916 /* Mark the FCF rediscovery pending state */
2917 phba->fcf.fcf_flag |= FCF_REDISC_PEND;
2918 spin_unlock_irq(&phba->hbalock);
2919}
2920
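
For orientation, the whole life cycle of this timer as wired elsewhere
in this same patch, gathered into one sketch:

	/* setup, from lpfc_sli4_driver_resource_setup() below */
	init_timer(&phba->fcf.redisc_wait);
	phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
	phba->fcf.redisc_wait.data = (unsigned long)phba;

	/* armed by the routine above, LPFC_FCF_REDISCOVER_WAIT_TMO ms out */
	mod_timer(&phba->fcf.redisc_wait,
		  jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));

	/* cancelled, if still pending, via del_timer() in
	 * __lpfc_sli4_stop_fcf_redisc_wait_timer() */
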
2921/**
2922 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
2923 * @ptr: unsigned long cast of the pointer to lpfc hba data structure.
2924 *
2925 * This routine is invoked when the wait for FCF table rediscovery has
2926 * timed out. If new FCF record(s) have been discovered during the
2927 * wait period, a new FCF event shall be added to the FCOE async event
2928 * list, and then the worker thread shall be woken up for processing
2929 * from the worker thread context.
2930 **/
2931void
2932lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
2933{
2934 struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
2935
2936 /* Don't send FCF rediscovery event if timer cancelled */
2937 spin_lock_irq(&phba->hbalock);
2938 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2939 spin_unlock_irq(&phba->hbalock);
2940 return;
2941 }
2942 /* Clear FCF rediscovery timer pending flag */
2943 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2944 /* FCF rediscovery event to worker thread */
2945 phba->fcf.fcf_flag |= FCF_REDISC_EVT;
2946 spin_unlock_irq(&phba->hbalock);
2947 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2948 "2776 FCF rediscover wait timer expired, post "
2949 "a worker thread event for FCF table scan\n");
2950 /* wake up worker thread */
2951 lpfc_worker_wake_up(phba);
2952}
2953
2954/**
2955 * lpfc_sli4_fw_cfg_check - Read the firmware config and verify FCoE support
2956 * @phba: pointer to lpfc hba data structure.
2957 *
2958 * This function uses the QUERY_FW_CFG mailbox command to determine if the
2959 * firmware loaded supports FCoE. A return of zero indicates that the mailbox
2960 * was successful and the firmware supports FCoE. Any other return indicates
2961 * an error. It is assumed that this function will be called before interrupts
2962 * are enabled.
2963 **/
2964static int
2965lpfc_sli4_fw_cfg_check(struct lpfc_hba *phba)
2966{
2967 int rc = 0;
2968 LPFC_MBOXQ_t *mboxq;
2969 struct lpfc_mbx_query_fw_cfg *query_fw_cfg;
2970 uint32_t length;
2971 uint32_t shdr_status, shdr_add_status;
2972
2973 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2974 if (!mboxq) {
2975 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2976 "2621 Failed to allocate mbox for "
2977 "query firmware config cmd\n");
2978 return -ENOMEM;
2979 }
2980 query_fw_cfg = &mboxq->u.mqe.un.query_fw_cfg;
2981 length = (sizeof(struct lpfc_mbx_query_fw_cfg) -
2982 sizeof(struct lpfc_sli4_cfg_mhdr));
2983 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
2984 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
2985 length, LPFC_SLI4_MBX_EMBED);
2986 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
2987 /* The IOCTL status is embedded in the mailbox subheader. */
2988 shdr_status = bf_get(lpfc_mbox_hdr_status,
2989 &query_fw_cfg->header.cfg_shdr.response);
2990 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
2991 &query_fw_cfg->header.cfg_shdr.response);
2992 if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
2993 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2994 "2622 Query Firmware Config failed "
2995 "mbx status x%x, status x%x add_status x%x\n",
2996 rc, shdr_status, shdr_add_status);
2997 return -EINVAL;
2998 }
2999 if (!bf_get(lpfc_function_mode_fcoe_i, query_fw_cfg)) {
3000 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3001 "2623 FCoE Function not supported by firmware. "
3002 "Function mode = %08x\n",
3003 query_fw_cfg->function_mode);
3004 return -EINVAL;
3005 }
3006 if (rc != MBX_TIMEOUT)
3007 mempool_free(mboxq, phba->mbox_mem_pool);
3008 return 0;
3009}
3010
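
One convention in lpfc_sli4_fw_cfg_check() worth flagging: after a
polled mailbox command, the mailbox is freed only when the command did
not time out, because on MBX_TIMEOUT the firmware may still post the
completion and the memory has to outlive it:

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	/* ... status checks ... */
	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, phba->mbox_mem_pool);

Note, though, that the two early -EINVAL returns above run before this
check, so the mailbox appears to leak on those error paths.
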
3011/**
2702 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code 3012 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
2703 * @phba: pointer to lpfc hba data structure. 3013 * @phba: pointer to lpfc hba data structure.
2704 * @acqe_link: pointer to the async link completion queue entry. 3014 * @acqe_link: pointer to the async link completion queue entry.
@@ -2893,6 +3203,8 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
2893 bf_get(lpfc_acqe_link_physical, acqe_link); 3203 bf_get(lpfc_acqe_link_physical, acqe_link);
2894 phba->sli4_hba.link_state.fault = 3204 phba->sli4_hba.link_state.fault =
2895 bf_get(lpfc_acqe_link_fault, acqe_link); 3205 bf_get(lpfc_acqe_link_fault, acqe_link);
3206 phba->sli4_hba.link_state.logical_speed =
3207 bf_get(lpfc_acqe_qos_link_speed, acqe_link);
2896 3208
2897 /* Invoke the lpfc_handle_latt mailbox command callback function */ 3209 /* Invoke the lpfc_handle_latt mailbox command callback function */
2898 lpfc_mbx_cmpl_read_la(phba, pmb); 3210 lpfc_mbx_cmpl_read_la(phba, pmb);
@@ -2906,6 +3218,68 @@ out_free_pmb:
2906} 3218}
2907 3219
2908/** 3220/**
3221 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
3222 * @vport: pointer to vport data structure.
3223 *
3224 * This routine is to perform Clear Virtual Link (CVL) on a vport in
3225 * response to a CVL event.
3226 *
3227 * Return the pointer to the ndlp with the vport if successful, otherwise
3228 * return NULL.
3229 **/
3230static struct lpfc_nodelist *
3231lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
3232{
3233 struct lpfc_nodelist *ndlp;
3234 struct Scsi_Host *shost;
3235 struct lpfc_hba *phba;
3236
3237 if (!vport)
3238 return NULL;
3239 ndlp = lpfc_findnode_did(vport, Fabric_DID);
3240 if (!ndlp)
3241 return NULL;
3242 phba = vport->phba;
3243 if (!phba)
3244 return NULL;
3245 if (phba->pport->port_state <= LPFC_FLOGI)
3246 return NULL;
3247 /* If virtual link is not yet instantiated ignore CVL */
3248 if (vport->port_state <= LPFC_FDISC)
3249 return NULL;
3250 shost = lpfc_shost_from_vport(vport);
3251 if (!shost)
3252 return NULL;
3253 lpfc_linkdown_port(vport);
3254 lpfc_cleanup_pending_mbox(vport);
3255 spin_lock_irq(shost->host_lock);
3256 vport->fc_flag |= FC_VPORT_CVL_RCVD;
3257 spin_unlock_irq(shost->host_lock);
3258
3259 return ndlp;
3260}
3261
3262/**
3263 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
3264 * @phba: pointer to lpfc hba data structure.
3265 *
3266 * This routine is to perform Clear Virtual Link (CVL) on all vports in
3267 * response to a FCF dead event.
3268 **/
3269static void
3270lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
3271{
3272 struct lpfc_vport **vports;
3273 int i;
3274
3275 vports = lpfc_create_vport_work_array(phba);
3276 if (vports)
3277 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3278 lpfc_sli4_perform_vport_cvl(vports[i]);
3279 lpfc_destroy_vport_work_array(phba, vports);
3280}
3281
3282/**
2909 * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event 3283 * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event
2910 * @phba: pointer to lpfc hba data structure. 3284 * @phba: pointer to lpfc hba data structure.
2911 * @acqe_link: pointer to the async fcoe completion queue entry. 3285 * @acqe_link: pointer to the async fcoe completion queue entry.
@@ -2918,33 +3292,71 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
2918{ 3292{
2919 uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe); 3293 uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
2920 int rc; 3294 int rc;
3295 struct lpfc_vport *vport;
3296 struct lpfc_nodelist *ndlp;
3297 struct Scsi_Host *shost;
3298 int active_vlink_present;
3299 struct lpfc_vport **vports;
3300 int i;
2921 3301
3302 phba->fc_eventTag = acqe_fcoe->event_tag;
2922 phba->fcoe_eventtag = acqe_fcoe->event_tag; 3303 phba->fcoe_eventtag = acqe_fcoe->event_tag;
2923 switch (event_type) { 3304 switch (event_type) {
2924 case LPFC_FCOE_EVENT_TYPE_NEW_FCF: 3305 case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
2925 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 3306 case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD:
2926 "2546 New FCF found index 0x%x tag 0x%x\n", 3307 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
2927 acqe_fcoe->fcf_index, 3308 "2546 New FCF found/FCF parameter modified event: "
2928 acqe_fcoe->event_tag); 3309 "evt_tag:x%x, fcf_index:x%x\n",
2929 /* 3310 acqe_fcoe->event_tag, acqe_fcoe->index);
2930 * If the current FCF is in discovered state, or 3311
2931 * FCF discovery is in progress do nothing.
2932 */
2933 spin_lock_irq(&phba->hbalock); 3312 spin_lock_irq(&phba->hbalock);
2934 if ((phba->fcf.fcf_flag & FCF_DISCOVERED) || 3313 if ((phba->fcf.fcf_flag & FCF_SCAN_DONE) ||
2935 (phba->hba_flag & FCF_DISC_INPROGRESS)) { 3314 (phba->hba_flag & FCF_DISC_INPROGRESS)) {
3315 /*
3316 * If the current FCF is in discovered state or
3317 * FCF discovery is in progress, do nothing.
3318 */
3319 spin_unlock_irq(&phba->hbalock);
3320 break;
3321 }
3322
3323 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
3324 /*
3325 * If fast FCF failover rescan event is pending,
3326 * do nothing.
3327 */
2936 spin_unlock_irq(&phba->hbalock); 3328 spin_unlock_irq(&phba->hbalock);
2937 break; 3329 break;
2938 } 3330 }
2939 spin_unlock_irq(&phba->hbalock); 3331 spin_unlock_irq(&phba->hbalock);
2940 3332
2941 /* Read the FCF table and re-discover SAN. */ 3333 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
2942 rc = lpfc_sli4_read_fcf_record(phba, 3334 !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
2943 LPFC_FCOE_FCF_GET_FIRST); 3335 /*
3336 * During period of FCF discovery, read the FCF
3337 * table record indexed by the event to update
3338 * FCF round robin failover eligible FCF bmask.
3339 */
3340 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3341 LOG_DISCOVERY,
3342 "2779 Read new FCF record with "
3343 "fcf_index:x%x for updating FCF "
3344 "round robin failover bmask\n",
3345 acqe_fcoe->index);
3346 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index);
3347 }
3348
3349 /* Otherwise, scan the entire FCF table and re-discover SAN */
3350 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3351 "2770 Start FCF table scan due to new FCF "
3352 "event: evt_tag:x%x, fcf_index:x%x\n",
3353 acqe_fcoe->event_tag, acqe_fcoe->index);
3354 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
3355 LPFC_FCOE_FCF_GET_FIRST);
2944 if (rc) 3356 if (rc)
2945 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 3357 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
2946 "2547 Read FCF record failed 0x%x\n", 3358 "2547 Issue FCF scan read FCF mailbox "
2947 rc); 3359 "command failed 0x%x\n", rc);
2948 break; 3360 break;
2949 3361
2950 case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL: 3362 case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
@@ -2955,22 +3367,130 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
2955 break; 3367 break;
2956 3368
2957 case LPFC_FCOE_EVENT_TYPE_FCF_DEAD: 3369 case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
2958 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 3370 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
2959 "2549 FCF disconnected fron network index 0x%x" 3371 "2549 FCF disconnected from network index 0x%x"
2960 " tag 0x%x\n", acqe_fcoe->fcf_index, 3372 " tag 0x%x\n", acqe_fcoe->index,
2961 acqe_fcoe->event_tag); 3373 acqe_fcoe->event_tag);
2962 /* If the event is not for currently used fcf do nothing */ 3374 /* If the event is not for currently used fcf do nothing */
2963 if (phba->fcf.fcf_indx != acqe_fcoe->fcf_index) 3375 if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index)
2964 break; 3376 break;
2965 /* 3377 /* We request port to rediscover the entire FCF table for
2966 * Currently, driver support only one FCF - so treat this as 3378 * a fast recovery from case that the current FCF record
2967 * a link down. 3379 * is no longer valid if we are not in the middle of FCF
3380 * failover process already.
2968 */ 3381 */
2969 lpfc_linkdown(phba); 3382 spin_lock_irq(&phba->hbalock);
2970 /* Unregister FCF if no devices connected to it */ 3383 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
2971 lpfc_unregister_unused_fcf(phba); 3384 spin_unlock_irq(&phba->hbalock);
3385 /* Update FLOGI FCF failover eligible FCF bmask */
3386 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fcoe->index);
3387 break;
3388 }
3389 /* Mark the fast failover process in progress */
3390 phba->fcf.fcf_flag |= FCF_DEAD_DISC;
3391 spin_unlock_irq(&phba->hbalock);
3392 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3393 "2771 Start FCF fast failover process due to "
3394 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
3395 "\n", acqe_fcoe->event_tag, acqe_fcoe->index);
3396 rc = lpfc_sli4_redisc_fcf_table(phba);
3397 if (rc) {
3398 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3399 LOG_DISCOVERY,
3400 "2772 Issue FCF rediscover mabilbox "
3401 "command failed, fail through to FCF "
3402 "dead event\n");
3403 spin_lock_irq(&phba->hbalock);
3404 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
3405 spin_unlock_irq(&phba->hbalock);
3406 /*
3407 * Last resort will fail over by treating this
3408 * as a link down to FCF registration.
3409 */
3410 lpfc_sli4_fcf_dead_failthrough(phba);
3411 } else
3412 /* Handling fast FCF failover to a DEAD FCF event
3413 * is considered equivalent to receiving CVL to all
3414 * vports.
3415 */
3416 lpfc_sli4_perform_all_vport_cvl(phba);
2972 break; 3417 break;
3418 case LPFC_FCOE_EVENT_TYPE_CVL:
3419 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3420 "2718 Clear Virtual Link Received for VPI 0x%x"
3421 " tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag);
3422 vport = lpfc_find_vport_by_vpid(phba,
3423 acqe_fcoe->index - phba->vpi_base);
3424 ndlp = lpfc_sli4_perform_vport_cvl(vport);
3425 if (!ndlp)
3426 break;
3427 active_vlink_present = 0;
3428
3429 vports = lpfc_create_vport_work_array(phba);
3430 if (vports) {
3431 for (i = 0; i <= phba->max_vports && vports[i] != NULL;
3432 i++) {
3433 if ((!(vports[i]->fc_flag &
3434 FC_VPORT_CVL_RCVD)) &&
3435 (vports[i]->port_state > LPFC_FDISC)) {
3436 active_vlink_present = 1;
3437 break;
3438 }
3439 }
3440 lpfc_destroy_vport_work_array(phba, vports);
3441 }
2973 3442
3443 if (active_vlink_present) {
3444 /*
3445 * If there are other active VLinks present,
3446 * re-instantiate the Vlink using FDISC.
3447 */
3448 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
3449 shost = lpfc_shost_from_vport(vport);
3450 spin_lock_irq(shost->host_lock);
3451 ndlp->nlp_flag |= NLP_DELAY_TMO;
3452 spin_unlock_irq(shost->host_lock);
3453 ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
3454 vport->port_state = LPFC_FDISC;
3455 } else {
3456 /*
3457 * Otherwise, we request port to rediscover
3458 * the entire FCF table for a fast recovery
3459 * from possible case that the current FCF
3460 * is no longer valid if we are not already
3461 * in the FCF failover process.
3462 */
3463 spin_lock_irq(&phba->hbalock);
3464 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3465 spin_unlock_irq(&phba->hbalock);
3466 break;
3467 }
3468 /* Mark the fast failover process in progress */
3469 phba->fcf.fcf_flag |= FCF_ACVL_DISC;
3470 spin_unlock_irq(&phba->hbalock);
3471 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3472 LOG_DISCOVERY,
3473 "2773 Start FCF fast failover due "
3474 "to CVL event: evt_tag:x%x\n",
3475 acqe_fcoe->event_tag);
3476 rc = lpfc_sli4_redisc_fcf_table(phba);
3477 if (rc) {
3478 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3479 LOG_DISCOVERY,
3480 "2774 Issue FCF rediscover "
3481 "mabilbox command failed, "
3482 "through to CVL event\n");
3483 spin_lock_irq(&phba->hbalock);
3484 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
3485 spin_unlock_irq(&phba->hbalock);
3486 /*
3487 * Last resort will be to retry on the
3488 * currently registered FCF entry.
3489 */
3490 lpfc_retry_pport_discovery(phba);
3491 }
3492 }
3493 break;
2974 default: 3494 default:
2975 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3495 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2976 "0288 Unknown FCoE event type 0x%x event tag " 3496 "0288 Unknown FCoE event type 0x%x event tag "
@@ -2990,6 +3510,7 @@ static void
2990lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba, 3510lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
2991 struct lpfc_acqe_dcbx *acqe_dcbx) 3511 struct lpfc_acqe_dcbx *acqe_dcbx)
2992{ 3512{
3513 phba->fc_eventTag = acqe_dcbx->event_tag;
2993 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3514 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2994 "0290 The SLI4 DCBX asynchronous event is not " 3515 "0290 The SLI4 DCBX asynchronous event is not "
2995 "handled yet\n"); 3516 "handled yet\n");
@@ -3044,6 +3565,37 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
3044} 3565}
3045 3566
3046/** 3567/**
3568 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
3569 * @phba: pointer to lpfc hba data structure.
3570 *
3571 * This routine is invoked by the worker thread to process FCF table
3572 * rediscovery pending completion event.
3573 **/
3574void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
3575{
3576 int rc;
3577
3578 spin_lock_irq(&phba->hbalock);
3579 /* Clear FCF rediscovery timeout event */
3580 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
3581 /* Clear driver fast failover FCF record flag */
3582 phba->fcf.failover_rec.flag = 0;
3583 /* Set state for FCF fast failover */
3584 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
3585 spin_unlock_irq(&phba->hbalock);
3586
3587 /* Scan FCF table from the first entry to re-discover SAN */
3588 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3589 "2777 Start FCF table scan after FCF "
3590 "rediscovery quiescent period over\n");
3591 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
3592 if (rc)
3593 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3594 "2747 Issue FCF scan read FCF mailbox "
3595 "command failed 0x%x\n", rc);
3596}
3597
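
Taken together with the timer routines added earlier in this patch, the
FCF rediscovery flags form a small hand-off chain (sketch):

	/*
	 * FCF_REDISC_PEND  --(wait timer fires)-->  FCF_REDISC_EVT
	 *     lpfc_sli4_fcf_redisc_wait_tmo() swaps the flags and
	 *     wakes the worker thread;
	 *
	 * FCF_REDISC_EVT   --(worker thread)---->   FCF_REDISC_FOV
	 *     this routine clears the event flag, marks fast failover
	 *     and rescans from LPFC_FCOE_FCF_GET_FIRST.
	 */
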
3598/**
3047 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table 3599 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
3048 * @phba: pointer to lpfc hba data structure. 3600 * @phba: pointer to lpfc hba data structure.
3049 * @dev_grp: The HBA PCI-Device group number. 3601 * @dev_grp: The HBA PCI-Device group number.
@@ -3124,7 +3676,7 @@ static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
3124 * PCI devices. 3676 * PCI devices.
3125 * 3677 *
3126 * Return codes 3678 * Return codes
3127 * 0 - sucessful 3679 * 0 - successful
3128 * other values - error 3680 * other values - error
3129 **/ 3681 **/
3130static int 3682static int
@@ -3220,7 +3772,7 @@ lpfc_reset_hba(struct lpfc_hba *phba)
3220 * support the SLI-3 HBA device it attached to. 3772 * support the SLI-3 HBA device it attached to.
3221 * 3773 *
3222 * Return codes 3774 * Return codes
3223 * 0 - sucessful 3775 * 0 - successful
3224 * other values - error 3776 * other values - error
3225 **/ 3777 **/
3226static int 3778static int
@@ -3321,15 +3873,18 @@ lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
3321 * support the SLI-4 HBA device it attached to. 3873 * support the SLI-4 HBA device it attached to.
3322 * 3874 *
3323 * Return codes 3875 * Return codes
3324 * 0 - sucessful 3876 * 0 - successful
3325 * other values - error 3877 * other values - error
3326 **/ 3878 **/
3327static int 3879static int
3328lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) 3880lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3329{ 3881{
3330 struct lpfc_sli *psli; 3882 struct lpfc_sli *psli;
3331 int rc; 3883 LPFC_MBOXQ_t *mboxq;
3332 int i, hbq_count; 3884 int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
3885 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
3886 struct lpfc_mqe *mqe;
3887 int longs;
3333 3888
3334 /* Before proceed, wait for POST done and device ready */ 3889 /* Before proceed, wait for POST done and device ready */
3335 rc = lpfc_sli4_post_status_check(phba); 3890 rc = lpfc_sli4_post_status_check(phba);
@@ -3358,6 +3913,11 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3358 init_timer(&phba->eratt_poll); 3913 init_timer(&phba->eratt_poll);
3359 phba->eratt_poll.function = lpfc_poll_eratt; 3914 phba->eratt_poll.function = lpfc_poll_eratt;
3360 phba->eratt_poll.data = (unsigned long) phba; 3915 phba->eratt_poll.data = (unsigned long) phba;
3916 /* FCF rediscover timer */
3917 init_timer(&phba->fcf.redisc_wait);
3918 phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
3919 phba->fcf.redisc_wait.data = (unsigned long)phba;
3920
3361 /* 3921 /*
3362 * We need to do a READ_CONFIG mailbox command here before 3922 * We need to do a READ_CONFIG mailbox command here before
3363 * calling lpfc_get_cfgparam. For VFs this will report the 3923 * calling lpfc_get_cfgparam. For VFs this will report the
@@ -3382,31 +3942,26 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3382 * used to create the sg_dma_buf_pool must be dynamically calculated. 3942 * used to create the sg_dma_buf_pool must be dynamically calculated.
3383 * 2 segments are added since the IOCB needs a command and response bde. 3943 * 2 segments are added since the IOCB needs a command and response bde.
3384 * To ensure that the scsi sgl does not cross a 4k page boundary only 3944 * To ensure that the scsi sgl does not cross a 4k page boundary only
3385 * sgl sizes of 1k, 2k, 4k, and 8k are supported. 3945 * sgl sizes must be a power of 2.
3386 * Table of sgl sizes and seg_cnt:
3387 * sgl size, sg_seg_cnt total seg
3388 * 1k 50 52
3389 * 2k 114 116
3390 * 4k 242 244
3391 * 8k 498 500
3392 * cmd(32) + rsp(160) + (52 * sizeof(sli4_sge)) = 1024
3393 * cmd(32) + rsp(160) + (116 * sizeof(sli4_sge)) = 2048
3394 * cmd(32) + rsp(160) + (244 * sizeof(sli4_sge)) = 4096
3395 * cmd(32) + rsp(160) + (500 * sizeof(sli4_sge)) = 8192
3396 */ 3946 */
3397 if (phba->cfg_sg_seg_cnt <= LPFC_DEFAULT_SG_SEG_CNT) 3947 buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
3398 phba->cfg_sg_seg_cnt = 50; 3948 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)));
3399 else if (phba->cfg_sg_seg_cnt <= 114) 3949 /* Feature Level 1 hardware is limited to 2 pages */
3400 phba->cfg_sg_seg_cnt = 114; 3950 if ((bf_get(lpfc_sli_intf_featurelevel1, &phba->sli4_hba.sli_intf) ==
3401 else if (phba->cfg_sg_seg_cnt <= 242) 3951 LPFC_SLI_INTF_FEATURELEVEL1_1))
3402 phba->cfg_sg_seg_cnt = 242; 3952 max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
3403 else 3953 else
3404 phba->cfg_sg_seg_cnt = 498; 3954 max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
3405 3955 for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
3406 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) 3956 dma_buf_size < max_buf_size && buf_size > dma_buf_size;
3407 + sizeof(struct fcp_rsp); 3957 dma_buf_size = dma_buf_size << 1)
3408 phba->cfg_sg_dma_buf_size += 3958 ;
3409 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)); 3959 if (dma_buf_size == max_buf_size)
3960 phba->cfg_sg_seg_cnt = (dma_buf_size -
3961 sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) -
3962 (2 * sizeof(struct sli4_sge))) /
3963 sizeof(struct sli4_sge);
3964 phba->cfg_sg_dma_buf_size = dma_buf_size;
3410 3965
3411 /* Initialize buffer queue management fields */ 3966 /* Initialize buffer queue management fields */
3412 hbq_count = lpfc_sli_hbq_count(); 3967 hbq_count = lpfc_sli_hbq_count();
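
A worked instance of the new sizing loop, using the SGE and FCP sizes
implied by the table the patch removes (cmd 32 bytes, rsp 160 bytes,
sizeof(struct sli4_sge) 16 bytes; the cfg_sg_seg_cnt value is
illustrative, and LPFC_SLI4_MIN_BUF_SIZE is assumed to start at or
below 1024):

	/* cfg_sg_seg_cnt = 64:
	 *   buf_size = 32 + 160 + (64 + 2) * 16 = 1248
	 * doubling dma_buf_size stops at the first power of two
	 * >= 1248, so dma_buf_size = 2048 and a scsi sgl can never
	 * straddle a 4k page boundary.
	 */
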
@@ -3432,7 +3987,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3432 /* Driver internal slow-path CQ Event pool */ 3987 /* Driver internal slow-path CQ Event pool */
3433 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool); 3988 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
3434 /* Response IOCB work queue list */ 3989 /* Response IOCB work queue list */
3435 INIT_LIST_HEAD(&phba->sli4_hba.sp_rspiocb_work_queue); 3990 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
3436 /* Asynchronous event CQ Event work queue list */ 3991 /* Asynchronous event CQ Event work queue list */
3437 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue); 3992 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
3438 /* Fast-path XRI aborted CQ Event work queue list */ 3993 /* Fast-path XRI aborted CQ Event work queue list */
@@ -3461,6 +4016,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3461 if (unlikely(rc)) 4016 if (unlikely(rc))
3462 goto out_free_bsmbx; 4017 goto out_free_bsmbx;
3463 4018
4019 rc = lpfc_sli4_fw_cfg_check(phba);
4020 if (unlikely(rc))
4021 goto out_free_bsmbx;
4022
3464 /* Set up the hba's configuration parameters. */ 4023 /* Set up the hba's configuration parameters. */
3465 rc = lpfc_sli4_read_config(phba); 4024 rc = lpfc_sli4_read_config(phba);
3466 if (unlikely(rc)) 4025 if (unlikely(rc))
@@ -3502,13 +4061,24 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3502 goto out_free_active_sgl; 4061 goto out_free_active_sgl;
3503 } 4062 }
3504 4063
4064 /* Allocate eligible FCF bmask memory for FCF round robin failover */
4065 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
4066 phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
4067 GFP_KERNEL);
4068 if (!phba->fcf.fcf_rr_bmask) {
4069 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4070 "2759 Failed allocate memory for FCF round "
4071 "robin failover bmask\n");
4072 goto out_remove_rpi_hdrs;
4073 }
4074
3505 phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * 4075 phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
3506 phba->cfg_fcp_eq_count), GFP_KERNEL); 4076 phba->cfg_fcp_eq_count), GFP_KERNEL);
3507 if (!phba->sli4_hba.fcp_eq_hdl) { 4077 if (!phba->sli4_hba.fcp_eq_hdl) {
3508 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4078 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3509 "2572 Failed allocate memory for fast-path " 4079 "2572 Failed allocate memory for fast-path "
3510 "per-EQ handle array\n"); 4080 "per-EQ handle array\n");
3511 goto out_remove_rpi_hdrs; 4081 goto out_free_fcf_rr_bmask;
3512 } 4082 }
3513 4083
3514 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * 4084 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
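
The bmask sizing above is the open-coded form of the kernel's
BITS_TO_LONGS(). With an assumed LPFC_SLI4_FCF_TBL_INDX_MAX of 32 on a
64-bit build:

	/* longs = (32 + 64 - 1) / 64 = 1: a single unsigned long backs
	 * the whole round-robin eligibility map; kzalloc starts every
	 * index ineligible, and entries are cleared again via
	 * lpfc_sli4_fcf_rr_index_clear(), seen earlier in this patch.
	 */
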
@@ -3520,10 +4090,49 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3520 goto out_free_fcp_eq_hdl; 4090 goto out_free_fcp_eq_hdl;
3521 } 4091 }
3522 4092
4093 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4094 GFP_KERNEL);
4095 if (!mboxq) {
4096 rc = -ENOMEM;
4097 goto out_free_fcp_eq_hdl;
4098 }
4099
4100 /* Get the Supported Pages. It is always available. */
4101 lpfc_supported_pages(mboxq);
4102 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4103 if (unlikely(rc)) {
4104 rc = -EIO;
4105 mempool_free(mboxq, phba->mbox_mem_pool);
4106 goto out_free_fcp_eq_hdl;
4107 }
4108
4109 mqe = &mboxq->u.mqe;
4110 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
4111 LPFC_MAX_SUPPORTED_PAGES);
4112 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
4113 switch (pn_page[i]) {
4114 case LPFC_SLI4_PARAMETERS:
4115 phba->sli4_hba.pc_sli4_params.supported = 1;
4116 break;
4117 default:
4118 break;
4119 }
4120 }
4121
4122 /* Read the port's SLI4 Parameters capabilities if supported. */
4123 if (phba->sli4_hba.pc_sli4_params.supported)
4124 rc = lpfc_pc_sli4_params_get(phba, mboxq);
4125 mempool_free(mboxq, phba->mbox_mem_pool);
4126 if (rc) {
4127 rc = -EIO;
4128 goto out_free_fcp_eq_hdl;
4129 }
3523 return rc; 4130 return rc;
3524 4131
3525out_free_fcp_eq_hdl: 4132out_free_fcp_eq_hdl:
3526 kfree(phba->sli4_hba.fcp_eq_hdl); 4133 kfree(phba->sli4_hba.fcp_eq_hdl);
4134out_free_fcf_rr_bmask:
4135 kfree(phba->fcf.fcf_rr_bmask);
3527out_remove_rpi_hdrs: 4136out_remove_rpi_hdrs:
3528 lpfc_sli4_remove_rpi_hdrs(phba); 4137 lpfc_sli4_remove_rpi_hdrs(phba);
3529out_free_active_sgl: 4138out_free_active_sgl:
@@ -3569,6 +4178,9 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
3569 lpfc_sli4_remove_rpi_hdrs(phba); 4178 lpfc_sli4_remove_rpi_hdrs(phba);
3570 lpfc_sli4_remove_rpis(phba); 4179 lpfc_sli4_remove_rpis(phba);
3571 4180
4181 /* Free eligible FCF index bmask */
4182 kfree(phba->fcf.fcf_rr_bmask);
4183
3572 /* Free the ELS sgl list */ 4184 /* Free the ELS sgl list */
3573 lpfc_free_active_sgl(phba); 4185 lpfc_free_active_sgl(phba);
3574 lpfc_free_sgl_list(phba); 4186 lpfc_free_sgl_list(phba);
@@ -3594,8 +4206,10 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
3594 4206
3595 /* Free the current connect table */ 4207 /* Free the current connect table */
3596 list_for_each_entry_safe(conn_entry, next_conn_entry, 4208 list_for_each_entry_safe(conn_entry, next_conn_entry,
3597 &phba->fcf_conn_rec_list, list) 4209 &phba->fcf_conn_rec_list, list) {
4210 list_del_init(&conn_entry->list);
3598 kfree(conn_entry); 4211 kfree(conn_entry);
4212 }
3599 4213
3600 return; 4214 return;
3601} 4215}
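
The connect-table teardown above gains braces and a list_del_init() before each kfree(): the _safe iterator protects the walk itself, but freeing a node that is still linked leaves the list's pointers referencing freed memory. A minimal sketch of the corrected pattern, with hypothetical types rather than lpfc code:

  #include <linux/list.h>
  #include <linux/slab.h>

  struct demo_conn {
          struct list_head list;
          /* payload elided */
  };

  static void demo_free_conn_list(struct list_head *head)
  {
          struct demo_conn *rec, *next;

          /* the _safe variant caches the successor before rec is freed */
          list_for_each_entry_safe(rec, next, head, list) {
                  list_del_init(&rec->list);      /* unlink first ... */
                  kfree(rec);                     /* ... then free */
          }
  }
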
@@ -3613,6 +4227,8 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
3613int 4227int
3614lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 4228lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3615{ 4229{
4230 phba->lpfc_hba_init_link = lpfc_hba_init_link;
4231 phba->lpfc_hba_down_link = lpfc_hba_down_link;
3616 switch (dev_grp) { 4232 switch (dev_grp) {
3617 case LPFC_PCI_DEV_LP: 4233 case LPFC_PCI_DEV_LP:
3618 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; 4234 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
@@ -3642,7 +4258,7 @@ lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3642 * device specific resource setup to support the HBA device it attached to. 4258 * device specific resource setup to support the HBA device it attached to.
3643 * 4259 *
3644 * Return codes 4260 * Return codes
3645 * 0 - sucessful 4261 * 0 - successful
3646 * other values - error 4262 * other values - error
3647 **/ 4263 **/
3648static int 4264static int
@@ -3688,7 +4304,7 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
3688 * device specific resource setup to support the HBA device it attached to. 4304 * device specific resource setup to support the HBA device it attached to.
3689 * 4305 *
3690 * Return codes 4306 * Return codes
3691 * 0 - sucessful 4307 * 0 - successful
3692 * other values - error 4308 * other values - error
3693 **/ 4309 **/
3694static int 4310static int
@@ -3753,7 +4369,7 @@ lpfc_free_iocb_list(struct lpfc_hba *phba)
3753 * list and set up the IOCB tag array accordingly. 4369 * list and set up the IOCB tag array accordingly.
3754 * 4370 *
3755 * Return codes 4371 * Return codes
3756 * 0 - sucessful 4372 * 0 - successful
3757 * other values - error 4373 * other values - error
3758 **/ 4374 **/
3759static int 4375static int
@@ -3824,7 +4440,7 @@ lpfc_free_sgl_list(struct lpfc_hba *phba)
3824 rc = lpfc_sli4_remove_all_sgl_pages(phba); 4440 rc = lpfc_sli4_remove_all_sgl_pages(phba);
3825 if (rc) { 4441 if (rc) {
3826 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4442 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3827 "2005 Unable to deregister pages from HBA: %x", rc); 4443 "2005 Unable to deregister pages from HBA: %x\n", rc);
3828 } 4444 }
3829 kfree(phba->sli4_hba.lpfc_els_sgl_array); 4445 kfree(phba->sli4_hba.lpfc_els_sgl_array);
3830} 4446}
@@ -3872,7 +4488,7 @@ lpfc_free_active_sgl(struct lpfc_hba *phba)
3872 * list and set up the sgl xritag tag array accordingly. 4488 * list and set up the sgl xritag tag array accordingly.
3873 * 4489 *
3874 * Return codes 4490 * Return codes
3875 * 0 - sucessful 4491 * 0 - successful
3876 * other values - error 4492 * other values - error
3877 **/ 4493 **/
3878static int 4494static int
@@ -3960,6 +4576,7 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
3960 4576
3961 /* The list order is used by later block SGL registration */ 4577 /* The list order is used by later block SGL registration */
3962 spin_lock_irq(&phba->hbalock); 4578 spin_lock_irq(&phba->hbalock);
4579 sglq_entry->state = SGL_FREED;
3963 list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list); 4580 list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
3964 phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry; 4581 phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
3965 phba->sli4_hba.total_sglq_bufs++; 4582 phba->sli4_hba.total_sglq_bufs++;
@@ -3986,7 +4603,7 @@ out_free_mem:
3986 * enabled and the driver is reinitializing the device. 4603 * enabled and the driver is reinitializing the device.
3987 * 4604 *
3988 * Return codes 4605 * Return codes
3989 * 0 - sucessful 4606 * 0 - successful
3990 ENOMEM - No available memory 4607 ENOMEM - No available memory
3991 * EIO - The mailbox failed to complete successfully. 4608 * EIO - The mailbox failed to complete successfully.
3992 **/ 4609 **/
@@ -4146,7 +4763,7 @@ lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
4146 * PCI device data structure is set. 4763 * PCI device data structure is set.
4147 * 4764 *
4148 * Return codes 4765 * Return codes
4149 * pointer to @phba - sucessful 4766 * pointer to @phba - successful
4150 * NULL - error 4767 * NULL - error
4151 **/ 4768 **/
4152static struct lpfc_hba * 4769static struct lpfc_hba *
@@ -4171,7 +4788,7 @@ lpfc_hba_alloc(struct pci_dev *pdev)
4171 return NULL; 4788 return NULL;
4172 } 4789 }
4173 4790
4174 mutex_init(&phba->ct_event_mutex); 4791 spin_lock_init(&phba->ct_ev_lock);
4175 INIT_LIST_HEAD(&phba->ct_ev_waiters); 4792 INIT_LIST_HEAD(&phba->ct_ev_waiters);
4176 4793
4177 return phba; 4794 return phba;
@@ -4202,7 +4819,7 @@ lpfc_hba_free(struct lpfc_hba *phba)
4202 * host with it. 4819 * host with it.
4203 * 4820 *
4204 * Return codes 4821 * Return codes
4205 * 0 - sucessful 4822 * 0 - successful
4206 * other values - error 4823 * other values - error
4207 **/ 4824 **/
4208static int 4825static int
@@ -4273,7 +4890,8 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
4273 _dump_buf_data = 4890 _dump_buf_data =
4274 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 4891 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
4275 if (_dump_buf_data) { 4892 if (_dump_buf_data) {
4276 printk(KERN_ERR "BLKGRD allocated %d pages for " 4893 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4894 "9043 BLKGRD: allocated %d pages for "
4277 "_dump_buf_data at 0x%p\n", 4895 "_dump_buf_data at 0x%p\n",
4278 (1 << pagecnt), _dump_buf_data); 4896 (1 << pagecnt), _dump_buf_data);
4279 _dump_buf_data_order = pagecnt; 4897 _dump_buf_data_order = pagecnt;
@@ -4284,17 +4902,20 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
4284 --pagecnt; 4902 --pagecnt;
4285 } 4903 }
4286 if (!_dump_buf_data_order) 4904 if (!_dump_buf_data_order)
4287 printk(KERN_ERR "BLKGRD ERROR unable to allocate " 4905 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4906 "9044 BLKGRD: ERROR unable to allocate "
4288 "memory for hexdump\n"); 4907 "memory for hexdump\n");
4289 } else 4908 } else
4290 printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p" 4909 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4910 "9045 BLKGRD: already allocated _dump_buf_data=0x%p"
4291 "\n", _dump_buf_data); 4911 "\n", _dump_buf_data);
4292 if (!_dump_buf_dif) { 4912 if (!_dump_buf_dif) {
4293 while (pagecnt) { 4913 while (pagecnt) {
4294 _dump_buf_dif = 4914 _dump_buf_dif =
4295 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 4915 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
4296 if (_dump_buf_dif) { 4916 if (_dump_buf_dif) {
4297 printk(KERN_ERR "BLKGRD allocated %d pages for " 4917 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4918 "9046 BLKGRD: allocated %d pages for "
4298 "_dump_buf_dif at 0x%p\n", 4919 "_dump_buf_dif at 0x%p\n",
4299 (1 << pagecnt), _dump_buf_dif); 4920 (1 << pagecnt), _dump_buf_dif);
4300 _dump_buf_dif_order = pagecnt; 4921 _dump_buf_dif_order = pagecnt;
@@ -4305,10 +4926,12 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
4305 --pagecnt; 4926 --pagecnt;
4306 } 4927 }
4307 if (!_dump_buf_dif_order) 4928 if (!_dump_buf_dif_order)
4308 printk(KERN_ERR "BLKGRD ERROR unable to allocate " 4929 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4930 "9047 BLKGRD: ERROR unable to allocate "
4309 "memory for hexdump\n"); 4931 "memory for hexdump\n");
4310 } else 4932 } else
4311 printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n", 4933 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4934 "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
4312 _dump_buf_dif); 4935 _dump_buf_dif);
4313} 4936}
4314 4937
@@ -4365,7 +4988,7 @@ lpfc_post_init_setup(struct lpfc_hba *phba)
4365 * with SLI-3 interface spec. 4988 * with SLI-3 interface spec.
4366 * 4989 *
4367 * Return codes 4990 * Return codes
4368 * 0 - sucessful 4991 * 0 - successful
4369 * other values - error 4992 * other values - error
4370 **/ 4993 **/
4371static int 4994static int
@@ -4384,9 +5007,13 @@ lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
4384 pdev = phba->pcidev; 5007 pdev = phba->pcidev;
4385 5008
4386 /* Set the device DMA mask size */ 5009 /* Set the device DMA mask size */
4387 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) 5010 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
4388 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) 5011 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
5012 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
5013 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
4389 return error; 5014 return error;
5015 }
5016 }
4390 5017
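
The reworked mask setup requests the streaming and coherent DMA masks as a pair, falling back from 64-bit to 32-bit only when either call in the pair fails. A standalone sketch of the same pattern, using the pre-consolidation pci_* DMA calls this kernel generation uses:

  #include <linux/pci.h>
  #include <linux/dma-mapping.h>

  static int demo_set_dma_masks(struct pci_dev *pdev)
  {
          /* Prefer full 64-bit DMA for both streaming and coherent mappings */
          if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 &&
              pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0)
                  return 0;
          /* Fall back to 32-bit if either 64-bit mask is refused */
          if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0 &&
              pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) == 0)
                  return 0;
          return -EIO;
  }
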
4391 /* Get the bus address of Bar0 and Bar2 and the number of bytes 5018 /* Get the bus address of Bar0 and Bar2 and the number of bytes
4392 * required by each mapping. 5019 * required by each mapping.
@@ -4511,8 +5138,7 @@ lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
4511int 5138int
4512lpfc_sli4_post_status_check(struct lpfc_hba *phba) 5139lpfc_sli4_post_status_check(struct lpfc_hba *phba)
4513{ 5140{
4514 struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg, scratchpad; 5141 struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg;
4515 uint32_t onlnreg0, onlnreg1;
4516 int i, port_error = -ENODEV; 5142 int i, port_error = -ENODEV;
4517 5143
4518 if (!phba->sli4_hba.STAregaddr) 5144 if (!phba->sli4_hba.STAregaddr)
@@ -4548,29 +5174,35 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
4548 bf_get(lpfc_hst_state_port_status, &sta_reg)); 5174 bf_get(lpfc_hst_state_port_status, &sta_reg));
4549 5175
4550 /* Log device information */ 5176 /* Log device information */
4551 scratchpad.word0 = readl(phba->sli4_hba.SCRATCHPADregaddr); 5177 phba->sli4_hba.sli_intf.word0 = readl(phba->sli4_hba.SLIINTFregaddr);
4552 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5178 if (bf_get(lpfc_sli_intf_valid,
4553 "2534 Device Info: ChipType=0x%x, SliRev=0x%x, " 5179 &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_VALID) {
4554 "FeatureL1=0x%x, FeatureL2=0x%x\n", 5180 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4555 bf_get(lpfc_scratchpad_chiptype, &scratchpad), 5181 "2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
4556 bf_get(lpfc_scratchpad_slirev, &scratchpad), 5182 "FeatureL1=0x%x, FeatureL2=0x%x\n",
4557 bf_get(lpfc_scratchpad_featurelevel1, &scratchpad), 5183 bf_get(lpfc_sli_intf_sli_family,
4558 bf_get(lpfc_scratchpad_featurelevel2, &scratchpad)); 5184 &phba->sli4_hba.sli_intf),
4559 5185 bf_get(lpfc_sli_intf_slirev,
5186 &phba->sli4_hba.sli_intf),
5187 bf_get(lpfc_sli_intf_featurelevel1,
5188 &phba->sli4_hba.sli_intf),
5189 bf_get(lpfc_sli_intf_featurelevel2,
5190 &phba->sli4_hba.sli_intf));
5191 }
5192 phba->sli4_hba.ue_mask_lo = readl(phba->sli4_hba.UEMASKLOregaddr);
5193 phba->sli4_hba.ue_mask_hi = readl(phba->sli4_hba.UEMASKHIregaddr);
4560 /* With unrecoverable error, log the error message and return error */ 5194 /* With unrecoverable error, log the error message and return error */
4561 onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr); 5195 uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
4562 onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr); 5196 uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
4563 if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) { 5197 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
4564 uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr); 5198 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
4565 uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr); 5199 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4566 if (uerrlo_reg.word0 || uerrhi_reg.word0) { 5200 "1422 HBA Unrecoverable error: "
4567 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5201 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
4568 "1422 HBA Unrecoverable error: " 5202 "ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n",
4569 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " 5203 uerrlo_reg.word0, uerrhi_reg.word0,
4570 "online0_reg=0x%x, online1_reg=0x%x\n", 5204 phba->sli4_hba.ue_mask_lo,
4571 uerrlo_reg.word0, uerrhi_reg.word0, 5205 phba->sli4_hba.ue_mask_hi);
4572 onlnreg0, onlnreg1);
4573 }
4574 return -ENODEV; 5206 return -ENODEV;
4575 } 5207 }
4576 5208
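
The new check replaces the ONLINE0/ONLINE1 gate with per-bit masking: each bit set in ue_mask_lo/ue_mask_hi declares the matching unrecoverable-error status bit expected and ignorable, so ~mask & status is non-zero only for genuine faults. A small, runnable illustration of the bit logic (the values are made up):

  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
          uint32_t status = 0x00000014;   /* bits 2 and 4 latched */
          uint32_t mask   = 0x00000010;   /* bit 4 is masked (expected) */

          /* only unmasked bits count as a real unrecoverable error */
          if (~mask & status)
                  printf("fatal: unmasked error bits 0x%08x\n", ~mask & status);
          return 0;
  }
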
@@ -4591,12 +5223,12 @@ lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)
4591 LPFC_UERR_STATUS_LO; 5223 LPFC_UERR_STATUS_LO;
4592 phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p + 5224 phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
4593 LPFC_UERR_STATUS_HI; 5225 LPFC_UERR_STATUS_HI;
4594 phba->sli4_hba.ONLINE0regaddr = phba->sli4_hba.conf_regs_memmap_p + 5226 phba->sli4_hba.UEMASKLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
4595 LPFC_ONLINE0; 5227 LPFC_UE_MASK_LO;
4596 phba->sli4_hba.ONLINE1regaddr = phba->sli4_hba.conf_regs_memmap_p + 5228 phba->sli4_hba.UEMASKHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
4597 LPFC_ONLINE1; 5229 LPFC_UE_MASK_HI;
4598 phba->sli4_hba.SCRATCHPADregaddr = phba->sli4_hba.conf_regs_memmap_p + 5230 phba->sli4_hba.SLIINTFregaddr = phba->sli4_hba.conf_regs_memmap_p +
4599 LPFC_SCRATCHPAD; 5231 LPFC_SLI_INTF;
4600} 5232}
4601 5233
4602/** 5234/**
@@ -4662,7 +5294,7 @@ lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
4662 * this routine. 5294 * this routine.
4663 * 5295 *
4664 * Return codes 5296 * Return codes
4665 * 0 - sucessful 5297 * 0 - successful
4666 ENOMEM - could not allocate memory. 5298 ENOMEM - could not allocate memory.
4667 **/ 5299 **/
4668static int 5300static int
@@ -4761,7 +5393,7 @@ lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
4761 * allocation for the port. 5393 * allocation for the port.
4762 * 5394 *
4763 * Return codes 5395 * Return codes
4764 * 0 - sucessful 5396 * 0 - successful
4765 ENOMEM - No available memory 5397 ENOMEM - No available memory
4766 * EIO - The mailbox failed to complete successfully. 5398 * EIO - The mailbox failed to complete successfully.
4767 **/ 5399 **/
@@ -4825,7 +5457,8 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
4825 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; 5457 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
4826 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; 5458 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
4827 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base; 5459 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
4828 phba->max_vpi = phba->sli4_hba.max_cfg_param.max_vpi; 5460 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
5461 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
4829 phba->max_vports = phba->max_vpi; 5462 phba->max_vports = phba->max_vpi;
4830 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5463 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4831 "2003 cfg params XRI(B:%d M:%d), " 5464 "2003 cfg params XRI(B:%d M:%d), "
@@ -4861,7 +5494,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
4861 * HBA consistent with the SLI-4 interface spec. 5494 * HBA consistent with the SLI-4 interface spec.
4862 * 5495 *
4863 * Return codes 5496 * Return codes
4864 * 0 - sucessful 5497 * 0 - successful
4865 ENOMEM - No available memory 5498 ENOMEM - No available memory
4866 * EIO - The mailbox failed to complete successfully. 5499 * EIO - The mailbox failed to complete successfully.
4867 **/ 5500 **/
@@ -4910,7 +5543,7 @@ lpfc_setup_endian_order(struct lpfc_hba *phba)
4910 * we just use some constant number as place holder. 5543 * we just use some constant number as place holder.
4911 * 5544 *
4912 * Return codes 5545 * Return codes
4913 * 0 - sucessful 5546 * 0 - successful
4914 ENOMEM - No available memory 5547 ENOMEM - No available memory
4915 * EIO - The mailbox failed to complete successfully. 5548 * EIO - The mailbox failed to complete successfully.
4916 **/ 5549 **/
@@ -4979,10 +5612,9 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
4979 /* It does not make sense to have more EQs than WQs */ 5612 /* It does not make sense to have more EQs than WQs */
4980 if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) { 5613 if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
4981 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5614 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4982 "2593 The number of FCP EQs (%d) is more " 5615 "2593 The FCP EQ count(%d) cannot be greater "
4983 "than the number of FCP WQs (%d), take " 5616 "than the FCP WQ count(%d), limiting the "
4984 "the number of FCP EQs same as than of " 5617 "FCP EQ count to %d\n", cfg_fcp_eq_count,
4985 "WQs (%d)\n", cfg_fcp_eq_count,
4986 phba->cfg_fcp_wq_count, 5618 phba->cfg_fcp_wq_count,
4987 phba->cfg_fcp_wq_count); 5619 phba->cfg_fcp_wq_count);
4988 cfg_fcp_eq_count = phba->cfg_fcp_wq_count; 5620 cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
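
The reworded log message accompanies the same clamp as before: an event queue with no work queue to pair with is useless, so the EQ count is limited to the WQ count and the adjustment is logged. The shape of the check, as a hypothetical standalone helper:

  #include <linux/kernel.h>

  static unsigned int demo_clamp_eq_count(unsigned int eq_cnt,
                                          unsigned int wq_cnt)
  {
          if (eq_cnt > wq_cnt) {
                  printk(KERN_WARNING "demo: EQ count (%u) cannot be greater "
                         "than WQ count (%u), limiting EQ count to %u\n",
                         eq_cnt, wq_cnt, wq_cnt);
                  eq_cnt = wq_cnt;
          }
          return eq_cnt;
  }
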
@@ -5058,15 +5690,6 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
5058 } 5690 }
5059 phba->sli4_hba.els_cq = qdesc; 5691 phba->sli4_hba.els_cq = qdesc;
5060 5692
5061 /* Create slow-path Unsolicited Receive Complete Queue */
5062 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5063 phba->sli4_hba.cq_ecount);
5064 if (!qdesc) {
5065 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5066 "0502 Failed allocate slow-path USOL RX CQ\n");
5067 goto out_free_els_cq;
5068 }
5069 phba->sli4_hba.rxq_cq = qdesc;
5070 5693
5071 /* Create fast-path FCP Completion Queue(s), one-to-one with EQs */ 5694 /* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
5072 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) * 5695 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
@@ -5075,7 +5698,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
5075 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5698 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5076 "2577 Failed allocate memory for fast-path " 5699 "2577 Failed allocate memory for fast-path "
5077 "CQ record array\n"); 5700 "CQ record array\n");
5078 goto out_free_rxq_cq; 5701 goto out_free_els_cq;
5079 } 5702 }
5080 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { 5703 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
5081 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 5704 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
@@ -5188,9 +5811,6 @@ out_free_fcp_cq:
5188 phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL; 5811 phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
5189 } 5812 }
5190 kfree(phba->sli4_hba.fcp_cq); 5813 kfree(phba->sli4_hba.fcp_cq);
5191out_free_rxq_cq:
5192 lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
5193 phba->sli4_hba.rxq_cq = NULL;
5194out_free_els_cq: 5814out_free_els_cq:
5195 lpfc_sli4_queue_free(phba->sli4_hba.els_cq); 5815 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5196 phba->sli4_hba.els_cq = NULL; 5816 phba->sli4_hba.els_cq = NULL;
@@ -5218,7 +5838,7 @@ out_error:
5218 * operation. 5838 * operation.
5219 * 5839 *
5220 * Return codes 5840 * Return codes
5221 * 0 - sucessful 5841 * 0 - successful
5222 ENOMEM - No available memory 5842 ENOMEM - No available memory
5223 * EIO - The mailbox failed to complete successfully. 5843 * EIO - The mailbox failed to complete successfully.
5224 **/ 5844 **/
@@ -5247,10 +5867,6 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
5247 lpfc_sli4_queue_free(phba->sli4_hba.dat_rq); 5867 lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
5248 phba->sli4_hba.dat_rq = NULL; 5868 phba->sli4_hba.dat_rq = NULL;
5249 5869
5250 /* Release unsolicited receive complete queue */
5251 lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
5252 phba->sli4_hba.rxq_cq = NULL;
5253
5254 /* Release ELS complete queue */ 5870 /* Release ELS complete queue */
5255 lpfc_sli4_queue_free(phba->sli4_hba.els_cq); 5871 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5256 phba->sli4_hba.els_cq = NULL; 5872 phba->sli4_hba.els_cq = NULL;
@@ -5286,7 +5902,7 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
5286 * operation. 5902 * operation.
5287 * 5903 *
5288 * Return codes 5904 * Return codes
5289 * 0 - sucessful 5905 * 0 - successful
5290 ENOMEM - No available memory 5906 ENOMEM - No available memory
5291 * EIO - The mailbox failed to complete successfully. 5907 * EIO - The mailbox failed to complete successfully.
5292 **/ 5908 **/
@@ -5383,25 +5999,6 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
5383 phba->sli4_hba.els_cq->queue_id, 5999 phba->sli4_hba.els_cq->queue_id,
5384 phba->sli4_hba.sp_eq->queue_id); 6000 phba->sli4_hba.sp_eq->queue_id);
5385 6001
5386 /* Set up slow-path Unsolicited Receive Complete Queue */
5387 if (!phba->sli4_hba.rxq_cq) {
5388 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5389 "0532 USOL RX CQ not allocated\n");
5390 goto out_destroy_els_cq;
5391 }
5392 rc = lpfc_cq_create(phba, phba->sli4_hba.rxq_cq, phba->sli4_hba.sp_eq,
5393 LPFC_RCQ, LPFC_USOL);
5394 if (rc) {
5395 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5396 "0533 Failed setup of slow-path USOL RX CQ: "
5397 "rc = 0x%x\n", rc);
5398 goto out_destroy_els_cq;
5399 }
5400 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5401 "2587 USL CQ setup: cq-id=%d, parent eq-id=%d\n",
5402 phba->sli4_hba.rxq_cq->queue_id,
5403 phba->sli4_hba.sp_eq->queue_id);
5404
5405 /* Set up fast-path FCP Response Complete Queue */ 6002 /* Set up fast-path FCP Response Complete Queue */
5406 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { 6003 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
5407 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { 6004 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
@@ -5507,7 +6104,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
5507 goto out_destroy_fcp_wq; 6104 goto out_destroy_fcp_wq;
5508 } 6105 }
5509 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 6106 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
5510 phba->sli4_hba.rxq_cq, LPFC_USOL); 6107 phba->sli4_hba.els_cq, LPFC_USOL);
5511 if (rc) { 6108 if (rc) {
5512 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6109 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5513 "0541 Failed setup of Receive Queue: " 6110 "0541 Failed setup of Receive Queue: "
@@ -5519,7 +6116,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
5519 "parent cq-id=%d\n", 6116 "parent cq-id=%d\n",
5520 phba->sli4_hba.hdr_rq->queue_id, 6117 phba->sli4_hba.hdr_rq->queue_id,
5521 phba->sli4_hba.dat_rq->queue_id, 6118 phba->sli4_hba.dat_rq->queue_id,
5522 phba->sli4_hba.rxq_cq->queue_id); 6119 phba->sli4_hba.els_cq->queue_id);
5523 return 0; 6120 return 0;
5524 6121
5525out_destroy_fcp_wq: 6122out_destroy_fcp_wq:
@@ -5531,8 +6128,6 @@ out_destroy_mbx_wq:
5531out_destroy_fcp_cq: 6128out_destroy_fcp_cq:
5532 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) 6129 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
5533 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]); 6130 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
5534 lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
5535out_destroy_els_cq:
5536 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 6131 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
5537out_destroy_mbx_cq: 6132out_destroy_mbx_cq:
5538 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 6133 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
@@ -5552,7 +6147,7 @@ out_error:
5552 * operation. 6147 * operation.
5553 * 6148 *
5554 * Return codes 6149 * Return codes
5555 * 0 - sucessful 6150 * 0 - successful
5556 ENOMEM - No available memory 6151 ENOMEM - No available memory
5557 * EIO - The mailbox failed to complete successfully. 6152 * EIO - The mailbox failed to complete successfully.
5558 **/ 6153 **/
@@ -5574,8 +6169,6 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
5574 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 6169 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
5575 /* Unset ELS complete queue */ 6170 /* Unset ELS complete queue */
5576 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 6171 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
5577 /* Unset unsolicited receive complete queue */
5578 lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
5579 /* Unset FCP response complete queue */ 6172 /* Unset FCP response complete queue */
5580 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 6173 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5581 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]); 6174 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
@@ -5599,7 +6192,7 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
5599 * Later, this can be used for all the slow-path events. 6192 * Later, this can be used for all the slow-path events.
5600 * 6193 *
5601 * Return codes 6194 * Return codes
5602 * 0 - sucessful 6195 * 0 - successful
5603 -ENOMEM - No available memory 6196 -ENOMEM - No available memory
5604 **/ 6197 **/
5605static int 6198static int
@@ -5760,7 +6353,7 @@ lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
5760 * all resources assigned to the PCI function which originates this request. 6353 * all resources assigned to the PCI function which originates this request.
5761 * 6354 *
5762 * Return codes 6355 * Return codes
5763 * 0 - sucessful 6356 * 0 - successful
5764 ENOMEM - No available memory 6357 ENOMEM - No available memory
5765 * EIO - The mailbox failed to complete successfully. 6358 * EIO - The mailbox failed to complete successfully.
5766 **/ 6359 **/
@@ -5910,7 +6503,7 @@ lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi)
5910 spin_lock_irqsave(&phba->hbalock, flags); 6503 spin_lock_irqsave(&phba->hbalock, flags);
5911 /* Mark the FCFI is no longer registered */ 6504 /* Mark the FCFI is no longer registered */
5912 phba->fcf.fcf_flag &= 6505 phba->fcf.fcf_flag &=
5913 ~(FCF_AVAILABLE | FCF_REGISTERED | FCF_DISCOVERED); 6506 ~(FCF_AVAILABLE | FCF_REGISTERED | FCF_SCAN_DONE);
5914 spin_unlock_irqrestore(&phba->hbalock, flags); 6507 spin_unlock_irqrestore(&phba->hbalock, flags);
5915 } 6508 }
5916} 6509}
@@ -5923,7 +6516,7 @@ lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi)
5923 * with SLI-4 interface spec. 6516 * with SLI-4 interface spec.
5924 * 6517 *
5925 * Return codes 6518 * Return codes
5926 * 0 - sucessful 6519 * 0 - successful
5927 * other values - error 6520 * other values - error
5928 **/ 6521 **/
5929static int 6522static int
@@ -5940,22 +6533,30 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
5940 pdev = phba->pcidev; 6533 pdev = phba->pcidev;
5941 6534
5942 /* Set the device DMA mask size */ 6535 /* Set the device DMA mask size */
5943 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) 6536 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
5944 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 6537 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
6538 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
6539 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
5945 return error; 6540 return error;
6541 }
6542 }
5946 6543
5947 /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the 6544 /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the
5948 * number of bytes required by each mapping. They are actually 6545 * number of bytes required by each mapping. They are actually
5949 * mapping to the PCI BAR regions 1, 2, and 4 by the SLI4 device. 6546 * mapping to the PCI BAR regions 0 or 1, 2, and 4 by the SLI4 device.
5950 */ 6547 */
5951 phba->pci_bar0_map = pci_resource_start(pdev, LPFC_SLI4_BAR0); 6548 if (pci_resource_start(pdev, 0)) {
5952 bar0map_len = pci_resource_len(pdev, LPFC_SLI4_BAR0); 6549 phba->pci_bar0_map = pci_resource_start(pdev, 0);
5953 6550 bar0map_len = pci_resource_len(pdev, 0);
5954 phba->pci_bar1_map = pci_resource_start(pdev, LPFC_SLI4_BAR1); 6551 } else {
5955 bar1map_len = pci_resource_len(pdev, LPFC_SLI4_BAR1); 6552 phba->pci_bar0_map = pci_resource_start(pdev, 1);
6553 bar0map_len = pci_resource_len(pdev, 1);
6554 }
6555 phba->pci_bar1_map = pci_resource_start(pdev, 2);
6556 bar1map_len = pci_resource_len(pdev, 2);
5956 6557
5957 phba->pci_bar2_map = pci_resource_start(pdev, LPFC_SLI4_BAR2); 6558 phba->pci_bar2_map = pci_resource_start(pdev, 4);
5958 bar2map_len = pci_resource_len(pdev, LPFC_SLI4_BAR2); 6559 bar2map_len = pci_resource_len(pdev, 4);
5959 6560
5960 /* Map SLI4 PCI Config Space Register base to a kernel virtual addr */ 6561 /* Map SLI4 PCI Config Space Register base to a kernel virtual addr */
5961 phba->sli4_hba.conf_regs_memmap_p = 6562 phba->sli4_hba.conf_regs_memmap_p =
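
Instead of the fixed LPFC_SLI4_BARx constants, the setup now probes PCI region 0 and falls back to region 1 for the config-register BAR, matching the updated comment's "regions 0 or 1, 2, and 4". A minimal sketch of the probe, with a hypothetical helper name:

  #include <linux/pci.h>

  static void demo_pick_conf_bar(struct pci_dev *pdev,
                                 resource_size_t *start, resource_size_t *len)
  {
          /* some SLI4 parts expose config registers in BAR0, others in BAR1 */
          int bar = pci_resource_start(pdev, 0) ? 0 : 1;

          *start = pci_resource_start(pdev, bar);
          *len = pci_resource_len(pdev, bar);
  }
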
@@ -6052,7 +6653,7 @@ lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
6052 * will be left with MSI-X enabled and leaks its vectors. 6653 * will be left with MSI-X enabled and leaks its vectors.
6053 * 6654 *
6054 * Return codes 6655 * Return codes
6055 * 0 - sucessful 6656 * 0 - successful
6056 * other values - error 6657 * other values - error
6057 **/ 6658 **/
6058static int 6659static int
@@ -6184,7 +6785,7 @@ lpfc_sli_disable_msix(struct lpfc_hba *phba)
6184 * is done in this function. 6785 * is done in this function.
6185 * 6786 *
6186 * Return codes 6787 * Return codes
6187 * 0 - sucessful 6788 * 0 - successful
6188 * other values - error 6789 * other values - error
6189 */ 6790 */
6190static int 6791static int
@@ -6243,7 +6844,7 @@ lpfc_sli_disable_msi(struct lpfc_hba *phba)
6243 * MSI-X -> MSI -> IRQ. 6844 * MSI-X -> MSI -> IRQ.
6244 * 6845 *
6245 * Return codes 6846 * Return codes
6246 * 0 - sucessful 6847 * 0 - successful
6247 * other values - error 6848 * other values - error
6248 **/ 6849 **/
6249static uint32_t 6850static uint32_t
@@ -6333,7 +6934,7 @@ lpfc_sli_disable_intr(struct lpfc_hba *phba)
6333 * enabled and leaks its vectors. 6934 * enabled and leaks its vectors.
6334 * 6935 *
6335 * Return codes 6936 * Return codes
6336 * 0 - sucessful 6937 * 0 - successful
6337 * other values - error 6938 * other values - error
6338 **/ 6939 **/
6339static int 6940static int
@@ -6443,7 +7044,7 @@ lpfc_sli4_disable_msix(struct lpfc_hba *phba)
6443 * which is done in this function. 7044 * which is done in this function.
6444 * 7045 *
6445 * Return codes 7046 * Return codes
6446 * 0 - sucessful 7047 * 0 - successful
6447 * other values - error 7048 * other values - error
6448 **/ 7049 **/
6449static int 7050static int
@@ -6508,7 +7109,7 @@ lpfc_sli4_disable_msi(struct lpfc_hba *phba)
6508 * MSI-X -> MSI -> IRQ. 7109 * MSI-X -> MSI -> IRQ.
6509 * 7110 *
6510 * Return codes 7111 * Return codes
6511 * 0 - sucessful 7112 * 0 - successful
6512 * other values - error 7113 * other values - error
6513 **/ 7114 **/
6514static uint32_t 7115static uint32_t
@@ -6700,6 +7301,73 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
6700 phba->pport->work_port_events = 0; 7301 phba->pport->work_port_events = 0;
6701} 7302}
6702 7303
7304 /**
7305 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
7306 * @phba: Pointer to HBA context object.
7307 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
7308 *
7309 * This function is called in the SLI4 code path to read the port's
7310 * sli4 capabilities.
7311 *
7312 * This function may be called from any context that can block-wait
7313 * for the completion. The expectation is that this routine is called
7314 * typically from probe_one or from the online routine.
7315 **/
7316int
7317lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7318{
7319 int rc;
7320 struct lpfc_mqe *mqe;
7321 struct lpfc_pc_sli4_params *sli4_params;
7322 uint32_t mbox_tmo;
7323
7324 rc = 0;
7325 mqe = &mboxq->u.mqe;
7326
7327 /* Read the port's SLI4 Parameters port capabilities */
7328 lpfc_sli4_params(mboxq);
7329 if (!phba->sli4_hba.intr_enable)
7330 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7331 else {
7332 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_PORT_CAPABILITIES);
7333 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
7334 }
7335
7336 if (unlikely(rc))
7337 return 1;
7338
7339 sli4_params = &phba->sli4_hba.pc_sli4_params;
7340 sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
7341 sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
7342 sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
7343 sli4_params->featurelevel_1 = bf_get(featurelevel_1,
7344 &mqe->un.sli4_params);
7345 sli4_params->featurelevel_2 = bf_get(featurelevel_2,
7346 &mqe->un.sli4_params);
7347 sli4_params->proto_types = mqe->un.sli4_params.word3;
7348 sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
7349 sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
7350 sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
7351 sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
7352 sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
7353 sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
7354 sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
7355 sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
7356 sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
7357 sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
7358 sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
7359 sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
7360 sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
7361 sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
7362 sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
7363 sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
7364 sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
7365 sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
7366 sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
7367 sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
7368 return rc;
7369}
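
Note how this routine pairs with the setup hunk earlier in the file: lpfc_supported_pages() is issued first with MBX_POLL (it runs before interrupts are enabled), and lpfc_pc_sli4_params_get() is called only when the LPFC_SLI4_PARAMETERS page is advertised. The routine itself polls when intr_enable is clear and otherwise performs a blocking mailbox wait using the MBX_PORT_CAPABILITIES timeout.
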
7370
6703/** 7371/**
6704 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem. 7372 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
6705 * @pdev: pointer to PCI device 7373 * @pdev: pointer to PCI device
@@ -6722,6 +7390,7 @@ lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
6722{ 7390{
6723 struct lpfc_hba *phba; 7391 struct lpfc_hba *phba;
6724 struct lpfc_vport *vport = NULL; 7392 struct lpfc_vport *vport = NULL;
7393 struct Scsi_Host *shost = NULL;
6725 int error; 7394 int error;
6726 uint32_t cfg_mode, intr_mode; 7395 uint32_t cfg_mode, intr_mode;
6727 7396
@@ -6800,6 +7469,7 @@ lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
6800 goto out_destroy_shost; 7469 goto out_destroy_shost;
6801 } 7470 }
6802 7471
7472 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
6803 /* Now, trying to enable interrupt and bring up the device */ 7473 /* Now, trying to enable interrupt and bring up the device */
6804 cfg_mode = phba->cfg_use_msi; 7474 cfg_mode = phba->cfg_use_msi;
6805 while (true) { 7475 while (true) {
@@ -6866,6 +7536,8 @@ out_unset_pci_mem_s3:
6866 lpfc_sli_pci_mem_unset(phba); 7536 lpfc_sli_pci_mem_unset(phba);
6867out_disable_pci_dev: 7537out_disable_pci_dev:
6868 lpfc_disable_pci_dev(phba); 7538 lpfc_disable_pci_dev(phba);
7539 if (shost)
7540 scsi_host_put(shost);
6869out_free_phba: 7541out_free_phba:
6870 lpfc_hba_free(phba); 7542 lpfc_hba_free(phba);
6871 return error; 7543 return error;
@@ -7036,6 +7708,13 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev)
7036 /* Restore device state from PCI config space */ 7708 /* Restore device state from PCI config space */
7037 pci_set_power_state(pdev, PCI_D0); 7709 pci_set_power_state(pdev, PCI_D0);
7038 pci_restore_state(pdev); 7710 pci_restore_state(pdev);
7711
7712 /*
7713 * Since the new kernel behavior of the pci_restore_state() API call is to
7714 * clear the device saved_state flag, the restored state must be saved again.
7715 */
7716 pci_save_state(pdev);
7717
7039 if (pdev->is_busmaster) 7718 if (pdev->is_busmaster)
7040 pci_set_master(pdev); 7719 pci_set_master(pdev);
7041 7720
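
The same three-line fix appears at every resume and slot-reset site in this patch; the sketch below shows the resulting resume-side ordering. A hypothetical function, not lpfc code:

  #include <linux/pci.h>

  static int demo_resume(struct pci_dev *pdev)
  {
          pci_set_power_state(pdev, PCI_D0);
          pci_restore_state(pdev);
          /* pci_restore_state() invalidates the saved state; re-save so a
           * later suspend or slot reset still has valid config to restore */
          pci_save_state(pdev);
          if (pdev->is_busmaster)
                  pci_set_master(pdev);
          return 0;
  }
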
@@ -7070,6 +7749,73 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev)
7070} 7749}
7071 7750
7072/** 7751/**
7752 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
7753 * @phba: pointer to lpfc hba data structure.
7754 *
7755 * This routine is called to prepare the SLI3 device for PCI slot recover. It
7756 * aborts and stops all the on-going I/Os on the pci device.
7757 **/
7758static void
7759lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
7760{
7761 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7762 "2723 PCI channel I/O abort preparing for recovery\n");
7763 /* Prepare for bringing HBA offline */
7764 lpfc_offline_prep(phba);
7765 /* Clear sli active flag to prevent sysfs access to HBA */
7766 spin_lock_irq(&phba->hbalock);
7767 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
7768 spin_unlock_irq(&phba->hbalock);
7769 /* Stop and flush all I/Os and bring HBA offline */
7770 lpfc_offline(phba);
7771}
7772
7773/**
7774 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
7775 * @phba: pointer to lpfc hba data structure.
7776 *
7777 * This routine is called to prepare the SLI3 device for PCI slot reset. It
7778 * disables the device interrupt and pci device, and aborts the internal FCP
7779 * pending I/Os.
7780 **/
7781static void
7782lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
7783{
7784 struct lpfc_sli *psli = &phba->sli;
7785 struct lpfc_sli_ring *pring;
7786
7787 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7788 "2710 PCI channel disable preparing for reset\n");
7789 /* Disable interrupt and pci device */
7790 lpfc_sli_disable_intr(phba);
7791 pci_disable_device(phba->pcidev);
7792 /*
7793 * There may be I/Os dropped by the firmware.
7794 * Error out the iocbs (I/Os) on the txcmplq and let the SCSI layer
7795 * retry them after the link is re-established.
7796 */
7797 pring = &psli->ring[psli->fcp_ring];
7798 lpfc_sli_abort_iocb_ring(phba, pring);
7799}
7800
7801/**
7802 * lpfc_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
7803 * @phba: pointer to lpfc hba data structure.
7804 *
7805 * This routine is called to prepare the SLI3 device for PCI slot permanently
7806 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
7807 * pending I/Os.
7808 **/
7809static void
7810lpfc_prep_dev_for_perm_failure(struct lpfc_hba *phba)
7811{
7812 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7813 "2711 PCI channel permanent disable for failure\n");
7814 /* Clean up all driver's outstanding SCSI I/Os */
7815 lpfc_sli_flush_fcp_rings(phba);
7816}
7817
7818/**
7073 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error 7819 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
7074 * @pdev: pointer to PCI device. 7820 * @pdev: pointer to PCI device.
7075 * @state: the current PCI connection state. 7821 * @state: the current PCI connection state.
@@ -7083,6 +7829,7 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev)
7083 * as desired. 7829 * as desired.
7084 * 7830 *
7085 * Return codes 7831 * Return codes
7832 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
7086 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 7833 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
7087 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 7834 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7088 **/ 7835 **/
@@ -7091,33 +7838,30 @@ lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
7091{ 7838{
7092 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7839 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7093 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7840 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7094 struct lpfc_sli *psli = &phba->sli;
7095 struct lpfc_sli_ring *pring;
7096 7841
7097 if (state == pci_channel_io_perm_failure) { 7842 /* Block all SCSI devices' I/Os on the host */
7098 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7843 lpfc_scsi_dev_block(phba);
7099 "0472 PCI channel I/O permanent failure\n"); 7844
7100 /* Block all SCSI devices' I/Os on the host */ 7845 switch (state) {
7101 lpfc_scsi_dev_block(phba); 7846 case pci_channel_io_normal:
7102 /* Clean up all driver's outstanding SCSI I/Os */ 7847 /* Non-fatal error, prepare for recovery */
7103 lpfc_sli_flush_fcp_rings(phba); 7848 lpfc_sli_prep_dev_for_recover(phba);
7849 return PCI_ERS_RESULT_CAN_RECOVER;
7850 case pci_channel_io_frozen:
7851 /* Fatal error, prepare for slot reset */
7852 lpfc_sli_prep_dev_for_reset(phba);
7853 return PCI_ERS_RESULT_NEED_RESET;
7854 case pci_channel_io_perm_failure:
7855 /* Permanent failure, prepare for device down */
7856 lpfc_prep_dev_for_perm_failure(phba);
7104 return PCI_ERS_RESULT_DISCONNECT; 7857 return PCI_ERS_RESULT_DISCONNECT;
7858 default:
7859 /* Unknown state, prepare and request slot reset */
7860 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7861 "0472 Unknown PCI error state: x%x\n", state);
7862 lpfc_sli_prep_dev_for_reset(phba);
7863 return PCI_ERS_RESULT_NEED_RESET;
7105 } 7864 }
7106
7107 pci_disable_device(pdev);
7108 /*
7109 * There may be I/Os dropped by the firmware.
7110 * Error iocb (I/O) on txcmplq and let the SCSI layer
7111 * retry it after re-establishing link.
7112 */
7113 pring = &psli->ring[psli->fcp_ring];
7114 lpfc_sli_abort_iocb_ring(phba, pring);
7115
7116 /* Disable interrupt */
7117 lpfc_sli_disable_intr(phba);
7118
7119 /* Request a slot reset. */
7120 return PCI_ERS_RESULT_NEED_RESET;
7121} 7865}
7122 7866
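
The rewritten error_detected handler now blocks SCSI I/O unconditionally and returns a verdict per channel state, instead of treating everything short of permanent failure as needing a reset. The dispatch skeleton, reduced to its AER essentials with hypothetical names:

  #include <linux/pci.h>

  static pci_ers_result_t demo_error_detected(struct pci_dev *pdev,
                                              pci_channel_state_t state)
  {
          switch (state) {
          case pci_channel_io_normal:             /* non-fatal, link still up */
                  return PCI_ERS_RESULT_CAN_RECOVER;
          case pci_channel_io_frozen:             /* MMIO blocked, reset needed */
                  return PCI_ERS_RESULT_NEED_RESET;
          case pci_channel_io_perm_failure:       /* device is gone */
                  return PCI_ERS_RESULT_DISCONNECT;
          default:                                /* unknown: ask for a reset */
                  return PCI_ERS_RESULT_NEED_RESET;
          }
  }
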
7123/** 7867/**
@@ -7154,6 +7898,13 @@ lpfc_io_slot_reset_s3(struct pci_dev *pdev)
7154 } 7898 }
7155 7899
7156 pci_restore_state(pdev); 7900 pci_restore_state(pdev);
7901
7902 /*
7903 * Since the new kernel behavior of the pci_restore_state() API call is to
7904 * clear the device saved_state flag, the restored state must be saved again.
7905 */
7906 pci_save_state(pdev);
7907
7157 if (pdev->is_busmaster) 7908 if (pdev->is_busmaster)
7158 pci_set_master(pdev); 7909 pci_set_master(pdev);
7159 7910
@@ -7197,7 +7948,12 @@ lpfc_io_resume_s3(struct pci_dev *pdev)
7197 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7948 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7198 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7949 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7199 7950
7951 /* Bring the device online */
7200 lpfc_online(phba); 7952 lpfc_online(phba);
7953
7954 /* Clean up Advanced Error Reporting (AER) if needed */
7955 if (phba->hba_flag & HBA_AER_ENABLED)
7956 pci_cleanup_aer_uncorrect_error_status(pdev);
7201} 7957}
7202 7958
7203/** 7959/**
@@ -7213,15 +7969,15 @@ lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
7213 7969
7214 if (phba->sli_rev == LPFC_SLI_REV4) { 7970 if (phba->sli_rev == LPFC_SLI_REV4) {
7215 if (max_xri <= 100) 7971 if (max_xri <= 100)
7216 return 4; 7972 return 10;
7217 else if (max_xri <= 256) 7973 else if (max_xri <= 256)
7218 return 8; 7974 return 25;
7219 else if (max_xri <= 512) 7975 else if (max_xri <= 512)
7220 return 16; 7976 return 50;
7221 else if (max_xri <= 1024) 7977 else if (max_xri <= 1024)
7222 return 32; 7978 return 100;
7223 else 7979 else
7224 return 48; 7980 return 150;
7225 } else 7981 } else
7226 return 0; 7982 return 0;
7227} 7983}
@@ -7249,6 +8005,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
7249{ 8005{
7250 struct lpfc_hba *phba; 8006 struct lpfc_hba *phba;
7251 struct lpfc_vport *vport = NULL; 8007 struct lpfc_vport *vport = NULL;
8008 struct Scsi_Host *shost = NULL;
7252 int error; 8009 int error;
7253 uint32_t cfg_mode, intr_mode; 8010 uint32_t cfg_mode, intr_mode;
7254 int mcnt; 8011 int mcnt;
@@ -7329,6 +8086,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
7329 goto out_destroy_shost; 8086 goto out_destroy_shost;
7330 } 8087 }
7331 8088
8089 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
7332 /* Now, trying to enable interrupt and bring up the device */ 8090 /* Now, trying to enable interrupt and bring up the device */
7333 cfg_mode = phba->cfg_use_msi; 8091 cfg_mode = phba->cfg_use_msi;
7334 while (true) { 8092 while (true) {
@@ -7342,6 +8100,9 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
7342 error = -ENODEV; 8100 error = -ENODEV;
7343 goto out_free_sysfs_attr; 8101 goto out_free_sysfs_attr;
7344 } 8102 }
8103 /* Default to single FCP EQ for non-MSI-X */
8104 if (phba->intr_type != MSIX)
8105 phba->cfg_fcp_eq_count = 1;
7345 /* Set up SLI-4 HBA */ 8106 /* Set up SLI-4 HBA */
7346 if (lpfc_sli4_hba_setup(phba)) { 8107 if (lpfc_sli4_hba_setup(phba)) {
7347 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8108 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -7397,6 +8158,8 @@ out_unset_pci_mem_s4:
7397 lpfc_sli4_pci_mem_unset(phba); 8158 lpfc_sli4_pci_mem_unset(phba);
7398out_disable_pci_dev: 8159out_disable_pci_dev:
7399 lpfc_disable_pci_dev(phba); 8160 lpfc_disable_pci_dev(phba);
8161 if (shost)
8162 scsi_host_put(shost);
7400out_free_phba: 8163out_free_phba:
7401 lpfc_hba_free(phba); 8164 lpfc_hba_free(phba);
7402 return error; 8165 return error;
@@ -7551,6 +8314,13 @@ lpfc_pci_resume_one_s4(struct pci_dev *pdev)
7551 /* Restore device state from PCI config space */ 8314 /* Restore device state from PCI config space */
7552 pci_set_power_state(pdev, PCI_D0); 8315 pci_set_power_state(pdev, PCI_D0);
7553 pci_restore_state(pdev); 8316 pci_restore_state(pdev);
8317
8318 /*
8319 * Since the new kernel behavior of the pci_restore_state() API call is to
8320 * clear the device saved_state flag, the restored state must be saved again.
8321 */
8322 pci_save_state(pdev);
8323
7554 if (pdev->is_busmaster) 8324 if (pdev->is_busmaster)
7555 pci_set_master(pdev); 8325 pci_set_master(pdev);
7556 8326
@@ -7670,11 +8440,11 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
7670 int rc; 8440 int rc;
7671 struct lpfc_sli_intf intf; 8441 struct lpfc_sli_intf intf;
7672 8442
7673 if (pci_read_config_dword(pdev, LPFC_SLIREV_CONF_WORD, &intf.word0)) 8443 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
7674 return -ENODEV; 8444 return -ENODEV;
7675 8445
7676 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) && 8446 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
7677 (bf_get(lpfc_sli_intf_rev, &intf) == LPFC_SLIREV_CONF_SLI4)) 8447 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
7678 rc = lpfc_pci_probe_one_s4(pdev, pid); 8448 rc = lpfc_pci_probe_one_s4(pdev, pid);
7679 else 8449 else
7680 rc = lpfc_pci_probe_one_s3(pdev, pid); 8450 rc = lpfc_pci_probe_one_s3(pdev, pid);
@@ -7971,6 +8741,10 @@ static struct pci_device_id lpfc_id_table[] = {
7971 PCI_ANY_ID, PCI_ANY_ID, }, 8741 PCI_ANY_ID, PCI_ANY_ID, },
7972 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK, 8742 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
7973 PCI_ANY_ID, PCI_ANY_ID, }, 8743 PCI_ANY_ID, PCI_ANY_ID, },
8744 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT,
8745 PCI_ANY_ID, PCI_ANY_ID, },
8746 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
8747 PCI_ANY_ID, PCI_ANY_ID, },
7974 { 0 } 8748 { 0 }
7975}; 8749};
7976 8750
@@ -8053,15 +8827,15 @@ lpfc_exit(void)
8053 if (lpfc_enable_npiv) 8827 if (lpfc_enable_npiv)
8054 fc_release_transport(lpfc_vport_transport_template); 8828 fc_release_transport(lpfc_vport_transport_template);
8055 if (_dump_buf_data) { 8829 if (_dump_buf_data) {
8056 printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_data " 8830 printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for "
8057 "at 0x%p\n", 8831 "_dump_buf_data at 0x%p\n",
8058 (1L << _dump_buf_data_order), _dump_buf_data); 8832 (1L << _dump_buf_data_order), _dump_buf_data);
8059 free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order); 8833 free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
8060 } 8834 }
8061 8835
8062 if (_dump_buf_dif) { 8836 if (_dump_buf_dif) {
8063 printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_dif " 8837 printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for "
8064 "at 0x%p\n", 8838 "_dump_buf_dif at 0x%p\n",
8065 (1L << _dump_buf_dif_order), _dump_buf_dif); 8839 (1L << _dump_buf_dif_order), _dump_buf_dif);
8066 free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order); 8840 free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
8067 } 8841 }
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index 954ba57970a3..bb59e9273126 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -35,6 +35,7 @@
35#define LOG_VPORT 0x00004000 /* NPIV events */ 35#define LOG_VPORT 0x00004000 /* NPIV events */
36#define LOF_SECURITY 0x00008000 /* Security events */ 36#define LOF_SECURITY 0x00008000 /* Security events */
37#define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */ 37#define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */
38#define LOG_FIP 0x00020000 /* FIP events */
38#define LOG_ALL_MSG 0xffffffff /* LOG all messages */ 39#define LOG_ALL_MSG 0xffffffff /* LOG all messages */
39 40
40#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \ 41#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 1ab405902a18..72e6adb0643e 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -21,12 +21,13 @@
21 21
22#include <linux/blkdev.h> 22#include <linux/blkdev.h>
23#include <linux/pci.h> 23#include <linux/pci.h>
24#include <linux/slab.h>
24#include <linux/interrupt.h> 25#include <linux/interrupt.h>
25 26
26#include <scsi/scsi_device.h> 27#include <scsi/scsi_device.h>
27#include <scsi/scsi_transport_fc.h> 28#include <scsi/scsi_transport_fc.h>
28
29#include <scsi/scsi.h> 29#include <scsi/scsi.h>
30#include <scsi/fc/fc_fs.h>
30 31
31#include "lpfc_hw4.h" 32#include "lpfc_hw4.h"
32#include "lpfc_hw.h" 33#include "lpfc_hw.h"
@@ -820,6 +821,10 @@ lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb)
820 mb->un.varRegVpi.vpi = vport->vpi + vport->phba->vpi_base; 821 mb->un.varRegVpi.vpi = vport->vpi + vport->phba->vpi_base;
821 mb->un.varRegVpi.sid = vport->fc_myDID; 822 mb->un.varRegVpi.sid = vport->fc_myDID;
822 mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base; 823 mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;
824 memcpy(mb->un.varRegVpi.wwn, &vport->fc_portname,
825 sizeof(struct lpfc_name));
826 mb->un.varRegVpi.wwn[0] = cpu_to_le32(mb->un.varRegVpi.wwn[0]);
827 mb->un.varRegVpi.wwn[1] = cpu_to_le32(mb->un.varRegVpi.wwn[1]);
823 828
824 mb->mbxCommand = MBX_REG_VPI; 829 mb->mbxCommand = MBX_REG_VPI;
825 mb->mbxOwner = OWN_HOST; 830 mb->mbxOwner = OWN_HOST;
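
REG_VPI now carries the port's WWN, copied from fc_portname with both 32-bit words forced to little-endian before the mailbox is issued; the REG_VFI hunk further down performs the identical swizzle. As a sketch, with a hypothetical helper:

  #include <linux/types.h>
  #include <linux/string.h>
  #include <asm/byteorder.h>

  /* hypothetical helper: pack an 8-byte WWN into two LE mailbox words */
  static void demo_pack_wwn(const u8 wwn[8], __le32 out[2])
  {
          u32 tmp[2];

          memcpy(tmp, wwn, 8);
          out[0] = cpu_to_le32(tmp[0]);
          out[1] = cpu_to_le32(tmp[1]);
  }
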
@@ -849,7 +854,10 @@ lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
849 MAILBOX_t *mb = &pmb->u.mb; 854 MAILBOX_t *mb = &pmb->u.mb;
850 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 855 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
851 856
852 mb->un.varUnregVpi.vpi = vpi + phba->vpi_base; 857 if (phba->sli_rev < LPFC_SLI_REV4)
858 mb->un.varUnregVpi.vpi = vpi + phba->vpi_base;
859 else
860 mb->un.varUnregVpi.sli4_vpi = vpi + phba->vpi_base;
853 861
854 mb->mbxCommand = MBX_UNREG_VPI; 862 mb->mbxCommand = MBX_UNREG_VPI;
855 mb->mbxOwner = OWN_HOST; 863 mb->mbxOwner = OWN_HOST;
@@ -1132,7 +1140,7 @@ lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
1132 /* Otherwise we setup specific rctl / type masks for this ring */ 1140 /* Otherwise we setup specific rctl / type masks for this ring */
1133 for (i = 0; i < pring->num_mask; i++) { 1141 for (i = 0; i < pring->num_mask; i++) {
1134 mb->un.varCfgRing.rrRegs[i].rval = pring->prt[i].rctl; 1142 mb->un.varCfgRing.rrRegs[i].rval = pring->prt[i].rctl;
1135 if (mb->un.varCfgRing.rrRegs[i].rval != FC_ELS_REQ) 1143 if (mb->un.varCfgRing.rrRegs[i].rval != FC_RCTL_ELS_REQ)
1136 mb->un.varCfgRing.rrRegs[i].rmask = 0xff; 1144 mb->un.varCfgRing.rrRegs[i].rmask = 0xff;
1137 else 1145 else
1138 mb->un.varCfgRing.rrRegs[i].rmask = 0xfe; 1146 mb->un.varCfgRing.rrRegs[i].rmask = 0xfe;
@@ -1654,9 +1662,12 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
1654 /* Allocate record for keeping SGE virtual addresses */ 1662 /* Allocate record for keeping SGE virtual addresses */
1655 mbox->sge_array = kmalloc(sizeof(struct lpfc_mbx_nembed_sge_virt), 1663 mbox->sge_array = kmalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
1656 GFP_KERNEL); 1664 GFP_KERNEL);
1657 if (!mbox->sge_array) 1665 if (!mbox->sge_array) {
1666 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1667 "2527 Failed to allocate non-embedded SGE "
1668 "array.\n");
1658 return 0; 1669 return 0;
1659 1670 }
1660 for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) { 1671 for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) {
1661 /* The DMA memory is always allocated in the length of a 1672 /* The DMA memory is always allocated in the length of a
1662 * page even though the last SGE might not fill up to a 1673 * page even though the last SGE might not fill up to a
@@ -1697,7 +1708,8 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
1697 alloc_len - sizeof(union lpfc_sli4_cfg_shdr); 1708 alloc_len - sizeof(union lpfc_sli4_cfg_shdr);
1698 } 1709 }
1699 /* The sub-header is in DMA memory, which needs endian conversion */ 1710 /* The sub-header is in DMA memory, which needs endian conversion */
1700 lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr, 1711 if (cfg_shdr)
1712 lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr,
1701 sizeof(union lpfc_sli4_cfg_shdr)); 1713 sizeof(union lpfc_sli4_cfg_shdr));
1702 1714
1703 return alloc_len; 1715 return alloc_len;
@@ -1737,6 +1749,65 @@ lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
1737} 1749}
1738 1750
1739/** 1751/**
1752 * lpfc_sli4_mbx_read_fcf_rec - Allocate and construct read fcf mbox cmd
1753 * @phba: pointer to lpfc hba data structure.
1754 * @fcf_index: index to fcf table.
1755 *
1756 * This routine allocates and constructs a non-embedded mailbox command
1757 * for reading an FCF table entry referred to by @fcf_index.
1758 *
1759 * Return: 0 if the mailbox command was constructed successfully, otherwise
1760 * -ENOMEM.
1761 **/
1762int
1763lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *phba,
1764 struct lpfcMboxq *mboxq,
1765 uint16_t fcf_index)
1766{
1767 void *virt_addr;
1768 dma_addr_t phys_addr;
1769 uint8_t *bytep;
1770 struct lpfc_mbx_sge sge;
1771 uint32_t alloc_len, req_len;
1772 struct lpfc_mbx_read_fcf_tbl *read_fcf;
1773
1774 if (!mboxq)
1775 return -ENOMEM;
1776
1777 req_len = sizeof(struct fcf_record) +
1778 sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t);
1779
1780 /* Set up READ_FCF SLI4_CONFIG mailbox-ioctl command */
1781 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
1782 LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len,
1783 LPFC_SLI4_MBX_NEMBED);
1784
1785 if (alloc_len < req_len) {
1786 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1787 "0291 Allocated DMA memory size (x%x) is "
1788 "less than the requested DMA memory "
1789 "size (x%x)\n", alloc_len, req_len);
1790 return -ENOMEM;
1791 }
1792
1793 /* Get the first SGE entry from the non-embedded DMA memory. This
1794 * routine only uses a single SGE.
1795 */
1796 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
1797 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
1798 virt_addr = mboxq->sge_array->addr[0];
1799 read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
1800
1801 /* Set up command fields */
1802 bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index);
1803 /* Perform necessary endian conversion */
1804 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
1805 lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t));
1806
1807 return 0;
1808}
1809
1810/**
1740 * lpfc_request_features: Configure SLI4 REQUEST_FEATURES mailbox 1811 * lpfc_request_features: Configure SLI4 REQUEST_FEATURES mailbox
1741 * @mboxq: pointer to lpfc mbox command. 1812 * @mboxq: pointer to lpfc mbox command.
1742 * 1813 *
@@ -1753,11 +1824,6 @@ lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
1753 /* Set up host requested features. */ 1824 /* Set up host requested features. */
1754 bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1); 1825 bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1);
1755 1826
1756 if (phba->cfg_enable_fip)
1757 bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 0);
1758 else
1759 bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 1);
1760
1761 /* Enable DIF (block guard) only if configured to do so. */ 1827 /* Enable DIF (block guard) only if configured to do so. */
1762 if (phba->cfg_enable_bg) 1828 if (phba->cfg_enable_bg)
1763 bf_set(lpfc_mbx_rq_ftr_rq_dif, &mboxq->u.mqe.un.req_ftrs, 1); 1829 bf_set(lpfc_mbx_rq_ftr_rq_dif, &mboxq->u.mqe.un.req_ftrs, 1);
@@ -1817,6 +1883,9 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
1817 bf_set(lpfc_reg_vfi_vfi, reg_vfi, vport->vfi + vport->phba->vfi_base); 1883 bf_set(lpfc_reg_vfi_vfi, reg_vfi, vport->vfi + vport->phba->vfi_base);
1818 bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi); 1884 bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi);
1819 bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->vpi + vport->phba->vpi_base); 1885 bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->vpi + vport->phba->vpi_base);
1886 memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name));
1887 reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]);
1888 reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]);
1820 reg_vfi->bde.addrHigh = putPaddrHigh(phys); 1889 reg_vfi->bde.addrHigh = putPaddrHigh(phys);
1821 reg_vfi->bde.addrLow = putPaddrLow(phys); 1890 reg_vfi->bde.addrLow = putPaddrLow(phys);
1822 reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam); 1891 reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
@@ -1850,7 +1919,7 @@ lpfc_init_vpi(struct lpfc_hba *phba, struct lpfcMboxq *mbox, uint16_t vpi)
1850/** 1919/**
1851 * lpfc_unreg_vfi - Initialize the UNREG_VFI mailbox command 1920 * lpfc_unreg_vfi - Initialize the UNREG_VFI mailbox command
1852 * @mbox: pointer to lpfc mbox command to initialize. 1921 * @mbox: pointer to lpfc mbox command to initialize.
1853 * @vfi: VFI to be unregistered. 1922 * @vport: vport associated with the virtual fabric.
1854 * 1923 *
1855 * The UNREG_VFI mailbox command causes the SLI Host to put a virtual fabric 1924 * The UNREG_VFI mailbox command causes the SLI Host to put a virtual fabric
1856 * (logical NPort) into the inactive state. The SLI Host must have logged out 1925 * (logical NPort) into the inactive state. The SLI Host must have logged out
@@ -1859,11 +1928,12 @@ lpfc_init_vpi(struct lpfc_hba *phba, struct lpfcMboxq *mbox, uint16_t vpi)
1859 * fabric inactive. 1928 * fabric inactive.
1860 **/ 1929 **/
1861void 1930void
1862lpfc_unreg_vfi(struct lpfcMboxq *mbox, uint16_t vfi) 1931lpfc_unreg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
1863{ 1932{
1864 memset(mbox, 0, sizeof(*mbox)); 1933 memset(mbox, 0, sizeof(*mbox));
1865 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI); 1934 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI);
1866 bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi, vfi); 1935 bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi,
1936 vport->vfi + vport->phba->vfi_base);
1867} 1937}
1868 1938
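Call sites now hand over the vport and let the helper apply the vfi_base offset itself; a before/after sketch of the calling convention (caller code is outside this hunk):

    /* before: the caller resolved the adjusted VFI */
    lpfc_unreg_vfi(mbox, vport->vfi + vport->phba->vfi_base);
    /* after: the helper derives it from the vport */
    lpfc_unreg_vfi(mbox, vport);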
1869/** 1939/**
@@ -1937,13 +2007,14 @@ lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
1937 bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID); 2007 bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID);
1938 bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID); 2008 bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID);
1939 bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID); 2009 bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID);
1940 bf_set(lpfc_reg_fcfi_info_index, reg_fcfi, phba->fcf.fcf_indx); 2010 bf_set(lpfc_reg_fcfi_info_index, reg_fcfi,
2011 phba->fcf.current_rec.fcf_indx);
1941 /* reg_fcf addr mode is bit wise inverted value of fcf addr_mode */ 2012 /* reg_fcf addr mode is bit wise inverted value of fcf addr_mode */
1942 bf_set(lpfc_reg_fcfi_mam, reg_fcfi, 2013 bf_set(lpfc_reg_fcfi_mam, reg_fcfi, (~phba->fcf.addr_mode) & 0x3);
1943 (~phba->fcf.addr_mode) & 0x3); 2014 if (phba->fcf.current_rec.vlan_id != 0xFFFF) {
1944 if (phba->fcf.fcf_flag & FCF_VALID_VLAN) {
1945 bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1); 2015 bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1);
1946 bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi, phba->fcf.vlan_id); 2016 bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi,
2017 phba->fcf.current_rec.vlan_id);
1947 } 2018 }
1948} 2019}
1949 2020
@@ -1983,3 +2054,41 @@ lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
1983 bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI); 2054 bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI);
1984 resume_rpi->event_tag = ndlp->phba->fc_eventTag; 2055 resume_rpi->event_tag = ndlp->phba->fc_eventTag;
1985} 2056}
2057
2058/**
2059 * lpfc_supported_pages - Initialize the PORT_CAPABILITIES supported pages
2060 * mailbox command.
2061 * @mbox: pointer to lpfc mbox command to initialize.
2062 *
2063 * The PORT_CAPABILITIES supported pages mailbox command is issued to
2064 * retrieve the particular feature pages supported by the port.
2065 **/
2066void
2067lpfc_supported_pages(struct lpfcMboxq *mbox)
2068{
2069 struct lpfc_mbx_supp_pages *supp_pages;
2070
2071 memset(mbox, 0, sizeof(*mbox));
2072 supp_pages = &mbox->u.mqe.un.supp_pages;
2073 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
2074 bf_set(cpn, supp_pages, LPFC_SUPP_PAGES);
2075}
2076
2077/**
2078 * lpfc_sli4_params - Initialize the PORT_CAPABILITIES SLI4 Params
2079 * mailbox command.
2080 * @mbox: pointer to lpfc mbox command to initialize.
2081 *
2082 * The PORT_CAPABILITIES SLI4 parameters mailbox command is issued to
2083 * retrieve the particular SLI4 features supported by the port.
2084 **/
2085void
2086lpfc_sli4_params(struct lpfcMboxq *mbox)
2087{
2088 struct lpfc_mbx_sli4_params *sli4_params;
2089
2090 memset(mbox, 0, sizeof(*mbox));
2091 sli4_params = &mbox->u.mqe.un.sli4_params;
2092 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
2093 bf_set(cpn, sli4_params, LPFC_SLI4_PARAMETERS);
2094}
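A sketch of how the two new PORT_CAPABILITIES initializers would be driven (hedged: the issuing and response-parsing steps are the driver's standard mailbox sequence, not part of this hunk):

    struct lpfcMboxq *mbox;

    mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
    if (!mbox)
        return -ENOMEM;
    lpfc_supported_pages(mbox);     /* query the supported feature pages */
    /* ... issue mbox and parse the supported-pages response ... */
    lpfc_sli4_params(mbox);         /* then query the SLI4 parameters page */
    /* ... issue mbox and cache the returned SLI4 limits ... */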
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index a1b6db6016da..8f879e477e9d 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -20,6 +20,7 @@
20 *******************************************************************/ 20 *******************************************************************/
21 21
22#include <linux/mempool.h> 22#include <linux/mempool.h>
23#include <linux/slab.h>
23#include <linux/pci.h> 24#include <linux/pci.h>
24#include <linux/interrupt.h> 25#include <linux/interrupt.h>
25 26
diff --git a/drivers/scsi/lpfc/lpfc_nl.h b/drivers/scsi/lpfc/lpfc_nl.h
index d655ed3eebef..f3cfbe2ce986 100644
--- a/drivers/scsi/lpfc/lpfc_nl.h
+++ b/drivers/scsi/lpfc/lpfc_nl.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2008 Emulex. All rights reserved. * 4 * Copyright (C) 2010 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -177,23 +177,3 @@ struct temp_event {
177 uint32_t data; 177 uint32_t data;
178}; 178};
179 179
180/* bsg definitions */
181#define LPFC_BSG_VENDOR_SET_CT_EVENT 1
182#define LPFC_BSG_VENDOR_GET_CT_EVENT 2
183
184struct set_ct_event {
185 uint32_t command;
186 uint32_t ev_req_id;
187 uint32_t ev_reg_id;
188};
189
190struct get_ct_event {
191 uint32_t command;
192 uint32_t ev_reg_id;
193 uint32_t ev_req_id;
194};
195
196struct get_ct_event_reply {
197 uint32_t immed_data;
198 uint32_t type;
199};
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 3e74136f1ede..e331204a4d56 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -21,6 +21,7 @@
21 21
22#include <linux/blkdev.h> 22#include <linux/blkdev.h>
23#include <linux/pci.h> 23#include <linux/pci.h>
24#include <linux/slab.h>
24#include <linux/interrupt.h> 25#include <linux/interrupt.h>
25 26
26#include <scsi/scsi.h> 27#include <scsi/scsi.h>
@@ -62,7 +63,7 @@ lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
62 63
63int 64int
64lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 65lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
65 struct serv_parm * sp, uint32_t class) 66 struct serv_parm *sp, uint32_t class, int flogi)
66{ 67{
67 volatile struct serv_parm *hsp = &vport->fc_sparam; 68 volatile struct serv_parm *hsp = &vport->fc_sparam;
68 uint16_t hsp_value, ssp_value = 0; 69 uint16_t hsp_value, ssp_value = 0;
@@ -75,49 +76,56 @@ lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
75 * correcting the byte values. 76 * correcting the byte values.
76 */ 77 */
77 if (sp->cls1.classValid) { 78 if (sp->cls1.classValid) {
78 hsp_value = (hsp->cls1.rcvDataSizeMsb << 8) | 79 if (!flogi) {
79 hsp->cls1.rcvDataSizeLsb; 80 hsp_value = ((hsp->cls1.rcvDataSizeMsb << 8) |
80 ssp_value = (sp->cls1.rcvDataSizeMsb << 8) | 81 hsp->cls1.rcvDataSizeLsb);
81 sp->cls1.rcvDataSizeLsb; 82 ssp_value = ((sp->cls1.rcvDataSizeMsb << 8) |
82 if (!ssp_value) 83 sp->cls1.rcvDataSizeLsb);
83 goto bad_service_param; 84 if (!ssp_value)
84 if (ssp_value > hsp_value) { 85 goto bad_service_param;
85 sp->cls1.rcvDataSizeLsb = hsp->cls1.rcvDataSizeLsb; 86 if (ssp_value > hsp_value) {
86 sp->cls1.rcvDataSizeMsb = hsp->cls1.rcvDataSizeMsb; 87 sp->cls1.rcvDataSizeLsb =
88 hsp->cls1.rcvDataSizeLsb;
89 sp->cls1.rcvDataSizeMsb =
90 hsp->cls1.rcvDataSizeMsb;
91 }
87 } 92 }
88 } else if (class == CLASS1) { 93 } else if (class == CLASS1)
89 goto bad_service_param; 94 goto bad_service_param;
90 }
91
92 if (sp->cls2.classValid) { 95 if (sp->cls2.classValid) {
93 hsp_value = (hsp->cls2.rcvDataSizeMsb << 8) | 96 if (!flogi) {
94 hsp->cls2.rcvDataSizeLsb; 97 hsp_value = ((hsp->cls2.rcvDataSizeMsb << 8) |
95 ssp_value = (sp->cls2.rcvDataSizeMsb << 8) | 98 hsp->cls2.rcvDataSizeLsb);
96 sp->cls2.rcvDataSizeLsb; 99 ssp_value = ((sp->cls2.rcvDataSizeMsb << 8) |
97 if (!ssp_value) 100 sp->cls2.rcvDataSizeLsb);
98 goto bad_service_param; 101 if (!ssp_value)
99 if (ssp_value > hsp_value) { 102 goto bad_service_param;
100 sp->cls2.rcvDataSizeLsb = hsp->cls2.rcvDataSizeLsb; 103 if (ssp_value > hsp_value) {
101 sp->cls2.rcvDataSizeMsb = hsp->cls2.rcvDataSizeMsb; 104 sp->cls2.rcvDataSizeLsb =
105 hsp->cls2.rcvDataSizeLsb;
106 sp->cls2.rcvDataSizeMsb =
107 hsp->cls2.rcvDataSizeMsb;
108 }
102 } 109 }
103 } else if (class == CLASS2) { 110 } else if (class == CLASS2)
104 goto bad_service_param; 111 goto bad_service_param;
105 }
106
107 if (sp->cls3.classValid) { 112 if (sp->cls3.classValid) {
108 hsp_value = (hsp->cls3.rcvDataSizeMsb << 8) | 113 if (!flogi) {
109 hsp->cls3.rcvDataSizeLsb; 114 hsp_value = ((hsp->cls3.rcvDataSizeMsb << 8) |
110 ssp_value = (sp->cls3.rcvDataSizeMsb << 8) | 115 hsp->cls3.rcvDataSizeLsb);
111 sp->cls3.rcvDataSizeLsb; 116 ssp_value = ((sp->cls3.rcvDataSizeMsb << 8) |
112 if (!ssp_value) 117 sp->cls3.rcvDataSizeLsb);
113 goto bad_service_param; 118 if (!ssp_value)
114 if (ssp_value > hsp_value) { 119 goto bad_service_param;
115 sp->cls3.rcvDataSizeLsb = hsp->cls3.rcvDataSizeLsb; 120 if (ssp_value > hsp_value) {
116 sp->cls3.rcvDataSizeMsb = hsp->cls3.rcvDataSizeMsb; 121 sp->cls3.rcvDataSizeLsb =
122 hsp->cls3.rcvDataSizeLsb;
123 sp->cls3.rcvDataSizeMsb =
124 hsp->cls3.rcvDataSizeMsb;
125 }
117 } 126 }
118 } else if (class == CLASS3) { 127 } else if (class == CLASS3)
119 goto bad_service_param; 128 goto bad_service_param;
120 }
121 129
122 /* 130 /*
123 * Preserve the upper four bits of the MSB from the PLOGI response. 131 * Preserve the upper four bits of the MSB from the PLOGI response.
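The added flogi argument makes the receive-data-size negotiation conditional: the PLOGI-path callers changed later in this patch pass 0 and keep the clamping behavior, while a fabric-login caller would presumably pass 1 so that only the classValid bits are checked (an assumption, the FLOGI call site is outside this hunk):

    /* PLOGI path (as updated below in this patch): negotiate sizes */
    if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0))
        goto out;
    /* FLOGI path (hypothetical here): class validity checks only */
    if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1))
        goto out;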
@@ -247,7 +255,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
247 int rc; 255 int rc;
248 256
249 memset(&stat, 0, sizeof (struct ls_rjt)); 257 memset(&stat, 0, sizeof (struct ls_rjt));
250 if (vport->port_state <= LPFC_FLOGI) { 258 if (vport->port_state <= LPFC_FDISC) {
251 /* Before responding to PLOGI, check for pt2pt mode. 259 /* Before responding to PLOGI, check for pt2pt mode.
252 * If we are pt2pt, with an outstanding FLOGI, abort 260 * If we are pt2pt, with an outstanding FLOGI, abort
253 * the FLOGI and resend it first. 261 * the FLOGI and resend it first.
@@ -295,7 +303,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
295 NULL); 303 NULL);
296 return 0; 304 return 0;
297 } 305 }
298 if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3) == 0)) { 306 if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0) == 0)) {
299 /* Reject this request because invalid parameters */ 307 /* Reject this request because invalid parameters */
300 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 308 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
301 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS; 309 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
@@ -831,7 +839,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
831 "0142 PLOGI RSP: Invalid WWN.\n"); 839 "0142 PLOGI RSP: Invalid WWN.\n");
832 goto out; 840 goto out;
833 } 841 }
834 if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3)) 842 if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0))
835 goto out; 843 goto out;
836 /* PLOGI chkparm OK */ 844 /* PLOGI chkparm OK */
837 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 845 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
@@ -1223,6 +1231,12 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
1223 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 1231 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1224 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && 1232 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
1225 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 1233 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1234 if (phba->sli_rev == LPFC_SLI_REV4) {
1235 spin_unlock_irq(&phba->hbalock);
1236 lpfc_sli4_free_rpi(phba,
1237 mb->u.mb.un.varRegLogin.rpi);
1238 spin_lock_irq(&phba->hbalock);
1239 }
1226 mp = (struct lpfc_dmabuf *) (mb->context1); 1240 mp = (struct lpfc_dmabuf *) (mb->context1);
1227 if (mp) { 1241 if (mp) {
1228 __lpfc_mbuf_free(phba, mp->virt, mp->phys); 1242 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -1230,6 +1244,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
1230 } 1244 }
1231 lpfc_nlp_put(ndlp); 1245 lpfc_nlp_put(ndlp);
1232 list_del(&mb->list); 1246 list_del(&mb->list);
1247 phba->sli.mboxq_cnt--;
1233 mempool_free(mb, phba->mbox_mem_pool); 1248 mempool_free(mb, phba->mbox_mem_pool);
1234 } 1249 }
1235 } 1250 }
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index c88f59f0ce30..dccdb822328c 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -19,6 +19,7 @@
19 * included with this package. * 19 * included with this package. *
20 *******************************************************************/ 20 *******************************************************************/
21#include <linux/pci.h> 21#include <linux/pci.h>
22#include <linux/slab.h>
22#include <linux/interrupt.h> 23#include <linux/interrupt.h>
23#include <linux/delay.h> 24#include <linux/delay.h>
24#include <asm/unaligned.h> 25#include <asm/unaligned.h>
@@ -59,22 +60,26 @@ static char *dif_op_str[] = {
59}; 60};
60static void 61static void
61lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb); 62lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
63static void
64lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
62 65
63static void 66static void
64lpfc_debug_save_data(struct scsi_cmnd *cmnd) 67lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
65{ 68{
66 void *src, *dst; 69 void *src, *dst;
67 struct scatterlist *sgde = scsi_sglist(cmnd); 70 struct scatterlist *sgde = scsi_sglist(cmnd);
68 71
69 if (!_dump_buf_data) { 72 if (!_dump_buf_data) {
70 printk(KERN_ERR "BLKGRD ERROR %s _dump_buf_data is NULL\n", 73 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
74 "9050 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
71 __func__); 75 __func__);
72 return; 76 return;
73 } 77 }
74 78
75 79
76 if (!sgde) { 80 if (!sgde) {
77 printk(KERN_ERR "BLKGRD ERROR: data scatterlist is null\n"); 81 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
82 "9051 BLKGRD: ERROR: data scatterlist is null\n");
78 return; 83 return;
79 } 84 }
80 85
@@ -88,19 +93,21 @@ lpfc_debug_save_data(struct scsi_cmnd *cmnd)
88} 93}
89 94
90static void 95static void
91lpfc_debug_save_dif(struct scsi_cmnd *cmnd) 96lpfc_debug_save_dif(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
92{ 97{
93 void *src, *dst; 98 void *src, *dst;
94 struct scatterlist *sgde = scsi_prot_sglist(cmnd); 99 struct scatterlist *sgde = scsi_prot_sglist(cmnd);
95 100
96 if (!_dump_buf_dif) { 101 if (!_dump_buf_dif) {
97 printk(KERN_ERR "BLKGRD ERROR %s _dump_buf_data is NULL\n", 102 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
103 			"9052 BLKGRD: ERROR %s _dump_buf_dif is NULL\n",
98 __func__); 104 __func__);
99 return; 105 return;
100 } 106 }
101 107
102 if (!sgde) { 108 if (!sgde) {
103 printk(KERN_ERR "BLKGRD ERROR: prot scatterlist is null\n"); 109 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
110 "9053 BLKGRD: ERROR: prot scatterlist is null\n");
104 return; 111 return;
105 } 112 }
106 113
@@ -242,6 +249,36 @@ lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
242} 249}
243 250
244/** 251/**
252 * lpfc_change_queue_depth - Alter scsi device queue depth
253 * @sdev: Pointer to the scsi device on which to change the queue depth.
254 * @qdepth: New queue depth to set the sdev to.
255 * @reason: The reason for the queue depth change.
256 *
257 * This function is called by the midlayer and the LLD to alter the queue
258 * depth for a scsi device. This function sets the queue depth to the new
259 * value and sends an event out to log the queue depth change.
260 **/
261int
262lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
263{
264 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
265 struct lpfc_hba *phba = vport->phba;
266 struct lpfc_rport_data *rdata;
267 unsigned long new_queue_depth, old_queue_depth;
268
269 old_queue_depth = sdev->queue_depth;
270 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
271 new_queue_depth = sdev->queue_depth;
272 rdata = sdev->hostdata;
273 if (rdata)
274 lpfc_send_sdev_queuedepth_change_event(phba, vport,
275 rdata->pnode, sdev->lun,
276 old_queue_depth,
277 new_queue_depth);
278 return sdev->queue_depth;
279}
280
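The new helper centralizes the scsi_adjust_queue_depth() plus change-event sequence that the ramp handlers below used to open-code; usage with the two reason codes appearing in this patch:

    /* resource-error ramp down, as in lpfc_ramp_down_queue_handler() */
    lpfc_change_queue_depth(sdev, new_queue_depth, SCSI_QDEPTH_DEFAULT);
    /* periodic ramp up by one, as in lpfc_ramp_up_queue_handler() */
    lpfc_change_queue_depth(sdev, sdev->queue_depth + 1, SCSI_QDEPTH_RAMP_UP);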
281/**
245 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread 282 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
246 * @phba: The Hba for which this call is being executed. 283 * @phba: The Hba for which this call is being executed.
247 * 284 *
@@ -305,8 +342,10 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
305 if (vport->cfg_lun_queue_depth <= queue_depth) 342 if (vport->cfg_lun_queue_depth <= queue_depth)
306 return; 343 return;
307 spin_lock_irqsave(&phba->hbalock, flags); 344 spin_lock_irqsave(&phba->hbalock, flags);
308 if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) || 345 if (time_before(jiffies,
309 ((phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL ) > jiffies)) { 346 phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) ||
347 time_before(jiffies,
348 phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL)) {
310 spin_unlock_irqrestore(&phba->hbalock, flags); 349 spin_unlock_irqrestore(&phba->hbalock, flags);
311 return; 350 return;
312 } 351 }
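time_before() from <linux/jiffies.h> is the wraparound-safe way to compare jiffies; the replaced open-coded "last + interval > jiffies" test gives the wrong answer once jiffies wraps. Equivalent sketch (last_event is a stand-in name):

    #include <linux/jiffies.h>

    /* true while the interval has not yet elapsed, even across a wrap */
    if (time_before(jiffies, last_event + QUEUE_RAMP_UP_INTERVAL))
        return;     /* too soon to adjust the queue depth again */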
@@ -338,10 +377,9 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
338 struct lpfc_vport **vports; 377 struct lpfc_vport **vports;
339 struct Scsi_Host *shost; 378 struct Scsi_Host *shost;
340 struct scsi_device *sdev; 379 struct scsi_device *sdev;
341 unsigned long new_queue_depth, old_queue_depth; 380 unsigned long new_queue_depth;
342 unsigned long num_rsrc_err, num_cmd_success; 381 unsigned long num_rsrc_err, num_cmd_success;
343 int i; 382 int i;
344 struct lpfc_rport_data *rdata;
345 383
346 num_rsrc_err = atomic_read(&phba->num_rsrc_err); 384 num_rsrc_err = atomic_read(&phba->num_rsrc_err);
347 num_cmd_success = atomic_read(&phba->num_cmd_success); 385 num_cmd_success = atomic_read(&phba->num_cmd_success);
@@ -359,22 +397,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
359 else 397 else
360 new_queue_depth = sdev->queue_depth - 398 new_queue_depth = sdev->queue_depth -
361 new_queue_depth; 399 new_queue_depth;
362 old_queue_depth = sdev->queue_depth; 400 lpfc_change_queue_depth(sdev, new_queue_depth,
363 if (sdev->ordered_tags) 401 SCSI_QDEPTH_DEFAULT);
364 scsi_adjust_queue_depth(sdev,
365 MSG_ORDERED_TAG,
366 new_queue_depth);
367 else
368 scsi_adjust_queue_depth(sdev,
369 MSG_SIMPLE_TAG,
370 new_queue_depth);
371 rdata = sdev->hostdata;
372 if (rdata)
373 lpfc_send_sdev_queuedepth_change_event(
374 phba, vports[i],
375 rdata->pnode,
376 sdev->lun, old_queue_depth,
377 new_queue_depth);
378 } 402 }
379 } 403 }
380 lpfc_destroy_vport_work_array(phba, vports); 404 lpfc_destroy_vport_work_array(phba, vports);
@@ -398,7 +422,6 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
398 struct Scsi_Host *shost; 422 struct Scsi_Host *shost;
399 struct scsi_device *sdev; 423 struct scsi_device *sdev;
400 int i; 424 int i;
401 struct lpfc_rport_data *rdata;
402 425
403 vports = lpfc_create_vport_work_array(phba); 426 vports = lpfc_create_vport_work_array(phba);
404 if (vports != NULL) 427 if (vports != NULL)
@@ -408,22 +431,9 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
408 if (vports[i]->cfg_lun_queue_depth <= 431 if (vports[i]->cfg_lun_queue_depth <=
409 sdev->queue_depth) 432 sdev->queue_depth)
410 continue; 433 continue;
411 if (sdev->ordered_tags) 434 lpfc_change_queue_depth(sdev,
412 scsi_adjust_queue_depth(sdev, 435 sdev->queue_depth+1,
413 MSG_ORDERED_TAG, 436 SCSI_QDEPTH_RAMP_UP);
414 sdev->queue_depth+1);
415 else
416 scsi_adjust_queue_depth(sdev,
417 MSG_SIMPLE_TAG,
418 sdev->queue_depth+1);
419 rdata = sdev->hostdata;
420 if (rdata)
421 lpfc_send_sdev_queuedepth_change_event(
422 phba, vports[i],
423 rdata->pnode,
424 sdev->lun,
425 sdev->queue_depth - 1,
426 sdev->queue_depth);
427 } 437 }
428 } 438 }
429 lpfc_destroy_vport_work_array(phba, vports); 439 lpfc_destroy_vport_work_array(phba, vports);
@@ -589,7 +599,7 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
589 iocb->ulpClass = CLASS3; 599 iocb->ulpClass = CLASS3;
590 psb->status = IOSTAT_SUCCESS; 600 psb->status = IOSTAT_SUCCESS;
591 /* Put it back into the SCSI buffer list */ 601 /* Put it back into the SCSI buffer list */
592 lpfc_release_scsi_buf_s4(phba, psb); 602 lpfc_release_scsi_buf_s3(phba, psb);
593 603
594 } 604 }
595 605
@@ -611,22 +621,40 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
611 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); 621 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
612 struct lpfc_scsi_buf *psb, *next_psb; 622 struct lpfc_scsi_buf *psb, *next_psb;
613 unsigned long iflag = 0; 623 unsigned long iflag = 0;
624 struct lpfc_iocbq *iocbq;
625 int i;
614 626
615 spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, iflag); 627 spin_lock_irqsave(&phba->hbalock, iflag);
628 spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
616 list_for_each_entry_safe(psb, next_psb, 629 list_for_each_entry_safe(psb, next_psb,
617 &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) { 630 &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
618 if (psb->cur_iocbq.sli4_xritag == xri) { 631 if (psb->cur_iocbq.sli4_xritag == xri) {
619 list_del(&psb->list); 632 list_del(&psb->list);
633 psb->exch_busy = 0;
620 psb->status = IOSTAT_SUCCESS; 634 psb->status = IOSTAT_SUCCESS;
621 spin_unlock_irqrestore( 635 spin_unlock(
622 &phba->sli4_hba.abts_scsi_buf_list_lock, 636 &phba->sli4_hba.abts_scsi_buf_list_lock);
623 iflag); 637 spin_unlock_irqrestore(&phba->hbalock, iflag);
624 lpfc_release_scsi_buf_s4(phba, psb); 638 lpfc_release_scsi_buf_s4(phba, psb);
625 return; 639 return;
626 } 640 }
627 } 641 }
628 spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock, 642 spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
629 iflag); 643 for (i = 1; i <= phba->sli.last_iotag; i++) {
644 iocbq = phba->sli.iocbq_lookup[i];
645
646 if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
647 (iocbq->iocb_flag & LPFC_IO_LIBDFC))
648 continue;
649 if (iocbq->sli4_xritag != xri)
650 continue;
651 psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
652 psb->exch_busy = 0;
653 spin_unlock_irqrestore(&phba->hbalock, iflag);
654 return;
655
656 }
657 spin_unlock_irqrestore(&phba->hbalock, iflag);
630} 658}
631 659
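The fallback scan recovers the owning SCSI buffer from the embedded iocbq with container_of(); the pattern, for reference:

    #include <linux/kernel.h>   /* container_of() */

    /* cur_iocbq is embedded inside struct lpfc_scsi_buf, so a pointer
     * to it can be mapped back to the enclosing buffer: */
    psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);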
632/** 660/**
@@ -679,11 +707,12 @@ lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
679 list); 707 list);
680 if (status) { 708 if (status) {
681 /* Put this back on the abort scsi list */ 709 /* Put this back on the abort scsi list */
682 psb->status = IOSTAT_LOCAL_REJECT; 710 psb->exch_busy = 1;
683 psb->result = IOERR_ABORT_REQUESTED;
684 rc++; 711 rc++;
685 } else 712 } else {
713 psb->exch_busy = 0;
686 psb->status = IOSTAT_SUCCESS; 714 psb->status = IOSTAT_SUCCESS;
715 }
687 /* Put it back into the SCSI buffer list */ 716 /* Put it back into the SCSI buffer list */
688 lpfc_release_scsi_buf_s4(phba, psb); 717 lpfc_release_scsi_buf_s4(phba, psb);
689 } 718 }
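The exch_busy flag replaces the old IOSTAT_LOCAL_REJECT/IOERR_ABORT_REQUESTED pair as the marker that the exchange is still live on the HBA; lpfc_release_scsi_buf_s4(), changed further below, keys off it (sketch only, the exact list handling is in that later hunk):

    if (psb->exch_busy) {
        /* XRI still busy on the wire: park the buffer on
         * lpfc_abts_scsi_buf_list until the XRI-aborted event
         * (handled above) releases it */
    } else {
        /* normal completion: return it to the free SCSI buffer list */
    }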
@@ -787,19 +816,17 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
787 */ 816 */
788 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd)); 817 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
789 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd)); 818 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
790 bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_cmnd));
791 bf_set(lpfc_sli4_sge_last, sgl, 0); 819 bf_set(lpfc_sli4_sge_last, sgl, 0);
792 sgl->word2 = cpu_to_le32(sgl->word2); 820 sgl->word2 = cpu_to_le32(sgl->word2);
793 sgl->word3 = cpu_to_le32(sgl->word3); 821 sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
794 sgl++; 822 sgl++;
795 823
796 /* Setup the physical region for the FCP RSP */ 824 /* Setup the physical region for the FCP RSP */
797 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp)); 825 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
798 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp)); 826 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
799 bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_rsp));
800 bf_set(lpfc_sli4_sge_last, sgl, 1); 827 bf_set(lpfc_sli4_sge_last, sgl, 1);
801 sgl->word2 = cpu_to_le32(sgl->word2); 828 sgl->word2 = cpu_to_le32(sgl->word2);
802 sgl->word3 = cpu_to_le32(sgl->word3); 829 sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));
803 830
804 /* 831 /*
805 * Since the IOCB for the FCP I/O is built into this 832 * Since the IOCB for the FCP I/O is built into this
@@ -830,11 +857,12 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
830 psb->cur_iocbq.sli4_xritag); 857 psb->cur_iocbq.sli4_xritag);
831 if (status) { 858 if (status) {
832 /* Put this back on the abort scsi list */ 859 /* Put this back on the abort scsi list */
833 psb->status = IOSTAT_LOCAL_REJECT; 860 psb->exch_busy = 1;
834 psb->result = IOERR_ABORT_REQUESTED;
835 rc++; 861 rc++;
836 } else 862 } else {
863 psb->exch_busy = 0;
837 psb->status = IOSTAT_SUCCESS; 864 psb->status = IOSTAT_SUCCESS;
865 }
838 /* Put it back into the SCSI buffer list */ 866 /* Put it back into the SCSI buffer list */
839 lpfc_release_scsi_buf_s4(phba, psb); 867 lpfc_release_scsi_buf_s4(phba, psb);
840 break; 868 break;
@@ -848,11 +876,12 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
848 list); 876 list);
849 if (status) { 877 if (status) {
850 /* Put this back on the abort scsi list */ 878 /* Put this back on the abort scsi list */
851 psb->status = IOSTAT_LOCAL_REJECT; 879 psb->exch_busy = 1;
852 psb->result = IOERR_ABORT_REQUESTED;
853 rc++; 880 rc++;
854 } else 881 } else {
882 psb->exch_busy = 0;
855 psb->status = IOSTAT_SUCCESS; 883 psb->status = IOSTAT_SUCCESS;
884 }
856 /* Put it back into the SCSI buffer list */ 885 /* Put it back into the SCSI buffer list */
857 lpfc_release_scsi_buf_s4(phba, psb); 886 lpfc_release_scsi_buf_s4(phba, psb);
858 } 887 }
@@ -942,8 +971,7 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
942{ 971{
943 unsigned long iflag = 0; 972 unsigned long iflag = 0;
944 973
945 if (psb->status == IOSTAT_LOCAL_REJECT 974 if (psb->exch_busy) {
946 && psb->result == IOERR_ABORT_REQUESTED) {
947 spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, 975 spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
948 iflag); 976 iflag);
949 psb->pCmd = NULL; 977 psb->pCmd = NULL;
@@ -996,6 +1024,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
996 struct scatterlist *sgel = NULL; 1024 struct scatterlist *sgel = NULL;
997 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 1025 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
998 struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl; 1026 struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
1027 struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
999 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; 1028 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
1000 struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde; 1029 struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
1001 dma_addr_t physaddr; 1030 dma_addr_t physaddr;
@@ -1024,7 +1053,8 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1024 1053
1025 lpfc_cmd->seg_cnt = nseg; 1054 lpfc_cmd->seg_cnt = nseg;
1026 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 1055 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
1027 printk(KERN_ERR "%s: Too many sg segments from " 1056 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1057 "9064 BLKGRD: %s: Too many sg segments from "
1028 "dma_map_sg. Config %d, seg_cnt %d\n", 1058 "dma_map_sg. Config %d, seg_cnt %d\n",
1029 __func__, phba->cfg_sg_seg_cnt, 1059 __func__, phba->cfg_sg_seg_cnt,
1030 lpfc_cmd->seg_cnt); 1060 lpfc_cmd->seg_cnt);
@@ -1045,6 +1075,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1045 physaddr = sg_dma_address(sgel); 1075 physaddr = sg_dma_address(sgel);
1046 if (phba->sli_rev == 3 && 1076 if (phba->sli_rev == 3 &&
1047 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) && 1077 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
1078 !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
1048 nseg <= LPFC_EXT_DATA_BDE_COUNT) { 1079 nseg <= LPFC_EXT_DATA_BDE_COUNT) {
1049 data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 1080 data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1050 data_bde->tus.f.bdeSize = sg_dma_len(sgel); 1081 data_bde->tus.f.bdeSize = sg_dma_len(sgel);
@@ -1071,7 +1102,8 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1071 * explicitly reinitialized since all iocb memory resources are reused. 1102 * explicitly reinitialized since all iocb memory resources are reused.
1072 */ 1103 */
1073 if (phba->sli_rev == 3 && 1104 if (phba->sli_rev == 3 &&
1074 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) { 1105 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
1106 !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
1075 if (num_bde > LPFC_EXT_DATA_BDE_COUNT) { 1107 if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
1076 /* 1108 /*
1077 * The extended IOCB format can only fit 3 BDE or a BPL. 1109 * The extended IOCB format can only fit 3 BDE or a BPL.
@@ -1096,6 +1128,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1096 } else { 1128 } else {
1097 iocb_cmd->un.fcpi64.bdl.bdeSize = 1129 iocb_cmd->un.fcpi64.bdl.bdeSize =
1098 ((num_bde + 2) * sizeof(struct ulp_bde64)); 1130 ((num_bde + 2) * sizeof(struct ulp_bde64));
1131 iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
1099 } 1132 }
1100 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); 1133 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
1101 1134
@@ -1112,7 +1145,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1112 * with the cmd 1145 * with the cmd
1113 */ 1146 */
1114static int 1147static int
1115lpfc_sc_to_sli_prof(struct scsi_cmnd *sc) 1148lpfc_sc_to_sli_prof(struct lpfc_hba *phba, struct scsi_cmnd *sc)
1116{ 1149{
1117 uint8_t guard_type = scsi_host_get_guard(sc->device->host); 1150 uint8_t guard_type = scsi_host_get_guard(sc->device->host);
1118 uint8_t ret_prof = LPFC_PROF_INVALID; 1151 uint8_t ret_prof = LPFC_PROF_INVALID;
@@ -1136,7 +1169,8 @@ lpfc_sc_to_sli_prof(struct scsi_cmnd *sc)
1136 1169
1137 case SCSI_PROT_NORMAL: 1170 case SCSI_PROT_NORMAL:
1138 default: 1171 default:
1139 printk(KERN_ERR "Bad op/guard:%d/%d combination\n", 1172 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1173 "9063 BLKGRD:Bad op/guard:%d/%d combination\n",
1140 scsi_get_prot_op(sc), guard_type); 1174 scsi_get_prot_op(sc), guard_type);
1141 break; 1175 break;
1142 1176
@@ -1157,7 +1191,8 @@ lpfc_sc_to_sli_prof(struct scsi_cmnd *sc)
1157 case SCSI_PROT_WRITE_STRIP: 1191 case SCSI_PROT_WRITE_STRIP:
1158 case SCSI_PROT_NORMAL: 1192 case SCSI_PROT_NORMAL:
1159 default: 1193 default:
1160 printk(KERN_ERR "Bad op/guard:%d/%d combination\n", 1194 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1195 "9075 BLKGRD: Bad op/guard:%d/%d combination\n",
1161 scsi_get_prot_op(sc), guard_type); 1196 scsi_get_prot_op(sc), guard_type);
1162 break; 1197 break;
1163 } 1198 }
@@ -1259,7 +1294,7 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1259 uint16_t apptagmask, apptagval; 1294 uint16_t apptagmask, apptagval;
1260 1295
1261 pde1 = (struct lpfc_pde *) bpl; 1296 pde1 = (struct lpfc_pde *) bpl;
1262 prof = lpfc_sc_to_sli_prof(sc); 1297 prof = lpfc_sc_to_sli_prof(phba, sc);
1263 1298
1264 if (prof == LPFC_PROF_INVALID) 1299 if (prof == LPFC_PROF_INVALID)
1265 goto out; 1300 goto out;
@@ -1359,7 +1394,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1359 return 0; 1394 return 0;
1360 } 1395 }
1361 1396
1362 prof = lpfc_sc_to_sli_prof(sc); 1397 prof = lpfc_sc_to_sli_prof(phba, sc);
1363 if (prof == LPFC_PROF_INVALID) 1398 if (prof == LPFC_PROF_INVALID)
1364 goto out; 1399 goto out;
1365 1400
@@ -1408,7 +1443,8 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1408 subtotal = 0; /* total bytes processed for current prot grp */ 1443 subtotal = 0; /* total bytes processed for current prot grp */
1409 while (!pgdone) { 1444 while (!pgdone) {
1410 if (!sgde) { 1445 if (!sgde) {
1411 printk(KERN_ERR "%s Invalid data segment\n", 1446 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1447 "9065 BLKGRD:%s Invalid data segment\n",
1412 __func__); 1448 __func__);
1413 return 0; 1449 return 0;
1414 } 1450 }
@@ -1462,7 +1498,8 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1462 reftag += protgrp_blks; 1498 reftag += protgrp_blks;
1463 } else { 1499 } else {
1464 /* if we're here, we have a bug */ 1500 /* if we're here, we have a bug */
1465 printk(KERN_ERR "BLKGRD: bug in %s\n", __func__); 1501 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1502 "9054 BLKGRD: bug in %s\n", __func__);
1466 } 1503 }
1467 1504
1468 } while (!alldone); 1505 } while (!alldone);
@@ -1544,8 +1581,10 @@ lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba,
1544 1581
1545 lpfc_cmd->seg_cnt = datasegcnt; 1582 lpfc_cmd->seg_cnt = datasegcnt;
1546 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 1583 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
1547 printk(KERN_ERR "%s: Too many sg segments from " 1584 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1548 "dma_map_sg. Config %d, seg_cnt %d\n", 1585 "9067 BLKGRD: %s: Too many sg segments"
1586 " from dma_map_sg. Config %d, seg_cnt"
1587 " %d\n",
1549 __func__, phba->cfg_sg_seg_cnt, 1588 __func__, phba->cfg_sg_seg_cnt,
1550 lpfc_cmd->seg_cnt); 1589 lpfc_cmd->seg_cnt);
1551 scsi_dma_unmap(scsi_cmnd); 1590 scsi_dma_unmap(scsi_cmnd);
@@ -1558,7 +1597,7 @@ lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba,
1558 case LPFC_PG_TYPE_NO_DIF: 1597 case LPFC_PG_TYPE_NO_DIF:
1559 num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl, 1598 num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
1560 datasegcnt); 1599 datasegcnt);
1561 /* we shoud have 2 or more entries in buffer list */ 1600 /* we should have 2 or more entries in buffer list */
1562 if (num_bde < 2) 1601 if (num_bde < 2)
1563 goto err; 1602 goto err;
1564 break; 1603 break;
@@ -1579,8 +1618,9 @@ lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba,
1579 lpfc_cmd->prot_seg_cnt = protsegcnt; 1618 lpfc_cmd->prot_seg_cnt = protsegcnt;
1580 if (lpfc_cmd->prot_seg_cnt 1619 if (lpfc_cmd->prot_seg_cnt
1581 > phba->cfg_prot_sg_seg_cnt) { 1620 > phba->cfg_prot_sg_seg_cnt) {
1582 printk(KERN_ERR "%s: Too many prot sg segments " 1621 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1583 "from dma_map_sg. Config %d," 1622 "9068 BLKGRD: %s: Too many prot sg "
1623 "segments from dma_map_sg. Config %d,"
1584 "prot_seg_cnt %d\n", __func__, 1624 "prot_seg_cnt %d\n", __func__,
1585 phba->cfg_prot_sg_seg_cnt, 1625 phba->cfg_prot_sg_seg_cnt,
1586 lpfc_cmd->prot_seg_cnt); 1626 lpfc_cmd->prot_seg_cnt);
@@ -1594,7 +1634,7 @@ lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba,
1594 1634
1595 num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl, 1635 num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
1596 datasegcnt, protsegcnt); 1636 datasegcnt, protsegcnt);
1597 /* we shoud have 3 or more entries in buffer list */ 1637 /* we should have 3 or more entries in buffer list */
1598 if (num_bde < 3) 1638 if (num_bde < 3)
1599 goto err; 1639 goto err;
1600 break; 1640 break;
@@ -1671,23 +1711,26 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
1671 uint32_t bgstat = bgf->bgstat; 1711 uint32_t bgstat = bgf->bgstat;
1672 uint64_t failing_sector = 0; 1712 uint64_t failing_sector = 0;
1673 1713
1674 printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%x " 1714 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9069 BLKGRD: BG ERROR in cmd"
1715 " 0x%x lba 0x%llx blk cnt 0x%x "
1675 "bgstat=0x%x bghm=0x%x\n", 1716 "bgstat=0x%x bghm=0x%x\n",
1676 cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd), 1717 cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
1677 blk_rq_sectors(cmd->request), bgstat, bghm); 1718 blk_rq_sectors(cmd->request), bgstat, bghm);
1678 1719
1679 spin_lock(&_dump_buf_lock); 1720 spin_lock(&_dump_buf_lock);
1680 if (!_dump_buf_done) { 1721 if (!_dump_buf_done) {
1681 printk(KERN_ERR "Saving Data for %u blocks to debugfs\n", 1722 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9070 BLKGRD: Saving"
1723 " Data for %u blocks to debugfs\n",
1682 (cmd->cmnd[7] << 8 | cmd->cmnd[8])); 1724 (cmd->cmnd[7] << 8 | cmd->cmnd[8]));
1683 lpfc_debug_save_data(cmd); 1725 lpfc_debug_save_data(phba, cmd);
1684 1726
1685 /* If we have a prot sgl, save the DIF buffer */ 1727 /* If we have a prot sgl, save the DIF buffer */
1686 if (lpfc_prot_group_type(phba, cmd) == 1728 if (lpfc_prot_group_type(phba, cmd) ==
1687 LPFC_PG_TYPE_DIF_BUF) { 1729 LPFC_PG_TYPE_DIF_BUF) {
1688 printk(KERN_ERR "Saving DIF for %u blocks to debugfs\n", 1730 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9071 BLKGRD: "
1689 (cmd->cmnd[7] << 8 | cmd->cmnd[8])); 1731 "Saving DIF for %u blocks to debugfs\n",
1690 lpfc_debug_save_dif(cmd); 1732 (cmd->cmnd[7] << 8 | cmd->cmnd[8]));
1733 lpfc_debug_save_dif(phba, cmd);
1691 } 1734 }
1692 1735
1693 _dump_buf_done = 1; 1736 _dump_buf_done = 1;
@@ -1696,15 +1739,17 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
1696 1739
1697 if (lpfc_bgs_get_invalid_prof(bgstat)) { 1740 if (lpfc_bgs_get_invalid_prof(bgstat)) {
1698 cmd->result = ScsiResult(DID_ERROR, 0); 1741 cmd->result = ScsiResult(DID_ERROR, 0);
1699 printk(KERN_ERR "Invalid BlockGuard profile. bgstat:0x%x\n", 1742 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9072 BLKGRD: Invalid"
1700 bgstat); 1743 " BlockGuard profile. bgstat:0x%x\n",
1744 bgstat);
1701 ret = (-1); 1745 ret = (-1);
1702 goto out; 1746 goto out;
1703 } 1747 }
1704 1748
1705 if (lpfc_bgs_get_uninit_dif_block(bgstat)) { 1749 if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
1706 cmd->result = ScsiResult(DID_ERROR, 0); 1750 cmd->result = ScsiResult(DID_ERROR, 0);
1707 printk(KERN_ERR "Invalid BlockGuard DIF Block. bgstat:0x%x\n", 1751 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9073 BLKGRD: "
1752 "Invalid BlockGuard DIF Block. bgstat:0x%x\n",
1708 bgstat); 1753 bgstat);
1709 ret = (-1); 1754 ret = (-1);
1710 goto out; 1755 goto out;
@@ -1718,7 +1763,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
1718 cmd->result = DRIVER_SENSE << 24 1763 cmd->result = DRIVER_SENSE << 24
1719 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); 1764 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
1720 phba->bg_guard_err_cnt++; 1765 phba->bg_guard_err_cnt++;
1721 printk(KERN_ERR "BLKGRD: guard_tag error\n"); 1766 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1767 "9055 BLKGRD: guard_tag error\n");
1722 } 1768 }
1723 1769
1724 if (lpfc_bgs_get_reftag_err(bgstat)) { 1770 if (lpfc_bgs_get_reftag_err(bgstat)) {
@@ -1730,7 +1776,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
1730 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); 1776 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
1731 1777
1732 phba->bg_reftag_err_cnt++; 1778 phba->bg_reftag_err_cnt++;
1733 printk(KERN_ERR "BLKGRD: ref_tag error\n"); 1779 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1780 "9056 BLKGRD: ref_tag error\n");
1734 } 1781 }
1735 1782
1736 if (lpfc_bgs_get_apptag_err(bgstat)) { 1783 if (lpfc_bgs_get_apptag_err(bgstat)) {
@@ -1742,7 +1789,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
1742 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); 1789 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
1743 1790
1744 phba->bg_apptag_err_cnt++; 1791 phba->bg_apptag_err_cnt++;
1745 printk(KERN_ERR "BLKGRD: app_tag error\n"); 1792 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1793 "9061 BLKGRD: app_tag error\n");
1746 } 1794 }
1747 1795
1748 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) { 1796 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
@@ -1763,7 +1811,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
1763 if (!ret) { 1811 if (!ret) {
1764 /* No error was reported - problem in FW? */ 1812 /* No error was reported - problem in FW? */
1765 cmd->result = ScsiResult(DID_ERROR, 0); 1813 cmd->result = ScsiResult(DID_ERROR, 0);
1766 printk(KERN_ERR "BLKGRD: no errors reported!\n"); 1814 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1815 "9057 BLKGRD: no errors reported!\n");
1767 } 1816 }
1768 1817
1769out: 1818out:
@@ -1822,9 +1871,10 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1822 1871
1823 lpfc_cmd->seg_cnt = nseg; 1872 lpfc_cmd->seg_cnt = nseg;
1824 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 1873 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
1825 printk(KERN_ERR "%s: Too many sg segments from " 1874 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
1826 "dma_map_sg. Config %d, seg_cnt %d\n", 1875 " %s: Too many sg segments from "
1827 __func__, phba->cfg_sg_seg_cnt, 1876 "dma_map_sg. Config %d, seg_cnt %d\n",
1877 __func__, phba->cfg_sg_seg_cnt,
1828 lpfc_cmd->seg_cnt); 1878 lpfc_cmd->seg_cnt);
1829 scsi_dma_unmap(scsi_cmnd); 1879 scsi_dma_unmap(scsi_cmnd);
1830 return 1; 1880 return 1;
@@ -1842,7 +1892,6 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1842 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) { 1892 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
1843 physaddr = sg_dma_address(sgel); 1893 physaddr = sg_dma_address(sgel);
1844 dma_len = sg_dma_len(sgel); 1894 dma_len = sg_dma_len(sgel);
1845 bf_set(lpfc_sli4_sge_len, sgl, sg_dma_len(sgel));
1846 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr)); 1895 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
1847 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr)); 1896 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
1848 if ((num_bde + 1) == nseg) 1897 if ((num_bde + 1) == nseg)
@@ -1851,7 +1900,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1851 bf_set(lpfc_sli4_sge_last, sgl, 0); 1900 bf_set(lpfc_sli4_sge_last, sgl, 0);
1852 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); 1901 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
1853 sgl->word2 = cpu_to_le32(sgl->word2); 1902 sgl->word2 = cpu_to_le32(sgl->word2);
1854 sgl->word3 = cpu_to_le32(sgl->word3); 1903 sgl->sge_len = cpu_to_le32(dma_len);
1855 dma_offset += dma_len; 1904 dma_offset += dma_len;
1856 sgl++; 1905 sgl++;
1857 } 1906 }
@@ -2050,6 +2099,31 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2050 goto out; 2099 goto out;
2051 } 2100 }
2052 2101
2102 if (resp_info & RSP_LEN_VALID) {
2103 rsplen = be32_to_cpu(fcprsp->rspRspLen);
2104 if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
2105 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2106 "2719 Invalid response length: "
2107 "tgt x%x lun x%x cmnd x%x rsplen x%x\n",
2108 cmnd->device->id,
2109 cmnd->device->lun, cmnd->cmnd[0],
2110 rsplen);
2111 host_status = DID_ERROR;
2112 goto out;
2113 }
2114 if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
2115 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2116 "2757 Protocol failure detected during "
2117 "processing of FCP I/O op: "
2118 "tgt x%x lun x%x cmnd x%x rspInfo3 x%x\n",
2119 cmnd->device->id,
2120 cmnd->device->lun, cmnd->cmnd[0],
2121 fcprsp->rspInfo3);
2122 host_status = DID_ERROR;
2123 goto out;
2124 }
2125 }
2126
2053 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) { 2127 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
2054 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen); 2128 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
2055 if (snslen > SCSI_SENSE_BUFFERSIZE) 2129 if (snslen > SCSI_SENSE_BUFFERSIZE)
@@ -2074,15 +2148,6 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2074 be32_to_cpu(fcprsp->rspRspLen), 2148 be32_to_cpu(fcprsp->rspRspLen),
2075 fcprsp->rspInfo3); 2149 fcprsp->rspInfo3);
2076 2150
2077 if (resp_info & RSP_LEN_VALID) {
2078 rsplen = be32_to_cpu(fcprsp->rspRspLen);
2079 if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
2080 (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
2081 host_status = DID_ERROR;
2082 goto out;
2083 }
2084 }
2085
2086 scsi_set_resid(cmnd, 0); 2151 scsi_set_resid(cmnd, 0);
2087 if (resp_info & RESID_UNDER) { 2152 if (resp_info & RESID_UNDER) {
2088 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId)); 2153 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
@@ -2180,7 +2245,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
2180 struct scsi_cmnd *cmd = lpfc_cmd->pCmd; 2245 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
2181 int result; 2246 int result;
2182 struct scsi_device *tmp_sdev; 2247 struct scsi_device *tmp_sdev;
2183 int depth = 0; 2248 int depth;
2184 unsigned long flags; 2249 unsigned long flags;
2185 struct lpfc_fast_path_event *fast_path_evt; 2250 struct lpfc_fast_path_event *fast_path_evt;
2186 struct Scsi_Host *shost = cmd->device->host; 2251 struct Scsi_Host *shost = cmd->device->host;
@@ -2188,6 +2253,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
2188 2253
2189 lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4]; 2254 lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
2190 lpfc_cmd->status = pIocbOut->iocb.ulpStatus; 2255 lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
2256 /* pick up SLI4 exchange busy status from HBA */
2257 lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;
2258
2191 if (pnode && NLP_CHK_NODE_ACT(pnode)) 2259 if (pnode && NLP_CHK_NODE_ACT(pnode))
2192 atomic_dec(&pnode->cmd_pending); 2260 atomic_dec(&pnode->cmd_pending);
2193 2261
@@ -2264,7 +2332,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
2264 lpfc_printf_vlog(vport, KERN_WARNING, 2332 lpfc_printf_vlog(vport, KERN_WARNING,
2265 LOG_BG, 2333 LOG_BG,
2266 "9031 non-zero BGSTAT " 2334 "9031 non-zero BGSTAT "
2267 "on unprotected cmd"); 2335 "on unprotected cmd\n");
2268 } 2336 }
2269 } 2337 }
2270 2338
@@ -2347,67 +2415,29 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
2347 return; 2415 return;
2348 } 2416 }
2349 2417
2350
2351 if (!result) 2418 if (!result)
2352 lpfc_rampup_queue_depth(vport, queue_depth); 2419 lpfc_rampup_queue_depth(vport, queue_depth);
2353 2420
2354 if (!result && pnode && NLP_CHK_NODE_ACT(pnode) &&
2355 ((jiffies - pnode->last_ramp_up_time) >
2356 LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
2357 ((jiffies - pnode->last_q_full_time) >
2358 LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
2359 (vport->cfg_lun_queue_depth > queue_depth)) {
2360 shost_for_each_device(tmp_sdev, shost) {
2361 if (vport->cfg_lun_queue_depth > tmp_sdev->queue_depth){
2362 if (tmp_sdev->id != scsi_id)
2363 continue;
2364 if (tmp_sdev->ordered_tags)
2365 scsi_adjust_queue_depth(tmp_sdev,
2366 MSG_ORDERED_TAG,
2367 tmp_sdev->queue_depth+1);
2368 else
2369 scsi_adjust_queue_depth(tmp_sdev,
2370 MSG_SIMPLE_TAG,
2371 tmp_sdev->queue_depth+1);
2372
2373 pnode->last_ramp_up_time = jiffies;
2374 }
2375 }
2376 lpfc_send_sdev_queuedepth_change_event(phba, vport, pnode,
2377 0xFFFFFFFF,
2378 queue_depth , queue_depth + 1);
2379 }
2380
2381 /* 2421 /*
2382 * Check for queue full. If the lun is reporting queue full, then 2422 * Check for queue full. If the lun is reporting queue full, then
2383 * back off the lun queue depth to prevent target overloads. 2423 * back off the lun queue depth to prevent target overloads.
2384 */ 2424 */
2385 if (result == SAM_STAT_TASK_SET_FULL && pnode && 2425 if (result == SAM_STAT_TASK_SET_FULL && pnode &&
2386 NLP_CHK_NODE_ACT(pnode)) { 2426 NLP_CHK_NODE_ACT(pnode)) {
2387 pnode->last_q_full_time = jiffies;
2388
2389 shost_for_each_device(tmp_sdev, shost) { 2427 shost_for_each_device(tmp_sdev, shost) {
2390 if (tmp_sdev->id != scsi_id) 2428 if (tmp_sdev->id != scsi_id)
2391 continue; 2429 continue;
2392 depth = scsi_track_queue_full(tmp_sdev, 2430 depth = scsi_track_queue_full(tmp_sdev,
2393 tmp_sdev->queue_depth - 1); 2431 tmp_sdev->queue_depth-1);
2394 } 2432 if (depth <= 0)
2395 /* 2433 continue;
2396 * The queue depth cannot be lowered any more.
2397 * Modify the returned error code to store
2398 * the final depth value set by
2399 * scsi_track_queue_full.
2400 */
2401 if (depth == -1)
2402 depth = shost->cmd_per_lun;
2403
2404 if (depth) {
2405 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 2434 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
2406 "0711 detected queue full - lun queue " 2435 "0711 detected queue full - lun queue "
2407 "depth adjusted to %d.\n", depth); 2436 "depth adjusted to %d.\n", depth);
2408 lpfc_send_sdev_queuedepth_change_event(phba, vport, 2437 lpfc_send_sdev_queuedepth_change_event(phba, vport,
2409 pnode, 0xFFFFFFFF, 2438 pnode,
2410 depth+1, depth); 2439 tmp_sdev->lun,
2440 depth+1, depth);
2411 } 2441 }
2412 } 2442 }
2413 2443
@@ -2642,6 +2672,7 @@ lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
2642 } 2672 }
2643 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf; 2673 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf;
2644 phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth; 2674 phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
2675 phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
2645 return 0; 2676 return 0;
2646} 2677}
2647 2678
@@ -2700,6 +2731,13 @@ lpfc_info(struct Scsi_Host *host)
2700 " port %s", 2731 " port %s",
2701 phba->Port); 2732 phba->Port);
2702 } 2733 }
2734 len = strlen(lpfcinfobuf);
2735 if (phba->sli4_hba.link_state.logical_speed) {
2736 snprintf(lpfcinfobuf + len,
2737 384-len,
2738 " Logical Link Speed: %d Mbps",
2739 phba->sli4_hba.link_state.logical_speed * 10);
2740 }
2703 } 2741 }
2704 return lpfcinfobuf; 2742 return lpfcinfobuf;
2705} 2743}
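Note on the new lpfc_info() suffix: logical_speed appears to be kept in units of 10 Mbps, hence the multiply by 10 when printing; a stored value of 1000, for example, would report "Logical Link Speed: 10000 Mbps" (10 Gbps). This unit assumption is inferred from the conversion, not stated in the hunk.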
@@ -2745,7 +2783,9 @@ void lpfc_poll_timeout(unsigned long ptr)
2745 struct lpfc_hba *phba = (struct lpfc_hba *) ptr; 2783 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
2746 2784
2747 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 2785 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
2748 lpfc_sli_poll_fcp_ring (phba); 2786 lpfc_sli_handle_fast_ring_event(phba,
2787 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
2788
2749 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 2789 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
2750 lpfc_poll_rearm_timer(phba); 2790 lpfc_poll_rearm_timer(phba);
2751 } 2791 }
@@ -2771,7 +2811,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2771 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2811 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2772 struct lpfc_hba *phba = vport->phba; 2812 struct lpfc_hba *phba = vport->phba;
2773 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 2813 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
2774 struct lpfc_nodelist *ndlp = rdata->pnode; 2814 struct lpfc_nodelist *ndlp;
2775 struct lpfc_scsi_buf *lpfc_cmd; 2815 struct lpfc_scsi_buf *lpfc_cmd;
2776 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); 2816 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
2777 int err; 2817 int err;
@@ -2781,13 +2821,15 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2781 cmnd->result = err; 2821 cmnd->result = err;
2782 goto out_fail_command; 2822 goto out_fail_command;
2783 } 2823 }
2824 ndlp = rdata->pnode;
2784 2825
2785 if (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) && 2826 if (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
2786 scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { 2827 scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
2787 2828
2788 printk(KERN_ERR "BLKGRD ERROR: rcvd protected cmd:%02x op:%02x " 2829 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2789 "str=%s without registering for BlockGuard - " 2830 "9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
2790 "Rejecting command\n", 2831 " op:%02x str=%s without registering for"
2832 " BlockGuard - Rejecting command\n",
2791 cmnd->cmnd[0], scsi_get_prot_op(cmnd), 2833 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
2792 dif_op_str[scsi_get_prot_op(cmnd)]); 2834 dif_op_str[scsi_get_prot_op(cmnd)]);
2793 goto out_fail_command; 2835 goto out_fail_command;
@@ -2827,61 +2869,66 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2827 cmnd->scsi_done = done; 2869 cmnd->scsi_done = done;
2828 2870
2829 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { 2871 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
2830 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2872 if (vport->phba->cfg_enable_bg) {
2873 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2831 "9033 BLKGRD: rcvd protected cmd:%02x op:%02x " 2874 "9033 BLKGRD: rcvd protected cmd:%02x op:%02x "
2832 "str=%s\n", 2875 "str=%s\n",
2833 cmnd->cmnd[0], scsi_get_prot_op(cmnd), 2876 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
2834 dif_op_str[scsi_get_prot_op(cmnd)]); 2877 dif_op_str[scsi_get_prot_op(cmnd)]);
2835 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2878 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2836 "9034 BLKGRD: CDB: %02x %02x %02x %02x %02x " 2879 "9034 BLKGRD: CDB: %02x %02x %02x %02x %02x "
2837 "%02x %02x %02x %02x %02x\n", 2880 "%02x %02x %02x %02x %02x\n",
2838 cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2], 2881 cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
2839 cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5], 2882 cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
2840 cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8], 2883 cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
2841 cmnd->cmnd[9]); 2884 cmnd->cmnd[9]);
2842 if (cmnd->cmnd[0] == READ_10) 2885 if (cmnd->cmnd[0] == READ_10)
2843 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2886 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2844 "9035 BLKGRD: READ @ sector %llu, " 2887 "9035 BLKGRD: READ @ sector %llu, "
2845 "count %u\n", 2888 "count %u\n",
2846 (unsigned long long)scsi_get_lba(cmnd), 2889 (unsigned long long)scsi_get_lba(cmnd),
2847 blk_rq_sectors(cmnd->request)); 2890 blk_rq_sectors(cmnd->request));
2848 else if (cmnd->cmnd[0] == WRITE_10) 2891 else if (cmnd->cmnd[0] == WRITE_10)
2849 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2892 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2850 "9036 BLKGRD: WRITE @ sector %llu, " 2893 "9036 BLKGRD: WRITE @ sector %llu, "
2851 "count %u cmd=%p\n", 2894 "count %u cmd=%p\n",
2852 (unsigned long long)scsi_get_lba(cmnd), 2895 (unsigned long long)scsi_get_lba(cmnd),
2853 blk_rq_sectors(cmnd->request), 2896 blk_rq_sectors(cmnd->request),
2854 cmnd); 2897 cmnd);
2898 }
2855 2899
2856 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); 2900 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
2857 } else { 2901 } else {
2858 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2902 if (vport->phba->cfg_enable_bg) {
2859 "9038 BLKGRD: rcvd unprotected cmd:%02x op:%02x"
2860 " str=%s\n",
2861 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
2862 dif_op_str[scsi_get_prot_op(cmnd)]);
2863 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2864 "9039 BLKGRD: CDB: %02x %02x %02x %02x %02x "
2865 "%02x %02x %02x %02x %02x\n",
2866 cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
2867 cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
2868 cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
2869 cmnd->cmnd[9]);
2870 if (cmnd->cmnd[0] == READ_10)
2871 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2903 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2872 "9040 dbg: READ @ sector %llu, " 2904 "9038 BLKGRD: rcvd unprotected cmd:"
2873 "count %u\n", 2905 "%02x op:%02x str=%s\n",
2874 (unsigned long long)scsi_get_lba(cmnd), 2906 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
2907 dif_op_str[scsi_get_prot_op(cmnd)]);
2908 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2909 "9039 BLKGRD: CDB: %02x %02x %02x "
2910 "%02x %02x %02x %02x %02x %02x %02x\n",
2911 cmnd->cmnd[0], cmnd->cmnd[1],
2912 cmnd->cmnd[2], cmnd->cmnd[3],
2913 cmnd->cmnd[4], cmnd->cmnd[5],
2914 cmnd->cmnd[6], cmnd->cmnd[7],
2915 cmnd->cmnd[8], cmnd->cmnd[9]);
2916 if (cmnd->cmnd[0] == READ_10)
2917 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2918 "9040 dbg: READ @ sector %llu, "
2919 "count %u\n",
2920 (unsigned long long)scsi_get_lba(cmnd),
2875 blk_rq_sectors(cmnd->request)); 2921 blk_rq_sectors(cmnd->request));
2876 else if (cmnd->cmnd[0] == WRITE_10) 2922 else if (cmnd->cmnd[0] == WRITE_10)
2877 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2923 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2878 "9041 dbg: WRITE @ sector %llu, " 2924 "9041 dbg: WRITE @ sector %llu, "
2879 "count %u cmd=%p\n", 2925 "count %u cmd=%p\n",
2880 (unsigned long long)scsi_get_lba(cmnd), 2926 (unsigned long long)scsi_get_lba(cmnd),
2881 blk_rq_sectors(cmnd->request), cmnd); 2927 blk_rq_sectors(cmnd->request), cmnd);
2882 else 2928 else
2883 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2929 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2884 "9042 dbg: parser not implemented\n"); 2930 "9042 dbg: parser not implemented\n");
2931 }
2885 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); 2932 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
2886 } 2933 }
2887 2934
@@ -2898,7 +2945,11 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2898 goto out_host_busy_free_buf; 2945 goto out_host_busy_free_buf;
2899 } 2946 }
2900 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 2947 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
2901 lpfc_sli_poll_fcp_ring(phba); 2948 spin_unlock(shost->host_lock);
2949 lpfc_sli_handle_fast_ring_event(phba,
2950 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
2951
2952 spin_lock(shost->host_lock);
2902 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 2953 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
2903 lpfc_poll_rearm_timer(phba); 2954 lpfc_poll_rearm_timer(phba);
2904 } 2955 }
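
The hunk above drops shost->host_lock around the ring poll because the fast-ring event handler may invoke completion callbacks that take the same lock. A minimal userspace sketch of the drop-call-reacquire pattern, with a pthread mutex standing in for the spinlock (all names here are illustrative, not lpfc API):

#include <pthread.h>

static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical handler standing in for lpfc_sli_handle_fast_ring_event(). */
static void handle_ring_events(void) { /* may run completion callbacks */ }

static void queuecommand_poll_path(void)
{
    pthread_mutex_lock(&host_lock);
    /* ... fast-path work done under the lock ... */

    /* Drop the lock before polling the ring: the handler may call
     * completions that themselves need host_lock. */
    pthread_mutex_unlock(&host_lock);
    handle_ring_events();
    pthread_mutex_lock(&host_lock);

    /* ... continue under the lock ... */
    pthread_mutex_unlock(&host_lock);
}
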
@@ -2917,28 +2968,6 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2917} 2968}
2918 2969
2919/** 2970/**
2920 * lpfc_block_error_handler - Routine to block error handler
2921 * @cmnd: Pointer to scsi_cmnd data structure.
2922 *
2923 * This routine blocks execution until the fc_rport state is no longer FC_PORTSTATE_BLOCKED.
2924 **/
2925static void
2926lpfc_block_error_handler(struct scsi_cmnd *cmnd)
2927{
2928 struct Scsi_Host *shost = cmnd->device->host;
2929 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
2930
2931 spin_lock_irq(shost->host_lock);
2932 while (rport->port_state == FC_PORTSTATE_BLOCKED) {
2933 spin_unlock_irq(shost->host_lock);
2934 msleep(1000);
2935 spin_lock_irq(shost->host_lock);
2936 }
2937 spin_unlock_irq(shost->host_lock);
2938 return;
2939}
2940
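
The deleted helper open-coded a sleep-and-recheck wait on the rport state, dropping the lock across each sleep; the callers now use the transport-provided fc_block_scsi_eh() instead. A minimal userspace sketch of the removed pattern, assuming a stand-in state field (not lpfc names):

#include <pthread.h>
#include <unistd.h>

enum { BLOCKED, UNBLOCKED };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int state = BLOCKED;

static void block_until_unblocked(void)
{
    pthread_mutex_lock(&lock);
    while (state == BLOCKED) {
        pthread_mutex_unlock(&lock);  /* never sleep holding the lock */
        sleep(1);                     /* msleep(1000) in the original */
        pthread_mutex_lock(&lock);
    }
    pthread_mutex_unlock(&lock);
}
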
2941/**
2942 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point 2971 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
2943 * @cmnd: Pointer to scsi_cmnd data structure. 2972 * @cmnd: Pointer to scsi_cmnd data structure.
2944 * 2973 *
@@ -2961,7 +2990,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
2961 int ret = SUCCESS; 2990 int ret = SUCCESS;
2962 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); 2991 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
2963 2992
2964 lpfc_block_error_handler(cmnd); 2993 fc_block_scsi_eh(cmnd);
2965 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble; 2994 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
2966 BUG_ON(!lpfc_cmd); 2995 BUG_ON(!lpfc_cmd);
2967 2996
@@ -3001,6 +3030,11 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
3001 3030
3002 icmd->ulpLe = 1; 3031 icmd->ulpLe = 1;
3003 icmd->ulpClass = cmd->ulpClass; 3032 icmd->ulpClass = cmd->ulpClass;
3033
3034 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
3035 abtsiocb->fcp_wqidx = iocb->fcp_wqidx;
3036 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
3037
3004 if (lpfc_is_link_up(phba)) 3038 if (lpfc_is_link_up(phba))
3005 icmd->ulpCommand = CMD_ABORT_XRI_CN; 3039 icmd->ulpCommand = CMD_ABORT_XRI_CN;
3006 else 3040 else
@@ -3016,7 +3050,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
3016 } 3050 }
3017 3051
3018 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 3052 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
3019 lpfc_sli_poll_fcp_ring (phba); 3053 lpfc_sli_handle_fast_ring_event(phba,
3054 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
3020 3055
3021 lpfc_cmd->waitq = &waitq; 3056 lpfc_cmd->waitq = &waitq;
3022 /* Wait for abort to complete */ 3057 /* Wait for abort to complete */
@@ -3166,9 +3201,15 @@ static int
3166lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd) 3201lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
3167{ 3202{
3168 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 3203 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
3169 struct lpfc_nodelist *pnode = rdata->pnode; 3204 struct lpfc_nodelist *pnode;
3170 unsigned long later; 3205 unsigned long later;
3171 3206
3207 if (!rdata) {
3208 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
3209 "0797 Tgt Map rport failure: rdata x%p\n", rdata);
3210 return FAILED;
3211 }
3212 pnode = rdata->pnode;
3172 /* 3213 /*
3173 * If target is not in a MAPPED state, delay until 3214 * If target is not in a MAPPED state, delay until
3174 * target is rediscovered or devloss timeout expires. 3215 * target is rediscovered or devloss timeout expires.
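
The added guard ensures rdata is non-NULL before pnode is read from it, since the device hostdata can already be torn down when the error handler runs. A minimal sketch of the check, with stand-in types:

struct rport_data { void *pnode; };

/* Sketch of the guard above: never dereference rdata->pnode until
 * rdata itself is known to be valid. */
static int check_target(const struct rport_data *rdata)
{
    if (!rdata)
        return -1;              /* FAILED in the driver */
    return rdata->pnode ? 0 : -1;
}
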
@@ -3253,13 +3294,19 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
3253 struct Scsi_Host *shost = cmnd->device->host; 3294 struct Scsi_Host *shost = cmnd->device->host;
3254 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 3295 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3255 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 3296 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
3256 struct lpfc_nodelist *pnode = rdata->pnode; 3297 struct lpfc_nodelist *pnode;
3257 unsigned tgt_id = cmnd->device->id; 3298 unsigned tgt_id = cmnd->device->id;
3258 unsigned int lun_id = cmnd->device->lun; 3299 unsigned int lun_id = cmnd->device->lun;
3259 struct lpfc_scsi_event_header scsi_event; 3300 struct lpfc_scsi_event_header scsi_event;
3260 int status; 3301 int status;
3261 3302
3262 lpfc_block_error_handler(cmnd); 3303 if (!rdata) {
3304 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3305 "0798 Device Reset rport failure: rdata x%p\n", rdata);
3306 return FAILED;
3307 }
3308 pnode = rdata->pnode;
3309 fc_block_scsi_eh(cmnd);
3263 3310
3264 status = lpfc_chk_tgt_mapped(vport, cmnd); 3311 status = lpfc_chk_tgt_mapped(vport, cmnd);
3265 if (status == FAILED) { 3312 if (status == FAILED) {
@@ -3312,13 +3359,19 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
3312 struct Scsi_Host *shost = cmnd->device->host; 3359 struct Scsi_Host *shost = cmnd->device->host;
3313 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 3360 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3314 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 3361 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
3315 struct lpfc_nodelist *pnode = rdata->pnode; 3362 struct lpfc_nodelist *pnode;
3316 unsigned tgt_id = cmnd->device->id; 3363 unsigned tgt_id = cmnd->device->id;
3317 unsigned int lun_id = cmnd->device->lun; 3364 unsigned int lun_id = cmnd->device->lun;
3318 struct lpfc_scsi_event_header scsi_event; 3365 struct lpfc_scsi_event_header scsi_event;
3319 int status; 3366 int status;
3320 3367
3321 lpfc_block_error_handler(cmnd); 3368 if (!rdata) {
3369 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3370 "0799 Target Reset rport failure: rdata x%p\n", rdata);
3371 return FAILED;
3372 }
3373 pnode = rdata->pnode;
3374 fc_block_scsi_eh(cmnd);
3322 3375
3323 status = lpfc_chk_tgt_mapped(vport, cmnd); 3376 status = lpfc_chk_tgt_mapped(vport, cmnd);
3324 if (status == FAILED) { 3377 if (status == FAILED) {
@@ -3384,7 +3437,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
3384 fc_host_post_vendor_event(shost, fc_get_event_number(), 3437 fc_host_post_vendor_event(shost, fc_get_event_number(),
3385 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); 3438 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
3386 3439
3387 lpfc_block_error_handler(cmnd); 3440 fc_block_scsi_eh(cmnd);
3388 3441
3389 /* 3442 /*
3390 * Since the driver manages a single bus device, reset all 3443 * Since the driver manages a single bus device, reset all
@@ -3498,6 +3551,8 @@ lpfc_slave_alloc(struct scsi_device *sdev)
3498 "Allocated %d buffers.\n", 3551 "Allocated %d buffers.\n",
3499 num_to_alloc, num_allocated); 3552 num_to_alloc, num_allocated);
3500 } 3553 }
3554 if (num_allocated > 0)
3555 phba->total_scsi_bufs += num_allocated;
3501 return 0; 3556 return 0;
3502} 3557}
3503 3558
@@ -3534,7 +3589,8 @@ lpfc_slave_configure(struct scsi_device *sdev)
3534 rport->dev_loss_tmo = vport->cfg_devloss_tmo; 3589 rport->dev_loss_tmo = vport->cfg_devloss_tmo;
3535 3590
3536 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 3591 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
3537 lpfc_sli_poll_fcp_ring(phba); 3592 lpfc_sli_handle_fast_ring_event(phba,
3593 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
3538 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 3594 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
3539 lpfc_poll_rearm_timer(phba); 3595 lpfc_poll_rearm_timer(phba);
3540 } 3596 }
@@ -3576,6 +3632,7 @@ struct scsi_host_template lpfc_template = {
3576 .shost_attrs = lpfc_hba_attrs, 3632 .shost_attrs = lpfc_hba_attrs,
3577 .max_sectors = 0xFFFF, 3633 .max_sectors = 0xFFFF,
3578 .vendor_id = LPFC_NL_VENDOR_ID, 3634 .vendor_id = LPFC_NL_VENDOR_ID,
3635 .change_queue_depth = lpfc_change_queue_depth,
3579}; 3636};
3580 3637
3581struct scsi_host_template lpfc_vport_template = { 3638struct scsi_host_template lpfc_vport_template = {
@@ -3597,4 +3654,5 @@ struct scsi_host_template lpfc_vport_template = {
3597 .use_clustering = ENABLE_CLUSTERING, 3654 .use_clustering = ENABLE_CLUSTERING,
3598 .shost_attrs = lpfc_vport_attrs, 3655 .shost_attrs = lpfc_vport_attrs,
3599 .max_sectors = 0xFFFF, 3656 .max_sectors = 0xFFFF,
3657 .change_queue_depth = lpfc_change_queue_depth,
3600}; 3658};
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index 65dfc8bd5b49..5932273870a5 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -118,6 +118,7 @@ struct lpfc_scsi_buf {
118 118
119 uint32_t timeout; 119 uint32_t timeout;
120 120
121 uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */
121 uint16_t status; /* From IOCB Word 7- ulpStatus */ 122 uint16_t status; /* From IOCB Word 7- ulpStatus */
122 uint32_t result; /* From IOCB Word 4. */ 123 uint32_t result; /* From IOCB Word 4. */
123 124
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 43cbe336f1f8..049fb9a17b3f 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -23,6 +23,7 @@
23#include <linux/pci.h> 23#include <linux/pci.h>
24#include <linux/interrupt.h> 24#include <linux/interrupt.h>
25#include <linux/delay.h> 25#include <linux/delay.h>
26#include <linux/slab.h>
26 27
27#include <scsi/scsi.h> 28#include <scsi/scsi.h>
28#include <scsi/scsi_cmnd.h> 29#include <scsi/scsi_cmnd.h>
@@ -30,6 +31,7 @@
30#include <scsi/scsi_host.h> 31#include <scsi/scsi_host.h>
31#include <scsi/scsi_transport_fc.h> 32#include <scsi/scsi_transport_fc.h>
32#include <scsi/fc/fc_fs.h> 33#include <scsi/fc/fc_fs.h>
34#include <linux/aer.h>
33 35
34#include "lpfc_hw4.h" 36#include "lpfc_hw4.h"
35#include "lpfc_hw.h" 37#include "lpfc_hw.h"
@@ -58,8 +60,11 @@ typedef enum _lpfc_iocb_type {
58static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *, 60static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
59 uint32_t); 61 uint32_t);
60static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *, 62static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
61 uint8_t *, uint32_t *); 63 uint8_t *, uint32_t *);
62 64static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
65 struct lpfc_iocbq *);
66static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
67 struct hbq_dmabuf *);
63static IOCB_t * 68static IOCB_t *
64lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq) 69lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
65{ 70{
@@ -259,6 +264,9 @@ lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
259 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT); 264 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
260 bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id); 265 bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id);
261 writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr); 266 writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
267 /* PCI read to flush PCI pipeline on re-arming for INTx mode */
268 if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
269 readl(q->phba->sli4_hba.EQCQDBregaddr);
262 return released; 270 return released;
263} 271}
264 272
@@ -487,7 +495,7 @@ __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
487 * 495 *
488 * Returns sglq pointer = success, NULL = failure. 496
489 **/ 497 **/
490static struct lpfc_sglq * 498struct lpfc_sglq *
491__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag) 499__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
492{ 500{
493 uint16_t adj_xri; 501 uint16_t adj_xri;
@@ -515,8 +523,11 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba)
515 struct lpfc_sglq *sglq = NULL; 523 struct lpfc_sglq *sglq = NULL;
516 uint16_t adj_xri; 524 uint16_t adj_xri;
517 list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list); 525 list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
526 if (!sglq)
527 return NULL;
518 adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base; 528 adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base;
519 phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq; 529 phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
530 sglq->state = SGL_ALLOCATED;
520 return sglq; 531 return sglq;
521} 532}
522 533
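
list_remove_head() yields NULL when the free list is empty, and the added check keeps the subsequent sli4_xritag dereference from faulting. A minimal sketch of a pop-with-guard, with stand-in types:

struct sglq { struct sglq *next; };

static struct sglq *get_sglq(struct sglq **free_list)
{
    struct sglq *sglq = *free_list;

    if (!sglq)                  /* free list exhausted: without this */
        return NULL;            /* check the old code dereferenced NULL */
    *free_list = sglq->next;
    return sglq;
}
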
@@ -571,18 +582,18 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
571 else 582 else
572 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag); 583 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
573 if (sglq) { 584 if (sglq) {
574 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED 585 if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
575 || ((iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT) 586 (sglq->state != SGL_XRI_ABORTED)) {
576 && (iocbq->iocb.un.ulpWord[4]
577 == IOERR_SLI_ABORTED))) {
578 spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, 587 spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
579 iflag); 588 iflag);
580 list_add(&sglq->list, 589 list_add(&sglq->list,
581 &phba->sli4_hba.lpfc_abts_els_sgl_list); 590 &phba->sli4_hba.lpfc_abts_els_sgl_list);
582 spin_unlock_irqrestore( 591 spin_unlock_irqrestore(
583 &phba->sli4_hba.abts_sgl_list_lock, iflag); 592 &phba->sli4_hba.abts_sgl_list_lock, iflag);
584 } else 593 } else {
594 sglq->state = SGL_FREED;
585 list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list); 595 list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
596 }
586 } 597 }
587 598
588 599
@@ -755,10 +766,6 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
755 case DSSCMD_IWRITE64_CX: 766 case DSSCMD_IWRITE64_CX:
756 case DSSCMD_IREAD64_CR: 767 case DSSCMD_IREAD64_CR:
757 case DSSCMD_IREAD64_CX: 768 case DSSCMD_IREAD64_CX:
758 case DSSCMD_INVALIDATE_DEK:
759 case DSSCMD_SET_KEK:
760 case DSSCMD_GET_KEK_ID:
761 case DSSCMD_GEN_XFER:
762 type = LPFC_SOL_IOCB; 769 type = LPFC_SOL_IOCB;
763 break; 770 break;
764 case CMD_ABORT_XRI_CN: 771 case CMD_ABORT_XRI_CN:
@@ -767,6 +774,7 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
767 case CMD_CLOSE_XRI_CX: 774 case CMD_CLOSE_XRI_CX:
768 case CMD_XRI_ABORTED_CX: 775 case CMD_XRI_ABORTED_CX:
769 case CMD_ABORT_MXRI64_CN: 776 case CMD_ABORT_MXRI64_CN:
777 case CMD_XMIT_BLS_RSP64_CX:
770 type = LPFC_ABORT_IOCB; 778 type = LPFC_ABORT_IOCB;
771 break; 779 break;
772 case CMD_RCV_SEQUENCE_CX: 780 case CMD_RCV_SEQUENCE_CX:
@@ -1373,7 +1381,7 @@ lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
1373/* HBQ for ELS and CT traffic. */ 1381/* HBQ for ELS and CT traffic. */
1374static struct lpfc_hbq_init lpfc_els_hbq = { 1382static struct lpfc_hbq_init lpfc_els_hbq = {
1375 .rn = 1, 1383 .rn = 1,
1376 .entry_count = 200, 1384 .entry_count = 256,
1377 .mask_count = 0, 1385 .mask_count = 0,
1378 .profile = 0, 1386 .profile = 0,
1379 .ring_mask = (1 << LPFC_ELS_RING), 1387 .ring_mask = (1 << LPFC_ELS_RING),
@@ -1472,8 +1480,11 @@ err:
1472int 1480int
1473lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno) 1481lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
1474{ 1482{
1475 return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 1483 if (phba->sli_rev == LPFC_SLI_REV4)
1476 lpfc_hbq_defs[qno]->add_count)); 1484 return 0;
1485 else
1486 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1487 lpfc_hbq_defs[qno]->add_count);
1477} 1488}
1478 1489
1479/** 1490/**
@@ -1488,8 +1499,12 @@ lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
1488static int 1499static int
1489lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno) 1500lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
1490{ 1501{
1491 return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 1502 if (phba->sli_rev == LPFC_SLI_REV4)
1492 lpfc_hbq_defs[qno]->init_count)); 1503 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1504 lpfc_hbq_defs[qno]->entry_count);
1505 else
1506 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1507 lpfc_hbq_defs[qno]->init_count);
1493} 1508}
1494 1509
1495/** 1510/**
@@ -1700,6 +1715,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1700 struct lpfc_dmabuf *mp; 1715 struct lpfc_dmabuf *mp;
1701 uint16_t rpi, vpi; 1716 uint16_t rpi, vpi;
1702 int rc; 1717 int rc;
1718 struct lpfc_vport *vport = pmb->vport;
1703 1719
1704 mp = (struct lpfc_dmabuf *) (pmb->context1); 1720 mp = (struct lpfc_dmabuf *) (pmb->context1);
1705 1721
@@ -1728,6 +1744,18 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1728 return; 1744 return;
1729 } 1745 }
1730 1746
1747 /* Unreg VPI, if the REG_VPI succeed after VLink failure */
1748 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
1749 !(phba->pport->load_flag & FC_UNLOADING) &&
1750 !pmb->u.mb.mbxStatus) {
1751 lpfc_unreg_vpi(phba, pmb->u.mb.un.varRegVpi.vpi, pmb);
1752 pmb->vport = vport;
1753 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1754 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
1755 if (rc != MBX_NOT_FINISHED)
1756 return;
1757 }
1758
1731 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG) 1759 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
1732 lpfc_sli4_mbox_cmd_free(phba, pmb); 1760 lpfc_sli4_mbox_cmd_free(phba, pmb);
1733 else 1761 else
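
The new block reuses the completed mailbox to issue an UNREG_VPI when REG_VPI succeeded after a VLink failure, returning early only when the re-issue is accepted so the mailbox is not freed twice. A sketch of the chained-mailbox idea, with illustrative names only:

struct mbox;
typedef void (*cmpl_fn)(struct mbox *);

struct mbox { cmpl_fn cmpl; int cmd; };

static int issue_mbox(struct mbox *m) { (void)m; return 0; /* 0 = accepted */ }

static void default_cmpl(struct mbox *m)
{
    if (m->cmd == 1 /* REG_VPI succeeded after VLink failure */) {
        m->cmd = 2;             /* rewrite as UNREG_VPI */
        m->cmpl = default_cmpl; /* complete through the same handler */
        if (issue_mbox(m) == 0)
            return;             /* ownership passed back to the queue */
    }
    /* otherwise the mailbox is freed here */
}
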
@@ -1794,7 +1822,7 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
1794 */ 1822 */
1795 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == 1823 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
1796 MBX_SHUTDOWN) { 1824 MBX_SHUTDOWN) {
1797 /* Unknow mailbox command compl */ 1825 /* Unknown mailbox command compl */
1798 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 1826 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
1799 "(%d):0323 Unknown Mailbox command " 1827 "(%d):0323 Unknown Mailbox command "
1800 "x%x (x%x) Cmpl\n", 1828 "x%x (x%x) Cmpl\n",
@@ -2068,8 +2096,8 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2068 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) || 2096 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
2069 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) || 2097 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
2070 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) { 2098 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
2071 Rctl = FC_ELS_REQ; 2099 Rctl = FC_RCTL_ELS_REQ;
2072 Type = FC_ELS_DATA; 2100 Type = FC_TYPE_ELS;
2073 } else { 2101 } else {
2074 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]); 2102 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
2075 Rctl = w5p->hcsw.Rctl; 2103 Rctl = w5p->hcsw.Rctl;
@@ -2079,8 +2107,8 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2079 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) && 2107 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
2080 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX || 2108 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
2081 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 2109 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
2082 Rctl = FC_ELS_REQ; 2110 Rctl = FC_RCTL_ELS_REQ;
2083 Type = FC_ELS_DATA; 2111 Type = FC_TYPE_ELS;
2084 w5p->hcsw.Rctl = Rctl; 2112 w5p->hcsw.Rctl = Rctl;
2085 w5p->hcsw.Type = Type; 2113 w5p->hcsw.Type = Type;
2086 } 2114 }
@@ -2211,9 +2239,15 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2211 * All other are passed to the completion callback. 2239 * All other are passed to the completion callback.
2212 */ 2240 */
2213 if (pring->ringno == LPFC_ELS_RING) { 2241 if (pring->ringno == LPFC_ELS_RING) {
2214 if (cmdiocbp->iocb_flag & LPFC_DRIVER_ABORTED) { 2242 if ((phba->sli_rev < LPFC_SLI_REV4) &&
2243 (cmdiocbp->iocb_flag &
2244 LPFC_DRIVER_ABORTED)) {
2245 spin_lock_irqsave(&phba->hbalock,
2246 iflag);
2215 cmdiocbp->iocb_flag &= 2247 cmdiocbp->iocb_flag &=
2216 ~LPFC_DRIVER_ABORTED; 2248 ~LPFC_DRIVER_ABORTED;
2249 spin_unlock_irqrestore(&phba->hbalock,
2250 iflag);
2217 saveq->iocb.ulpStatus = 2251 saveq->iocb.ulpStatus =
2218 IOSTAT_LOCAL_REJECT; 2252 IOSTAT_LOCAL_REJECT;
2219 saveq->iocb.un.ulpWord[4] = 2253 saveq->iocb.un.ulpWord[4] =
@@ -2223,7 +2257,62 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2223 * of DMAing payload, so don't free data 2257 * of DMAing payload, so don't free data
2224 * buffer till after a hbeat. 2258 * buffer till after a hbeat.
2225 */ 2259 */
2260 spin_lock_irqsave(&phba->hbalock,
2261 iflag);
2226 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE; 2262 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
2263 spin_unlock_irqrestore(&phba->hbalock,
2264 iflag);
2265 }
2266 if (phba->sli_rev == LPFC_SLI_REV4) {
2267 if (saveq->iocb_flag &
2268 LPFC_EXCHANGE_BUSY) {
2269 /* Set cmdiocb flag for the
2270 * exchange busy so sgl (xri)
2271 * will not be released until
2272 * the abort xri is received
2273 * from hba.
2274 */
2275 spin_lock_irqsave(
2276 &phba->hbalock, iflag);
2277 cmdiocbp->iocb_flag |=
2278 LPFC_EXCHANGE_BUSY;
2279 spin_unlock_irqrestore(
2280 &phba->hbalock, iflag);
2281 }
2282 if (cmdiocbp->iocb_flag &
2283 LPFC_DRIVER_ABORTED) {
2284 /*
2285 * Clear LPFC_DRIVER_ABORTED
2286 * bit in case it was driver
2287 * initiated abort.
2288 */
2289 spin_lock_irqsave(
2290 &phba->hbalock, iflag);
2291 cmdiocbp->iocb_flag &=
2292 ~LPFC_DRIVER_ABORTED;
2293 spin_unlock_irqrestore(
2294 &phba->hbalock, iflag);
2295 cmdiocbp->iocb.ulpStatus =
2296 IOSTAT_LOCAL_REJECT;
2297 cmdiocbp->iocb.un.ulpWord[4] =
2298 IOERR_ABORT_REQUESTED;
2299 /*
2300 * For SLI4, irsiocb contains
2301 * NO_XRI in sli_xritag; it
2302 * shall not affect the sgl
2303 * (xri) release process.
2304 */
2305 saveq->iocb.ulpStatus =
2306 IOSTAT_LOCAL_REJECT;
2307 saveq->iocb.un.ulpWord[4] =
2308 IOERR_SLI_ABORTED;
2309 spin_lock_irqsave(
2310 &phba->hbalock, iflag);
2311 saveq->iocb_flag |=
2312 LPFC_DELAY_MEM_FREE;
2313 spin_unlock_irqrestore(
2314 &phba->hbalock, iflag);
2315 }
2227 } 2316 }
2228 } 2317 }
2229 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); 2318 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
@@ -2324,168 +2413,6 @@ void lpfc_poll_eratt(unsigned long ptr)
2324 return; 2413 return;
2325} 2414}
2326 2415
2327/**
2328 * lpfc_sli_poll_fcp_ring - Handle FCP ring completion in polling mode
2329 * @phba: Pointer to HBA context object.
2330 *
2331 * This function is called from lpfc_queuecommand, lpfc_poll_timeout,
2332 * lpfc_abort_handler and lpfc_slave_configure when FCP_RING_POLLING
2333 * is enabled.
2334 *
2335 * The caller does not hold any lock.
2336 * The function processes each response iocb in the response ring until it
2337 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
2338 * LE bit set. The function will call the completion handler of the command iocb
2339 * if the response iocb indicates a completion for a command iocb or it is
2340 * an abort completion.
2341 **/
2342void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
2343{
2344 struct lpfc_sli *psli = &phba->sli;
2345 struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
2346 IOCB_t *irsp = NULL;
2347 IOCB_t *entry = NULL;
2348 struct lpfc_iocbq *cmdiocbq = NULL;
2349 struct lpfc_iocbq rspiocbq;
2350 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
2351 uint32_t status;
2352 uint32_t portRspPut, portRspMax;
2353 int type;
2354 uint32_t rsp_cmpl = 0;
2355 uint32_t ha_copy;
2356 unsigned long iflags;
2357
2358 pring->stats.iocb_event++;
2359
2360 /*
2361 * The next available response entry should never exceed the maximum
2362 * entries. If it does, treat it as an adapter hardware error.
2363 */
2364 portRspMax = pring->numRiocb;
2365 portRspPut = le32_to_cpu(pgp->rspPutInx);
2366 if (unlikely(portRspPut >= portRspMax)) {
2367 lpfc_sli_rsp_pointers_error(phba, pring);
2368 return;
2369 }
2370
2371 rmb();
2372 while (pring->rspidx != portRspPut) {
2373 entry = lpfc_resp_iocb(phba, pring);
2374 if (++pring->rspidx >= portRspMax)
2375 pring->rspidx = 0;
2376
2377 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
2378 (uint32_t *) &rspiocbq.iocb,
2379 phba->iocb_rsp_size);
2380 irsp = &rspiocbq.iocb;
2381 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
2382 pring->stats.iocb_rsp++;
2383 rsp_cmpl++;
2384
2385 if (unlikely(irsp->ulpStatus)) {
2386 /* Rsp ring <ringno> error: IOCB */
2387 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2388 "0326 Rsp Ring %d error: IOCB Data: "
2389 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
2390 pring->ringno,
2391 irsp->un.ulpWord[0],
2392 irsp->un.ulpWord[1],
2393 irsp->un.ulpWord[2],
2394 irsp->un.ulpWord[3],
2395 irsp->un.ulpWord[4],
2396 irsp->un.ulpWord[5],
2397 *(uint32_t *)&irsp->un1,
2398 *((uint32_t *)&irsp->un1 + 1));
2399 }
2400
2401 switch (type) {
2402 case LPFC_ABORT_IOCB:
2403 case LPFC_SOL_IOCB:
2404 /*
2405 * Idle exchange closed via ABTS from port. No iocb
2406 * resources need to be recovered.
2407 */
2408 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
2409 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
2410 "0314 IOCB cmd 0x%x "
2411 "processed. Skipping "
2412 "completion",
2413 irsp->ulpCommand);
2414 break;
2415 }
2416
2417 spin_lock_irqsave(&phba->hbalock, iflags);
2418 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
2419 &rspiocbq);
2420 spin_unlock_irqrestore(&phba->hbalock, iflags);
2421 if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
2422 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
2423 &rspiocbq);
2424 }
2425 break;
2426 default:
2427 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
2428 char adaptermsg[LPFC_MAX_ADPTMSG];
2429 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
2430 memcpy(&adaptermsg[0], (uint8_t *) irsp,
2431 MAX_MSG_DATA);
2432 dev_warn(&((phba->pcidev)->dev),
2433 "lpfc%d: %s\n",
2434 phba->brd_no, adaptermsg);
2435 } else {
2436 /* Unknown IOCB command */
2437 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2438 "0321 Unknown IOCB command "
2439 "Data: x%x, x%x x%x x%x x%x\n",
2440 type, irsp->ulpCommand,
2441 irsp->ulpStatus,
2442 irsp->ulpIoTag,
2443 irsp->ulpContext);
2444 }
2445 break;
2446 }
2447
2448 /*
2449 * The response IOCB has been processed. Update the ring
2450 * pointer in SLIM. If the port response put pointer has not
2451 * been updated, sync the pgp->rspPutInx and fetch the new port
2452 * response put pointer.
2453 */
2454 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
2455
2456 if (pring->rspidx == portRspPut)
2457 portRspPut = le32_to_cpu(pgp->rspPutInx);
2458 }
2459
2460 ha_copy = readl(phba->HAregaddr);
2461 ha_copy >>= (LPFC_FCP_RING * 4);
2462
2463 if ((rsp_cmpl > 0) && (ha_copy & HA_R0RE_REQ)) {
2464 spin_lock_irqsave(&phba->hbalock, iflags);
2465 pring->stats.iocb_rsp_full++;
2466 status = ((CA_R0ATT | CA_R0RE_RSP) << (LPFC_FCP_RING * 4));
2467 writel(status, phba->CAregaddr);
2468 readl(phba->CAregaddr);
2469 spin_unlock_irqrestore(&phba->hbalock, iflags);
2470 }
2471 if ((ha_copy & HA_R0CE_RSP) &&
2472 (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
2473 spin_lock_irqsave(&phba->hbalock, iflags);
2474 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
2475 pring->stats.iocb_cmd_empty++;
2476
2477 /* Force update of the local copy of cmdGetInx */
2478 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
2479 lpfc_sli_resume_iocb(phba, pring);
2480
2481 if ((pring->lpfc_sli_cmd_available))
2482 (pring->lpfc_sli_cmd_available) (phba, pring);
2483
2484 spin_unlock_irqrestore(&phba->hbalock, iflags);
2485 }
2486
2487 return;
2488}
2489 2416
2490/** 2417/**
2491 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring 2418 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
@@ -2502,9 +2429,9 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
2502 * an abort completion. The function will call lpfc_sli_process_unsol_iocb 2429 * an abort completion. The function will call lpfc_sli_process_unsol_iocb
2503 * function if this is an unsolicited iocb. 2430 * function if this is an unsolicited iocb.
2504 * This routine presumes LPFC_FCP_RING handling and doesn't bother 2431 * This routine presumes LPFC_FCP_RING handling and doesn't bother
2505 * to check it explicitly. This function always returns 1. 2432 * to check it explicitly.
2506 **/ 2433 */
2507static int 2434int
2508lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, 2435lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
2509 struct lpfc_sli_ring *pring, uint32_t mask) 2436 struct lpfc_sli_ring *pring, uint32_t mask)
2510{ 2437{
@@ -2534,6 +2461,11 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
2534 spin_unlock_irqrestore(&phba->hbalock, iflag); 2461 spin_unlock_irqrestore(&phba->hbalock, iflag);
2535 return 1; 2462 return 1;
2536 } 2463 }
2464 if (phba->fcp_ring_in_use) {
2465 spin_unlock_irqrestore(&phba->hbalock, iflag);
2466 return 1;
2467 } else
2468 phba->fcp_ring_in_use = 1;
2537 2469
2538 rmb(); 2470 rmb();
2539 while (pring->rspidx != portRspPut) { 2471 while (pring->rspidx != portRspPut) {
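
The fcp_ring_in_use flag added above makes the fast-ring handler non-reentrant: a second caller bails out immediately instead of interleaving with the drain in progress, and the flag is cleared once the drain completes. A userspace analogue, assuming a pthread mutex in place of hbalock:

#include <pthread.h>

static pthread_mutex_t hbalock = PTHREAD_MUTEX_INITIALIZER;
static int ring_in_use;

static int handle_ring(void)
{
    pthread_mutex_lock(&hbalock);
    if (ring_in_use) {
        pthread_mutex_unlock(&hbalock);
        return 1;               /* someone else is already draining */
    }
    ring_in_use = 1;
    /* ... drain response entries, possibly dropping the lock
     * around completion callbacks ... */
    ring_in_use = 0;
    pthread_mutex_unlock(&hbalock);
    return 0;
}
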
@@ -2603,18 +2535,15 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
2603 2535
2604 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, 2536 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
2605 &rspiocbq); 2537 &rspiocbq);
2606 if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) { 2538 if (unlikely(!cmdiocbq))
2607 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 2539 break;
2608 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 2540 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
2609 &rspiocbq); 2541 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
2610 } else { 2542 if (cmdiocbq->iocb_cmpl) {
2611 spin_unlock_irqrestore(&phba->hbalock, 2543 spin_unlock_irqrestore(&phba->hbalock, iflag);
2612 iflag); 2544 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
2613 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 2545 &rspiocbq);
2614 &rspiocbq); 2546 spin_lock_irqsave(&phba->hbalock, iflag);
2615 spin_lock_irqsave(&phba->hbalock,
2616 iflag);
2617 }
2618 } 2547 }
2619 break; 2548 break;
2620 case LPFC_UNSOL_IOCB: 2549 case LPFC_UNSOL_IOCB:
@@ -2675,6 +2604,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
2675 2604
2676 } 2605 }
2677 2606
2607 phba->fcp_ring_in_use = 0;
2678 spin_unlock_irqrestore(&phba->hbalock, iflag); 2608 spin_unlock_irqrestore(&phba->hbalock, iflag);
2679 return rc; 2609 return rc;
2680} 2610}
@@ -3018,16 +2948,39 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3018 struct lpfc_sli_ring *pring, uint32_t mask) 2948 struct lpfc_sli_ring *pring, uint32_t mask)
3019{ 2949{
3020 struct lpfc_iocbq *irspiocbq; 2950 struct lpfc_iocbq *irspiocbq;
2951 struct hbq_dmabuf *dmabuf;
2952 struct lpfc_cq_event *cq_event;
3021 unsigned long iflag; 2953 unsigned long iflag;
3022 2954
3023 while (!list_empty(&phba->sli4_hba.sp_rspiocb_work_queue)) { 2955 spin_lock_irqsave(&phba->hbalock, iflag);
2956 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
2957 spin_unlock_irqrestore(&phba->hbalock, iflag);
2958 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
3024 /* Get the response iocb from the head of work queue */ 2959 /* Get the response iocb from the head of work queue */
3025 spin_lock_irqsave(&phba->hbalock, iflag); 2960 spin_lock_irqsave(&phba->hbalock, iflag);
3026 list_remove_head(&phba->sli4_hba.sp_rspiocb_work_queue, 2961 list_remove_head(&phba->sli4_hba.sp_queue_event,
3027 irspiocbq, struct lpfc_iocbq, list); 2962 cq_event, struct lpfc_cq_event, list);
3028 spin_unlock_irqrestore(&phba->hbalock, iflag); 2963 spin_unlock_irqrestore(&phba->hbalock, iflag);
3029 /* Process the response iocb */ 2964
3030 lpfc_sli_sp_handle_rspiocb(phba, pring, irspiocbq); 2965 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
2966 case CQE_CODE_COMPL_WQE:
2967 irspiocbq = container_of(cq_event, struct lpfc_iocbq,
2968 cq_event);
2969 /* Translate ELS WCQE to response IOCBQ */
2970 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
2971 irspiocbq);
2972 if (irspiocbq)
2973 lpfc_sli_sp_handle_rspiocb(phba, pring,
2974 irspiocbq);
2975 break;
2976 case CQE_CODE_RECEIVE:
2977 dmabuf = container_of(cq_event, struct hbq_dmabuf,
2978 cq_event);
2979 lpfc_sli4_handle_received_buffer(phba, dmabuf);
2980 break;
2981 default:
2982 break;
2983 }
3031 } 2984 }
3032} 2985}
3033 2986
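
The reworked slow-path loop pops generic cq_event nodes from sp_queue_event and recovers the enclosing object with container_of() according to the completion code. A minimal sketch of that dispatch, with stand-in structures:

#include <stddef.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct cq_event { int code; };
struct iocbq  { int tag;  struct cq_event cq_event; };
struct dmabuf { void *va; struct cq_event cq_event; };

static void dispatch(struct cq_event *ev)
{
    struct iocbq *iocb;
    struct dmabuf *buf;

    switch (ev->code) {
    case 1: /* stands in for CQE_CODE_COMPL_WQE */
        iocb = container_of(ev, struct iocbq, cq_event);
        (void)iocb;  /* hand to the response-iocb path */
        break;
    case 2: /* stands in for CQE_CODE_RECEIVE */
        buf = container_of(ev, struct dmabuf, cq_event);
        (void)buf;   /* hand to the received-buffer path */
        break;
    default:
        break;
    }
}
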
@@ -3160,6 +3113,12 @@ lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
3160 3113
3161 /* Check to see if any errors occurred during init */ 3114 /* Check to see if any errors occurred during init */
3162 if ((status & HS_FFERM) || (i >= 20)) { 3115 if ((status & HS_FFERM) || (i >= 20)) {
3116 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3117 "2751 Adapter failed to restart, "
3118 "status reg x%x, FW Data: A8 x%x AC x%x\n",
3119 status,
3120 readl(phba->MBslimaddr + 0xa8),
3121 readl(phba->MBslimaddr + 0xac));
3163 phba->link_state = LPFC_HBA_ERROR; 3122 phba->link_state = LPFC_HBA_ERROR;
3164 retval = 1; 3123 retval = 1;
3165 } 3124 }
@@ -3347,6 +3306,9 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
3347 if (retval != MBX_SUCCESS) { 3306 if (retval != MBX_SUCCESS) {
3348 if (retval != MBX_BUSY) 3307 if (retval != MBX_BUSY)
3349 mempool_free(pmb, phba->mbox_mem_pool); 3308 mempool_free(pmb, phba->mbox_mem_pool);
3309 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3310 "2752 KILL_BOARD command failed retval %d\n",
3311 retval);
3350 spin_lock_irq(&phba->hbalock); 3312 spin_lock_irq(&phba->hbalock);
3351 phba->link_flag &= ~LS_IGNORE_ERATT; 3313 phba->link_flag &= ~LS_IGNORE_ERATT;
3352 spin_unlock_irq(&phba->hbalock); 3314 spin_unlock_irq(&phba->hbalock);
@@ -3416,6 +3378,7 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
3416 3378
3417 /* perform board reset */ 3379 /* perform board reset */
3418 phba->fc_eventTag = 0; 3380 phba->fc_eventTag = 0;
3381 phba->link_events = 0;
3419 phba->pport->fc_myDID = 0; 3382 phba->pport->fc_myDID = 0;
3420 phba->pport->fc_prevDID = 0; 3383 phba->pport->fc_prevDID = 0;
3421 3384
@@ -3476,6 +3439,7 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
3476 3439
3477 /* perform board reset */ 3440 /* perform board reset */
3478 phba->fc_eventTag = 0; 3441 phba->fc_eventTag = 0;
3442 phba->link_events = 0;
3479 phba->pport->fc_myDID = 0; 3443 phba->pport->fc_myDID = 0;
3480 phba->pport->fc_prevDID = 0; 3444 phba->pport->fc_prevDID = 0;
3481 3445
@@ -3495,7 +3459,6 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
3495 list_del_init(&phba->sli4_hba.dat_rq->list); 3459 list_del_init(&phba->sli4_hba.dat_rq->list);
3496 list_del_init(&phba->sli4_hba.mbx_cq->list); 3460 list_del_init(&phba->sli4_hba.mbx_cq->list);
3497 list_del_init(&phba->sli4_hba.els_cq->list); 3461 list_del_init(&phba->sli4_hba.els_cq->list);
3498 list_del_init(&phba->sli4_hba.rxq_cq->list);
3499 for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++) 3462 for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++)
3500 list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list); 3463 list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list);
3501 for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++) 3464 for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++)
@@ -3531,9 +3494,13 @@ lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
3531 struct lpfc_sli *psli; 3494 struct lpfc_sli *psli;
3532 volatile uint32_t word0; 3495 volatile uint32_t word0;
3533 void __iomem *to_slim; 3496 void __iomem *to_slim;
3497 uint32_t hba_aer_enabled;
3534 3498
3535 spin_lock_irq(&phba->hbalock); 3499 spin_lock_irq(&phba->hbalock);
3536 3500
3501 /* Take PCIe device Advanced Error Reporting (AER) state */
3502 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
3503
3537 psli = &phba->sli; 3504 psli = &phba->sli;
3538 3505
3539 /* Restart HBA */ 3506 /* Restart HBA */
@@ -3573,6 +3540,10 @@ lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
3573 /* Give the INITFF and Post time to settle. */ 3540 /* Give the INITFF and Post time to settle. */
3574 mdelay(100); 3541 mdelay(100);
3575 3542
3543 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
3544 if (hba_aer_enabled)
3545 pci_disable_pcie_error_reporting(phba->pcidev);
3546
3576 lpfc_hba_down_post(phba); 3547 lpfc_hba_down_post(phba);
3577 3548
3578 return 0; 3549 return 0;
@@ -4042,6 +4013,24 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
4042 if (rc) 4013 if (rc)
4043 goto lpfc_sli_hba_setup_error; 4014 goto lpfc_sli_hba_setup_error;
4044 4015
4016 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
4017 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
4018 rc = pci_enable_pcie_error_reporting(phba->pcidev);
4019 if (!rc) {
4020 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4021 "2709 This device supports "
4022 "Advanced Error Reporting (AER)\n");
4023 spin_lock_irq(&phba->hbalock);
4024 phba->hba_flag |= HBA_AER_ENABLED;
4025 spin_unlock_irq(&phba->hbalock);
4026 } else {
4027 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4028 "2708 This device does not support "
4029 "Advanced Error Reporting (AER)\n");
4030 phba->cfg_aer_support = 0;
4031 }
4032 }
4033
4045 if (phba->sli_rev == 3) { 4034 if (phba->sli_rev == 3) {
4046 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE; 4035 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
4047 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE; 4036 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
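
The AER block probes an optional PCIe capability and, on failure, clears cfg_aer_support so later code never assumes the feature is active. A hedged sketch of probe-and-fallback, where enable_aer() merely stands in for pci_enable_pcie_error_reporting() and the flag value is illustrative:

static int enable_aer(void) { return -1; /* e.g. not supported */ }

static void setup_aer(int *cfg_aer_support, unsigned int *hba_flag)
{
    const unsigned int HBA_AER_ENABLED_BIT = 0x1; /* illustrative value */

    if (*cfg_aer_support == 1 && !(*hba_flag & HBA_AER_ENABLED_BIT)) {
        if (enable_aer() == 0)
            *hba_flag |= HBA_AER_ENABLED_BIT;   /* capability is live */
        else
            *cfg_aer_support = 0;               /* never assume AER later */
    }
}
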
@@ -4077,7 +4066,7 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
4077 4066
4078lpfc_sli_hba_setup_error: 4067lpfc_sli_hba_setup_error:
4079 phba->link_state = LPFC_HBA_ERROR; 4068 phba->link_state = LPFC_HBA_ERROR;
4080 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4069 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4081 "0445 Firmware initialization failed\n"); 4070 "0445 Firmware initialization failed\n");
4082 return rc; 4071 return rc;
4083} 4072}
@@ -4163,7 +4152,7 @@ lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba,
4163 * addition, this routine gets the port vpd data. 4152 * addition, this routine gets the port vpd data.
4164 * 4153 *
4165 * Return codes 4154 * Return codes
4166 * 0 - sucessful 4155 * 0 - successful
4167 * ENOMEM - could not allocated memory. 4156 * ENOMEM - could not allocated memory.
4168 **/ 4157 **/
4169static int 4158static int
@@ -4211,6 +4200,7 @@ lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
4211 if (rc) { 4200 if (rc) {
4212 dma_free_coherent(&phba->pcidev->dev, dma_size, 4201 dma_free_coherent(&phba->pcidev->dev, dma_size,
4213 dmabuf->virt, dmabuf->phys); 4202 dmabuf->virt, dmabuf->phys);
4203 kfree(dmabuf);
4214 return -EIO; 4204 return -EIO;
4215 } 4205 }
4216 4206
@@ -4243,7 +4233,6 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
4243 4233
4244 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM); 4234 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
4245 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM); 4235 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
4246 lpfc_sli4_cq_release(phba->sli4_hba.rxq_cq, LPFC_QUEUE_REARM);
4247 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) 4236 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
4248 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx], 4237 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
4249 LPFC_QUEUE_REARM); 4238 LPFC_QUEUE_REARM);
@@ -4322,6 +4311,13 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4322 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev); 4311 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
4323 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) 4312 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
4324 phba->hba_flag |= HBA_FCOE_SUPPORT; 4313 phba->hba_flag |= HBA_FCOE_SUPPORT;
4314
4315 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
4316 LPFC_DCBX_CEE_MODE)
4317 phba->hba_flag |= HBA_FIP_SUPPORT;
4318 else
4319 phba->hba_flag &= ~HBA_FIP_SUPPORT;
4320
4325 if (phba->sli_rev != LPFC_SLI_REV4 || 4321 if (phba->sli_rev != LPFC_SLI_REV4 ||
4326 !(phba->hba_flag & HBA_FCOE_SUPPORT)) { 4322 !(phba->hba_flag & HBA_FCOE_SUPPORT)) {
4327 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4323 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
@@ -4423,7 +4419,13 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4423 spin_unlock_irq(&phba->hbalock); 4419 spin_unlock_irq(&phba->hbalock);
4424 4420
4425 /* Read the port's service parameters. */ 4421 /* Read the port's service parameters. */
4426 lpfc_read_sparam(phba, mboxq, vport->vpi); 4422 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
4423 if (rc) {
4424 phba->link_state = LPFC_HBA_ERROR;
4425 rc = -ENOMEM;
4426 goto out_free_vpd;
4427 }
4428
4427 mboxq->vport = vport; 4429 mboxq->vport = vport;
4428 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4430 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4429 mp = (struct lpfc_dmabuf *) mboxq->context1; 4431 mp = (struct lpfc_dmabuf *) mboxq->context1;
@@ -4468,7 +4470,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4468 rc = lpfc_sli4_post_sgl_list(phba); 4470 rc = lpfc_sli4_post_sgl_list(phba);
4469 if (unlikely(rc)) { 4471 if (unlikely(rc)) {
4470 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4472 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4471 "0582 Error %d during sgl post operation", rc); 4473 "0582 Error %d during sgl post operation\n",
4474 rc);
4472 rc = -ENODEV; 4475 rc = -ENODEV;
4473 goto out_free_vpd; 4476 goto out_free_vpd;
4474 } 4477 }
@@ -4477,8 +4480,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4477 rc = lpfc_sli4_repost_scsi_sgl_list(phba); 4480 rc = lpfc_sli4_repost_scsi_sgl_list(phba);
4478 if (unlikely(rc)) { 4481 if (unlikely(rc)) {
4479 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 4482 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
4480 "0383 Error %d during scsi sgl post opeation", 4483 "0383 Error %d during scsi sgl post "
4481 rc); 4484 "operation\n", rc);
4482 /* Some Scsi buffers were moved to the abort scsi list */ 4485 /* Some Scsi buffers were moved to the abort scsi list */
4483 /* A pci function reset will repost them */ 4486 /* A pci function reset will repost them */
4484 rc = -ENODEV; 4487 rc = -ENODEV;
@@ -4494,10 +4497,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4494 rc = -ENODEV; 4497 rc = -ENODEV;
4495 goto out_free_vpd; 4498 goto out_free_vpd;
4496 } 4499 }
4497 if (phba->cfg_enable_fip)
4498 bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 1);
4499 else
4500 bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 0);
4501 4500
4502 /* Set up all the queues to the device */ 4501 /* Set up all the queues to the device */
4503 rc = lpfc_sli4_queue_setup(phba); 4502 rc = lpfc_sli4_queue_setup(phba);
@@ -4521,6 +4520,10 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4521 /* Post receive buffers to the device */ 4520 /* Post receive buffers to the device */
4522 lpfc_sli4_rb_setup(phba); 4521 lpfc_sli4_rb_setup(phba);
4523 4522
4523 /* Reset HBA FCF states after HBA reset */
4524 phba->fcf.fcf_flag = 0;
4525 phba->fcf.current_rec.flag = 0;
4526
4524 /* Start the ELS watchdog timer */ 4527 /* Start the ELS watchdog timer */
4525 mod_timer(&vport->els_tmofunc, 4528 mod_timer(&vport->els_tmofunc,
4526 jiffies + HZ * (phba->fc_ratov * 2)); 4529 jiffies + HZ * (phba->fc_ratov * 2));
@@ -5669,7 +5672,7 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
5669 case CMD_GEN_REQUEST64_CX: 5672 case CMD_GEN_REQUEST64_CX:
5670 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) || 5673 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
5671 (piocb->iocb.un.genreq64.w5.hcsw.Rctl != 5674 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
5672 FC_FCP_CMND) || 5675 FC_RCTL_DD_UNSOL_CMD) ||
5673 (piocb->iocb.un.genreq64.w5.hcsw.Type != 5676 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
5674 MENLO_TRANSPORT_TYPE)) 5677 MENLO_TRANSPORT_TYPE))
5675 5678
@@ -5777,19 +5780,19 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
5777 5780
5778 for (i = 0; i < numBdes; i++) { 5781 for (i = 0; i < numBdes; i++) {
5779 /* Should already be byte swapped. */ 5782 /* Should already be byte swapped. */
5780 sgl->addr_hi = bpl->addrHigh; 5783 sgl->addr_hi = bpl->addrHigh;
5781 sgl->addr_lo = bpl->addrLow; 5784 sgl->addr_lo = bpl->addrLow;
5782 /* swap the size field back to the cpu so we 5785
5783 * can assign it to the sgl.
5784 */
5785 bde.tus.w = le32_to_cpu(bpl->tus.w);
5786 bf_set(lpfc_sli4_sge_len, sgl, bde.tus.f.bdeSize);
5787 if ((i+1) == numBdes) 5786 if ((i+1) == numBdes)
5788 bf_set(lpfc_sli4_sge_last, sgl, 1); 5787 bf_set(lpfc_sli4_sge_last, sgl, 1);
5789 else 5788 else
5790 bf_set(lpfc_sli4_sge_last, sgl, 0); 5789 bf_set(lpfc_sli4_sge_last, sgl, 0);
5791 sgl->word2 = cpu_to_le32(sgl->word2); 5790 sgl->word2 = cpu_to_le32(sgl->word2);
5792 sgl->word3 = cpu_to_le32(sgl->word3); 5791 /* swap the size field back to the cpu so we
5792 * can assign it to the sgl.
5793 */
5794 bde.tus.w = le32_to_cpu(bpl->tus.w);
5795 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
5793 bpl++; 5796 bpl++;
5794 sgl++; 5797 sgl++;
5795 } 5798 }
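
The reordered SGE construction keeps a strict endian discipline: word2 is byte-swapped as a whole, while the length is taken from the little-endian BDE, converted to CPU order, then stored back in device order. A small sketch, assuming a little-endian host and stand-in helpers for le32_to_cpu()/cpu_to_le32():

#include <stdint.h>

static inline uint32_t le32_to_cpu_x(uint32_t v) { return v; /* LE host */ }
static inline uint32_t cpu_to_le32_x(uint32_t v) { return v; }

static void set_sge_len(uint32_t *sge_len_le, uint32_t tus_le)
{
    /* bdeSize occupies the low 24 bits of the BDE tus word */
    uint32_t size = le32_to_cpu_x(tus_le) & 0x00ffffff;

    *sge_len_le = cpu_to_le32_x(size);  /* back to device byte order */
}
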
@@ -5802,11 +5805,10 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
5802 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh); 5805 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
5803 sgl->addr_lo = 5806 sgl->addr_lo =
5804 cpu_to_le32(icmd->un.genreq64.bdl.addrLow); 5807 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
5805 bf_set(lpfc_sli4_sge_len, sgl,
5806 icmd->un.genreq64.bdl.bdeSize);
5807 bf_set(lpfc_sli4_sge_last, sgl, 1); 5808 bf_set(lpfc_sli4_sge_last, sgl, 1);
5808 sgl->word2 = cpu_to_le32(sgl->word2); 5809 sgl->word2 = cpu_to_le32(sgl->word2);
5809 sgl->word3 = cpu_to_le32(sgl->word3); 5810 sgl->sge_len =
5811 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
5810 } 5812 }
5811 return sglq->sli4_xritag; 5813 return sglq->sli4_xritag;
5812} 5814}
@@ -5849,7 +5851,7 @@ static int
5849lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, 5851lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5850 union lpfc_wqe *wqe) 5852 union lpfc_wqe *wqe)
5851{ 5853{
5852 uint32_t payload_len = 0; 5854 uint32_t xmit_len = 0, total_len = 0;
5853 uint8_t ct = 0; 5855 uint8_t ct = 0;
5854 uint32_t fip; 5856 uint32_t fip;
5855 uint32_t abort_tag; 5857 uint32_t abort_tag;
@@ -5857,12 +5859,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5857 uint8_t cmnd; 5859 uint8_t cmnd;
5858 uint16_t xritag; 5860 uint16_t xritag;
5859 struct ulp_bde64 *bpl = NULL; 5861 struct ulp_bde64 *bpl = NULL;
5862 uint32_t els_id = ELS_ID_DEFAULT;
5863 int numBdes, i;
5864 struct ulp_bde64 bde;
5860 5865
5861 fip = bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags); 5866 fip = phba->hba_flag & HBA_FIP_SUPPORT;
5862 /* The fcp commands will set command type */ 5867 /* The fcp commands will set command type */
5863 if (iocbq->iocb_flag & LPFC_IO_FCP) 5868 if (iocbq->iocb_flag & LPFC_IO_FCP)
5864 command_type = FCP_COMMAND; 5869 command_type = FCP_COMMAND;
5865 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS)) 5870 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
5866 command_type = ELS_COMMAND_FIP; 5871 command_type = ELS_COMMAND_FIP;
5867 else 5872 else
5868 command_type = ELS_COMMAND_NON_FIP; 5873 command_type = ELS_COMMAND_NON_FIP;
@@ -5874,6 +5879,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5874 wqe->words[7] = 0; /* The ct field has moved so reset */ 5879 wqe->words[7] = 0; /* The ct field has moved so reset */
5875 /* words0-2 bpl convert bde */ 5880 /* words0-2 bpl convert bde */
5876 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { 5881 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
5882 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
5883 sizeof(struct ulp_bde64);
5877 bpl = (struct ulp_bde64 *) 5884 bpl = (struct ulp_bde64 *)
5878 ((struct lpfc_dmabuf *)iocbq->context3)->virt; 5885 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
5879 if (!bpl) 5886 if (!bpl)
@@ -5886,9 +5893,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5886 * can assign it to the sgl. 5893 * can assign it to the sgl.
5887 */ 5894 */
5888 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w); 5895 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
5889 payload_len = wqe->generic.bde.tus.f.bdeSize; 5896 xmit_len = wqe->generic.bde.tus.f.bdeSize;
5897 total_len = 0;
5898 for (i = 0; i < numBdes; i++) {
5899 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
5900 total_len += bde.tus.f.bdeSize;
5901 }
5890 } else 5902 } else
5891 payload_len = iocbq->iocb.un.fcpi64.bdl.bdeSize; 5903 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
5892 5904
5893 iocbq->iocb.ulpIoTag = iocbq->iotag; 5905 iocbq->iocb.ulpIoTag = iocbq->iotag;
5894 cmnd = iocbq->iocb.ulpCommand; 5906 cmnd = iocbq->iocb.ulpCommand;
@@ -5902,7 +5914,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5902 iocbq->iocb.ulpCommand); 5914 iocbq->iocb.ulpCommand);
5903 return IOCB_ERROR; 5915 return IOCB_ERROR;
5904 } 5916 }
5905 wqe->els_req.payload_len = payload_len; 5917 wqe->els_req.payload_len = xmit_len;
5906 /* Els_request64 has a TMO */ 5918
5907 bf_set(wqe_tmo, &wqe->els_req.wqe_com, 5919 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
5908 iocbq->iocb.ulpTimeout); 5920 iocbq->iocb.ulpTimeout);
@@ -5923,7 +5935,22 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5923 bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct); 5935 bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct);
5924 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0); 5936 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
5925 /* CCP CCPE PV PRI in word10 were set in the memcpy */ 5937 /* CCP CCPE PV PRI in word10 were set in the memcpy */
5938
5939 if (command_type == ELS_COMMAND_FIP) {
5940 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
5941 >> LPFC_FIP_ELS_ID_SHIFT);
5942 }
5943 bf_set(lpfc_wqe_gen_els_id, &wqe->generic, els_id);
5944
5926 break; 5945 break;
5946 case CMD_XMIT_SEQUENCE64_CX:
5947 bf_set(lpfc_wqe_gen_context, &wqe->generic,
5948 iocbq->iocb.un.ulpWord[3]);
5949 wqe->generic.word3 = 0;
5950 bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext);
5951 /* The entire sequence is transmitted for this IOCB */
5952 xmit_len = total_len;
5953 cmnd = CMD_XMIT_SEQUENCE64_CR;
5927 case CMD_XMIT_SEQUENCE64_CR: 5954 case CMD_XMIT_SEQUENCE64_CR:
5928 /* word3 iocb=io_tag32 wqe=payload_offset */ 5955 /* word3 iocb=io_tag32 wqe=payload_offset */
5929 /* payload offset used for multilpe outstanding 5956 /* payload offset used for multilpe outstanding
@@ -5933,7 +5960,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5933 /* word4 relative_offset memcpy */ 5960 /* word4 relative_offset memcpy */
5934 /* word5 r_ctl/df_ctl memcpy */ 5961 /* word5 r_ctl/df_ctl memcpy */
5935 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0); 5962 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
5936 wqe->xmit_sequence.xmit_len = payload_len; 5963 wqe->xmit_sequence.xmit_len = xmit_len;
5964 command_type = OTHER_COMMAND;
5937 break; 5965 break;
5938 case CMD_XMIT_BCAST64_CN: 5966 case CMD_XMIT_BCAST64_CN:
5939 /* word3 iocb=iotag32 wqe=payload_len */ 5967 /* word3 iocb=iotag32 wqe=payload_len */
@@ -5962,7 +5990,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5962 case CMD_FCP_IREAD64_CR: 5990 case CMD_FCP_IREAD64_CR:
5963 /* FCP_CMD is always the 1st sgl entry */ 5991 /* FCP_CMD is always the 1st sgl entry */
5964 wqe->fcp_iread.payload_len = 5992 wqe->fcp_iread.payload_len =
5965 payload_len + sizeof(struct fcp_rsp); 5993 xmit_len + sizeof(struct fcp_rsp);
5966 5994
5967 /* word 4 (xfer length) should have been set on the memcpy */ 5995 /* word 4 (xfer length) should have been set on the memcpy */
5968 5996
@@ -5999,7 +6027,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5999 * sgl[1] = rsp. 6027 * sgl[1] = rsp.
6000 * 6028 *
6001 */ 6029 */
6002 wqe->gen_req.command_len = payload_len; 6030 wqe->gen_req.command_len = xmit_len;
6003 /* Word4 parameter copied in the memcpy */ 6031 /* Word4 parameter copied in the memcpy */
6004 /* Word5 [rctl, type, df_ctl, la] copied in memcpy */ 6032 /* Word5 [rctl, type, df_ctl, la] copied in memcpy */
6005 /* word6 context tag copied in memcpy */ 6033 /* word6 context tag copied in memcpy */
@@ -6051,12 +6079,10 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
6051 else 6079 else
6052 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0); 6080 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
6053 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG); 6081 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
6054 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
6055 wqe->words[5] = 0; 6082 wqe->words[5] = 0;
6056 bf_set(lpfc_wqe_gen_ct, &wqe->generic, 6083 bf_set(lpfc_wqe_gen_ct, &wqe->generic,
6057 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 6084 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
6058 abort_tag = iocbq->iocb.un.acxri.abortIoTag; 6085 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
6059 wqe->generic.abort_tag = abort_tag;
6060 /* 6086 /*
6061 * The abort handler will send us CMD_ABORT_XRI_CN or 6087 * The abort handler will send us CMD_ABORT_XRI_CN or
6062 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX 6088 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
@@ -6066,6 +6092,38 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
6066 command_type = OTHER_COMMAND; 6092 command_type = OTHER_COMMAND;
6067 xritag = 0; 6093 xritag = 0;
6068 break; 6094 break;
6095 case CMD_XMIT_BLS_RSP64_CX:
6096 /* As BLS ABTS-ACC WQE is very different from other WQEs,
6097 * we re-construct this WQE here based on information in
6098 * iocbq from scratch.
6099 */
6100 memset(wqe, 0, sizeof(union lpfc_wqe));
6101 /* OX_ID is invariant regardless of who sent the ABTS to the CT exchange */
6102 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
6103 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_acc));
6104 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_acc) ==
6105 LPFC_ABTS_UNSOL_INT) {
6106 /* ABTS sent by initiator to CT exchange, the
6107 * RX_ID field will be filled with the newly
6108 * allocated responder XRI.
6109 */
6110 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
6111 iocbq->sli4_xritag);
6112 } else {
6113 /* ABTS sent by responder to CT exchange, the
6114 * RX_ID field will be filled with the responder
6115 * RX_ID from ABTS.
6116 */
6117 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
6118 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_acc));
6119 }
6120 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
6121 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
6122 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
6123 iocbq->iocb.ulpContext);
6124 /* Overwrite the pre-set command type with OTHER_COMMAND */
6125 command_type = OTHER_COMMAND;
6126 break;
6069 case CMD_XRI_ABORTED_CX: 6127 case CMD_XRI_ABORTED_CX:
6070 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */ 6128 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
6071 /* words0-2 are all 0's no bde */ 6129 /* words0-2 are all 0's no bde */
@@ -6120,11 +6178,10 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
6120 uint16_t xritag; 6178 uint16_t xritag;
6121 union lpfc_wqe wqe; 6179 union lpfc_wqe wqe;
6122 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number]; 6180 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
6123 uint32_t fcp_wqidx;
6124 6181
6125 if (piocb->sli4_xritag == NO_XRI) { 6182 if (piocb->sli4_xritag == NO_XRI) {
6126 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || 6183 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
6127 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) 6184 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
6128 sglq = NULL; 6185 sglq = NULL;
6129 else { 6186 else {
6130 sglq = __lpfc_sli_get_sglq(phba); 6187 sglq = __lpfc_sli_get_sglq(phba);
@@ -6154,9 +6211,18 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
6154 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe)) 6211 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
6155 return IOCB_ERROR; 6212 return IOCB_ERROR;
6156 6213
6157 if (piocb->iocb_flag & LPFC_IO_FCP) { 6214 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
6158 fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba); 6215 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
6159 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[fcp_wqidx], &wqe)) 6216 /*
 6217 * For an FCP command IOCB, get a new WQ index to
 6218 * distribute the WQE across the WQs. An abort IOCB,
 6219 * on the other hand, carries the same WQ index as the
 6220 * original command IOCB.
6221 */
6222 if (piocb->iocb_flag & LPFC_IO_FCP)
6223 piocb->fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba);
6224 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
6225 &wqe))
6160 return IOCB_ERROR; 6226 return IOCB_ERROR;
6161 } else { 6227 } else {
6162 if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe)) 6228 if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
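The comment in the hunk above states the WQ-selection rule: a fresh FCP command is round-robined across the FCP work queues, while an abort must be posted to the same WQ as the command it aborts. A minimal standalone model of that rule — all names here (iocb_model, wq_count, next_wq) are illustrative stand-ins, not driver fields:

#include <stdio.h>

struct iocb_model {
	int is_fcp;        /* models LPFC_IO_FCP */
	int use_fcpwqidx;  /* models LPFC_USE_FCPWQIDX */
	int fcp_wqidx;     /* WQ the WQE is (or was) posted to */
};

static int next_wq;             /* round-robin cursor */
static const int wq_count = 4;  /* assumed number of FCP WQs */

static int pick_wq(struct iocb_model *io, const struct iocb_model *orig)
{
	if (io->is_fcp) {
		/* new command: distribute across the WQs */
		io->fcp_wqidx = next_wq++ % wq_count;
	} else if (io->use_fcpwqidx && orig) {
		/* abort: must follow the command it aborts */
		io->fcp_wqidx = orig->fcp_wqidx;
	}
	return io->fcp_wqidx;
}

int main(void)
{
	struct iocb_model cmd = { .is_fcp = 1 };
	struct iocb_model abts = { .use_fcpwqidx = 1 };

	pick_wq(&cmd, NULL);
	pick_wq(&abts, &cmd);
	printf("cmd WQ=%d abort WQ=%d\n", cmd.fcp_wqidx, abts.fcp_wqidx);
	return 0;
}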
@@ -6449,31 +6515,37 @@ lpfc_sli_setup(struct lpfc_hba *phba)
6449 pring->iotag_max = 4096; 6515 pring->iotag_max = 4096;
6450 pring->lpfc_sli_rcv_async_status = 6516 pring->lpfc_sli_rcv_async_status =
6451 lpfc_sli_async_event_handler; 6517 lpfc_sli_async_event_handler;
6452 pring->num_mask = 4; 6518 pring->num_mask = LPFC_MAX_RING_MASK;
6453 pring->prt[0].profile = 0; /* Mask 0 */ 6519 pring->prt[0].profile = 0; /* Mask 0 */
6454 pring->prt[0].rctl = FC_ELS_REQ; 6520 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
6455 pring->prt[0].type = FC_ELS_DATA; 6521 pring->prt[0].type = FC_TYPE_ELS;
6456 pring->prt[0].lpfc_sli_rcv_unsol_event = 6522 pring->prt[0].lpfc_sli_rcv_unsol_event =
6457 lpfc_els_unsol_event; 6523 lpfc_els_unsol_event;
6458 pring->prt[1].profile = 0; /* Mask 1 */ 6524 pring->prt[1].profile = 0; /* Mask 1 */
6459 pring->prt[1].rctl = FC_ELS_RSP; 6525 pring->prt[1].rctl = FC_RCTL_ELS_REP;
6460 pring->prt[1].type = FC_ELS_DATA; 6526 pring->prt[1].type = FC_TYPE_ELS;
6461 pring->prt[1].lpfc_sli_rcv_unsol_event = 6527 pring->prt[1].lpfc_sli_rcv_unsol_event =
6462 lpfc_els_unsol_event; 6528 lpfc_els_unsol_event;
6463 pring->prt[2].profile = 0; /* Mask 2 */ 6529 pring->prt[2].profile = 0; /* Mask 2 */
6464 /* NameServer Inquiry */ 6530 /* NameServer Inquiry */
6465 pring->prt[2].rctl = FC_UNSOL_CTL; 6531 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
6466 /* NameServer */ 6532 /* NameServer */
6467 pring->prt[2].type = FC_COMMON_TRANSPORT_ULP; 6533 pring->prt[2].type = FC_TYPE_CT;
6468 pring->prt[2].lpfc_sli_rcv_unsol_event = 6534 pring->prt[2].lpfc_sli_rcv_unsol_event =
6469 lpfc_ct_unsol_event; 6535 lpfc_ct_unsol_event;
6470 pring->prt[3].profile = 0; /* Mask 3 */ 6536 pring->prt[3].profile = 0; /* Mask 3 */
6471 /* NameServer response */ 6537 /* NameServer response */
6472 pring->prt[3].rctl = FC_SOL_CTL; 6538 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
6473 /* NameServer */ 6539 /* NameServer */
6474 pring->prt[3].type = FC_COMMON_TRANSPORT_ULP; 6540 pring->prt[3].type = FC_TYPE_CT;
6475 pring->prt[3].lpfc_sli_rcv_unsol_event = 6541 pring->prt[3].lpfc_sli_rcv_unsol_event =
6476 lpfc_ct_unsol_event; 6542 lpfc_ct_unsol_event;
6543 /* abort unsolicited sequence */
6544 pring->prt[4].profile = 0; /* Mask 4 */
6545 pring->prt[4].rctl = FC_RCTL_BA_ABTS;
6546 pring->prt[4].type = FC_TYPE_BLS;
6547 pring->prt[4].lpfc_sli_rcv_unsol_event =
6548 lpfc_sli4_ct_abort_unsol_event;
6477 break; 6549 break;
6478 } 6550 }
6479 totiocbsize += (pring->numCiocb * pring->sizeCiocb) + 6551 totiocbsize += (pring->numCiocb * pring->sizeCiocb) +
@@ -6976,8 +7048,18 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
6976 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag; 7048 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
6977 7049
6978 spin_lock_irq(&phba->hbalock); 7050 spin_lock_irq(&phba->hbalock);
6979 if (abort_iotag != 0 && abort_iotag <= phba->sli.last_iotag) 7051 if (phba->sli_rev < LPFC_SLI_REV4) {
6980 abort_iocb = phba->sli.iocbq_lookup[abort_iotag]; 7052 if (abort_iotag != 0 &&
7053 abort_iotag <= phba->sli.last_iotag)
7054 abort_iocb =
7055 phba->sli.iocbq_lookup[abort_iotag];
7056 } else
7057 /* For sli4 the abort_tag is the XRI,
7058 * so the abort routine puts the iotag of the iocb
7059 * being aborted in the context field of the abort
7060 * IOCB.
7061 */
7062 abort_iocb = phba->sli.iocbq_lookup[abort_context];
6981 7063
6982 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI, 7064 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI,
6983 "0327 Cannot abort els iocb %p " 7065 "0327 Cannot abort els iocb %p "
@@ -6991,9 +7073,18 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
6991 * might have completed already. Do not free it again. 7073 * might have completed already. Do not free it again.
6992 */ 7074 */
6993 if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) { 7075 if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
6994 spin_unlock_irq(&phba->hbalock); 7076 if (irsp->un.ulpWord[4] != IOERR_NO_XRI) {
6995 lpfc_sli_release_iocbq(phba, cmdiocb); 7077 spin_unlock_irq(&phba->hbalock);
6996 return; 7078 lpfc_sli_release_iocbq(phba, cmdiocb);
7079 return;
7080 }
7081 /* For SLI4 the ulpContext field for abort IOCB
7082 * holds the iotag of the IOCB being aborted so
7083 * the local abort_context needs to be reset to
7084 * match the aborted IOCBs ulpContext.
7085 */
7086 if (abort_iocb && phba->sli_rev == LPFC_SLI_REV4)
7087 abort_context = abort_iocb->iocb.ulpContext;
6997 } 7088 }
6998 /* 7089 /*
6999 * make sure we have the right iocbq before taking it 7090 * make sure we have the right iocbq before taking it
@@ -7003,7 +7094,14 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
7003 abort_iocb->iocb.ulpContext != abort_context || 7094 abort_iocb->iocb.ulpContext != abort_context ||
7004 (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0) 7095 (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0)
7005 spin_unlock_irq(&phba->hbalock); 7096 spin_unlock_irq(&phba->hbalock);
7006 else { 7097 else if (phba->sli_rev < LPFC_SLI_REV4) {
7098 /*
7099 * leave the SLI4 aborted command on the txcmplq
7100 * list and the command complete WCQE's XB bit
7101 * will tell whether the SGL (XRI) can be released
7102 * immediately or to the aborted SGL list for the
7103 * following abort XRI from the HBA.
7104 */
7007 list_del_init(&abort_iocb->list); 7105 list_del_init(&abort_iocb->list);
7008 pring->txcmplq_cnt--; 7106 pring->txcmplq_cnt--;
7009 spin_unlock_irq(&phba->hbalock); 7107 spin_unlock_irq(&phba->hbalock);
@@ -7012,11 +7110,13 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
7012 * payload, so don't free data buffer till after 7110 * payload, so don't free data buffer till after
7013 * a hbeat. 7111 * a hbeat.
7014 */ 7112 */
7113 spin_lock_irq(&phba->hbalock);
7015 abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE; 7114 abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE;
7016
7017 abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED; 7115 abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
7116 spin_unlock_irq(&phba->hbalock);
7117
7018 abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT; 7118 abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
7019 abort_iocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED; 7119 abort_iocb->iocb.un.ulpWord[4] = IOERR_ABORT_REQUESTED;
7020 (abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb); 7120 (abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb);
7021 } 7121 }
7022 } 7122 }
@@ -7105,20 +7205,27 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
7105 return 0; 7205 return 0;
7106 7206
7107 /* This signals the response to set the correct status 7207 /* This signals the response to set the correct status
7108 * before calling the completion handler. 7208 * before calling the completion handler
7109 */ 7209 */
7110 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED; 7210 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
7111 7211
7112 iabt = &abtsiocbp->iocb; 7212 iabt = &abtsiocbp->iocb;
7113 iabt->un.acxri.abortType = ABORT_TYPE_ABTS; 7213 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
7114 iabt->un.acxri.abortContextTag = icmd->ulpContext; 7214 iabt->un.acxri.abortContextTag = icmd->ulpContext;
7115 if (phba->sli_rev == LPFC_SLI_REV4) 7215 if (phba->sli_rev == LPFC_SLI_REV4) {
7116 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag; 7216 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
7217 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
7218 }
7117 else 7219 else
7118 iabt->un.acxri.abortIoTag = icmd->ulpIoTag; 7220 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
7119 iabt->ulpLe = 1; 7221 iabt->ulpLe = 1;
7120 iabt->ulpClass = icmd->ulpClass; 7222 iabt->ulpClass = icmd->ulpClass;
7121 7223
7224 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
7225 abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx;
7226 if (cmdiocb->iocb_flag & LPFC_IO_FCP)
7227 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
7228
7122 if (phba->link_state >= LPFC_LINK_UP) 7229 if (phba->link_state >= LPFC_LINK_UP)
7123 iabt->ulpCommand = CMD_ABORT_XRI_CN; 7230 iabt->ulpCommand = CMD_ABORT_XRI_CN;
7124 else 7231 else
@@ -7322,6 +7429,11 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
7322 abtsiocb->iocb.ulpClass = cmd->ulpClass; 7429 abtsiocb->iocb.ulpClass = cmd->ulpClass;
7323 abtsiocb->vport = phba->pport; 7430 abtsiocb->vport = phba->pport;
7324 7431
7432 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
7433 abtsiocb->fcp_wqidx = iocbq->fcp_wqidx;
7434 if (iocbq->iocb_flag & LPFC_IO_FCP)
7435 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
7436
7325 if (lpfc_is_link_up(phba)) 7437 if (lpfc_is_link_up(phba))
7326 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN; 7438 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
7327 else 7439 else
@@ -7365,6 +7477,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
7365{ 7477{
7366 wait_queue_head_t *pdone_q; 7478 wait_queue_head_t *pdone_q;
7367 unsigned long iflags; 7479 unsigned long iflags;
7480 struct lpfc_scsi_buf *lpfc_cmd;
7368 7481
7369 spin_lock_irqsave(&phba->hbalock, iflags); 7482 spin_lock_irqsave(&phba->hbalock, iflags);
7370 cmdiocbq->iocb_flag |= LPFC_IO_WAKE; 7483 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
@@ -7372,6 +7485,14 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
7372 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, 7485 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
7373 &rspiocbq->iocb, sizeof(IOCB_t)); 7486 &rspiocbq->iocb, sizeof(IOCB_t));
7374 7487
7488 /* Set the exchange busy flag for task management commands */
7489 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
7490 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
7491 lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
7492 cur_iocbq);
7493 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
7494 }
7495
7375 pdone_q = cmdiocbq->context_un.wait_queue; 7496 pdone_q = cmdiocbq->context_un.wait_queue;
7376 if (pdone_q) 7497 if (pdone_q)
7377 wake_up(pdone_q); 7498 wake_up(pdone_q);
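The task-management path added above recovers the lpfc_scsi_buf that embeds the completing iocbq via container_of() and latches the exchange-busy state from the response flags. A self-contained sketch of that pointer arithmetic, using stand-in types rather than the driver's:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct iocbq { int iocb_flag; };
struct scsi_buf { int exch_busy; struct iocbq cur_iocbq; };

int main(void)
{
	struct scsi_buf buf = { .exch_busy = 0 };
	struct iocbq *piocbq = &buf.cur_iocbq;  /* what the handler sees */

	struct scsi_buf *owner = container_of(piocbq, struct scsi_buf,
					      cur_iocbq);
	owner->exch_busy = 1;       /* models latching LPFC_EXCHANGE_BUSY */
	printf("%d\n", buf.exch_busy);  /* prints 1 */
	return 0;
}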
@@ -7687,31 +7808,28 @@ static int
7687lpfc_sli4_eratt_read(struct lpfc_hba *phba) 7808lpfc_sli4_eratt_read(struct lpfc_hba *phba)
7688{ 7809{
7689 uint32_t uerr_sta_hi, uerr_sta_lo; 7810 uint32_t uerr_sta_hi, uerr_sta_lo;
7690 uint32_t onlnreg0, onlnreg1;
7691 7811
7692 /* For now, use the SLI4 device internal unrecoverable error 7812 /* For now, use the SLI4 device internal unrecoverable error
7693 * registers for error attention. This can be changed later. 7813 * registers for error attention. This can be changed later.
7694 */ 7814 */
7695 onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr); 7815 uerr_sta_lo = readl(phba->sli4_hba.UERRLOregaddr);
7696 onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr); 7816 uerr_sta_hi = readl(phba->sli4_hba.UERRHIregaddr);
7697 if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) { 7817 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
7698 uerr_sta_lo = readl(phba->sli4_hba.UERRLOregaddr); 7818 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
7699 uerr_sta_hi = readl(phba->sli4_hba.UERRHIregaddr); 7819 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7700 if (uerr_sta_lo || uerr_sta_hi) { 7820 "1423 HBA Unrecoverable error: "
7701 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7821 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
7702 "1423 HBA Unrecoverable error: " 7822 "ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n",
7703 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " 7823 uerr_sta_lo, uerr_sta_hi,
7704 "online0_reg=0x%x, online1_reg=0x%x\n", 7824 phba->sli4_hba.ue_mask_lo,
7705 uerr_sta_lo, uerr_sta_hi, 7825 phba->sli4_hba.ue_mask_hi);
7706 onlnreg0, onlnreg1); 7826 phba->work_status[0] = uerr_sta_lo;
7707 phba->work_status[0] = uerr_sta_lo; 7827 phba->work_status[1] = uerr_sta_hi;
7708 phba->work_status[1] = uerr_sta_hi; 7828 /* Set the driver HA work bitmap */
7709 /* Set the driver HA work bitmap */ 7829 phba->work_ha |= HA_ERATT;
7710 phba->work_ha |= HA_ERATT; 7830 /* Indicate polling handles this ERATT */
7711 /* Indicate polling handles this ERATT */ 7831 phba->hba_flag |= HBA_ERATT_HANDLED;
7712 phba->hba_flag |= HBA_ERATT_HANDLED; 7832 return 1;
7713 return 1;
7714 }
7715 } 7833 }
7716 return 0; 7834 return 0;
7717} 7835}
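The rewritten lpfc_sli4_eratt_read() no longer gates on the ONLINE registers; it reports an error whenever a status bit is set whose corresponding mask bit is clear. A compilable model of just that predicate (the register values below are made up):

#include <stdint.h>
#include <stdio.h>

static int ue_error(uint32_t sta_lo, uint32_t sta_hi,
		    uint32_t mask_lo, uint32_t mask_hi)
{
	/* a set status bit only counts if its mask bit is clear */
	return (~mask_lo & sta_lo) || (~mask_hi & sta_hi);
}

int main(void)
{
	/* bit 3 set but masked: no error; bit 5 set and unmasked: error */
	printf("%d\n", ue_error(0x08, 0, 0x08, 0));  /* prints 0 */
	printf("%d\n", ue_error(0x20, 0, 0x08, 0));  /* prints 1 */
	return 0;
}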
@@ -7834,7 +7952,7 @@ irqreturn_t
7834lpfc_sli_sp_intr_handler(int irq, void *dev_id) 7952lpfc_sli_sp_intr_handler(int irq, void *dev_id)
7835{ 7953{
7836 struct lpfc_hba *phba; 7954 struct lpfc_hba *phba;
7837 uint32_t ha_copy; 7955 uint32_t ha_copy, hc_copy;
7838 uint32_t work_ha_copy; 7956 uint32_t work_ha_copy;
7839 unsigned long status; 7957 unsigned long status;
7840 unsigned long iflag; 7958 unsigned long iflag;
@@ -7892,8 +8010,13 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
7892 } 8010 }
7893 8011
7894 /* Clear up only attention source related to slow-path */ 8012 /* Clear up only attention source related to slow-path */
8013 hc_copy = readl(phba->HCregaddr);
8014 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
8015 HC_LAINT_ENA | HC_ERINT_ENA),
8016 phba->HCregaddr);
7895 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)), 8017 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
7896 phba->HAregaddr); 8018 phba->HAregaddr);
8019 writel(hc_copy, phba->HCregaddr);
7897 readl(phba->HAregaddr); /* flush */ 8020 readl(phba->HAregaddr); /* flush */
7898 spin_unlock_irqrestore(&phba->hbalock, iflag); 8021 spin_unlock_irqrestore(&phba->hbalock, iflag);
7899 } else 8022 } else
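Both interrupt handlers now bracket the HA-clear with a save/mask/restore of the Host Control register, so an attention source cannot re-fire while its bit is being cleared. A userspace model of the pattern with simulated register storage — the bit names mirror the driver's, but everything else is fake:

#include <stdint.h>
#include <stdio.h>

#define HC_MBINT_ENA 0x1
#define HC_R2INT_ENA 0x2
#define HC_LAINT_ENA 0x4
#define HC_ERINT_ENA 0x8

static uint32_t HC, HA;  /* simulated registers */
static uint32_t readl_sim(uint32_t *r) { return *r; }
static void writel_sim(uint32_t v, uint32_t *r) { *r = v; }

static void clear_slow_path_attention(uint32_t ha_bits)
{
	uint32_t hc_copy = readl_sim(&HC);
	/* disable the sources we are about to clear ... */
	writel_sim(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
			       HC_LAINT_ENA | HC_ERINT_ENA), &HC);
	/* ... clear the attention bits ... */
	writel_sim(HA & ~ha_bits, &HA);
	/* ... then re-enable the saved sources */
	writel_sim(hc_copy, &HC);
}

int main(void)
{
	HC = 0xf; HA = 0x30;
	clear_slow_path_attention(0x10);
	printf("HC=0x%x HA=0x%x\n", HC, HA);  /* HC restored, HA bit cleared */
	return 0;
}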
@@ -8049,7 +8172,7 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
8049 KERN_ERR, 8172 KERN_ERR,
8050 LOG_MBOX | LOG_SLI, 8173 LOG_MBOX | LOG_SLI,
8051 "0350 rc should have" 8174 "0350 rc should have"
8052 "been MBX_BUSY"); 8175 "been MBX_BUSY\n");
8053 if (rc != MBX_NOT_FINISHED) 8176 if (rc != MBX_NOT_FINISHED)
8054 goto send_current_mbox; 8177 goto send_current_mbox;
8055 } 8178 }
@@ -8078,7 +8201,7 @@ send_current_mbox:
8078 if (rc != MBX_SUCCESS) 8201 if (rc != MBX_SUCCESS)
8079 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 8202 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
8080 LOG_SLI, "0349 rc should be " 8203 LOG_SLI, "0349 rc should be "
8081 "MBX_SUCCESS"); 8204 "MBX_SUCCESS\n");
8082 } 8205 }
8083 8206
8084 spin_lock_irqsave(&phba->hbalock, iflag); 8207 spin_lock_irqsave(&phba->hbalock, iflag);
@@ -8203,6 +8326,7 @@ lpfc_sli_intr_handler(int irq, void *dev_id)
8203 struct lpfc_hba *phba; 8326 struct lpfc_hba *phba;
8204 irqreturn_t sp_irq_rc, fp_irq_rc; 8327 irqreturn_t sp_irq_rc, fp_irq_rc;
8205 unsigned long status1, status2; 8328 unsigned long status1, status2;
8329 uint32_t hc_copy;
8206 8330
8207 /* 8331 /*
8208 * Get the driver's phba structure from the dev_id and 8332 * Get the driver's phba structure from the dev_id and
@@ -8240,7 +8364,12 @@ lpfc_sli_intr_handler(int irq, void *dev_id)
8240 } 8364 }
8241 8365
8242 /* Clear attention sources except link and error attentions */ 8366 /* Clear attention sources except link and error attentions */
8367 hc_copy = readl(phba->HCregaddr);
8368 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
8369 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
8370 phba->HCregaddr);
8243 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr); 8371 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
8372 writel(hc_copy, phba->HCregaddr);
8244 readl(phba->HAregaddr); /* flush */ 8373 readl(phba->HAregaddr); /* flush */
8245 spin_unlock(&phba->hbalock); 8374 spin_unlock(&phba->hbalock);
8246 8375
@@ -8342,17 +8471,28 @@ void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
8342 } 8471 }
8343} 8472}
8344 8473
8474/**
8475 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
8476 * @phba: pointer to lpfc hba data structure
8477 * @pIocbIn: pointer to the rspiocbq
8478 * @pIocbOut: pointer to the cmdiocbq
8479 * @wcqe: pointer to the complete wcqe
8480 *
8481 * This routine transfers the fields of a command iocbq to a response iocbq
 8482 * by copying all the IOCB fields from the command iocbq and transferring the
 8483 * completion status information from the completion wcqe.
8484 **/
8345static void 8485static void
8346lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn, 8486lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
8487 struct lpfc_iocbq *pIocbIn,
8347 struct lpfc_iocbq *pIocbOut, 8488 struct lpfc_iocbq *pIocbOut,
8348 struct lpfc_wcqe_complete *wcqe) 8489 struct lpfc_wcqe_complete *wcqe)
8349{ 8490{
8491 unsigned long iflags;
8350 size_t offset = offsetof(struct lpfc_iocbq, iocb); 8492 size_t offset = offsetof(struct lpfc_iocbq, iocb);
8351 8493
8352 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset, 8494 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
8353 sizeof(struct lpfc_iocbq) - offset); 8495 sizeof(struct lpfc_iocbq) - offset);
8354 memset(&pIocbIn->sli4_info, 0,
8355 sizeof(struct lpfc_sli4_rspiocb_info));
8356 /* Map WCQE parameters into irspiocb parameters */ 8496 /* Map WCQE parameters into irspiocb parameters */
8357 pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe); 8497 pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe);
8358 if (pIocbOut->iocb_flag & LPFC_IO_FCP) 8498 if (pIocbOut->iocb_flag & LPFC_IO_FCP)
@@ -8362,18 +8502,60 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn,
8362 wcqe->total_data_placed; 8502 wcqe->total_data_placed;
8363 else 8503 else
8364 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 8504 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
8365 else 8505 else {
8366 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 8506 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
8367 /* Load in additional WCQE parameters */ 8507 pIocbIn->iocb.un.genreq64.bdl.bdeSize = wcqe->total_data_placed;
8368 pIocbIn->sli4_info.hw_status = bf_get(lpfc_wcqe_c_hw_status, wcqe); 8508 }
8369 pIocbIn->sli4_info.bfield = 0; 8509
8370 if (bf_get(lpfc_wcqe_c_xb, wcqe)) 8510 /* Pick up HBA exchange busy condition */
8371 pIocbIn->sli4_info.bfield |= LPFC_XB; 8511 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
8372 if (bf_get(lpfc_wcqe_c_pv, wcqe)) { 8512 spin_lock_irqsave(&phba->hbalock, iflags);
8373 pIocbIn->sli4_info.bfield |= LPFC_PV; 8513 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
8374 pIocbIn->sli4_info.priority = 8514 spin_unlock_irqrestore(&phba->hbalock, iflags);
8375 bf_get(lpfc_wcqe_c_priority, wcqe); 8515 }
8516}
8517
8518/**
8519 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
8520 * @phba: Pointer to HBA context object.
8521 * @wcqe: Pointer to work-queue completion queue entry.
8522 *
 8523 * This routine handles an ELS work-queue completion event and constructs
 8524 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
8525 * discovery engine to handle.
8526 *
8527 * Return: Pointer to the receive IOCBQ, NULL otherwise.
8528 **/
8529static struct lpfc_iocbq *
8530lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
8531 struct lpfc_iocbq *irspiocbq)
8532{
8533 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
8534 struct lpfc_iocbq *cmdiocbq;
8535 struct lpfc_wcqe_complete *wcqe;
8536 unsigned long iflags;
8537
8538 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
8539 spin_lock_irqsave(&phba->hbalock, iflags);
8540 pring->stats.iocb_event++;
8541 /* Look up the ELS command IOCB and create pseudo response IOCB */
8542 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
8543 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8544 spin_unlock_irqrestore(&phba->hbalock, iflags);
8545
8546 if (unlikely(!cmdiocbq)) {
8547 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8548 "0386 ELS complete with no corresponding "
8549 "cmdiocb: iotag (%d)\n",
8550 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8551 lpfc_sli_release_iocbq(phba, irspiocbq);
8552 return NULL;
8376 } 8553 }
8554
8555 /* Fake the irspiocbq and copy necessary response information */
8556 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
8557
8558 return irspiocbq;
8377} 8559}
8378 8560
8379/** 8561/**
@@ -8566,45 +8748,26 @@ static bool
8566lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, 8748lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba,
8567 struct lpfc_wcqe_complete *wcqe) 8749 struct lpfc_wcqe_complete *wcqe)
8568{ 8750{
8569 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
8570 struct lpfc_iocbq *cmdiocbq;
8571 struct lpfc_iocbq *irspiocbq; 8751 struct lpfc_iocbq *irspiocbq;
8572 unsigned long iflags; 8752 unsigned long iflags;
8573 bool workposted = false;
8574 8753
8575 spin_lock_irqsave(&phba->hbalock, iflags); 8754 /* Get an irspiocbq for later ELS response processing use */
8576 pring->stats.iocb_event++;
8577 /* Look up the ELS command IOCB and create pseudo response IOCB */
8578 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
8579 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8580 spin_unlock_irqrestore(&phba->hbalock, iflags);
8581
8582 if (unlikely(!cmdiocbq)) {
8583 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8584 "0386 ELS complete with no corresponding "
8585 "cmdiocb: iotag (%d)\n",
8586 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8587 return workposted;
8588 }
8589
8590 /* Fake the irspiocbq and copy necessary response information */
8591 irspiocbq = lpfc_sli_get_iocbq(phba); 8755 irspiocbq = lpfc_sli_get_iocbq(phba);
8592 if (!irspiocbq) { 8756 if (!irspiocbq) {
8593 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8757 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8594 "0387 Failed to allocate an iocbq\n"); 8758 "0387 Failed to allocate an iocbq\n");
8595 return workposted; 8759 return false;
8596 } 8760 }
8597 lpfc_sli4_iocb_param_transfer(irspiocbq, cmdiocbq, wcqe);
8598 8761
8599 /* Add the irspiocb to the response IOCB work list */ 8762 /* Save off the slow-path queue event for work thread to process */
8763 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
8600 spin_lock_irqsave(&phba->hbalock, iflags); 8764 spin_lock_irqsave(&phba->hbalock, iflags);
8601 list_add_tail(&irspiocbq->list, &phba->sli4_hba.sp_rspiocb_work_queue); 8765 list_add_tail(&irspiocbq->cq_event.list,
8602 /* Indicate ELS ring attention */ 8766 &phba->sli4_hba.sp_queue_event);
8603 phba->work_ha |= (HA_R0ATT << (4*LPFC_ELS_RING)); 8767 phba->hba_flag |= HBA_SP_QUEUE_EVT;
8604 spin_unlock_irqrestore(&phba->hbalock, iflags); 8768 spin_unlock_irqrestore(&phba->hbalock, iflags);
8605 workposted = true;
8606 8769
8607 return workposted; 8770 return true;
8608} 8771}
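The reworked ELS completion path above no longer resolves the command IOCB in interrupt context; it only copies the WCQE into the iocbq's cq_event, links it on sp_queue_event, and sets HBA_SP_QUEUE_EVT for the worker thread to consume later. A minimal single-threaded model of that hand-off — a plain singly linked list stands in for list_head, and locking is elided:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct wcqe { unsigned int words[4]; };
struct cq_event { struct wcqe wcqe_cmpl; struct cq_event *next; };

static struct cq_event *sp_queue_event;  /* models the phba event list */
static int hba_sp_queue_evt;             /* models HBA_SP_QUEUE_EVT */

static int post_els_event(const struct wcqe *wcqe)
{
	struct cq_event *evt = malloc(sizeof(*evt));
	if (!evt)
		return 0;                /* no work posted */
	memcpy(&evt->wcqe_cmpl, wcqe, sizeof(*wcqe));
	evt->next = sp_queue_event;      /* locking elided in this model */
	sp_queue_event = evt;
	hba_sp_queue_evt = 1;
	return 1;                        /* worker has something to drain */
}

int main(void)
{
	struct wcqe w = { { 1, 2, 3, 4 } };
	printf("workposted=%d flag=%d\n", post_els_event(&w),
	       hba_sp_queue_evt);
	free(sp_queue_event);
	return 0;
}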
8609 8772
8610/** 8773/**
@@ -8690,52 +8853,6 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
8690} 8853}
8691 8854
8692/** 8855/**
8693 * lpfc_sli4_sp_handle_wcqe - Process a work-queue completion queue entry
8694 * @phba: Pointer to HBA context object.
8695 * @cq: Pointer to the completion queue.
8696 * @wcqe: Pointer to a completion queue entry.
8697 *
8698 * This routine process a slow-path work-queue completion queue entry.
8699 *
8700 * Return: true if work posted to worker thread, otherwise false.
8701 **/
8702static bool
8703lpfc_sli4_sp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
8704 struct lpfc_cqe *cqe)
8705{
8706 struct lpfc_wcqe_complete wcqe;
8707 bool workposted = false;
8708
8709 /* Copy the work queue CQE and convert endian order if needed */
8710 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
8711
8712 /* Check and process for different type of WCQE and dispatch */
8713 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
8714 case CQE_CODE_COMPL_WQE:
8715 /* Process the WQ complete event */
8716 workposted = lpfc_sli4_sp_handle_els_wcqe(phba,
8717 (struct lpfc_wcqe_complete *)&wcqe);
8718 break;
8719 case CQE_CODE_RELEASE_WQE:
8720 /* Process the WQ release event */
8721 lpfc_sli4_sp_handle_rel_wcqe(phba,
8722 (struct lpfc_wcqe_release *)&wcqe);
8723 break;
8724 case CQE_CODE_XRI_ABORTED:
8725 /* Process the WQ XRI abort event */
8726 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
8727 (struct sli4_wcqe_xri_aborted *)&wcqe);
8728 break;
8729 default:
8730 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8731 "0388 Not a valid WCQE code: x%x\n",
8732 bf_get(lpfc_wcqe_c_code, &wcqe));
8733 break;
8734 }
8735 return workposted;
8736}
8737
8738/**
8739 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry 8856 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
8740 * @phba: Pointer to HBA context object. 8857 * @phba: Pointer to HBA context object.
8741 * @rcqe: Pointer to receive-queue completion queue entry. 8858 * @rcqe: Pointer to receive-queue completion queue entry.
@@ -8745,9 +8862,8 @@ lpfc_sli4_sp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
8745 * Return: true if work posted to worker thread, otherwise false. 8862 * Return: true if work posted to worker thread, otherwise false.
8746 **/ 8863 **/
8747static bool 8864static bool
8748lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe) 8865lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
8749{ 8866{
8750 struct lpfc_rcqe rcqe;
8751 bool workposted = false; 8867 bool workposted = false;
8752 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq; 8868 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
8753 struct lpfc_queue *drq = phba->sli4_hba.dat_rq; 8869 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
@@ -8755,31 +8871,28 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
8755 uint32_t status; 8871 uint32_t status;
8756 unsigned long iflags; 8872 unsigned long iflags;
8757 8873
8758 /* Copy the receive queue CQE and convert endian order if needed */ 8874 if (bf_get(lpfc_rcqe_rq_id, rcqe) != hrq->queue_id)
8759 lpfc_sli_pcimem_bcopy(cqe, &rcqe, sizeof(struct lpfc_rcqe));
8760 lpfc_sli4_rq_release(hrq, drq);
8761 if (bf_get(lpfc_rcqe_code, &rcqe) != CQE_CODE_RECEIVE)
8762 goto out;
8763 if (bf_get(lpfc_rcqe_rq_id, &rcqe) != hrq->queue_id)
8764 goto out; 8875 goto out;
8765 8876
8766 status = bf_get(lpfc_rcqe_status, &rcqe); 8877 status = bf_get(lpfc_rcqe_status, rcqe);
8767 switch (status) { 8878 switch (status) {
8768 case FC_STATUS_RQ_BUF_LEN_EXCEEDED: 8879 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
8769 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8880 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8770 "2537 Receive Frame Truncated!!\n"); 8881 "2537 Receive Frame Truncated!!\n");
8771 case FC_STATUS_RQ_SUCCESS: 8882 case FC_STATUS_RQ_SUCCESS:
8883 lpfc_sli4_rq_release(hrq, drq);
8772 spin_lock_irqsave(&phba->hbalock, iflags); 8884 spin_lock_irqsave(&phba->hbalock, iflags);
8773 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list); 8885 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
8774 if (!dma_buf) { 8886 if (!dma_buf) {
8775 spin_unlock_irqrestore(&phba->hbalock, iflags); 8887 spin_unlock_irqrestore(&phba->hbalock, iflags);
8776 goto out; 8888 goto out;
8777 } 8889 }
8778 memcpy(&dma_buf->rcqe, &rcqe, sizeof(rcqe)); 8890 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
 8779 /* save off the frame for the work thread to process */ 8891 list_add_tail(&dma_buf->cq_event.list,
8780 list_add_tail(&dma_buf->dbuf.list, &phba->rb_pend_list); 8892 list_add_tail(&dma_buf->cq_event.list,
8893 &phba->sli4_hba.sp_queue_event);
8781 /* Frame received */ 8894 /* Frame received */
8782 phba->hba_flag |= HBA_RECEIVE_BUFFER; 8895 phba->hba_flag |= HBA_SP_QUEUE_EVT;
8783 spin_unlock_irqrestore(&phba->hbalock, iflags); 8896 spin_unlock_irqrestore(&phba->hbalock, iflags);
8784 workposted = true; 8897 workposted = true;
8785 break; 8898 break;
@@ -8794,7 +8907,58 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
8794 } 8907 }
8795out: 8908out:
8796 return workposted; 8909 return workposted;
8910}
8911
8912/**
8913 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
8914 * @phba: Pointer to HBA context object.
8915 * @cq: Pointer to the completion queue.
 8916 * @cqe: Pointer to a completion queue entry.
8917 *
 8918 * This routine processes a slow-path work-queue or receive-queue
 8919 * completion queue entry.
8920 *
8921 * Return: true if work posted to worker thread, otherwise false.
8922 **/
8923static bool
8924lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
8925 struct lpfc_cqe *cqe)
8926{
8927 struct lpfc_cqe cqevt;
8928 bool workposted = false;
8929
8930 /* Copy the work queue CQE and convert endian order if needed */
8931 lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
8797 8932
 8933 /* Check and process the different types of CQEs and dispatch */
8934 switch (bf_get(lpfc_cqe_code, &cqevt)) {
8935 case CQE_CODE_COMPL_WQE:
8936 /* Process the WQ/RQ complete event */
8937 workposted = lpfc_sli4_sp_handle_els_wcqe(phba,
8938 (struct lpfc_wcqe_complete *)&cqevt);
8939 break;
8940 case CQE_CODE_RELEASE_WQE:
8941 /* Process the WQ release event */
8942 lpfc_sli4_sp_handle_rel_wcqe(phba,
8943 (struct lpfc_wcqe_release *)&cqevt);
8944 break;
8945 case CQE_CODE_XRI_ABORTED:
8946 /* Process the WQ XRI abort event */
8947 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
8948 (struct sli4_wcqe_xri_aborted *)&cqevt);
8949 break;
8950 case CQE_CODE_RECEIVE:
8951 /* Process the RQ event */
8952 workposted = lpfc_sli4_sp_handle_rcqe(phba,
8953 (struct lpfc_rcqe *)&cqevt);
8954 break;
8955 default:
8956 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8957 "0388 Not a valid WCQE code: x%x\n",
8958 bf_get(lpfc_cqe_code, &cqevt));
8959 break;
8960 }
8961 return workposted;
8798} 8962}
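lpfc_sli4_sp_handle_cqe() folds the former WCQE and RCQE handlers into one dispatcher keyed on the CQE code field: the entry is copied out of the queue first, then routed. An illustrative reduction of that structure — the enum values and struct are placeholders, not the hardware encodings:

#include <stdio.h>
#include <string.h>

enum cqe_code { CQE_COMPL_WQE, CQE_RELEASE_WQE, CQE_XRI_ABORTED, CQE_RECEIVE };

struct cqe_model { enum cqe_code code; unsigned int payload; };

static int handle_sp_cqe(const struct cqe_model *hw_cqe)
{
	struct cqe_model cqevt;
	int workposted = 0;

	memcpy(&cqevt, hw_cqe, sizeof(cqevt));  /* stands in for pcimem_bcopy */

	switch (cqevt.code) {
	case CQE_COMPL_WQE:   workposted = 1; break;  /* ELS completion queued */
	case CQE_RELEASE_WQE: break;                  /* doorbell only, no work */
	case CQE_XRI_ABORTED: workposted = 1; break;  /* abort event queued */
	case CQE_RECEIVE:     workposted = 1; break;  /* received frame queued */
	default:
		fprintf(stderr, "not a valid CQE code: %d\n", cqevt.code);
	}
	return workposted;
}

int main(void)
{
	struct cqe_model rel = { CQE_RELEASE_WQE, 0 }, rcv = { CQE_RECEIVE, 0 };
	printf("%d %d\n", handle_sp_cqe(&rel), handle_sp_cqe(&rcv));  /* 0 1 */
	return 0;
}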
8799 8963
8800/** 8964/**
@@ -8819,8 +8983,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
8819 int ecount = 0; 8983 int ecount = 0;
8820 uint16_t cqid; 8984 uint16_t cqid;
8821 8985
8822 if (bf_get(lpfc_eqe_major_code, eqe) != 0 || 8986 if (bf_get(lpfc_eqe_major_code, eqe) != 0) {
8823 bf_get(lpfc_eqe_minor_code, eqe) != 0) {
8824 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8987 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8825 "0359 Not a valid slow-path completion " 8988 "0359 Not a valid slow-path completion "
8826 "event: majorcode=x%x, minorcode=x%x\n", 8989 "event: majorcode=x%x, minorcode=x%x\n",
@@ -8858,14 +9021,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
8858 break; 9021 break;
8859 case LPFC_WCQ: 9022 case LPFC_WCQ:
8860 while ((cqe = lpfc_sli4_cq_get(cq))) { 9023 while ((cqe = lpfc_sli4_cq_get(cq))) {
8861 workposted |= lpfc_sli4_sp_handle_wcqe(phba, cq, cqe); 9024 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, cqe);
8862 if (!(++ecount % LPFC_GET_QE_REL_INT))
8863 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
8864 }
8865 break;
8866 case LPFC_RCQ:
8867 while ((cqe = lpfc_sli4_cq_get(cq))) {
8868 workposted |= lpfc_sli4_sp_handle_rcqe(phba, cqe);
8869 if (!(++ecount % LPFC_GET_QE_REL_INT)) 9025 if (!(++ecount % LPFC_GET_QE_REL_INT))
8870 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 9026 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
8871 } 9027 }
@@ -8953,7 +9109,13 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba,
8953 } 9109 }
8954 9110
8955 /* Fake the irspiocb and copy necessary response information */ 9111 /* Fake the irspiocb and copy necessary response information */
8956 lpfc_sli4_iocb_param_transfer(&irspiocbq, cmdiocbq, wcqe); 9112 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
9113
9114 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
9115 spin_lock_irqsave(&phba->hbalock, iflags);
9116 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
9117 spin_unlock_irqrestore(&phba->hbalock, iflags);
9118 }
8957 9119
8958 /* Pass the cmd_iocb and the rsp state to the upper layer */ 9120 /* Pass the cmd_iocb and the rsp state to the upper layer */
8959 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq); 9121 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
@@ -9059,8 +9221,7 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
9059 uint16_t cqid; 9221 uint16_t cqid;
9060 int ecount = 0; 9222 int ecount = 0;
9061 9223
9062 if (unlikely(bf_get(lpfc_eqe_major_code, eqe) != 0) || 9224 if (unlikely(bf_get(lpfc_eqe_major_code, eqe) != 0)) {
9063 unlikely(bf_get(lpfc_eqe_minor_code, eqe) != 0)) {
9064 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9225 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9065 "0366 Not a valid fast-path completion " 9226 "0366 Not a valid fast-path completion "
9066 "event: majorcode=x%x, minorcode=x%x\n", 9227 "event: majorcode=x%x, minorcode=x%x\n",
@@ -10427,8 +10588,7 @@ lpfc_sli4_next_xritag(struct lpfc_hba *phba)
10427 return xritag; 10588 return xritag;
10428 } 10589 }
10429 spin_unlock_irq(&phba->hbalock); 10590 spin_unlock_irq(&phba->hbalock);
10430 10591 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10431 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10432 "2004 Failed to allocate XRI.last XRITAG is %d" 10592 "2004 Failed to allocate XRI.last XRITAG is %d"
10433 " Max XRI is %d, Used XRI is %d\n", 10593 " Max XRI is %d, Used XRI is %d\n",
10434 phba->sli4_hba.next_xri, 10594 phba->sli4_hba.next_xri,
@@ -10492,15 +10652,7 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
10492 lpfc_sli4_mbox_cmd_free(phba, mbox); 10652 lpfc_sli4_mbox_cmd_free(phba, mbox);
10493 return -ENOMEM; 10653 return -ENOMEM;
10494 } 10654 }
10495
10496 /* Get the first SGE entry from the non-embedded DMA memory */ 10655 /* Get the first SGE entry from the non-embedded DMA memory */
10497 if (unlikely(!mbox->sge_array)) {
10498 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
10499 "2525 Failed to get the non-embedded SGE "
10500 "virtual address\n");
10501 lpfc_sli4_mbox_cmd_free(phba, mbox);
10502 return -ENOMEM;
10503 }
10504 viraddr = mbox->sge_array->addr[0]; 10656 viraddr = mbox->sge_array->addr[0];
10505 10657
10506 /* Set up the SGL pages in the non-embedded DMA pages */ 10658 /* Set up the SGL pages in the non-embedded DMA pages */
@@ -10524,8 +10676,7 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
10524 sgl_pg_pairs++; 10676 sgl_pg_pairs++;
10525 } 10677 }
10526 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 10678 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
10527 pg_pairs = (pg_pairs > 0) ? (pg_pairs - 1) : pg_pairs; 10679 bf_set(lpfc_post_sgl_pages_xricnt, sgl, els_xri_cnt);
10528 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
10529 /* Perform endian conversion if necessary */ 10680 /* Perform endian conversion if necessary */
10530 sgl->word0 = cpu_to_le32(sgl->word0); 10681 sgl->word0 = cpu_to_le32(sgl->word0);
10531 10682
@@ -10607,15 +10758,7 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
10607 lpfc_sli4_mbox_cmd_free(phba, mbox); 10758 lpfc_sli4_mbox_cmd_free(phba, mbox);
10608 return -ENOMEM; 10759 return -ENOMEM;
10609 } 10760 }
10610
10611 /* Get the first SGE entry from the non-embedded DMA memory */ 10761 /* Get the first SGE entry from the non-embedded DMA memory */
10612 if (unlikely(!mbox->sge_array)) {
10613 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
10614 "2565 Failed to get the non-embedded SGE "
10615 "virtual address\n");
10616 lpfc_sli4_mbox_cmd_free(phba, mbox);
10617 return -ENOMEM;
10618 }
10619 viraddr = mbox->sge_array->addr[0]; 10762 viraddr = mbox->sge_array->addr[0];
10620 10763
10621 /* Set up the SGL pages in the non-embedded DMA pages */ 10764 /* Set up the SGL pages in the non-embedded DMA pages */
@@ -10802,6 +10945,105 @@ lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
10802} 10945}
10803 10946
10804/** 10947/**
10948 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
10949 * @vport: The vport to work on.
10950 *
10951 * This function updates the receive sequence time stamp for this vport. The
 10952 * receive sequence time stamp indicates the time that the last frame of
 10953 * the sequence that has been idle the longest was received.
 10954 * The driver uses this time stamp to determine whether any received sequences have
10955 * timed out.
10956 **/
10957void
10958lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
10959{
10960 struct lpfc_dmabuf *h_buf;
10961 struct hbq_dmabuf *dmabuf = NULL;
10962
10963 /* get the oldest sequence on the rcv list */
10964 h_buf = list_get_first(&vport->rcv_buffer_list,
10965 struct lpfc_dmabuf, list);
10966 if (!h_buf)
10967 return;
10968 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
10969 vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
10970}
10971
10972/**
10973 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
10974 * @vport: The vport that the received sequences were sent to.
10975 *
10976 * This function cleans up all outstanding received sequences. This is called
10977 * by the driver when a link event or user action invalidates all the received
10978 * sequences.
10979 **/
10980void
10981lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
10982{
10983 struct lpfc_dmabuf *h_buf, *hnext;
10984 struct lpfc_dmabuf *d_buf, *dnext;
10985 struct hbq_dmabuf *dmabuf = NULL;
10986
10987 /* start with the oldest sequence on the rcv list */
10988 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
10989 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
10990 list_del_init(&dmabuf->hbuf.list);
10991 list_for_each_entry_safe(d_buf, dnext,
10992 &dmabuf->dbuf.list, list) {
10993 list_del_init(&d_buf->list);
10994 lpfc_in_buf_free(vport->phba, d_buf);
10995 }
10996 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
10997 }
10998}
10999
11000/**
11001 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
11002 * @vport: The vport that the received sequences were sent to.
11003 *
11004 * This function determines whether any received sequences have timed out by
11005 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
11006 * indicates that there is at least one timed out sequence this routine will
11007 * go through the received sequences one at a time from most inactive to most
11008 * active to determine which ones need to be cleaned up. Once it has determined
11009 * that a sequence needs to be cleaned up it will simply free up the resources
11010 * without sending an abort.
11011 **/
11012void
11013lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
11014{
11015 struct lpfc_dmabuf *h_buf, *hnext;
11016 struct lpfc_dmabuf *d_buf, *dnext;
11017 struct hbq_dmabuf *dmabuf = NULL;
11018 unsigned long timeout;
11019 int abort_count = 0;
11020
11021 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
11022 vport->rcv_buffer_time_stamp);
11023 if (list_empty(&vport->rcv_buffer_list) ||
11024 time_before(jiffies, timeout))
11025 return;
11026 /* start with the oldest sequence on the rcv list */
11027 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
11028 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
11029 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
11030 dmabuf->time_stamp);
11031 if (time_before(jiffies, timeout))
11032 break;
11033 abort_count++;
11034 list_del_init(&dmabuf->hbuf.list);
11035 list_for_each_entry_safe(d_buf, dnext,
11036 &dmabuf->dbuf.list, list) {
11037 list_del_init(&d_buf->list);
11038 lpfc_in_buf_free(vport->phba, d_buf);
11039 }
11040 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
11041 }
11042 if (abort_count)
11043 lpfc_update_rcv_time_stamp(vport);
11044}
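lpfc_rcv_seq_check_edtov() relies on rcv_buffer_list being ordered oldest-first, so the scan can stop at the first sequence that has not yet aged past E_D_TOV. A standalone model of that early-exit scan, with jiffies and time_before() simulated by a plain counter:

#include <stdio.h>

#define NSEQ 3
static long jiffies;
static int time_before(long a, long b) { return a - b < 0; }

int main(void)
{
	long stamp[NSEQ] = { 10, 40, 90 };  /* oldest first, as on the list */
	long edtov = 50;                    /* assumed E_D_TOV in ticks */
	int i, freed = 0;

	jiffies = 100;
	for (i = 0; i < NSEQ; i++) {
		if (time_before(jiffies, stamp[i] + edtov))
			break;             /* younger entries cannot be stale */
		freed++;                   /* would free this sequence */
	}
	printf("freed %d of %d sequences\n", freed, NSEQ);  /* freed 2 of 3 */
	return 0;
}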
11045
11046/**
10805 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences 11047 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
10806 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame 11048 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
10807 * 11049 *
@@ -10823,6 +11065,8 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
10823 struct hbq_dmabuf *seq_dmabuf = NULL; 11065 struct hbq_dmabuf *seq_dmabuf = NULL;
10824 struct hbq_dmabuf *temp_dmabuf = NULL; 11066 struct hbq_dmabuf *temp_dmabuf = NULL;
10825 11067
11068 INIT_LIST_HEAD(&dmabuf->dbuf.list);
11069 dmabuf->time_stamp = jiffies;
10826 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 11070 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
10827 /* Use the hdr_buf to find the sequence that this frame belongs to */ 11071 /* Use the hdr_buf to find the sequence that this frame belongs to */
10828 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { 11072 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
@@ -10841,13 +11085,27 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
10841 * Queue the buffer on the vport's rcv_buffer_list. 11085 * Queue the buffer on the vport's rcv_buffer_list.
10842 */ 11086 */
10843 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); 11087 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
11088 lpfc_update_rcv_time_stamp(vport);
10844 return dmabuf; 11089 return dmabuf;
10845 } 11090 }
10846 temp_hdr = seq_dmabuf->hbuf.virt; 11091 temp_hdr = seq_dmabuf->hbuf.virt;
10847 if (new_hdr->fh_seq_cnt < temp_hdr->fh_seq_cnt) { 11092 if (be16_to_cpu(new_hdr->fh_seq_cnt) <
10848 list_add(&seq_dmabuf->dbuf.list, &dmabuf->dbuf.list); 11093 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
11094 list_del_init(&seq_dmabuf->hbuf.list);
11095 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
11096 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
11097 lpfc_update_rcv_time_stamp(vport);
10849 return dmabuf; 11098 return dmabuf;
10850 } 11099 }
11100 /* move this sequence to the tail to indicate a young sequence */
11101 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
11102 seq_dmabuf->time_stamp = jiffies;
11103 lpfc_update_rcv_time_stamp(vport);
11104 if (list_empty(&seq_dmabuf->dbuf.list)) {
11105 temp_hdr = dmabuf->hbuf.virt;
11106 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
11107 return seq_dmabuf;
11108 }
10851 /* find the correct place in the sequence to insert this frame */ 11109 /* find the correct place in the sequence to insert this frame */
10852 list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) { 11110 list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
10853 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); 11111 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
@@ -10856,7 +11114,8 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
10856 * If the frame's sequence count is greater than the frame on 11114 * If the frame's sequence count is greater than the frame on
10857 * the list then insert the frame right after this frame 11115 * the list then insert the frame right after this frame
10858 */ 11116 */
10859 if (new_hdr->fh_seq_cnt > temp_hdr->fh_seq_cnt) { 11117 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
11118 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
10860 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list); 11119 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
10861 return seq_dmabuf; 11120 return seq_dmabuf;
10862 } 11121 }
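The be16_to_cpu() conversions added in this hunk matter because fh_seq_cnt is a big-endian on-wire field: comparing the raw values on a little-endian host sorts frames into the wrong order. A small program demonstrating the failure mode (htons/ntohs stand in for the be16 helpers; the printed results assume a little-endian host):

#include <arpa/inet.h>
#include <stdio.h>

int main(void)
{
	unsigned short a = htons(1);    /* on-wire seq_cnt 1 */
	unsigned short b = htons(256);  /* on-wire seq_cnt 256 */

	/* raw compare: 1 is stored as 0x0100 and 256 as 0x0001 */
	printf("raw:  a<b = %d\n", a < b);            /* 0 -- wrong order */
	/* converted compare gives the real frame order */
	printf("cpu:  a<b = %d\n", ntohs(a) < ntohs(b));  /* 1 -- correct */
	return 0;
}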
@@ -10865,6 +11124,210 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
10865} 11124}
10866 11125
10867/** 11126/**
11127 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
 11128 * @vport: pointer to a virtual port
11129 * @dmabuf: pointer to a dmabuf that describes the FC sequence
11130 *
 11131 * This function tries to abort the partially assembled sequence described
 11132 * by the information in the basic abort @dmabuf. It checks whether such a
 11133 * partially assembled sequence is held by the driver. If so, it frees up all
11134 * the frames from the partially assembled sequence.
11135 *
11136 * Return
 11137 * true -- if a matching partially assembled sequence is present and all
 11138 * of its frames have been freed;
 11139 * false -- if no matching partially assembled sequence is present, so
 11140 * nothing was aborted in the lower layer driver
11141 **/
11142static bool
11143lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
11144 struct hbq_dmabuf *dmabuf)
11145{
11146 struct fc_frame_header *new_hdr;
11147 struct fc_frame_header *temp_hdr;
11148 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
11149 struct hbq_dmabuf *seq_dmabuf = NULL;
11150
11151 /* Use the hdr_buf to find the sequence that matches this frame */
11152 INIT_LIST_HEAD(&dmabuf->dbuf.list);
11153 INIT_LIST_HEAD(&dmabuf->hbuf.list);
11154 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
11155 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
11156 temp_hdr = (struct fc_frame_header *)h_buf->virt;
11157 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
11158 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
11159 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
11160 continue;
11161 /* found a pending sequence that matches this frame */
11162 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
11163 break;
11164 }
11165
11166 /* Free up all the frames from the partially assembled sequence */
11167 if (seq_dmabuf) {
11168 list_for_each_entry_safe(d_buf, n_buf,
11169 &seq_dmabuf->dbuf.list, list) {
11170 list_del_init(&d_buf->list);
11171 lpfc_in_buf_free(vport->phba, d_buf);
11172 }
11173 return true;
11174 }
11175 return false;
11176}
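The matching loop above identifies the sequence an ABTS refers to by comparing SEQ_ID, OX_ID, and the 3-byte S_ID of the headers. A hypothetical, self-contained version of that test — fc_hdr_model is a stand-in for struct fc_frame_header:

#include <stdio.h>
#include <string.h>

struct fc_hdr_model {
	unsigned char seq_id;
	unsigned short ox_id;
	unsigned char s_id[3];
};

static int seq_matches(const struct fc_hdr_model *abts,
		       const struct fc_hdr_model *pending)
{
	return abts->seq_id == pending->seq_id &&
	       abts->ox_id == pending->ox_id &&
	       !memcmp(abts->s_id, pending->s_id, 3);
}

int main(void)
{
	struct fc_hdr_model abts = { 1, 0x10, { 0xaa, 0xbb, 0xcc } };
	struct fc_hdr_model pend = { 1, 0x10, { 0xaa, 0xbb, 0xcc } };

	printf("match=%d\n", seq_matches(&abts, &pend));  /* 1 */
	pend.ox_id = 0x11;
	printf("match=%d\n", seq_matches(&abts, &pend));  /* 0 */
	return 0;
}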
11177
11178/**
11179 * lpfc_sli4_seq_abort_acc_cmpl - Accept seq abort iocb complete handler
11180 * @phba: Pointer to HBA context object.
11181 * @cmd_iocbq: pointer to the command iocbq structure.
11182 * @rsp_iocbq: pointer to the response iocbq structure.
11183 *
11184 * This function handles the sequence abort accept iocb command complete
11185 * event. It properly releases the memory allocated to the sequence abort
11186 * accept iocb.
11187 **/
11188static void
11189lpfc_sli4_seq_abort_acc_cmpl(struct lpfc_hba *phba,
11190 struct lpfc_iocbq *cmd_iocbq,
11191 struct lpfc_iocbq *rsp_iocbq)
11192{
11193 if (cmd_iocbq)
11194 lpfc_sli_release_iocbq(phba, cmd_iocbq);
11195}
11196
11197/**
11198 * lpfc_sli4_seq_abort_acc - Accept sequence abort
11199 * @phba: Pointer to HBA context object.
11200 * @fc_hdr: pointer to a FC frame header.
11201 *
 11202 * This function sends a basic accept (BA_ACC) for a previously received
 11203 * unsolicited sequence abort event, after the sequence handling has been aborted.
11204 **/
11205static void
11206lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba,
11207 struct fc_frame_header *fc_hdr)
11208{
11209 struct lpfc_iocbq *ctiocb = NULL;
11210 struct lpfc_nodelist *ndlp;
11211 uint16_t oxid, rxid;
11212 uint32_t sid, fctl;
11213 IOCB_t *icmd;
11214
11215 if (!lpfc_is_link_up(phba))
11216 return;
11217
11218 sid = sli4_sid_from_fc_hdr(fc_hdr);
11219 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
11220 rxid = be16_to_cpu(fc_hdr->fh_rx_id);
11221
11222 ndlp = lpfc_findnode_did(phba->pport, sid);
11223 if (!ndlp) {
11224 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
11225 "1268 Find ndlp returned NULL for oxid:x%x "
11226 "SID:x%x\n", oxid, sid);
11227 return;
11228 }
11229
11230 /* Allocate buffer for acc iocb */
11231 ctiocb = lpfc_sli_get_iocbq(phba);
11232 if (!ctiocb)
11233 return;
11234
11235 /* Extract the F_CTL field from FC_HDR */
11236 fctl = sli4_fctl_from_fc_hdr(fc_hdr);
11237
11238 icmd = &ctiocb->iocb;
11239 icmd->un.xseq64.bdl.bdeSize = 0;
11240 icmd->un.xseq64.bdl.ulpIoTag32 = 0;
11241 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
11242 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
11243 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
11244
11245 /* Fill in the rest of iocb fields */
11246 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
11247 icmd->ulpBdeCount = 0;
11248 icmd->ulpLe = 1;
11249 icmd->ulpClass = CLASS3;
11250 icmd->ulpContext = ndlp->nlp_rpi;
11251
11252 ctiocb->iocb_cmpl = NULL;
11253 ctiocb->vport = phba->pport;
11254 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_acc_cmpl;
11255
11256 if (fctl & FC_FC_EX_CTX) {
11257 /* ABTS sent by responder to CT exchange, construction
11258 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
11259 * field and RX_ID from ABTS for RX_ID field.
11260 */
11261 bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_RSP);
11262 bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, rxid);
11263 ctiocb->sli4_xritag = oxid;
11264 } else {
11265 /* ABTS sent by initiator to CT exchange, construction
 11266 * of BA_ACC will need a newly allocated XRI for the
11267 * XRI_TAG and RX_ID fields.
11268 */
11269 bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_INT);
11270 bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, NO_XRI);
11271 ctiocb->sli4_xritag = NO_XRI;
11272 }
11273 bf_set(lpfc_abts_oxid, &icmd->un.bls_acc, oxid);
11274
11275 /* Xmit CT abts accept on exchange <xid> */
11276 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
11277 "1200 Xmit CT ABTS ACC on exchange x%x Data: x%x\n",
11278 CMD_XMIT_BLS_RSP64_CX, phba->link_state);
11279 lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
11280}
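lpfc_sli4_seq_abort_acc() always echoes the OX_ID of the ABTS, but fills RX_ID differently depending on whether the ABTS came from the exchange responder or the initiator. A sketch of just that decision — NO_XRI's value and the struct below are illustrative stand-ins:

#include <stdio.h>

#define NO_XRI 0xffff

struct ba_acc_model { unsigned short oxid, rxid; int need_new_xri; };

static struct ba_acc_model build_ba_acc(int abts_from_responder,
					unsigned short oxid,
					unsigned short rxid)
{
	struct ba_acc_model acc = { .oxid = oxid };  /* OX_ID always echoed */

	if (abts_from_responder) {
		acc.rxid = rxid;      /* reuse RX_ID from the ABTS */
		acc.need_new_xri = 0;
	} else {
		acc.rxid = NO_XRI;    /* a fresh responder XRI fills it */
		acc.need_new_xri = 1;
	}
	return acc;
}

int main(void)
{
	struct ba_acc_model a = build_ba_acc(1, 0x12, 0x34);
	struct ba_acc_model b = build_ba_acc(0, 0x12, 0x34);
	printf("resp: rxid=0x%x  init: rxid=0x%x new_xri=%d\n",
	       a.rxid, b.rxid, b.need_new_xri);
	return 0;
}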
11281
11282/**
11283 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
11284 * @vport: Pointer to the vport on which this sequence was received
11285 * @dmabuf: pointer to a dmabuf that describes the FC sequence
11286 *
11287 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
 11288 * receive sequence is only partially assembled by the driver, it aborts
 11289 * the partially assembled frames for the sequence. Otherwise, if the
 11290 * unsolicited receive sequence has been completely assembled and passed to
 11291 * the Upper Layer Protocol (ULP), it marks the per-OX_ID status of the
 11292 * unsolicited sequence as aborted. After that, it issues a basic
 11293 * accept (BA_ACC) for the abort.
11294 **/
11295void
11296lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
11297 struct hbq_dmabuf *dmabuf)
11298{
11299 struct lpfc_hba *phba = vport->phba;
11300 struct fc_frame_header fc_hdr;
11301 uint32_t fctl;
11302 bool abts_par;
11303
11304 /* Make a copy of fc_hdr before the dmabuf being released */
11305 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
11306 fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
11307
11308 if (fctl & FC_FC_EX_CTX) {
11309 /*
11310 * ABTS sent by responder to exchange, just free the buffer
11311 */
11312 lpfc_in_buf_free(phba, &dmabuf->dbuf);
11313 } else {
11314 /*
11315 * ABTS sent by initiator to exchange, need to do cleanup
11316 */
11317 /* Try to abort partially assembled seq */
11318 abts_par = lpfc_sli4_abort_partial_seq(vport, dmabuf);
11319
11320 /* Send abort to ULP if partially seq abort failed */
11321 if (abts_par == false)
11322 lpfc_sli4_send_seq_to_ulp(vport, dmabuf);
11323 else
11324 lpfc_in_buf_free(phba, &dmabuf->dbuf);
11325 }
11326 /* Send basic accept (BA_ACC) to the abort requester */
11327 lpfc_sli4_seq_abort_acc(phba, &fc_hdr);
11328}
11329
11330/**
10868 * lpfc_seq_complete - Indicates if a sequence is complete 11331 * lpfc_seq_complete - Indicates if a sequence is complete
10869 * @dmabuf: pointer to a dmabuf that describes the FC sequence 11332 * @dmabuf: pointer to a dmabuf that describes the FC sequence
10870 * 11333 *
@@ -10899,7 +11362,7 @@ lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
10899 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); 11362 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
10900 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 11363 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
10901 /* If there is a hole in the sequence count then fail. */ 11364 /* If there is a hole in the sequence count then fail. */
10902 if (++seq_count != hdr->fh_seq_cnt) 11365 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
10903 return 0; 11366 return 0;
10904 fctl = (hdr->fh_f_ctl[0] << 16 | 11367 fctl = (hdr->fh_f_ctl[0] << 16 |
10905 hdr->fh_f_ctl[1] << 8 | 11368 hdr->fh_f_ctl[1] << 8 |
@@ -10931,14 +11394,14 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
10931 struct lpfc_iocbq *first_iocbq, *iocbq; 11394 struct lpfc_iocbq *first_iocbq, *iocbq;
10932 struct fc_frame_header *fc_hdr; 11395 struct fc_frame_header *fc_hdr;
10933 uint32_t sid; 11396 uint32_t sid;
11397 struct ulp_bde64 *pbde;
10934 11398
10935 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 11399 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
10936 /* remove from receive buffer list */ 11400 /* remove from receive buffer list */
10937 list_del_init(&seq_dmabuf->hbuf.list); 11401 list_del_init(&seq_dmabuf->hbuf.list);
11402 lpfc_update_rcv_time_stamp(vport);
10938 /* get the Remote Port's SID */ 11403 /* get the Remote Port's SID */
10939 sid = (fc_hdr->fh_s_id[0] << 16 | 11404 sid = sli4_sid_from_fc_hdr(fc_hdr);
10940 fc_hdr->fh_s_id[1] << 8 |
10941 fc_hdr->fh_s_id[2]);
10942 /* Get an iocbq struct to fill in. */ 11405 /* Get an iocbq struct to fill in. */
10943 first_iocbq = lpfc_sli_get_iocbq(vport->phba); 11406 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
10944 if (first_iocbq) { 11407 if (first_iocbq) {
@@ -10957,7 +11420,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
10957 LPFC_DATA_BUF_SIZE; 11420 LPFC_DATA_BUF_SIZE;
10958 first_iocbq->iocb.un.rcvels.remoteID = sid; 11421 first_iocbq->iocb.un.rcvels.remoteID = sid;
10959 first_iocbq->iocb.unsli3.rcvsli3.acc_len += 11422 first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
10960 bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe); 11423 bf_get(lpfc_rcqe_length,
11424 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
10961 } 11425 }
10962 iocbq = first_iocbq; 11426 iocbq = first_iocbq;
10963 /* 11427 /*
@@ -10972,10 +11436,12 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
10972 if (!iocbq->context3) { 11436 if (!iocbq->context3) {
10973 iocbq->context3 = d_buf; 11437 iocbq->context3 = d_buf;
10974 iocbq->iocb.ulpBdeCount++; 11438 iocbq->iocb.ulpBdeCount++;
10975 iocbq->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize = 11439 pbde = (struct ulp_bde64 *)
10976 LPFC_DATA_BUF_SIZE; 11440 &iocbq->iocb.unsli3.sli3Words[4];
11441 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
10977 first_iocbq->iocb.unsli3.rcvsli3.acc_len += 11442 first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
10978 bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe); 11443 bf_get(lpfc_rcqe_length,
11444 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
10979 } else { 11445 } else {
10980 iocbq = lpfc_sli_get_iocbq(vport->phba); 11446 iocbq = lpfc_sli_get_iocbq(vport->phba);
10981 if (!iocbq) { 11447 if (!iocbq) {
@@ -10994,7 +11460,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
10994 iocbq->iocb.un.cont64[0].tus.f.bdeSize = 11460 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
10995 LPFC_DATA_BUF_SIZE; 11461 LPFC_DATA_BUF_SIZE;
10996 first_iocbq->iocb.unsli3.rcvsli3.acc_len += 11462 first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
10997 bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe); 11463 bf_get(lpfc_rcqe_length,
11464 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
10998 iocbq->iocb.un.rcvels.remoteID = sid; 11465 iocbq->iocb.un.rcvels.remoteID = sid;
10999 list_add_tail(&iocbq->list, &first_iocbq->list); 11466 list_add_tail(&iocbq->list, &first_iocbq->list);
11000 } 11467 }
@@ -11002,6 +11469,43 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
11002 return first_iocbq; 11469 return first_iocbq;
11003} 11470}
11004 11471
11472static void
11473lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
11474 struct hbq_dmabuf *seq_dmabuf)
11475{
11476 struct fc_frame_header *fc_hdr;
11477 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
11478 struct lpfc_hba *phba = vport->phba;
11479
11480 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
11481 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
11482 if (!iocbq) {
11483 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11484 "2707 Ring %d handler: Failed to allocate "
11485 "iocb Rctl x%x Type x%x received\n",
11486 LPFC_ELS_RING,
11487 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
11488 return;
11489 }
11490 if (!lpfc_complete_unsol_iocb(phba,
11491 &phba->sli.ring[LPFC_ELS_RING],
11492 iocbq, fc_hdr->fh_r_ctl,
11493 fc_hdr->fh_type))
11494 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11495 "2540 Ring %d handler: unexpected Rctl "
11496 "x%x Type x%x received\n",
11497 LPFC_ELS_RING,
11498 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
11499
11500 /* Free iocb created in lpfc_prep_seq */
11501 list_for_each_entry_safe(curr_iocb, next_iocb,
11502 &iocbq->list, list) {
11503 list_del_init(&curr_iocb->list);
11504 lpfc_sli_release_iocbq(phba, curr_iocb);
11505 }
11506 lpfc_sli_release_iocbq(phba, iocbq);
11507}
11508
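The teardown at the end of lpfc_sli4_send_seq_to_ulp() frees list members while walking the list, which is why the _safe iterator variant is required: the successor must be cached before the current entry is released. Below is a minimal user-space sketch of that idiom, under the assumption that a tiny intrusive doubly linked list stands in for the driver's iocb chain (struct node and all helpers here are invented, not kernel API).

#include <stdlib.h>

struct node {
	struct node *prev, *next;
};

static void list_init(struct node *head)
{
	head->prev = head->next = head;
}

static void list_add_tail(struct node *n, struct node *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

/* Free every element while walking: cache 'next' before freeing
 * 'curr' -- exactly what list_for_each_entry_safe() does above. */
static void release_chain(struct node *head)
{
	struct node *curr = head->next, *next;

	while (curr != head) {
		next = curr->next;	/* save successor first */
		free(curr);		/* now safe to release curr */
		curr = next;
	}
}

int main(void)
{
	struct node head;
	list_init(&head);
	for (int i = 0; i < 3; i++)
		list_add_tail(malloc(sizeof(struct node)), &head);
	release_chain(&head);
	return 0;
}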
11005/** 11509/**
11006 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware 11510 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
11007 * @phba: Pointer to HBA context object. 11511 * @phba: Pointer to HBA context object.
@@ -11014,67 +11518,48 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
11014 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the 11518 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
11015 * appropriate receive function when the final frame in a sequence is received. 11519 * appropriate receive function when the final frame in a sequence is received.
11016 **/ 11520 **/
11017int 11521void
11018lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba) 11522lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
11523 struct hbq_dmabuf *dmabuf)
11019{ 11524{
11020 LIST_HEAD(cmplq); 11525 struct hbq_dmabuf *seq_dmabuf;
11021 struct hbq_dmabuf *dmabuf, *seq_dmabuf;
11022 struct fc_frame_header *fc_hdr; 11526 struct fc_frame_header *fc_hdr;
11023 struct lpfc_vport *vport; 11527 struct lpfc_vport *vport;
11024 uint32_t fcfi; 11528 uint32_t fcfi;
11025 struct lpfc_iocbq *iocbq;
11026
11027 /* Clear hba flag and get all received buffers into the cmplq */
11028 spin_lock_irq(&phba->hbalock);
11029 phba->hba_flag &= ~HBA_RECEIVE_BUFFER;
11030 list_splice_init(&phba->rb_pend_list, &cmplq);
11031 spin_unlock_irq(&phba->hbalock);
11032 11529
11033 /* Process each received buffer */ 11530 /* Process each received buffer */
11034 while ((dmabuf = lpfc_sli_hbqbuf_get(&cmplq)) != NULL) { 11531 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
11035 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 11532 /* check to see if this a valid type of frame */
11036 /* check to see if this a valid type of frame */ 11533 if (lpfc_fc_frame_check(phba, fc_hdr)) {
11037 if (lpfc_fc_frame_check(phba, fc_hdr)) { 11534 lpfc_in_buf_free(phba, &dmabuf->dbuf);
11038 lpfc_in_buf_free(phba, &dmabuf->dbuf); 11535 return;
11039 continue; 11536 }
11040 } 11537 fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->cq_event.cqe.rcqe_cmpl);
11041 fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->rcqe); 11538 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
11042 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi); 11539 if (!vport || !(vport->vpi_state & LPFC_VPI_REGISTERED)) {
11043 if (!vport) { 11540 /* throw out the frame */
11044 /* throw out the frame */ 11541 lpfc_in_buf_free(phba, &dmabuf->dbuf);
11045 lpfc_in_buf_free(phba, &dmabuf->dbuf); 11542 return;
11046 continue; 11543 }
11047 } 11544 /* Handle the basic abort sequence (BA_ABTS) event */
11048 /* Link this frame */ 11545 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
11049 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf); 11546 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
11050 if (!seq_dmabuf) { 11547 return;
11051 /* unable to add frame to vport - throw it out */ 11548 }
11052 lpfc_in_buf_free(phba, &dmabuf->dbuf); 11549
11053 continue; 11550 /* Link this frame */
11054 } 11551 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
11055 /* If not last frame in sequence continue processing frames. */ 11552 if (!seq_dmabuf) {
11056 if (!lpfc_seq_complete(seq_dmabuf)) { 11553 /* unable to add frame to vport - throw it out */
11057 /* 11554 lpfc_in_buf_free(phba, &dmabuf->dbuf);
11058 * When saving off frames post a new one and mark this 11555 return;
11059 * frame to be freed when it is finished. 11556 }
11060 **/ 11557 /* If not last frame in sequence continue processing frames. */
11061 lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1); 11558 if (!lpfc_seq_complete(seq_dmabuf))
11062 dmabuf->tag = -1; 11559 return;
11063 continue; 11560
11064 } 11561 /* Send the complete sequence to the upper layer protocol */
11065 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 11562 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
11066 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
11067 if (!lpfc_complete_unsol_iocb(phba,
11068 &phba->sli.ring[LPFC_ELS_RING],
11069 iocbq, fc_hdr->fh_r_ctl,
11070 fc_hdr->fh_type))
11071 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11072 "2540 Ring %d handler: unexpected Rctl "
11073 "x%x Type x%x received\n",
11074 LPFC_ELS_RING,
11075 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
11076 };
11077 return 0;
11078} 11563}
11079 11564
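The rewritten handler is now a straight-line filter over a single buffer: validate the frame, map it to a registered vport, short-circuit aborts, then accumulate frames until the sequence completes. A compressed, compilable sketch of that dispatch order follows; every type and flag below is a hypothetical stand-in, not driver API.

#include <stdbool.h>
#include <stdio.h>

struct frame { bool valid, registered_vport, is_abort, last_of_seq; };

enum disposition { DROP, HANDLED, QUEUED, DELIVERED };

static enum disposition handle_frame(const struct frame *f)
{
	if (!f->valid)
		return DROP;	/* lpfc_fc_frame_check() failed: free buffer */
	if (!f->registered_vport)
		return DROP;	/* no vport, or its VPI is not registered */
	if (f->is_abort)
		return HANDLED;	/* BA_ABTS takes the unsol-abort path */
	if (!f->last_of_seq)
		return QUEUED;	/* linked into the sequence; wait for more */
	return DELIVERED;	/* complete sequence goes up to the ULP */
}

int main(void)
{
	struct frame f = { true, true, false, true };
	printf("disposition=%d\n", handle_frame(&f));
	return 0;
}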
11080/** 11565/**
@@ -11091,7 +11576,7 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba)
11091 * sequential. 11576 * sequential.
11092 * 11577 *
11093 * Return codes 11578 * Return codes
11094 * 0 - sucessful 11579 * 0 - successful
11095 * EIO - The mailbox failed to complete successfully. 11580 * EIO - The mailbox failed to complete successfully.
11096 * When this error occurs, the driver is not guaranteed 11581 * When this error occurs, the driver is not guaranteed
11097 * to have any rpi regions posted to the device and 11582 * to have any rpi regions posted to the device and
@@ -11129,7 +11614,7 @@ lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
11129 * maps up to 64 rpi context regions. 11614 * maps up to 64 rpi context regions.
11130 * 11615 *
11131 * Return codes 11616 * Return codes
11132 * 0 - sucessful 11617 * 0 - successful
11133 * ENOMEM - No available memory 11618 * ENOMEM - No available memory
11134 * EIO - The mailbox failed to complete successfully. 11619 * EIO - The mailbox failed to complete successfully.
11135 **/ 11620 **/
@@ -11191,7 +11676,7 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
11191 * PAGE_SIZE modulo 64 rpi context headers. 11676 * PAGE_SIZE modulo 64 rpi context headers.
11192 * 11677 *
11193 * Returns 11678 * Returns
11194 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if sucessful 11679 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
11195 * LPFC_RPI_ALLOC_ERROR if no rpis are available. 11680 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
11196 **/ 11681 **/
11197int 11682int
@@ -11334,6 +11819,7 @@ lpfc_sli4_init_vpi(struct lpfc_hba *phba, uint16_t vpi)
11334{ 11819{
11335 LPFC_MBOXQ_t *mboxq; 11820 LPFC_MBOXQ_t *mboxq;
11336 int rc = 0; 11821 int rc = 0;
11822 int retval = MBX_SUCCESS;
11337 uint32_t mbox_tmo; 11823 uint32_t mbox_tmo;
11338 11824
11339 if (vpi == 0) 11825 if (vpi == 0)
@@ -11344,16 +11830,17 @@ lpfc_sli4_init_vpi(struct lpfc_hba *phba, uint16_t vpi)
11344 lpfc_init_vpi(phba, mboxq, vpi); 11830 lpfc_init_vpi(phba, mboxq, vpi);
11345 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI); 11831 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI);
11346 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 11832 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
11347 if (rc != MBX_TIMEOUT)
11348 mempool_free(mboxq, phba->mbox_mem_pool);
11349 if (rc != MBX_SUCCESS) { 11833 if (rc != MBX_SUCCESS) {
11350 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11834 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11351 "2022 INIT VPI Mailbox failed " 11835 "2022 INIT VPI Mailbox failed "
11352 "status %d, mbxStatus x%x\n", rc, 11836 "status %d, mbxStatus x%x\n", rc,
11353 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 11837 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
11354 rc = -EIO; 11838 retval = -EIO;
11355 } 11839 }
11356 return rc; 11840 if (rc != MBX_TIMEOUT)
11841 mempool_free(mboxq, phba->mbox_mem_pool);
11842
11843 return retval;
11357} 11844}
11358 11845
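The reordering in lpfc_sli4_init_vpi() fixes a use-after-free: the old code returned the mailbox to the pool and then read its status word for the error log. The corrected shape is log first, then release, and release only when ownership has actually returned (a timed-out command may still be completed later by another context). A rough user-space sketch of that ordering, with invented names:

#include <stdio.h>
#include <stdlib.h>

enum { CMD_SUCCESS, CMD_TIMEOUT, CMD_ERROR };

struct cmd_buf { int status; };

static int issue_wait(struct cmd_buf *b) { b->status = 0; return CMD_SUCCESS; }

int run_cmd(void)
{
	int retval = 0;
	struct cmd_buf *buf = malloc(sizeof(*buf));
	if (!buf)
		return -1;

	int rc = issue_wait(buf);
	if (rc != CMD_SUCCESS) {
		/* still safe: buf has not been freed yet */
		fprintf(stderr, "failed rc %d status %#x\n", rc, buf->status);
		retval = -1;
	}
	if (rc != CMD_TIMEOUT)
		free(buf);	/* on timeout the completion path still owns buf */

	return retval;
}

int main(void) { return run_cmd(); }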
11359/** 11846/**
@@ -11438,13 +11925,6 @@ lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
11438 */ 11925 */
11439 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); 11926 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
11440 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo); 11927 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
11441 if (unlikely(!mboxq->sge_array)) {
11442 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
11443 "2526 Failed to get the non-embedded SGE "
11444 "virtual address\n");
11445 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11446 return -ENOMEM;
11447 }
11448 virt_addr = mboxq->sge_array->addr[0]; 11928 virt_addr = mboxq->sge_array->addr[0];
11449 /* 11929 /*
11450 * Configure the FCF record for FCFI 0. This is the driver's 11930 * Configure the FCF record for FCFI 0. This is the driver's
@@ -11517,24 +11997,22 @@ lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
11517} 11997}
11518 11998
11519/** 11999/**
11520 * lpfc_sli4_read_fcf_record - Read the driver's default FCF Record. 12000 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
11521 * @phba: pointer to lpfc hba data structure. 12001 * @phba: pointer to lpfc hba data structure.
11522 * @fcf_index: FCF table entry offset. 12002 * @fcf_index: FCF table entry offset.
11523 * 12003 *
11524 * This routine is invoked to read up to @fcf_num of FCF record from the 12004 * This routine is invoked to scan the entire FCF table by reading FCF
11525 * device starting with the given @fcf_index. 12005 * records and processing them one at a time starting from the @fcf_index
12006 * for initial FCF discovery or fast FCF failover rediscovery.
12007 *
 12008 * Return 0 if the mailbox command is submitted successfully, non-zero
12009 * otherwise.
11526 **/ 12010 **/
11527int 12011int
11528lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index) 12012lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
11529{ 12013{
11530 int rc = 0, error; 12014 int rc = 0, error;
11531 LPFC_MBOXQ_t *mboxq; 12015 LPFC_MBOXQ_t *mboxq;
11532 void *virt_addr;
11533 dma_addr_t phys_addr;
11534 uint8_t *bytep;
11535 struct lpfc_mbx_sge sge;
11536 uint32_t alloc_len, req_len;
11537 struct lpfc_mbx_read_fcf_tbl *read_fcf;
11538 12016
11539 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag; 12017 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
11540 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12018 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -11542,59 +12020,347 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
11542 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12020 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11543 "2000 Failed to allocate mbox for " 12021 "2000 Failed to allocate mbox for "
11544 "READ_FCF cmd\n"); 12022 "READ_FCF cmd\n");
11545 return -ENOMEM; 12023 error = -ENOMEM;
12024 goto fail_fcf_scan;
11546 } 12025 }
12026 /* Construct the read FCF record mailbox command */
12027 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
12028 if (rc) {
12029 error = -EINVAL;
12030 goto fail_fcf_scan;
12031 }
12032 /* Issue the mailbox command asynchronously */
12033 mboxq->vport = phba->pport;
12034 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
12035 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
12036 if (rc == MBX_NOT_FINISHED)
12037 error = -EIO;
12038 else {
12039 spin_lock_irq(&phba->hbalock);
12040 phba->hba_flag |= FCF_DISC_INPROGRESS;
12041 spin_unlock_irq(&phba->hbalock);
12042 /* Reset FCF round robin index bmask for new scan */
12043 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
12044 memset(phba->fcf.fcf_rr_bmask, 0,
12045 sizeof(*phba->fcf.fcf_rr_bmask));
12046 error = 0;
12047 }
12048fail_fcf_scan:
12049 if (error) {
12050 if (mboxq)
12051 lpfc_sli4_mbox_cmd_free(phba, mboxq);
12052 /* FCF scan failed, clear FCF_DISC_INPROGRESS flag */
12053 spin_lock_irq(&phba->hbalock);
12054 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
12055 spin_unlock_irq(&phba->hbalock);
12056 }
12057 return error;
12058}
11547 12059
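The scan routine above follows the classic goto-unwind shape: any failure jumps to a single fail label that undoes shared state, and the mailbox is freed on the error path only, since after a successful asynchronous submit the completion handler owns it. A self-contained sketch of that structure, with invented names and error codes:

#include <stdio.h>
#include <stdlib.h>

static int build_request(void *mbox) { (void)mbox; return 0; }
static int submit_async(void *mbox)  { (void)mbox; return 0; }

static int scan_in_progress;

int start_scan(void)
{
	int error = 0;
	void *mbox = malloc(128);
	if (!mbox) {
		error = -1;		/* -ENOMEM */
		goto fail_scan;
	}
	if (build_request(mbox)) {
		error = -2;		/* -EINVAL */
		goto fail_scan;
	}
	if (submit_async(mbox)) {
		error = -3;		/* -EIO */
		goto fail_scan;
	}
	scan_in_progress = 1;		/* set only after a successful submit */
	return 0;

fail_scan:
	free(mbox);			/* free(NULL) is a no-op */
	scan_in_progress = 0;		/* mirrors clearing FCF_DISC_INPROGRESS */
	fprintf(stderr, "scan failed: %d\n", error);
	return error;
}

int main(void) { return start_scan(); }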
11548 req_len = sizeof(struct fcf_record) + 12060/**
11549 sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t); 12061 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for round robin fcf.
12062 * @phba: pointer to lpfc hba data structure.
12063 * @fcf_index: FCF table entry offset.
12064 *
12065 * This routine is invoked to read an FCF record indicated by @fcf_index
12066 * and to use it for FLOGI round robin FCF failover.
12067 *
 12068 * Return 0 if the mailbox command is submitted successfully, non-zero
12069 * otherwise.
12070 **/
12071int
12072lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
12073{
12074 int rc = 0, error;
12075 LPFC_MBOXQ_t *mboxq;
11550 12076
11551 /* Set up READ_FCF SLI4_CONFIG mailbox-ioctl command */ 12077 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11552 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 12078 if (!mboxq) {
11553 LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len, 12079 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
11554 LPFC_SLI4_MBX_NEMBED); 12080 "2763 Failed to allocate mbox for "
12081 "READ_FCF cmd\n");
12082 error = -ENOMEM;
12083 goto fail_fcf_read;
12084 }
12085 /* Construct the read FCF record mailbox command */
12086 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
12087 if (rc) {
12088 error = -EINVAL;
12089 goto fail_fcf_read;
12090 }
12091 /* Issue the mailbox command asynchronously */
12092 mboxq->vport = phba->pport;
12093 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
12094 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
12095 if (rc == MBX_NOT_FINISHED)
12096 error = -EIO;
12097 else
12098 error = 0;
11555 12099
11556 if (alloc_len < req_len) { 12100fail_fcf_read:
11557 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12101 if (error && mboxq)
11558 "0291 Allocated DMA memory size (x%x) is "
11559 "less than the requested DMA memory "
11560 "size (x%x)\n", alloc_len, req_len);
11561 lpfc_sli4_mbox_cmd_free(phba, mboxq); 12102 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11562 return -ENOMEM; 12103 return error;
12104}
12105
12106/**
12107 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
12108 * @phba: pointer to lpfc hba data structure.
12109 * @fcf_index: FCF table entry offset.
12110 *
12111 * This routine is invoked to read an FCF record indicated by @fcf_index to
12112 * determine whether it's eligible for FLOGI round robin failover list.
12113 *
 12114 * Return 0 if the mailbox command is submitted successfully, non-zero
12115 * otherwise.
12116 **/
12117int
12118lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
12119{
12120 int rc = 0, error;
12121 LPFC_MBOXQ_t *mboxq;
12122
12123 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12124 if (!mboxq) {
12125 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
12126 "2758 Failed to allocate mbox for "
12127 "READ_FCF cmd\n");
12128 error = -ENOMEM;
12129 goto fail_fcf_read;
12130 }
12131 /* Construct the read FCF record mailbox command */
12132 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
12133 if (rc) {
12134 error = -EINVAL;
12135 goto fail_fcf_read;
11563 } 12136 }
12137 /* Issue the mailbox command asynchronously */
12138 mboxq->vport = phba->pport;
12139 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
12140 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
12141 if (rc == MBX_NOT_FINISHED)
12142 error = -EIO;
12143 else
12144 error = 0;
11564 12145
11565 /* Get the first SGE entry from the non-embedded DMA memory. This 12146fail_fcf_read:
11566 * routine only uses a single SGE. 12147 if (error && mboxq)
11567 */
11568 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
11569 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
11570 if (unlikely(!mboxq->sge_array)) {
11571 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
11572 "2527 Failed to get the non-embedded SGE "
11573 "virtual address\n");
11574 lpfc_sli4_mbox_cmd_free(phba, mboxq); 12148 lpfc_sli4_mbox_cmd_free(phba, mboxq);
12149 return error;
12150}
12151
12152/**
12153 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
12154 * @phba: pointer to lpfc hba data structure.
12155 *
 12156 * This routine gets the next eligible FCF record index in a round
12157 * robin fashion. If the next eligible FCF record index equals to the
12158 * initial round robin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
12159 * shall be returned, otherwise, the next eligible FCF record's index
12160 * shall be returned.
12161 **/
12162uint16_t
12163lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
12164{
12165 uint16_t next_fcf_index;
12166
12167 /* Search from the currently registered FCF index */
12168 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
12169 LPFC_SLI4_FCF_TBL_INDX_MAX,
12170 phba->fcf.current_rec.fcf_indx);
12171 /* Wrap around condition on phba->fcf.fcf_rr_bmask */
12172 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
12173 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
12174 LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
12175 /* Round robin failover stop condition */
12176 if (next_fcf_index == phba->fcf.fcf_rr_init_indx)
12177 return LPFC_FCOE_FCF_NEXT_NONE;
12178
12179 return next_fcf_index;
12180}
12181
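The round-robin search above leans on find_next_bit() semantics: return the first set bit at or after the offset, or the bitmap size if none, then wrap to index 0 and stop once the search comes back to the initial index. A self-contained sketch over a 64-bit bitmap (find_next_bit64() is a simplified stand-in for the kernel helper, and the constants mirror the driver's):

#include <stdint.h>
#include <stdio.h>

#define TBL_MAX   32		/* mirrors LPFC_SLI4_FCF_TBL_INDX_MAX */
#define NEXT_NONE 0xFFFF	/* mirrors LPFC_FCOE_FCF_NEXT_NONE */

/* First set bit at index >= off, or size if no bit is set. */
static unsigned find_next_bit64(uint64_t map, unsigned size, unsigned off)
{
	for (unsigned i = off; i < size; i++)
		if (map & (1ULL << i))
			return i;
	return size;
}

static unsigned next_index(uint64_t eligible, unsigned current,
			   unsigned init_indx)
{
	unsigned next = find_next_bit64(eligible, TBL_MAX, current);
	if (next >= TBL_MAX)			/* wrap around to the start */
		next = find_next_bit64(eligible, TBL_MAX, 0);
	if (next == init_indx)			/* full circle: stop */
		return NEXT_NONE;
	return next;
}

int main(void)
{
	uint64_t bmask = (1ULL << 3) | (1ULL << 9) | (1ULL << 20);
	printf("%u\n", next_index(bmask, 10, 3));	/* -> 20 */
	printf("%u\n", next_index(bmask, 21, 3));	/* wraps to init -> NEXT_NONE */
	return 0;
}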
12182/**
12183 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
12184 * @phba: pointer to lpfc hba data structure.
12185 *
 12186 * This routine sets the FCF record index into the eligible bmask for
12187 * round robin failover search. It checks to make sure that the index
12188 * does not go beyond the range of the driver allocated bmask dimension
12189 * before setting the bit.
12190 *
 12191 * Returns 0 if the index bit is successfully set; otherwise, it returns
12192 * -EINVAL.
12193 **/
12194int
12195lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
12196{
12197 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
12198 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
12199 "2610 HBA FCF index reached driver's "
 12200 "bookkeeping dimension: fcf_index:%d, "
12201 "driver_bmask_max:%d\n",
12202 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
12203 return -EINVAL;
12204 }
12205 /* Set the eligible FCF record index bmask */
12206 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
12207
12208 return 0;
12209}
12210
12211/**
 12212 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
12213 * @phba: pointer to lpfc hba data structure.
12214 *
12215 * This routine clears the FCF record index from the eligible bmask for
12216 * round robin failover search. It checks to make sure that the index
12217 * does not go beyond the range of the driver allocated bmask dimension
12218 * before clearing the bit.
12219 **/
12220void
12221lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
12222{
12223 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
12224 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
12225 "2762 HBA FCF index goes beyond driver's "
 12226 "bookkeeping dimension: fcf_index:%d, "
12227 "driver_bmask_max:%d\n",
12228 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
12229 return;
12230 }
12231 /* Clear the eligible FCF record index bmask */
12232 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
12233}
12234
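Both the set and clear routines bounds-check the index against the driver-allocated bmask dimension before touching the bit, so an out-of-range index is reported and ignored rather than scribbling past the allocation. A small standalone equivalent, with invented names:

#include <stdint.h>
#include <stdio.h>

#define TBL_MAX 32	/* the driver-allocated bmask dimension */

static int index_set(uint64_t *bmask, unsigned idx)
{
	if (idx >= TBL_MAX) {
		fprintf(stderr, "index %u beyond bmask dimension %u\n",
			idx, TBL_MAX);
		return -1;	/* -EINVAL in the driver */
	}
	*bmask |= 1ULL << idx;
	return 0;
}

static void index_clear(uint64_t *bmask, unsigned idx)
{
	if (idx >= TBL_MAX)
		return;		/* logged and ignored in the driver */
	*bmask &= ~(1ULL << idx);
}

int main(void)
{
	uint64_t bmask = 0;
	index_set(&bmask, 5);
	index_set(&bmask, 40);	/* rejected: out of range */
	index_clear(&bmask, 5);
	printf("bmask=%#llx\n", (unsigned long long)bmask);
	return 0;
}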
12235/**
12236 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
12237 * @phba: pointer to lpfc hba data structure.
12238 *
12239 * This routine is the completion routine for the rediscover FCF table mailbox
12240 * command. If the mailbox command returned failure, it will try to stop the
12241 * FCF rediscover wait timer.
12242 **/
12243void
12244lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
12245{
12246 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
12247 uint32_t shdr_status, shdr_add_status;
12248
12249 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
12250
12251 shdr_status = bf_get(lpfc_mbox_hdr_status,
12252 &redisc_fcf->header.cfg_shdr.response);
12253 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
12254 &redisc_fcf->header.cfg_shdr.response);
12255 if (shdr_status || shdr_add_status) {
12256 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
 12257 "2746 Request for FCF rediscovery failed "
12258 "status x%x add_status x%x\n",
12259 shdr_status, shdr_add_status);
12260 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
12261 spin_lock_irq(&phba->hbalock);
12262 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
12263 spin_unlock_irq(&phba->hbalock);
12264 /*
 12265 * CVL event triggered FCF rediscover request failed;
 12266 * as a last resort, retry the currently registered FCF entry.
12267 */
12268 lpfc_retry_pport_discovery(phba);
12269 } else {
12270 spin_lock_irq(&phba->hbalock);
12271 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
12272 spin_unlock_irq(&phba->hbalock);
12273 /*
 12274 * DEAD FCF event triggered FCF rediscover request
 12275 * failed; as a last resort, fail over by treating it
 12276 * as a link down for FCF registration.
12277 */
12278 lpfc_sli4_fcf_dead_failthrough(phba);
12279 }
12280 } else {
12281 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
12282 "2775 Start FCF rediscovery quiescent period "
 12283 "wait timer before scanning FCF table\n");
12284 /*
 12285 * Start the FCF rediscovery wait timer before rescanning
 12286 * the FCF record table.
12287 */
12288 lpfc_fcf_redisc_wait_start_timer(phba);
12289 }
12290
12291 mempool_free(mbox, phba->mbox_mem_pool);
12292}
12293
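The completion routine above decodes the response header with bf_get(), which expands to a plain shift-and-mask extraction of a named field. A sketch of that extraction in isolation; the field positions here are invented for illustration, not the real SLI4 response layout.

#include <stdint.h>
#include <stdio.h>

#define STATUS_SHIFT      0
#define STATUS_MASK       0xFFFF
#define ADD_STATUS_SHIFT  16
#define ADD_STATUS_MASK   0xFFFF

static uint32_t bf_get32(uint32_t word, unsigned shift, uint32_t mask)
{
	return (word >> shift) & mask;
}

int main(void)
{
	uint32_t response = 0x00020001;	/* add_status=2, status=1 */
	uint32_t status = bf_get32(response, STATUS_SHIFT, STATUS_MASK);
	uint32_t add_status = bf_get32(response, ADD_STATUS_SHIFT,
				       ADD_STATUS_MASK);
	if (status || add_status)
		fprintf(stderr, "request failed status %#x add_status %#x\n",
			status, add_status);
	return 0;
}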
12294/**
 12295 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
12296 * @phba: pointer to lpfc hba data structure.
12297 *
 12298 * This routine is invoked to request rediscovery of the entire FCF table
12299 * by the port.
12300 **/
12301int
12302lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
12303{
12304 LPFC_MBOXQ_t *mbox;
12305 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
12306 int rc, length;
12307
12308 /* Cancel retry delay timers to all vports before FCF rediscover */
12309 lpfc_cancel_all_vport_retry_delay_timer(phba);
12310
12311 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12312 if (!mbox) {
12313 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12314 "2745 Failed to allocate mbox for "
12315 "requesting FCF rediscover.\n");
11575 return -ENOMEM; 12316 return -ENOMEM;
11576 } 12317 }
11577 virt_addr = mboxq->sge_array->addr[0];
11578 read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
11579 12318
11580 /* Set up command fields */ 12319 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
11581 bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index); 12320 sizeof(struct lpfc_sli4_cfg_mhdr));
11582 /* Perform necessary endian conversion */ 12321 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
11583 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); 12322 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
11584 lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t)); 12323 length, LPFC_SLI4_MBX_EMBED);
11585 mboxq->vport = phba->pport; 12324
11586 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record; 12325 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
11587 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 12326 /* Set count to 0 for invalidating the entire FCF database */
12327 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
12328
12329 /* Issue the mailbox command asynchronously */
12330 mbox->vport = phba->pport;
12331 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
12332 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
12333
11588 if (rc == MBX_NOT_FINISHED) { 12334 if (rc == MBX_NOT_FINISHED) {
11589 lpfc_sli4_mbox_cmd_free(phba, mboxq); 12335 mempool_free(mbox, phba->mbox_mem_pool);
11590 error = -EIO; 12336 return -EIO;
11591 } else {
11592 spin_lock_irq(&phba->hbalock);
11593 phba->hba_flag |= FCF_DISC_INPROGRESS;
11594 spin_unlock_irq(&phba->hbalock);
11595 error = 0;
11596 } 12337 }
11597 return error; 12338 return 0;
12339}
12340
12341/**
12342 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
12343 * @phba: pointer to lpfc hba data structure.
12344 *
12345 * This function is the failover routine as a last resort to the FCF DEAD
12346 * event when driver failed to perform fast FCF failover.
12347 **/
12348void
12349lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
12350{
12351 uint32_t link_state;
12352
12353 /*
 12354 * As a last resort, FCF DEAD event failover treats this as
 12355 * a link down, but save the link state because we don't want
 12356 * it changed to Link Down unless it is already down.
12357 */
12358 link_state = phba->link_state;
12359 lpfc_linkdown(phba);
12360 phba->link_state = link_state;
12361
12362 /* Unregister FCF if no devices connected to it */
12363 lpfc_unregister_unused_fcf(phba);
11598} 12364}
11599 12365
11600/** 12366/**
@@ -11725,3 +12491,48 @@ out:
11725 kfree(rgn23_data); 12491 kfree(rgn23_data);
11726 return; 12492 return;
11727} 12493}
12494
12495/**
12496 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
12497 * @vport: pointer to vport data structure.
12498 *
 12499 * This function iterates through the mailboxq and cleans up all REG_LOGIN
 12500 * and REG_VPI mailbox commands associated with the vport. This function
 12501 * is called when the driver wants to restart discovery of the vport due to
12502 * a Clear Virtual Link event.
12503 **/
12504void
12505lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
12506{
12507 struct lpfc_hba *phba = vport->phba;
12508 LPFC_MBOXQ_t *mb, *nextmb;
12509 struct lpfc_dmabuf *mp;
12510
12511 spin_lock_irq(&phba->hbalock);
12512 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
12513 if (mb->vport != vport)
12514 continue;
12515
12516 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
12517 (mb->u.mb.mbxCommand != MBX_REG_VPI))
12518 continue;
12519
12520 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
12521 mp = (struct lpfc_dmabuf *) (mb->context1);
12522 if (mp) {
12523 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
12524 kfree(mp);
12525 }
12526 }
12527 list_del(&mb->list);
12528 mempool_free(mb, phba->mbox_mem_pool);
12529 }
12530 mb = phba->sli.mbox_active;
12531 if (mb && (mb->vport == vport)) {
12532 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
12533 (mb->u.mb.mbxCommand == MBX_REG_VPI))
12534 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12535 }
12536 spin_unlock_irq(&phba->hbalock);
12537}
12538
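lpfc_cleanup_pending_mbox() filters the pending queue in place under the hba lock, again with the _safe list iterator. The same filtering can be expressed in user space with the pointer-to-pointer idiom, a deliberately different but equivalent technique that removes matching entries without special-casing the head; the node type and match rule below are invented for the sketch.

#include <stdio.h>
#include <stdlib.h>

struct cmd {
	struct cmd *next;
	int vport_id;
};

static void drop_for_vport(struct cmd **head, int vport_id)
{
	struct cmd **pp = head;

	while (*pp) {
		struct cmd *c = *pp;
		if (c->vport_id != vport_id) {
			pp = &c->next;	/* keep: advance past it */
			continue;
		}
		*pp = c->next;		/* unlink and free the match */
		free(c);
	}
}

int main(void)
{
	struct cmd *head = NULL;
	for (int i = 0; i < 4; i++) {
		struct cmd *c = malloc(sizeof(*c));
		c->vport_id = i % 2;
		c->next = head;
		head = c;
	}
	drop_for_vport(&head, 1);
	while (head) {
		struct cmd *c = head;
		printf("kept vport %d\n", c->vport_id);
		head = c->next;
		free(c);
	}
	return 0;
}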
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 3c53316cf6d0..b4a639c47616 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -29,14 +29,17 @@ typedef enum _lpfc_ctx_cmd {
29 LPFC_CTX_HOST 29 LPFC_CTX_HOST
30} lpfc_ctx_cmd; 30} lpfc_ctx_cmd;
31 31
32/* This structure is used to carry the needed response IOCB states */ 32struct lpfc_cq_event {
33struct lpfc_sli4_rspiocb_info { 33 struct list_head list;
34 uint8_t hw_status; 34 union {
35 uint8_t bfield; 35 struct lpfc_mcqe mcqe_cmpl;
36#define LPFC_XB 0x1 36 struct lpfc_acqe_link acqe_link;
37#define LPFC_PV 0x2 37 struct lpfc_acqe_fcoe acqe_fcoe;
38 uint8_t priority; 38 struct lpfc_acqe_dcbx acqe_dcbx;
39 uint8_t reserved; 39 struct lpfc_rcqe rcqe_cmpl;
40 struct sli4_wcqe_xri_aborted wcqe_axri;
41 struct lpfc_wcqe_complete wcqe_cmpl;
42 } cqe;
40}; 43};
41 44
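The new struct lpfc_cq_event is the "one queue node, many completion payloads" shape: a list hook plus a union sized to the largest CQE variant, so a single work queue can carry any completion kind. A toy version of that layout, with placeholder payload types and sizes:

#include <stdint.h>
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

struct mcqe { uint32_t w[4]; };
struct rcqe { uint32_t w[4]; };
struct wcqe { uint32_t w[4]; };

struct cq_event {
	struct list_head list;	/* lets one work queue carry any kind */
	union {
		struct mcqe mcqe_cmpl;
		struct rcqe rcqe_cmpl;
		struct wcqe wcqe_cmpl;
	} cqe;			/* sized to the largest variant */
};

int main(void)
{
	printf("node size: %zu bytes\n", sizeof(struct cq_event));
	return 0;
}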
42/* This structure is used to handle IOCB requests / responses */ 45/* This structure is used to handle IOCB requests / responses */
@@ -46,21 +49,27 @@ struct lpfc_iocbq {
46 struct list_head clist; 49 struct list_head clist;
47 uint16_t iotag; /* pre-assigned IO tag */ 50 uint16_t iotag; /* pre-assigned IO tag */
48 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */ 51 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
52 struct lpfc_cq_event cq_event;
49 53
50 IOCB_t iocb; /* IOCB cmd */ 54 IOCB_t iocb; /* IOCB cmd */
51 uint8_t retry; /* retry counter for IOCB cmd - if needed */ 55 uint8_t retry; /* retry counter for IOCB cmd - if needed */
52 uint8_t iocb_flag; 56 uint16_t iocb_flag;
53#define LPFC_IO_LIBDFC 1 /* libdfc iocb */ 57#define LPFC_IO_LIBDFC 1 /* libdfc iocb */
54#define LPFC_IO_WAKE 2 /* High Priority Queue signal flag */ 58#define LPFC_IO_WAKE 2 /* High Priority Queue signal flag */
55#define LPFC_IO_FCP 4 /* FCP command -- iocbq in scsi_buf */ 59#define LPFC_IO_FCP 4 /* FCP command -- iocbq in scsi_buf */
56#define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */ 60#define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */
57#define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */ 61#define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */
58#define LPFC_DELAY_MEM_FREE 0x20 /* Defer free'ing of FC data */ 62#define LPFC_DELAY_MEM_FREE 0x20 /* Defer free'ing of FC data */
59#define LPFC_FIP_ELS 0x40 63#define LPFC_EXCHANGE_BUSY 0x40 /* SLI4 hba reported XB in response */
64#define LPFC_USE_FCPWQIDX 0x80 /* Submit to specified FCPWQ index */
65#define DSS_SECURITY_OP 0x100 /* security IO */
66
67#define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */
68#define LPFC_FIP_ELS_ID_SHIFT 14
60 69
61 uint8_t abort_count;
62 uint8_t rsvd2; 70 uint8_t rsvd2;
63 uint32_t drvrTimeout; /* driver timeout in seconds */ 71 uint32_t drvrTimeout; /* driver timeout in seconds */
72 uint32_t fcp_wqidx; /* index to FCP work queue */
64 struct lpfc_vport *vport;/* virtual port pointer */ 73 struct lpfc_vport *vport;/* virtual port pointer */
65 void *context1; /* caller context information */ 74 void *context1; /* caller context information */
66 void *context2; /* caller context information */ 75 void *context2; /* caller context information */
@@ -76,7 +85,6 @@ struct lpfc_iocbq {
76 struct lpfc_iocbq *); 85 struct lpfc_iocbq *);
77 void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, 86 void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
78 struct lpfc_iocbq *); 87 struct lpfc_iocbq *);
79 struct lpfc_sli4_rspiocb_info sli4_info;
80}; 88};
81 89
82#define SLI_IOCB_RET_IOCB 1 /* Return IOCB if cmd ring full */ 90#define SLI_IOCB_RET_IOCB 1 /* Return IOCB if cmd ring full */
@@ -110,7 +118,7 @@ typedef struct lpfcMboxq {
110 return */ 118 return */
111#define MBX_NOWAIT 2 /* issue command then return immediately */ 119#define MBX_NOWAIT 2 /* issue command then return immediately */
112 120
113#define LPFC_MAX_RING_MASK 4 /* max num of rctl/type masks allowed per 121#define LPFC_MAX_RING_MASK 5 /* max num of rctl/type masks allowed per
114 ring */ 122 ring */
115#define LPFC_MAX_RING 4 /* max num of SLI rings used by driver */ 123#define LPFC_MAX_RING 4 /* max num of SLI rings used by driver */
116 124
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index b5f4ba1a5c27..4a35e7b9bc5b 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -22,13 +22,17 @@
22#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32 22#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32
23#define LPFC_GET_QE_REL_INT 32 23#define LPFC_GET_QE_REL_INT 32
24#define LPFC_RPI_LOW_WATER_MARK 10 24#define LPFC_RPI_LOW_WATER_MARK 10
25
26/* Amount of time in milliseconds to wait for FCF rediscovery to complete */
27#define LPFC_FCF_REDISCOVER_WAIT_TMO 2000 /* msec */
28
25/* Number of SGL entries can be posted in a 4KB nonembedded mbox command */ 29/* Number of SGL entries can be posted in a 4KB nonembedded mbox command */
26#define LPFC_NEMBED_MBOX_SGL_CNT 254 30#define LPFC_NEMBED_MBOX_SGL_CNT 254
27 31
28/* Multi-queue arrangement for fast-path FCP work queues */ 32/* Multi-queue arrangement for fast-path FCP work queues */
29#define LPFC_FN_EQN_MAX 8 33#define LPFC_FN_EQN_MAX 8
30#define LPFC_SP_EQN_DEF 1 34#define LPFC_SP_EQN_DEF 1
31#define LPFC_FP_EQN_DEF 1 35#define LPFC_FP_EQN_DEF 4
32#define LPFC_FP_EQN_MIN 1 36#define LPFC_FP_EQN_MIN 1
33#define LPFC_FP_EQN_MAX (LPFC_FN_EQN_MAX - LPFC_SP_EQN_DEF) 37#define LPFC_FP_EQN_MAX (LPFC_FN_EQN_MAX - LPFC_SP_EQN_DEF)
34 38
@@ -58,6 +62,16 @@
58#define LPFC_FCOE_FKA_ADV_PER 0 62#define LPFC_FCOE_FKA_ADV_PER 0
59#define LPFC_FCOE_FIP_PRIORITY 0x80 63#define LPFC_FCOE_FIP_PRIORITY 0x80
60 64
65#define sli4_sid_from_fc_hdr(fc_hdr) \
66 ((fc_hdr)->fh_s_id[0] << 16 | \
67 (fc_hdr)->fh_s_id[1] << 8 | \
68 (fc_hdr)->fh_s_id[2])
69
70#define sli4_fctl_from_fc_hdr(fc_hdr) \
71 ((fc_hdr)->fh_f_ctl[0] << 16 | \
72 (fc_hdr)->fh_f_ctl[1] << 8 | \
73 (fc_hdr)->fh_f_ctl[2])
74
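The two macros above assemble a 24-bit FC field from three header bytes. The same expression in function form, with a toy header struct holding only the fields the sketch needs:

#include <stdint.h>
#include <stdio.h>

struct fc_hdr { uint8_t fh_s_id[3]; uint8_t fh_f_ctl[3]; };

static uint32_t sid_from_hdr(const struct fc_hdr *h)
{
	return (uint32_t)h->fh_s_id[0] << 16 |
	       (uint32_t)h->fh_s_id[1] << 8  |
	       (uint32_t)h->fh_s_id[2];
}

int main(void)
{
	struct fc_hdr h = { { 0x01, 0x02, 0x03 }, { 0 } };
	printf("S_ID = %#06x\n", sid_from_hdr(&h));	/* 0x010203 */
	return 0;
}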
61enum lpfc_sli4_queue_type { 75enum lpfc_sli4_queue_type {
62 LPFC_EQ, 76 LPFC_EQ,
63 LPFC_GCQ, 77 LPFC_GCQ,
@@ -110,44 +124,56 @@ struct lpfc_queue {
110 union sli4_qe qe[1]; /* array to index entries (must be last) */ 124 union sli4_qe qe[1]; /* array to index entries (must be last) */
111}; 125};
112 126
113struct lpfc_cq_event {
114 struct list_head list;
115 union {
116 struct lpfc_mcqe mcqe_cmpl;
117 struct lpfc_acqe_link acqe_link;
118 struct lpfc_acqe_fcoe acqe_fcoe;
119 struct lpfc_acqe_dcbx acqe_dcbx;
120 struct lpfc_rcqe rcqe_cmpl;
121 struct sli4_wcqe_xri_aborted wcqe_axri;
122 } cqe;
123};
124
125struct lpfc_sli4_link { 127struct lpfc_sli4_link {
126 uint8_t speed; 128 uint8_t speed;
127 uint8_t duplex; 129 uint8_t duplex;
128 uint8_t status; 130 uint8_t status;
129 uint8_t physical; 131 uint8_t physical;
130 uint8_t fault; 132 uint8_t fault;
133 uint16_t logical_speed;
131}; 134};
132 135
133struct lpfc_fcf { 136struct lpfc_fcf_rec {
134 uint8_t fabric_name[8]; 137 uint8_t fabric_name[8];
135 uint8_t switch_name[8]; 138 uint8_t switch_name[8];
136 uint8_t mac_addr[6]; 139 uint8_t mac_addr[6];
137 uint16_t fcf_indx; 140 uint16_t fcf_indx;
141 uint32_t priority;
142 uint16_t vlan_id;
143 uint32_t addr_mode;
144 uint32_t flag;
145#define BOOT_ENABLE 0x01
146#define RECORD_VALID 0x02
147};
148
149struct lpfc_fcf {
138 uint16_t fcfi; 150 uint16_t fcfi;
139 uint32_t fcf_flag; 151 uint32_t fcf_flag;
140#define FCF_AVAILABLE 0x01 /* FCF available for discovery */ 152#define FCF_AVAILABLE 0x01 /* FCF available for discovery */
141#define FCF_REGISTERED 0x02 /* FCF registered with FW */ 153#define FCF_REGISTERED 0x02 /* FCF registered with FW */
143#define FCF_BOOT_ENABLE 0x08 /* Boot bios use this FCF */ 155#define FCF_IN_USE 0x08 /* At least one discovery completed */
144#define FCF_IN_USE 0x10 /* At least one discovery completed */ 156#define FCF_INIT_DISC 0x10 /* Initial FCF discovery */
144#define FCF_IN_USE 0x10 /* Atleast one discovery completed */ 156#define FCF_INIT_DISC 0x10 /* Initial FCF discovery */
145#define FCF_VALID_VLAN 0x20 /* Use the vlan id specified */ 157#define FCF_DEAD_DISC 0x20 /* FCF DEAD fast FCF failover discovery */
146 uint32_t priority; 158#define FCF_ACVL_DISC 0x40 /* All CVL fast FCF failover discovery */
159#define FCF_DISCOVERY (FCF_INIT_DISC | FCF_DEAD_DISC | FCF_ACVL_DISC)
160#define FCF_REDISC_PEND 0x80 /* FCF rediscovery pending */
161#define FCF_REDISC_EVT 0x100 /* FCF rediscovery event to worker thread */
162#define FCF_REDISC_FOV 0x200 /* Post FCF rediscovery fast failover */
147 uint32_t addr_mode; 163 uint32_t addr_mode;
148 uint16_t vlan_id; 164 uint16_t fcf_rr_init_indx;
165 struct lpfc_fcf_rec current_rec;
166 struct lpfc_fcf_rec failover_rec;
167 struct timer_list redisc_wait;
168 unsigned long *fcf_rr_bmask; /* Eligible FCF indexes for RR failover */
149}; 169};
150 170
171/*
172 * Maximum FCF table index; it is for driver-internal bookkeeping and
173 * just needs to be no less than the supported HBA's FCF table size.
174 */
175#define LPFC_SLI4_FCF_TBL_INDX_MAX 32
176
151#define LPFC_REGION23_SIGNATURE "RG23" 177#define LPFC_REGION23_SIGNATURE "RG23"
152#define LPFC_REGION23_VERSION 1 178#define LPFC_REGION23_VERSION 1
153#define LPFC_REGION23_LAST_REC 0xff 179#define LPFC_REGION23_LAST_REC 0xff
@@ -166,7 +192,7 @@ struct lpfc_fip_param_hdr {
166#define lpfc_fip_param_hdr_fipp_mode_SHIFT 6 192#define lpfc_fip_param_hdr_fipp_mode_SHIFT 6
167#define lpfc_fip_param_hdr_fipp_mode_MASK 0x3 193#define lpfc_fip_param_hdr_fipp_mode_MASK 0x3
168#define lpfc_fip_param_hdr_fipp_mode_WORD parm_flags 194#define lpfc_fip_param_hdr_fipp_mode_WORD parm_flags
169#define FIPP_MODE_ON 0x2 195#define FIPP_MODE_ON 0x1
170#define FIPP_MODE_OFF 0x0 196#define FIPP_MODE_OFF 0x0
171#define FIPP_VLAN_VALID 0x1 197#define FIPP_VLAN_VALID 0x1
172}; 198};
@@ -250,7 +276,10 @@ struct lpfc_bmbx {
250#define SLI4_CT_VFI 2 276#define SLI4_CT_VFI 2
251#define SLI4_CT_FCFI 3 277#define SLI4_CT_FCFI 3
252 278
253#define LPFC_SLI4_MAX_SEGMENT_SIZE 0x10000 279#define LPFC_SLI4_FL1_MAX_SEGMENT_SIZE 0x10000
280#define LPFC_SLI4_FL1_MAX_BUF_SIZE 0X2000
281#define LPFC_SLI4_MIN_BUF_SIZE 0x400
282#define LPFC_SLI4_MAX_BUF_SIZE 0x20000
254 283
255/* 284/*
256 * SLI4 specific data structures 285 * SLI4 specific data structures
@@ -284,6 +313,42 @@ struct lpfc_fcp_eq_hdl {
284 struct lpfc_hba *phba; 313 struct lpfc_hba *phba;
285}; 314};
286 315
316/* Port Capabilities for SLI4 Parameters */
317struct lpfc_pc_sli4_params {
318 uint32_t supported;
319 uint32_t if_type;
320 uint32_t sli_rev;
321 uint32_t sli_family;
322 uint32_t featurelevel_1;
323 uint32_t featurelevel_2;
324 uint32_t proto_types;
325#define LPFC_SLI4_PROTO_FCOE 0x0000001
326#define LPFC_SLI4_PROTO_FC 0x0000002
327#define LPFC_SLI4_PROTO_NIC 0x0000004
328#define LPFC_SLI4_PROTO_ISCSI 0x0000008
329#define LPFC_SLI4_PROTO_RDMA 0x0000010
330 uint32_t sge_supp_len;
331 uint32_t if_page_sz;
332 uint32_t rq_db_window;
333 uint32_t loopbk_scope;
334 uint32_t eq_pages_max;
335 uint32_t eqe_size;
336 uint32_t cq_pages_max;
337 uint32_t cqe_size;
338 uint32_t mq_pages_max;
339 uint32_t mqe_size;
340 uint32_t mq_elem_cnt;
341 uint32_t wq_pages_max;
342 uint32_t wqe_size;
343 uint32_t rq_pages_max;
344 uint32_t rqe_size;
345 uint32_t hdr_pages_max;
346 uint32_t hdr_size;
347 uint32_t hdr_pp_align;
348 uint32_t sgl_pages_max;
349 uint32_t sgl_pp_align;
350};
351
287/* SLI4 HBA data structure entries */ 352/* SLI4 HBA data structure entries */
288struct lpfc_sli4_hba { 353struct lpfc_sli4_hba {
289 void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for 354 void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
@@ -295,10 +360,9 @@ struct lpfc_sli4_hba {
295 /* BAR0 PCI config space register memory map */ 360 /* BAR0 PCI config space register memory map */
296 void __iomem *UERRLOregaddr; /* Address to UERR_STATUS_LO register */ 361 void __iomem *UERRLOregaddr; /* Address to UERR_STATUS_LO register */
297 void __iomem *UERRHIregaddr; /* Address to UERR_STATUS_HI register */ 362 void __iomem *UERRHIregaddr; /* Address to UERR_STATUS_HI register */
298 void __iomem *ONLINE0regaddr; /* Address to components of internal UE */ 363 void __iomem *UEMASKLOregaddr; /* Address to UE_MASK_LO register */
299 void __iomem *ONLINE1regaddr; /* Address to components of internal UE */ 364 void __iomem *UEMASKHIregaddr; /* Address to UE_MASK_HI register */
300#define LPFC_ONLINE_NERR 0xFFFFFFFF 365 void __iomem *SLIINTFregaddr; /* Address to SLI_INTF register */
301 void __iomem *SCRATCHPADregaddr; /* Address to scratchpad register */
302 /* BAR1 FCoE function CSR register memory map */ 366 /* BAR1 FCoE function CSR register memory map */
303 void __iomem *STAregaddr; /* Address to HST_STATE register */ 367 void __iomem *STAregaddr; /* Address to HST_STATE register */
304 void __iomem *ISRregaddr; /* Address to HST_ISR register */ 368 void __iomem *ISRregaddr; /* Address to HST_ISR register */
@@ -311,6 +375,10 @@ struct lpfc_sli4_hba {
311 void __iomem *MQDBregaddr; /* Address to MQ_DOORBELL register */ 375 void __iomem *MQDBregaddr; /* Address to MQ_DOORBELL register */
312 void __iomem *BMBXregaddr; /* Address to BootStrap MBX register */ 376 void __iomem *BMBXregaddr; /* Address to BootStrap MBX register */
313 377
378 uint32_t ue_mask_lo;
379 uint32_t ue_mask_hi;
380 struct lpfc_register sli_intf;
381 struct lpfc_pc_sli4_params pc_sli4_params;
314 struct msix_entry *msix_entries; 382 struct msix_entry *msix_entries;
315 uint32_t cfg_eqn; 383 uint32_t cfg_eqn;
316 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */ 384 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */
@@ -325,7 +393,6 @@ struct lpfc_sli4_hba {
325 struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */ 393 struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */
326 struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */ 394 struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */
327 struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */ 395 struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
328 struct lpfc_queue *rxq_cq; /* Slow-path unsolicited complete queue */
329 396
330 /* Setup information for various queue parameters */ 397 /* Setup information for various queue parameters */
331 int eq_esize; 398 int eq_esize;
@@ -360,7 +427,7 @@ struct lpfc_sli4_hba {
360 unsigned long *rpi_bmask; 427 unsigned long *rpi_bmask;
361 uint16_t rpi_count; 428 uint16_t rpi_count;
362 struct lpfc_sli4_flags sli4_flags; 429 struct lpfc_sli4_flags sli4_flags;
363 struct list_head sp_rspiocb_work_queue; 430 struct list_head sp_queue_event;
364 struct list_head sp_cqe_event_pool; 431 struct list_head sp_cqe_event_pool;
365 struct list_head sp_asynce_work_queue; 432 struct list_head sp_asynce_work_queue;
366 struct list_head sp_fcp_xri_aborted_work_queue; 433 struct list_head sp_fcp_xri_aborted_work_queue;
@@ -376,11 +443,18 @@ enum lpfc_sge_type {
376 SCSI_BUFF_TYPE 443 SCSI_BUFF_TYPE
377}; 444};
378 445
446enum lpfc_sgl_state {
447 SGL_FREED,
448 SGL_ALLOCATED,
449 SGL_XRI_ABORTED
450};
451
379struct lpfc_sglq { 452struct lpfc_sglq {
380 /* lpfc_sglqs are used in double linked lists */ 453 /* lpfc_sglqs are used in double linked lists */
381 struct list_head list; 454 struct list_head list;
382 struct list_head clist; 455 struct list_head clist;
383 enum lpfc_sge_type buff_type; /* is this a scsi sgl */ 456 enum lpfc_sge_type buff_type; /* is this a scsi sgl */
457 enum lpfc_sgl_state state;
384 uint16_t iotag; /* pre-assigned IO tag */ 458 uint16_t iotag; /* pre-assigned IO tag */
385 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */ 459 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
386 struct sli4_sge *sgl; /* pre-assigned SGL */ 460 struct sli4_sge *sgl; /* pre-assigned SGL */
@@ -408,6 +482,8 @@ void lpfc_sli4_mbox_cmd_free(struct lpfc_hba *, struct lpfcMboxq *);
408void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t); 482void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t);
409void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t, 483void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t,
410 struct lpfc_mbx_sge *); 484 struct lpfc_mbx_sge *);
485int lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *, struct lpfcMboxq *,
486 uint16_t);
411 487
412void lpfc_sli4_hba_reset(struct lpfc_hba *); 488void lpfc_sli4_hba_reset(struct lpfc_hba *);
413struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t, 489struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
@@ -450,6 +526,7 @@ int lpfc_sli4_alloc_rpi(struct lpfc_hba *);
450void lpfc_sli4_free_rpi(struct lpfc_hba *, int); 526void lpfc_sli4_free_rpi(struct lpfc_hba *, int);
451void lpfc_sli4_remove_rpis(struct lpfc_hba *); 527void lpfc_sli4_remove_rpis(struct lpfc_hba *);
452void lpfc_sli4_async_event_proc(struct lpfc_hba *); 528void lpfc_sli4_async_event_proc(struct lpfc_hba *);
529void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *);
453int lpfc_sli4_resume_rpi(struct lpfc_nodelist *); 530int lpfc_sli4_resume_rpi(struct lpfc_nodelist *);
454void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *); 531void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
455void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *); 532void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
@@ -465,8 +542,13 @@ int lpfc_sli4_init_vpi(struct lpfc_hba *, uint16_t);
465uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool); 542uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool);
466uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool); 543uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool);
467void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t); 544void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t);
468int lpfc_sli4_read_fcf_record(struct lpfc_hba *, uint16_t); 545int lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *, uint16_t);
469void lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *, LPFC_MBOXQ_t *); 546int lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *, uint16_t);
547int lpfc_sli4_read_fcf_rec(struct lpfc_hba *, uint16_t);
548void lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
549void lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
550void lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
551int lpfc_sli4_unregister_fcf(struct lpfc_hba *);
470int lpfc_sli4_post_status_check(struct lpfc_hba *); 552int lpfc_sli4_post_status_check(struct lpfc_hba *);
471uint8_t lpfc_sli4_mbox_opcode_get(struct lpfc_hba *, struct lpfcMboxq *); 553uint8_t lpfc_sli4_mbox_opcode_get(struct lpfc_hba *, struct lpfcMboxq *);
472 554
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 9ae20af4bdb7..013deec5dae8 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2009 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2010 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -18,8 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.4" 21#define LPFC_DRIVER_VERSION "8.3.10"
22
23#define LPFC_DRIVER_NAME "lpfc" 22#define LPFC_DRIVER_NAME "lpfc"
24#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" 23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
25#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" 24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 606efa767548..ffd575c379f3 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -26,6 +26,7 @@
26#include <linux/interrupt.h> 26#include <linux/interrupt.h>
27#include <linux/kthread.h> 27#include <linux/kthread.h>
28#include <linux/pci.h> 28#include <linux/pci.h>
29#include <linux/slab.h>
29#include <linux/spinlock.h> 30#include <linux/spinlock.h>
30 31
31#include <scsi/scsi.h> 32#include <scsi/scsi.h>
@@ -123,7 +124,12 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
123 } 124 }
124 mb = &pmb->u.mb; 125 mb = &pmb->u.mb;
125 126
126 lpfc_read_sparam(phba, pmb, vport->vpi); 127 rc = lpfc_read_sparam(phba, pmb, vport->vpi);
128 if (rc) {
129 mempool_free(pmb, phba->mbox_mem_pool);
130 return -ENOMEM;
131 }
132
127 /* 133 /*
128 * Grab buffer pointer and clear context1 so we can use 134 * Grab buffer pointer and clear context1 so we can use
129 * lpfc_sli_issue_box_wait 135 * lpfc_sli_issue_box_wait
@@ -389,7 +395,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
389 * by the port. 395 * by the port.
390 */ 396 */
391 if ((phba->sli_rev == LPFC_SLI_REV4) && 397 if ((phba->sli_rev == LPFC_SLI_REV4) &&
392 (pport->vfi_state & LPFC_VFI_REGISTERED)) { 398 (pport->fc_flag & FC_VFI_REGISTERED)) {
393 rc = lpfc_sli4_init_vpi(phba, vpi); 399 rc = lpfc_sli4_init_vpi(phba, vpi);
394 if (rc) { 400 if (rc) {
395 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, 401 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
@@ -505,6 +511,7 @@ enable_vport(struct fc_vport *fc_vport)
505 struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data; 511 struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
506 struct lpfc_hba *phba = vport->phba; 512 struct lpfc_hba *phba = vport->phba;
507 struct lpfc_nodelist *ndlp = NULL; 513 struct lpfc_nodelist *ndlp = NULL;
514 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
508 515
509 if ((phba->link_state < LPFC_LINK_UP) || 516 if ((phba->link_state < LPFC_LINK_UP) ||
510 (phba->fc_topology == TOPOLOGY_LOOP)) { 517 (phba->fc_topology == TOPOLOGY_LOOP)) {
@@ -512,8 +519,10 @@ enable_vport(struct fc_vport *fc_vport)
512 return VPORT_OK; 519 return VPORT_OK;
513 } 520 }
514 521
522 spin_lock_irq(shost->host_lock);
515 vport->load_flag |= FC_LOADING; 523 vport->load_flag |= FC_LOADING;
516 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 524 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
525 spin_unlock_irq(shost->host_lock);
517 526
518 /* Use the Physical nodes Fabric NDLP to determine if the link is 527 /* Use the Physical nodes Fabric NDLP to determine if the link is
519 * up and ready to FDISC. 528 * up and ready to FDISC.
@@ -700,6 +709,8 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
700 } 709 }
701 spin_unlock_irq(&phba->ndlp_lock); 710 spin_unlock_irq(&phba->ndlp_lock);
702 } 711 }
712 if (!(vport->vpi_state & LPFC_VPI_REGISTERED))
713 goto skip_logo;
703 vport->unreg_vpi_cmpl = VPORT_INVAL; 714 vport->unreg_vpi_cmpl = VPORT_INVAL;
704 timeout = msecs_to_jiffies(phba->fc_ratov * 2000); 715 timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
705 if (!lpfc_issue_els_npiv_logo(vport, ndlp)) 716 if (!lpfc_issue_els_npiv_logo(vport, ndlp))
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
index c24e86f07804..3893337e3dd3 100644
--- a/drivers/scsi/mac_esp.c
+++ b/drivers/scsi/mac_esp.c
@@ -19,10 +19,10 @@
19#include <linux/delay.h> 19#include <linux/delay.h>
20#include <linux/io.h> 20#include <linux/io.h>
21#include <linux/nubus.h> 21#include <linux/nubus.h>
22#include <linux/slab.h>
22 23
23#include <asm/irq.h> 24#include <asm/irq.h>
24#include <asm/dma.h> 25#include <asm/dma.h>
25
26#include <asm/macints.h> 26#include <asm/macints.h>
27#include <asm/macintosh.h> 27#include <asm/macintosh.h>
28 28
@@ -53,7 +53,6 @@ struct mac_esp_priv {
53 void __iomem *pdma_io; 53 void __iomem *pdma_io;
54 int error; 54 int error;
55}; 55};
56static struct platform_device *internal_pdev, *external_pdev;
57static struct esp *esp_chips[2]; 56static struct esp *esp_chips[2];
58 57
59#define MAC_ESP_GET_PRIV(esp) ((struct mac_esp_priv *) \ 58#define MAC_ESP_GET_PRIV(esp) ((struct mac_esp_priv *) \
@@ -279,24 +278,27 @@ static void mac_esp_send_pdma_cmd(struct esp *esp, u32 addr, u32 esp_count,
279 * Programmed IO routines follow. 278 * Programmed IO routines follow.
280 */ 279 */
281 280
282static inline int mac_esp_wait_for_fifo(struct esp *esp) 281static inline unsigned int mac_esp_wait_for_fifo(struct esp *esp)
283{ 282{
284 int i = 500000; 283 int i = 500000;
285 284
286 do { 285 do {
287 if (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) 286 unsigned int fbytes = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
288 return 0; 287
288 if (fbytes)
289 return fbytes;
289 290
290 udelay(2); 291 udelay(2);
291 } while (--i); 292 } while (--i);
292 293
293 printk(KERN_ERR PFX "FIFO is empty (sreg %02x)\n", 294 printk(KERN_ERR PFX "FIFO is empty (sreg %02x)\n",
294 esp_read8(ESP_STATUS)); 295 esp_read8(ESP_STATUS));
295 return 1; 296 return 0;
296} 297}
297 298
298static inline int mac_esp_wait_for_intr(struct esp *esp) 299static inline int mac_esp_wait_for_intr(struct esp *esp)
299{ 300{
301 struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
300 int i = 500000; 302 int i = 500000;
301 303
302 do { 304 do {
@@ -308,6 +310,7 @@ static inline int mac_esp_wait_for_intr(struct esp *esp)
308 } while (--i); 310 } while (--i);
309 311
310 printk(KERN_ERR PFX "IRQ timeout (sreg %02x)\n", esp->sreg); 312 printk(KERN_ERR PFX "IRQ timeout (sreg %02x)\n", esp->sreg);
313 mep->error = 1;
311 return 1; 314 return 1;
312} 315}
313 316
@@ -347,11 +350,10 @@ static inline int mac_esp_wait_for_intr(struct esp *esp)
347static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count, 350static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
348 u32 dma_count, int write, u8 cmd) 351 u32 dma_count, int write, u8 cmd)
349{ 352{
350 unsigned long flags;
351 struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp); 353 struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
352 u8 *fifo = esp->regs + ESP_FDATA * 16; 354 u8 *fifo = esp->regs + ESP_FDATA * 16;
353 355
354 local_irq_save(flags); 356 disable_irq(esp->host->irq);
355 357
356 cmd &= ~ESP_CMD_DMA; 358 cmd &= ~ESP_CMD_DMA;
357 mep->error = 0; 359 mep->error = 0;
@@ -359,11 +361,35 @@ static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
359 if (write) { 361 if (write) {
360 scsi_esp_cmd(esp, cmd); 362 scsi_esp_cmd(esp, cmd);
361 363
362 if (!mac_esp_wait_for_intr(esp)) { 364 while (1) {
363 if (mac_esp_wait_for_fifo(esp)) 365 unsigned int n;
364 esp_count = 0; 366
365 } else { 367 n = mac_esp_wait_for_fifo(esp);
366 esp_count = 0; 368 if (!n)
369 break;
370
371 if (n > esp_count)
372 n = esp_count;
373 esp_count -= n;
374
375 MAC_ESP_PIO_LOOP("%2@,%0@+", n);
376
377 if (!esp_count)
378 break;
379
380 if (mac_esp_wait_for_intr(esp))
381 break;
382
383 if (((esp->sreg & ESP_STAT_PMASK) != ESP_DIP) &&
384 ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP))
385 break;
386
387 esp->ireg = esp_read8(ESP_INTRPT);
388 if ((esp->ireg & (ESP_INTR_DC | ESP_INTR_BSERV)) !=
389 ESP_INTR_BSERV)
390 break;
391
392 scsi_esp_cmd(esp, ESP_CMD_TI);
367 } 393 }
368 } else { 394 } else {
369 scsi_esp_cmd(esp, ESP_CMD_FLUSH); 395 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
@@ -374,47 +400,24 @@ static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
374 MAC_ESP_PIO_LOOP("%0@+,%2@", esp_count); 400 MAC_ESP_PIO_LOOP("%0@+,%2@", esp_count);
375 401
376 scsi_esp_cmd(esp, cmd); 402 scsi_esp_cmd(esp, cmd);
377 }
378
379 while (esp_count) {
380 unsigned int n;
381 403
382 if (mac_esp_wait_for_intr(esp)) { 404 while (esp_count) {
383 mep->error = 1; 405 unsigned int n;
384 break;
385 }
386
387 if (esp->sreg & ESP_STAT_SPAM) {
388 printk(KERN_ERR PFX "gross error\n");
389 mep->error = 1;
390 break;
391 }
392
393 n = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
394
395 if (write) {
396 if (n > esp_count)
397 n = esp_count;
398 esp_count -= n;
399
400 MAC_ESP_PIO_LOOP("%2@,%0@+", n);
401 406
402 if ((esp->sreg & ESP_STAT_PMASK) == ESP_STATP) 407 if (mac_esp_wait_for_intr(esp))
403 break; 408 break;
404 409
405 if (esp_count) { 410 if (((esp->sreg & ESP_STAT_PMASK) != ESP_DOP) &&
406 esp->ireg = esp_read8(ESP_INTRPT); 411 ((esp->sreg & ESP_STAT_PMASK) != ESP_MOP))
407 if (esp->ireg & ESP_INTR_DC) 412 break;
408 break;
409 413
410 scsi_esp_cmd(esp, ESP_CMD_TI);
411 }
412 } else {
413 esp->ireg = esp_read8(ESP_INTRPT); 414 esp->ireg = esp_read8(ESP_INTRPT);
414 if (esp->ireg & ESP_INTR_DC) 415 if ((esp->ireg & (ESP_INTR_DC | ESP_INTR_BSERV)) !=
416 ESP_INTR_BSERV)
415 break; 417 break;
416 418
417 n = MAC_ESP_FIFO_SIZE - n; 419 n = MAC_ESP_FIFO_SIZE -
420 (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES);
418 if (n > esp_count) 421 if (n > esp_count)
419 n = esp_count; 422 n = esp_count;
420 423
@@ -429,7 +432,7 @@ static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
429 } 432 }
430 } 433 }
431 434
432 local_irq_restore(flags); 435 enable_irq(esp->host->irq);
433} 436}
434 437
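The rewritten PIO read loop waits for FIFO bytes, clamps each burst to what is still expected, and stops on timeout or when the count is exhausted. A standalone sketch of that bounded drain, where fifo_wait() is a stand-in for mac_esp_wait_for_fifo() as changed above (it now returns the byte count, 0 meaning timeout, instead of a boolean error):

#include <stdio.h>

static unsigned fake_fifo = 16;		/* pretend hardware FIFO depth */

static unsigned fifo_wait(void)
{
	unsigned n = fake_fifo > 8 ? 8 : fake_fifo;
	return n;			/* 0 would mean a FIFO timeout */
}

static void pio_read(unsigned esp_count)
{
	while (1) {
		unsigned n = fifo_wait();
		if (!n)
			break;		/* FIFO never filled: timeout */
		if (n > esp_count)
			n = esp_count;	/* never read past the request */
		esp_count -= n;
		fake_fifo -= n;		/* stand-in for the PIO copy loop */
		if (!esp_count)
			break;		/* request satisfied */
	}
	printf("remaining: %u\n", esp_count);
}

int main(void)
{
	pio_read(12);
	return 0;
}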
435static int mac_esp_irq_pending(struct esp *esp) 438static int mac_esp_irq_pending(struct esp *esp)
@@ -492,29 +495,12 @@ static int __devinit esp_mac_probe(struct platform_device *dev)
492 struct Scsi_Host *host; 495 struct Scsi_Host *host;
493 struct esp *esp; 496 struct esp *esp;
494 int err; 497 int err;
495 int chips_present;
496 struct mac_esp_priv *mep; 498 struct mac_esp_priv *mep;
497 499
498 if (!MACH_IS_MAC) 500 if (!MACH_IS_MAC)
499 return -ENODEV; 501 return -ENODEV;
500 502
501 switch (macintosh_config->scsi_type) { 503 if (dev->id > 1)
502 case MAC_SCSI_QUADRA:
503 case MAC_SCSI_QUADRA3:
504 chips_present = 1;
505 break;
506 case MAC_SCSI_QUADRA2:
507 if ((macintosh_config->ident == MAC_MODEL_Q900) ||
508 (macintosh_config->ident == MAC_MODEL_Q950))
509 chips_present = 2;
510 else
511 chips_present = 1;
512 break;
513 default:
514 chips_present = 0;
515 }
516
517 if (dev->id + 1 > chips_present)
518 return -ENODEV; 504 return -ENODEV;
519 505
520 host = scsi_host_alloc(tpnt, sizeof(struct esp)); 506 host = scsi_host_alloc(tpnt, sizeof(struct esp));
@@ -639,55 +625,26 @@ static struct platform_driver esp_mac_driver = {
639 .probe = esp_mac_probe, 625 .probe = esp_mac_probe,
640 .remove = __devexit_p(esp_mac_remove), 626 .remove = __devexit_p(esp_mac_remove),
641 .driver = { 627 .driver = {
642 .name = DRV_MODULE_NAME, 628 .name = DRV_MODULE_NAME,
629 .owner = THIS_MODULE,
643 }, 630 },
644}; 631};
645 632
646static int __init mac_esp_init(void) 633static int __init mac_esp_init(void)
647{ 634{
648 int err; 635 return platform_driver_register(&esp_mac_driver);
649
650 err = platform_driver_register(&esp_mac_driver);
651 if (err)
652 return err;
653
654 internal_pdev = platform_device_alloc(DRV_MODULE_NAME, 0);
655 if (internal_pdev && platform_device_add(internal_pdev)) {
656 platform_device_put(internal_pdev);
657 internal_pdev = NULL;
658 }
659 external_pdev = platform_device_alloc(DRV_MODULE_NAME, 1);
660 if (external_pdev && platform_device_add(external_pdev)) {
661 platform_device_put(external_pdev);
662 external_pdev = NULL;
663 }
664
665 if (internal_pdev || external_pdev) {
666 return 0;
667 } else {
668 platform_driver_unregister(&esp_mac_driver);
669 return -ENOMEM;
670 }
671} 636}
672 637
673static void __exit mac_esp_exit(void) 638static void __exit mac_esp_exit(void)
674{ 639{
675 platform_driver_unregister(&esp_mac_driver); 640 platform_driver_unregister(&esp_mac_driver);
676
677 if (internal_pdev) {
678 platform_device_unregister(internal_pdev);
679 internal_pdev = NULL;
680 }
681 if (external_pdev) {
682 platform_device_unregister(external_pdev);
683 external_pdev = NULL;
684 }
685} 641}
686 642
687MODULE_DESCRIPTION("Mac ESP SCSI driver"); 643MODULE_DESCRIPTION("Mac ESP SCSI driver");
688MODULE_AUTHOR("Finn Thain <fthain@telegraphics.com.au>"); 644MODULE_AUTHOR("Finn Thain <fthain@telegraphics.com.au>");
689MODULE_LICENSE("GPL v2"); 645MODULE_LICENSE("GPL v2");
690MODULE_VERSION(DRV_VERSION); 646MODULE_VERSION(DRV_VERSION);
647MODULE_ALIAS("platform:" DRV_MODULE_NAME);
691 648
692module_init(mac_esp_init); 649module_init(mac_esp_init);
693module_exit(mac_esp_exit); 650module_exit(mac_esp_exit);
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 49eb0612d5af..4bf7edca9e69 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -47,6 +47,7 @@
47#include <linux/init.h> 47#include <linux/init.h>
48#include <linux/dma-mapping.h> 48#include <linux/dma-mapping.h>
49#include <linux/smp_lock.h> 49#include <linux/smp_lock.h>
50#include <linux/slab.h>
50#include <scsi/scsicam.h> 51#include <scsi/scsicam.h>
51 52
52#include "scsi.h" 53#include "scsi.h"
diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h
index 512c2cc1a33f..d310f49d077e 100644
--- a/drivers/scsi/megaraid.h
+++ b/drivers/scsi/megaraid.h
@@ -381,7 +381,7 @@ typedef struct {
381 u8 battery_status; /* 381 u8 battery_status; /*
382 * BIT 0: battery module missing 382 * BIT 0: battery module missing
383 * BIT 1: VBAD 383 * BIT 1: VBAD
384 * BIT 2: temprature high 384 * BIT 2: temperature high
385 * BIT 3: battery pack missing 385 * BIT 3: battery pack missing
386 * BIT 4,5: 386 * BIT 4,5:
387 * 00 - charge complete 387 * 00 - charge complete
diff --git a/drivers/scsi/megaraid/mbox_defs.h b/drivers/scsi/megaraid/mbox_defs.h
index b25b74764ec3..ce2487a888ed 100644
--- a/drivers/scsi/megaraid/mbox_defs.h
+++ b/drivers/scsi/megaraid/mbox_defs.h
@@ -497,7 +497,7 @@ typedef struct {
497 * @inserted_drive : channel:Id of inserted drive 497 * @inserted_drive : channel:Id of inserted drive
498 * @battery_status : bit 0: battery module missing 498 * @battery_status : bit 0: battery module missing
499 * bit 1: VBAD 499 * bit 1: VBAD
500 * bit 2: temprature high 500 * bit 2: temperature high
501 * bit 3: battery pack missing 501 * bit 3: battery pack missing
502 * bit 4,5: 502 * bit 4,5:
503 * 00 - charge complete 503 * 00 - charge complete
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 234f0b7eb21c..a7810a106b37 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -70,6 +70,7 @@
70 * For history of changes, see Documentation/ChangeLog.megaraid 70 * For history of changes, see Documentation/ChangeLog.megaraid
71 */ 71 */
72 72
73#include <linux/slab.h>
73#include "megaraid_mbox.h" 74#include "megaraid_mbox.h"
74 75
75static int megaraid_init(void); 76static int megaraid_init(void);
@@ -335,12 +336,17 @@ static struct device_attribute *megaraid_sdev_attrs[] = {
335 * megaraid_change_queue_depth - Change the device's queue depth 336 * megaraid_change_queue_depth - Change the device's queue depth
336 * @sdev: scsi device struct 337 * @sdev: scsi device struct
337 * @qdepth: depth to set 338 * @qdepth: depth to set
339 * @reason: calling context
338 * 340 *
339 * Return value: 341 * Return value:
340 * actual depth set 342 * actual depth set
341 */ 343 */
342static int megaraid_change_queue_depth(struct scsi_device *sdev, int qdepth) 344static int megaraid_change_queue_depth(struct scsi_device *sdev, int qdepth,
345 int reason)
343{ 346{
347 if (reason != SCSI_QDEPTH_DEFAULT)
348 return -EOPNOTSUPP;
349
344 if (qdepth > MBOX_MAX_SCSI_CMDS) 350 if (qdepth > MBOX_MAX_SCSI_CMDS)
345 qdepth = MBOX_MAX_SCSI_CMDS; 351 qdepth = MBOX_MAX_SCSI_CMDS;
346 scsi_adjust_queue_depth(sdev, 0, qdepth); 352 scsi_adjust_queue_depth(sdev, 0, qdepth);
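The new @reason argument tells the LLD why the midlayer wants the depth changed: a user's sysfs write arrives as SCSI_QDEPTH_DEFAULT, while queue-full ramp events use other codes, which this handler now declines with -EOPNOTSUPP. An illustrative wrapper for the calling convention assumed here (megaraid_change_queue_depth is static in the driver, so this is a sketch, not callable code):

/* Sketch: the only path this driver services is the default one. */
static int set_depth_default(struct scsi_device *sdev, int depth)
{
        return megaraid_change_queue_depth(sdev, depth,
                                           SCSI_QDEPTH_DEFAULT);
}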
@@ -2704,7 +2710,7 @@ megaraid_reset_handler(struct scsi_cmnd *scp)
2704 } 2710 }
2705 else { 2711 else {
2706 con_log(CL_ANN, (KERN_NOTICE 2712 con_log(CL_ANN, (KERN_NOTICE
2707 "megaraid mbox: reset sequence completed sucessfully\n")); 2713 "megaraid mbox: reset sequence completed successfully\n"));
2708 } 2714 }
2709 2715
2710 2716
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index f680561d2c6f..36e0b7d05c1d 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -15,6 +15,7 @@
15 * Common management module 15 * Common management module
16 */ 16 */
17#include <linux/sched.h> 17#include <linux/sched.h>
18#include <linux/slab.h>
18#include <linux/smp_lock.h> 19#include <linux/smp_lock.h>
19#include "megaraid_mm.h" 20#include "megaraid_mm.h"
20 21
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index a39addc3a596..99e4478c3f3e 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -10,7 +10,7 @@
10 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
11 * 11 *
12 * FILE : megaraid_sas.c 12 * FILE : megaraid_sas.c
13 * Version : v00.00.04.01-rc1 13 * Version : v00.00.04.17.1-rc1
14 * 14 *
15 * Authors: 15 * Authors:
16 * (email-id : megaraidlinux@lsi.com) 16 * (email-id : megaraidlinux@lsi.com)
@@ -35,11 +35,13 @@
35#include <linux/delay.h> 35#include <linux/delay.h>
36#include <linux/smp_lock.h> 36#include <linux/smp_lock.h>
37#include <linux/uio.h> 37#include <linux/uio.h>
38#include <linux/slab.h>
38#include <asm/uaccess.h> 39#include <asm/uaccess.h>
39#include <linux/fs.h> 40#include <linux/fs.h>
40#include <linux/compat.h> 41#include <linux/compat.h>
41#include <linux/blkdev.h> 42#include <linux/blkdev.h>
42#include <linux/mutex.h> 43#include <linux/mutex.h>
44#include <linux/poll.h>
43 45
44#include <scsi/scsi.h> 46#include <scsi/scsi.h>
45#include <scsi/scsi_cmnd.h> 47#include <scsi/scsi_cmnd.h>
@@ -75,6 +77,10 @@ static struct pci_device_id megasas_pci_table[] = {
75 /* gen2*/ 77 /* gen2*/
76 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)}, 78 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)},
77 /* gen2*/ 79 /* gen2*/
80 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)},
81 /* skinny*/
82 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)},
83 /* skinny*/
78 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)}, 84 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
79 /* xscale IOP, vega */ 85 /* xscale IOP, vega */
80 {PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)}, 86 {PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
@@ -89,8 +95,14 @@ static struct megasas_mgmt_info megasas_mgmt_info;
89static struct fasync_struct *megasas_async_queue; 95static struct fasync_struct *megasas_async_queue;
90static DEFINE_MUTEX(megasas_async_queue_mutex); 96static DEFINE_MUTEX(megasas_async_queue_mutex);
91 97
98static int megasas_poll_wait_aen;
99static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
100static u32 support_poll_for_event;
92static u32 megasas_dbg_lvl; 101static u32 megasas_dbg_lvl;
93 102
 103/* lock protecting the AEN poll/wakeup state */
104spinlock_t poll_aen_lock;
105
94static void 106static void
95megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, 107megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
96 u8 alt_status); 108 u8 alt_status);
@@ -215,7 +227,10 @@ megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs)
215 * @regs : MFI register set 227 * @regs : MFI register set
216 */ 228 */
217static inline void 229static inline void
218megasas_fire_cmd_xscale(dma_addr_t frame_phys_addr,u32 frame_count, struct megasas_register_set __iomem *regs) 230megasas_fire_cmd_xscale(struct megasas_instance *instance,
231 dma_addr_t frame_phys_addr,
232 u32 frame_count,
233 struct megasas_register_set __iomem *regs)
219{ 234{
220 writel((frame_phys_addr >> 3)|(frame_count), 235 writel((frame_phys_addr >> 3)|(frame_count),
221 &(regs)->inbound_queue_port); 236 &(regs)->inbound_queue_port);
@@ -312,7 +327,10 @@ megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
312 * @regs : MFI register set 327 * @regs : MFI register set
313 */ 328 */
314static inline void 329static inline void
315megasas_fire_cmd_ppc(dma_addr_t frame_phys_addr, u32 frame_count, struct megasas_register_set __iomem *regs) 330megasas_fire_cmd_ppc(struct megasas_instance *instance,
331 dma_addr_t frame_phys_addr,
332 u32 frame_count,
333 struct megasas_register_set __iomem *regs)
316{ 334{
317 writel((frame_phys_addr | (frame_count<<1))|1, 335 writel((frame_phys_addr | (frame_count<<1))|1,
318 &(regs)->inbound_queue_port); 336 &(regs)->inbound_queue_port);
@@ -328,6 +346,104 @@ static struct megasas_instance_template megasas_instance_template_ppc = {
328}; 346};
329 347
330/** 348/**
349 * megasas_enable_intr_skinny - Enables interrupts
350 * @regs: MFI register set
351 */
352static inline void
353megasas_enable_intr_skinny(struct megasas_register_set __iomem *regs)
354{
355 writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);
356
357 writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
358
359 /* Dummy readl to force pci flush */
360 readl(&regs->outbound_intr_mask);
361}
362
363/**
 364 * megasas_disable_intr_skinny - Disables interrupts
365 * @regs: MFI register set
366 */
367static inline void
368megasas_disable_intr_skinny(struct megasas_register_set __iomem *regs)
369{
370 u32 mask = 0xFFFFFFFF;
371 writel(mask, &regs->outbound_intr_mask);
372 /* Dummy readl to force pci flush */
373 readl(&regs->outbound_intr_mask);
374}
375
376/**
377 * megasas_read_fw_status_reg_skinny - returns the current FW status value
378 * @regs: MFI register set
379 */
380static u32
381megasas_read_fw_status_reg_skinny(struct megasas_register_set __iomem *regs)
382{
383 return readl(&(regs)->outbound_scratch_pad);
384}
385
386/**
 387 * megasas_clear_intr_skinny - Check & clear interrupt
388 * @regs: MFI register set
389 */
390static int
391megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
392{
393 u32 status;
394 /*
395 * Check if it is our interrupt
396 */
397 status = readl(&regs->outbound_intr_status);
398
399 if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) {
400 return 1;
401 }
402
403 /*
404 * Clear the interrupt by writing back the same value
405 */
406 writel(status, &regs->outbound_intr_status);
407
408 /*
409 * dummy read to flush PCI
410 */
411 readl(&regs->outbound_intr_status);
412
413 return 0;
414}
415
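The dummy readl() after each writel() in the skinny helpers is the standard guard against PCI posted writes: without a read-back, the mask or clear can sit in a bridge buffer while the CPU races ahead. A minimal sketch of the idiom, independent of this driver:

#include <linux/io.h>

/* Generic MMIO pattern (sketch, not driver code): reading the
 * register back forces the posted write out to the device before
 * the caller proceeds.
 */
static inline void mmio_write_flush(u32 val, u32 __iomem *reg)
{
        writel(val, reg);       /* may be buffered by a PCI bridge */
        (void)readl(reg);       /* read-back completes the write */
}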
416/**
417 * megasas_fire_cmd_skinny - Sends command to the FW
418 * @frame_phys_addr : Physical address of cmd
419 * @frame_count : Number of frames for the command
420 * @regs : MFI register set
421 */
422static inline void
423megasas_fire_cmd_skinny(struct megasas_instance *instance,
424 dma_addr_t frame_phys_addr,
425 u32 frame_count,
426 struct megasas_register_set __iomem *regs)
427{
428 unsigned long flags;
429 spin_lock_irqsave(&instance->fire_lock, flags);
430 writel(0, &(regs)->inbound_high_queue_port);
431 writel((frame_phys_addr | (frame_count<<1))|1,
432 &(regs)->inbound_low_queue_port);
433 spin_unlock_irqrestore(&instance->fire_lock, flags);
434}
435
436static struct megasas_instance_template megasas_instance_template_skinny = {
437
438 .fire_cmd = megasas_fire_cmd_skinny,
439 .enable_intr = megasas_enable_intr_skinny,
440 .disable_intr = megasas_disable_intr_skinny,
441 .clear_intr = megasas_clear_intr_skinny,
442 .read_fw_status_reg = megasas_read_fw_status_reg_skinny,
443};
444
445
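All the fire_cmd variants now share one signature, which lets common MFI code stay controller-agnostic and dispatch through the per-controller ops table. A minimal sketch of that dispatch, reusing the names from the diff (driver types assumed to come from megaraid_sas.h):

#include "megaraid_sas.h"       /* struct megasas_instance et al. (assumed) */

/* Sketch only: common code never touches registers directly; it
 * calls through the template installed at probe time, so skinny,
 * gen2, ppc and xscale controllers all look the same here.
 */
static void issue_frame(struct megasas_instance *instance,
                        dma_addr_t frame_phys_addr, u32 frame_count)
{
        instance->instancet->fire_cmd(instance, frame_phys_addr,
                                      frame_count, instance->reg_set);
}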
446/**
331* The following functions are defined for gen2 (deviceid : 0x78 0x79) 447* The following functions are defined for gen2 (deviceid : 0x78 0x79)
332* controllers 448* controllers
333*/ 449*/
@@ -404,7 +520,9 @@ megasas_clear_intr_gen2(struct megasas_register_set __iomem *regs)
404 * @regs : MFI register set 520 * @regs : MFI register set
405 */ 521 */
406static inline void 522static inline void
407megasas_fire_cmd_gen2(dma_addr_t frame_phys_addr, u32 frame_count, 523megasas_fire_cmd_gen2(struct megasas_instance *instance,
524 dma_addr_t frame_phys_addr,
525 u32 frame_count,
408 struct megasas_register_set __iomem *regs) 526 struct megasas_register_set __iomem *regs)
409{ 527{
410 writel((frame_phys_addr | (frame_count<<1))|1, 528 writel((frame_phys_addr | (frame_count<<1))|1,
@@ -446,7 +564,8 @@ megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
446 /* 564 /*
447 * Issue the frame using inbound queue port 565 * Issue the frame using inbound queue port
448 */ 566 */
449 instance->instancet->fire_cmd(cmd->frame_phys_addr ,0,instance->reg_set); 567 instance->instancet->fire_cmd(instance,
568 cmd->frame_phys_addr, 0, instance->reg_set);
450 569
451 /* 570 /*
452 * Wait for cmd_status to change 571 * Wait for cmd_status to change
@@ -477,7 +596,8 @@ megasas_issue_blocked_cmd(struct megasas_instance *instance,
477{ 596{
478 cmd->cmd_status = ENODATA; 597 cmd->cmd_status = ENODATA;
479 598
480 instance->instancet->fire_cmd(cmd->frame_phys_addr ,0,instance->reg_set); 599 instance->instancet->fire_cmd(instance,
600 cmd->frame_phys_addr, 0, instance->reg_set);
481 601
482 wait_event_timeout(instance->int_cmd_wait_q, (cmd->cmd_status != ENODATA), 602 wait_event_timeout(instance->int_cmd_wait_q, (cmd->cmd_status != ENODATA),
483 MEGASAS_INTERNAL_CMD_WAIT_TIME*HZ); 603 MEGASAS_INTERNAL_CMD_WAIT_TIME*HZ);
@@ -522,7 +642,8 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
522 cmd->sync_cmd = 1; 642 cmd->sync_cmd = 1;
523 cmd->cmd_status = 0xFF; 643 cmd->cmd_status = 0xFF;
524 644
525 instance->instancet->fire_cmd(cmd->frame_phys_addr ,0,instance->reg_set); 645 instance->instancet->fire_cmd(instance,
646 cmd->frame_phys_addr, 0, instance->reg_set);
526 647
527 /* 648 /*
528 * Wait for this cmd to complete 649 * Wait for this cmd to complete
@@ -592,6 +713,35 @@ megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
592 return sge_count; 713 return sge_count;
593} 714}
594 715
716/**
717 * megasas_make_sgl_skinny - Prepares IEEE SGL
718 * @instance: Adapter soft state
719 * @scp: SCSI command from the mid-layer
720 * @mfi_sgl: SGL to be filled in
721 *
722 * If successful, this function returns the number of SG elements. Otherwise,
 723 * it returns -1.
724 */
725static int
726megasas_make_sgl_skinny(struct megasas_instance *instance,
727 struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
728{
729 int i;
730 int sge_count;
731 struct scatterlist *os_sgl;
732
733 sge_count = scsi_dma_map(scp);
734
735 if (sge_count) {
736 scsi_for_each_sg(scp, os_sgl, sge_count, i) {
737 mfi_sgl->sge_skinny[i].length = sg_dma_len(os_sgl);
738 mfi_sgl->sge_skinny[i].phys_addr =
739 sg_dma_address(os_sgl);
740 }
741 }
742 return sge_count;
743}
744
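For reference, a hedged sketch of the skinny (IEEE) SGE this helper fills in; the authoritative definition lives in megaraid_sas.h and should be checked there:

/* Assumed layout: a 64-bit DMA address plus explicit length and
 * IEEE flag word, 16 bytes per entry (vs. 8 for megasas_sge32).
 */
struct megasas_sge_skinny {
        u64 phys_addr;          /* DMA address of the segment */
        u32 length;             /* segment length in bytes */
        u32 flag;               /* IEEE SGE flags */
} __attribute__ ((packed));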
595 /** 745 /**
596 * megasas_get_frame_count - Computes the number of frames 746 * megasas_get_frame_count - Computes the number of frames
597 * @frame_type : type of frame- io or pthru frame 747 * @frame_type : type of frame- io or pthru frame
@@ -600,7 +750,8 @@ megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
 600 * Returns the number of frames required for number of sge's (sge_count) 750
601 */ 751 */
602 752
603static u32 megasas_get_frame_count(u8 sge_count, u8 frame_type) 753static u32 megasas_get_frame_count(struct megasas_instance *instance,
754 u8 sge_count, u8 frame_type)
604{ 755{
605 int num_cnt; 756 int num_cnt;
606 int sge_bytes; 757 int sge_bytes;
@@ -610,6 +761,10 @@ static u32 megasas_get_frame_count(u8 sge_count, u8 frame_type)
610 sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) : 761 sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
611 sizeof(struct megasas_sge32); 762 sizeof(struct megasas_sge32);
612 763
764 if (instance->flag_ieee) {
765 sge_sz = sizeof(struct megasas_sge_skinny);
766 }
767
613 /* 768 /*
614 * Main frame can contain 2 SGEs for 64-bit SGLs and 769 * Main frame can contain 2 SGEs for 64-bit SGLs and
615 * 3 SGEs for 32-bit SGLs for ldio & 770 * 3 SGEs for 32-bit SGLs for ldio &
@@ -617,12 +772,16 @@ static u32 megasas_get_frame_count(u8 sge_count, u8 frame_type)
617 * 2 SGEs for 32-bit SGLs for pthru frame 772 * 2 SGEs for 32-bit SGLs for pthru frame
618 */ 773 */
619 if (unlikely(frame_type == PTHRU_FRAME)) { 774 if (unlikely(frame_type == PTHRU_FRAME)) {
620 if (IS_DMA64) 775 if (instance->flag_ieee == 1) {
776 num_cnt = sge_count - 1;
777 } else if (IS_DMA64)
621 num_cnt = sge_count - 1; 778 num_cnt = sge_count - 1;
622 else 779 else
623 num_cnt = sge_count - 2; 780 num_cnt = sge_count - 2;
624 } else { 781 } else {
625 if (IS_DMA64) 782 if (instance->flag_ieee == 1) {
783 num_cnt = sge_count - 1;
784 } else if (IS_DMA64)
626 num_cnt = sge_count - 2; 785 num_cnt = sge_count - 2;
627 else 786 else
628 num_cnt = sge_count - 3; 787 num_cnt = sge_count - 3;
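Because the IEEE SGE is larger than the 32-bit one, fewer entries fit per 64-byte MFI frame and the frame count grows accordingly. A worked sketch of the arithmetic with assumed sizes (the real constants are in megaraid_sas.h):

#define MFI_FRAME_SZ    64      /* assumed MFI frame size */
#define SGE32_SZ         8      /* assumed sizeof(struct megasas_sge32) */
#define SGE_SKINNY_SZ   16      /* assumed sizeof(struct megasas_sge_skinny) */

/* Extra frames needed for num_cnt SGEs that overflow the main
 * frame: 10 skinny SGEs = 160 bytes = 3 extra frames, while
 * 10 32-bit SGEs = 80 bytes would need only 2.
 */
static unsigned int extra_frames(unsigned int num_cnt, unsigned int sge_sz)
{
        return (num_cnt * sge_sz + MFI_FRAME_SZ - 1) / MFI_FRAME_SZ;
}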
@@ -671,6 +830,10 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
671 else if (scp->sc_data_direction == PCI_DMA_NONE) 830 else if (scp->sc_data_direction == PCI_DMA_NONE)
672 flags = MFI_FRAME_DIR_NONE; 831 flags = MFI_FRAME_DIR_NONE;
673 832
833 if (instance->flag_ieee == 1) {
834 flags |= MFI_FRAME_IEEE;
835 }
836
674 /* 837 /*
675 * Prepare the DCDB frame 838 * Prepare the DCDB frame
676 */ 839 */
@@ -681,15 +844,31 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
681 pthru->lun = scp->device->lun; 844 pthru->lun = scp->device->lun;
682 pthru->cdb_len = scp->cmd_len; 845 pthru->cdb_len = scp->cmd_len;
683 pthru->timeout = 0; 846 pthru->timeout = 0;
847 pthru->pad_0 = 0;
684 pthru->flags = flags; 848 pthru->flags = flags;
685 pthru->data_xfer_len = scsi_bufflen(scp); 849 pthru->data_xfer_len = scsi_bufflen(scp);
686 850
687 memcpy(pthru->cdb, scp->cmnd, scp->cmd_len); 851 memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
688 852
689 /* 853 /*
854 * If the command is for the tape device, set the
855 * pthru timeout to the os layer timeout value.
856 */
857 if (scp->device->type == TYPE_TAPE) {
858 if ((scp->request->timeout / HZ) > 0xFFFF)
859 pthru->timeout = 0xFFFF;
860 else
861 pthru->timeout = scp->request->timeout / HZ;
862 }
863
864 /*
690 * Construct SGL 865 * Construct SGL
691 */ 866 */
692 if (IS_DMA64) { 867 if (instance->flag_ieee == 1) {
868 pthru->flags |= MFI_FRAME_SGL64;
869 pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
870 &pthru->sgl);
871 } else if (IS_DMA64) {
693 pthru->flags |= MFI_FRAME_SGL64; 872 pthru->flags |= MFI_FRAME_SGL64;
694 pthru->sge_count = megasas_make_sgl64(instance, scp, 873 pthru->sge_count = megasas_make_sgl64(instance, scp,
695 &pthru->sgl); 874 &pthru->sgl);
@@ -697,6 +876,12 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
697 pthru->sge_count = megasas_make_sgl32(instance, scp, 876 pthru->sge_count = megasas_make_sgl32(instance, scp,
698 &pthru->sgl); 877 &pthru->sgl);
699 878
879 if (pthru->sge_count > instance->max_num_sge) {
 880 printk(KERN_ERR "megasas: DCDB too many SGE NUM=%x\n",
881 pthru->sge_count);
882 return 0;
883 }
884
700 /* 885 /*
701 * Sense info specific 886 * Sense info specific
702 */ 887 */
@@ -708,7 +893,7 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
708 * Compute the total number of frames this command consumes. FW uses 893 * Compute the total number of frames this command consumes. FW uses
709 * this number to pull sufficient number of frames from host memory. 894 * this number to pull sufficient number of frames from host memory.
710 */ 895 */
711 cmd->frame_count = megasas_get_frame_count(pthru->sge_count, 896 cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count,
712 PTHRU_FRAME); 897 PTHRU_FRAME);
713 898
714 return cmd->frame_count; 899 return cmd->frame_count;
@@ -739,6 +924,10 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
739 else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) 924 else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
740 flags = MFI_FRAME_DIR_READ; 925 flags = MFI_FRAME_DIR_READ;
741 926
927 if (instance->flag_ieee == 1) {
928 flags |= MFI_FRAME_IEEE;
929 }
930
742 /* 931 /*
743 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds 932 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
744 */ 933 */
@@ -809,12 +998,22 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
809 /* 998 /*
810 * Construct SGL 999 * Construct SGL
811 */ 1000 */
812 if (IS_DMA64) { 1001 if (instance->flag_ieee) {
1002 ldio->flags |= MFI_FRAME_SGL64;
1003 ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
1004 &ldio->sgl);
1005 } else if (IS_DMA64) {
813 ldio->flags |= MFI_FRAME_SGL64; 1006 ldio->flags |= MFI_FRAME_SGL64;
814 ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl); 1007 ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
815 } else 1008 } else
816 ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl); 1009 ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
817 1010
1011 if (ldio->sge_count > instance->max_num_sge) {
1012 printk(KERN_ERR "megasas: build_ld_io: sge_count = %x\n",
1013 ldio->sge_count);
1014 return 0;
1015 }
1016
818 /* 1017 /*
819 * Sense info specific 1018 * Sense info specific
820 */ 1019 */
@@ -826,7 +1025,8 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
826 * Compute the total number of frames this command consumes. FW uses 1025 * Compute the total number of frames this command consumes. FW uses
827 * this number to pull sufficient number of frames from host memory. 1026 * this number to pull sufficient number of frames from host memory.
828 */ 1027 */
829 cmd->frame_count = megasas_get_frame_count(ldio->sge_count, IO_FRAME); 1028 cmd->frame_count = megasas_get_frame_count(instance,
1029 ldio->sge_count, IO_FRAME);
830 1030
831 return cmd->frame_count; 1031 return cmd->frame_count;
832} 1032}
@@ -983,7 +1183,8 @@ megasas_queue_command(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *))
983 */ 1183 */
984 atomic_inc(&instance->fw_outstanding); 1184 atomic_inc(&instance->fw_outstanding);
985 1185
986 instance->instancet->fire_cmd(cmd->frame_phys_addr ,cmd->frame_count-1,instance->reg_set); 1186 instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
1187 cmd->frame_count-1, instance->reg_set);
987 /* 1188 /*
988 * Check if we have pend cmds to be completed 1189 * Check if we have pend cmds to be completed
989 */ 1190 */
@@ -1000,24 +1201,76 @@ megasas_queue_command(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *))
1000 return 0; 1201 return 0;
1001} 1202}
1002 1203
1204static struct megasas_instance *megasas_lookup_instance(u16 host_no)
1205{
1206 int i;
1207
1208 for (i = 0; i < megasas_mgmt_info.max_index; i++) {
1209
1210 if ((megasas_mgmt_info.instance[i]) &&
1211 (megasas_mgmt_info.instance[i]->host->host_no == host_no))
1212 return megasas_mgmt_info.instance[i];
1213 }
1214
1215 return NULL;
1216}
1217
1003static int megasas_slave_configure(struct scsi_device *sdev) 1218static int megasas_slave_configure(struct scsi_device *sdev)
1004{ 1219{
1220 u16 pd_index = 0;
1221 struct megasas_instance *instance ;
1222
1223 instance = megasas_lookup_instance(sdev->host->host_no);
1224
1005 /* 1225 /*
1006 * Don't export physical disk devices to the disk driver. 1226 * Don't export physical disk devices to the disk driver.
1007 * 1227 *
1008 * FIXME: Currently we don't export them to the midlayer at all. 1228 * FIXME: Currently we don't export them to the midlayer at all.
1009 * That will be fixed once LSI engineers have audited the 1229 * That will be fixed once LSI engineers have audited the
1010 * firmware for possible issues. 1230 * firmware for possible issues.
1011 */ 1231 */
1012 if (sdev->channel < MEGASAS_MAX_PD_CHANNELS && sdev->type == TYPE_DISK) 1232 if (sdev->channel < MEGASAS_MAX_PD_CHANNELS &&
1233 sdev->type == TYPE_DISK) {
1234 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1235 sdev->id;
1236 if (instance->pd_list[pd_index].driveState ==
1237 MR_PD_STATE_SYSTEM) {
1238 blk_queue_rq_timeout(sdev->request_queue,
1239 MEGASAS_DEFAULT_CMD_TIMEOUT * HZ);
1240 return 0;
1241 }
1013 return -ENXIO; 1242 return -ENXIO;
1243 }
1014 1244
1015 /* 1245 /*
1016 * The RAID firmware may require extended timeouts. 1246 * The RAID firmware may require extended timeouts.
1017 */ 1247 */
1018 if (sdev->channel >= MEGASAS_MAX_PD_CHANNELS) 1248 blk_queue_rq_timeout(sdev->request_queue,
1019 blk_queue_rq_timeout(sdev->request_queue, 1249 MEGASAS_DEFAULT_CMD_TIMEOUT * HZ);
1020 MEGASAS_DEFAULT_CMD_TIMEOUT * HZ); 1250 return 0;
1251}
1252
1253static int megasas_slave_alloc(struct scsi_device *sdev)
1254{
1255 u16 pd_index = 0;
1256 struct megasas_instance *instance ;
1257 instance = megasas_lookup_instance(sdev->host->host_no);
1258 if ((sdev->channel < MEGASAS_MAX_PD_CHANNELS) &&
1259 (sdev->type == TYPE_DISK)) {
1260 /*
1261 * Open the OS scan to the SYSTEM PD
1262 */
1263 pd_index =
1264 (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1265 sdev->id;
1266 if ((instance->pd_list[pd_index].driveState ==
1267 MR_PD_STATE_SYSTEM) &&
1268 (instance->pd_list[pd_index].driveType ==
1269 TYPE_DISK)) {
1270 return 0;
1271 }
1272 return -ENXIO;
1273 }
1021 return 0; 1274 return 0;
1022} 1275}
1023 1276
@@ -1072,7 +1325,14 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr)
1072 1325
1073 spin_lock_irqsave(instance->host->host_lock, flags); 1326 spin_lock_irqsave(instance->host->host_lock, flags);
1074 instance->flag &= ~MEGASAS_FW_BUSY; 1327 instance->flag &= ~MEGASAS_FW_BUSY;
1075 instance->host->can_queue = 1328 if ((instance->pdev->device ==
1329 PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
1330 (instance->pdev->device ==
1331 PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
1332 instance->host->can_queue =
1333 instance->max_fw_cmds - MEGASAS_SKINNY_INT_CMDS;
1334 } else
1335 instance->host->can_queue =
1076 instance->max_fw_cmds - MEGASAS_INT_CMDS; 1336 instance->max_fw_cmds - MEGASAS_INT_CMDS;
1077 1337
1078 spin_unlock_irqrestore(instance->host->host_lock, flags); 1338 spin_unlock_irqrestore(instance->host->host_lock, flags);
@@ -1117,8 +1377,16 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
1117 * Send signal to FW to stop processing any pending cmds. 1377 * Send signal to FW to stop processing any pending cmds.
1118 * The controller will be taken offline by the OS now. 1378 * The controller will be taken offline by the OS now.
1119 */ 1379 */
1120 writel(MFI_STOP_ADP, 1380 if ((instance->pdev->device ==
1381 PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
1382 (instance->pdev->device ==
1383 PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
1384 writel(MFI_STOP_ADP,
1385 &instance->reg_set->reserved_0[0]);
1386 } else {
1387 writel(MFI_STOP_ADP,
1121 &instance->reg_set->inbound_doorbell); 1388 &instance->reg_set->inbound_doorbell);
1389 }
1122 megasas_dump_pending_frames(instance); 1390 megasas_dump_pending_frames(instance);
1123 instance->hw_crit_error = 1; 1391 instance->hw_crit_error = 1;
1124 return FAILED; 1392 return FAILED;
@@ -1266,6 +1534,8 @@ megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
1266 return 0; 1534 return 0;
1267} 1535}
1268 1536
1537static void megasas_aen_polling(struct work_struct *work);
1538
1269/** 1539/**
1270 * megasas_service_aen - Processes an event notification 1540 * megasas_service_aen - Processes an event notification
1271 * @instance: Adapter soft state 1541 * @instance: Adapter soft state
@@ -1281,16 +1551,36 @@ megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
1281static void 1551static void
1282megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd) 1552megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
1283{ 1553{
1554 unsigned long flags;
1284 /* 1555 /*
1285 * Don't signal app if it is just an aborted previously registered aen 1556 * Don't signal app if it is just an aborted previously registered aen
1286 */ 1557 */
1287 if (!cmd->abort_aen) 1558 if ((!cmd->abort_aen) && (instance->unload == 0)) {
1559 spin_lock_irqsave(&poll_aen_lock, flags);
1560 megasas_poll_wait_aen = 1;
1561 spin_unlock_irqrestore(&poll_aen_lock, flags);
1562 wake_up(&megasas_poll_wait);
1288 kill_fasync(&megasas_async_queue, SIGIO, POLL_IN); 1563 kill_fasync(&megasas_async_queue, SIGIO, POLL_IN);
1564 }
1289 else 1565 else
1290 cmd->abort_aen = 0; 1566 cmd->abort_aen = 0;
1291 1567
1292 instance->aen_cmd = NULL; 1568 instance->aen_cmd = NULL;
1293 megasas_return_cmd(instance, cmd); 1569 megasas_return_cmd(instance, cmd);
1570
1571 if (instance->unload == 0) {
1572 struct megasas_aen_event *ev;
1573 ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
1574 if (!ev) {
1575 printk(KERN_ERR "megasas_service_aen: out of memory\n");
1576 } else {
1577 ev->instance = instance;
1578 instance->ev = ev;
1579 INIT_WORK(&ev->hotplug_work, megasas_aen_polling);
1580 schedule_delayed_work(
1581 (struct delayed_work *)&ev->hotplug_work, 0);
1582 }
1583 }
1294} 1584}
1295 1585
1296/* 1586/*
@@ -1302,6 +1592,7 @@ static struct scsi_host_template megasas_template = {
1302 .name = "LSI SAS based MegaRAID driver", 1592 .name = "LSI SAS based MegaRAID driver",
1303 .proc_name = "megaraid_sas", 1593 .proc_name = "megaraid_sas",
1304 .slave_configure = megasas_slave_configure, 1594 .slave_configure = megasas_slave_configure,
1595 .slave_alloc = megasas_slave_alloc,
1305 .queuecommand = megasas_queue_command, 1596 .queuecommand = megasas_queue_command,
1306 .eh_device_reset_handler = megasas_reset_device, 1597 .eh_device_reset_handler = megasas_reset_device,
1307 .eh_bus_reset_handler = megasas_reset_bus_host, 1598 .eh_bus_reset_handler = megasas_reset_bus_host,
@@ -1370,6 +1661,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
1370{ 1661{
1371 int exception = 0; 1662 int exception = 0;
1372 struct megasas_header *hdr = &cmd->frame->hdr; 1663 struct megasas_header *hdr = &cmd->frame->hdr;
1664 unsigned long flags;
1373 1665
1374 if (cmd->scmd) 1666 if (cmd->scmd)
1375 cmd->scmd->SCp.ptr = NULL; 1667 cmd->scmd->SCp.ptr = NULL;
@@ -1459,6 +1751,12 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
1459 case MFI_CMD_SMP: 1751 case MFI_CMD_SMP:
1460 case MFI_CMD_STP: 1752 case MFI_CMD_STP:
1461 case MFI_CMD_DCMD: 1753 case MFI_CMD_DCMD:
1754 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
1755 cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
1756 spin_lock_irqsave(&poll_aen_lock, flags);
1757 megasas_poll_wait_aen = 0;
1758 spin_unlock_irqrestore(&poll_aen_lock, flags);
1759 }
1462 1760
1463 /* 1761 /*
1464 * See if got an event notification 1762 * See if got an event notification
@@ -1536,6 +1834,7 @@ megasas_transition_to_ready(struct megasas_instance* instance)
1536 u8 max_wait; 1834 u8 max_wait;
1537 u32 fw_state; 1835 u32 fw_state;
1538 u32 cur_state; 1836 u32 cur_state;
1837 u32 abs_state, curr_abs_state;
1539 1838
1540 fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK; 1839 fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
1541 1840
@@ -1545,6 +1844,9 @@ megasas_transition_to_ready(struct megasas_instance* instance)
1545 1844
1546 while (fw_state != MFI_STATE_READY) { 1845 while (fw_state != MFI_STATE_READY) {
1547 1846
1847 abs_state =
1848 instance->instancet->read_fw_status_reg(instance->reg_set);
1849
1548 switch (fw_state) { 1850 switch (fw_state) {
1549 1851
1550 case MFI_STATE_FAULT: 1852 case MFI_STATE_FAULT:
@@ -1556,18 +1858,36 @@ megasas_transition_to_ready(struct megasas_instance* instance)
1556 /* 1858 /*
1557 * Set the CLR bit in inbound doorbell 1859 * Set the CLR bit in inbound doorbell
1558 */ 1860 */
1559 writel(MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, 1861 if ((instance->pdev->device ==
1560 &instance->reg_set->inbound_doorbell); 1862 PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
1863 (instance->pdev->device ==
1864 PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
1865
1866 writel(
1867 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
1868 &instance->reg_set->reserved_0[0]);
1869 } else {
1870 writel(
1871 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
1872 &instance->reg_set->inbound_doorbell);
1873 }
1561 1874
1562 max_wait = 2; 1875 max_wait = MEGASAS_RESET_WAIT_TIME;
1563 cur_state = MFI_STATE_WAIT_HANDSHAKE; 1876 cur_state = MFI_STATE_WAIT_HANDSHAKE;
1564 break; 1877 break;
1565 1878
1566 case MFI_STATE_BOOT_MESSAGE_PENDING: 1879 case MFI_STATE_BOOT_MESSAGE_PENDING:
1567 writel(MFI_INIT_HOTPLUG, 1880 if ((instance->pdev->device ==
1568 &instance->reg_set->inbound_doorbell); 1881 PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
1882 (instance->pdev->device ==
1883 PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
1884 writel(MFI_INIT_HOTPLUG,
1885 &instance->reg_set->reserved_0[0]);
1886 } else
1887 writel(MFI_INIT_HOTPLUG,
1888 &instance->reg_set->inbound_doorbell);
1569 1889
1570 max_wait = 10; 1890 max_wait = MEGASAS_RESET_WAIT_TIME;
1571 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING; 1891 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
1572 break; 1892 break;
1573 1893
@@ -1576,9 +1896,17 @@ megasas_transition_to_ready(struct megasas_instance* instance)
1576 * Bring it to READY state; assuming max wait 10 secs 1896 * Bring it to READY state; assuming max wait 10 secs
1577 */ 1897 */
1578 instance->instancet->disable_intr(instance->reg_set); 1898 instance->instancet->disable_intr(instance->reg_set);
1579 writel(MFI_RESET_FLAGS, &instance->reg_set->inbound_doorbell); 1899 if ((instance->pdev->device ==
1900 PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
1901 (instance->pdev->device ==
1902 PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
1903 writel(MFI_RESET_FLAGS,
1904 &instance->reg_set->reserved_0[0]);
1905 } else
1906 writel(MFI_RESET_FLAGS,
1907 &instance->reg_set->inbound_doorbell);
1580 1908
1581 max_wait = 60; 1909 max_wait = MEGASAS_RESET_WAIT_TIME;
1582 cur_state = MFI_STATE_OPERATIONAL; 1910 cur_state = MFI_STATE_OPERATIONAL;
1583 break; 1911 break;
1584 1912
@@ -1586,32 +1914,32 @@ megasas_transition_to_ready(struct megasas_instance* instance)
1586 /* 1914 /*
1587 * This state should not last for more than 2 seconds 1915 * This state should not last for more than 2 seconds
1588 */ 1916 */
1589 max_wait = 2; 1917 max_wait = MEGASAS_RESET_WAIT_TIME;
1590 cur_state = MFI_STATE_UNDEFINED; 1918 cur_state = MFI_STATE_UNDEFINED;
1591 break; 1919 break;
1592 1920
1593 case MFI_STATE_BB_INIT: 1921 case MFI_STATE_BB_INIT:
1594 max_wait = 2; 1922 max_wait = MEGASAS_RESET_WAIT_TIME;
1595 cur_state = MFI_STATE_BB_INIT; 1923 cur_state = MFI_STATE_BB_INIT;
1596 break; 1924 break;
1597 1925
1598 case MFI_STATE_FW_INIT: 1926 case MFI_STATE_FW_INIT:
1599 max_wait = 20; 1927 max_wait = MEGASAS_RESET_WAIT_TIME;
1600 cur_state = MFI_STATE_FW_INIT; 1928 cur_state = MFI_STATE_FW_INIT;
1601 break; 1929 break;
1602 1930
1603 case MFI_STATE_FW_INIT_2: 1931 case MFI_STATE_FW_INIT_2:
1604 max_wait = 20; 1932 max_wait = MEGASAS_RESET_WAIT_TIME;
1605 cur_state = MFI_STATE_FW_INIT_2; 1933 cur_state = MFI_STATE_FW_INIT_2;
1606 break; 1934 break;
1607 1935
1608 case MFI_STATE_DEVICE_SCAN: 1936 case MFI_STATE_DEVICE_SCAN:
1609 max_wait = 20; 1937 max_wait = MEGASAS_RESET_WAIT_TIME;
1610 cur_state = MFI_STATE_DEVICE_SCAN; 1938 cur_state = MFI_STATE_DEVICE_SCAN;
1611 break; 1939 break;
1612 1940
1613 case MFI_STATE_FLUSH_CACHE: 1941 case MFI_STATE_FLUSH_CACHE:
1614 max_wait = 20; 1942 max_wait = MEGASAS_RESET_WAIT_TIME;
1615 cur_state = MFI_STATE_FLUSH_CACHE; 1943 cur_state = MFI_STATE_FLUSH_CACHE;
1616 break; 1944 break;
1617 1945
@@ -1627,8 +1955,10 @@ megasas_transition_to_ready(struct megasas_instance* instance)
1627 for (i = 0; i < (max_wait * 1000); i++) { 1955 for (i = 0; i < (max_wait * 1000); i++) {
1628 fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & 1956 fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) &
1629 MFI_STATE_MASK ; 1957 MFI_STATE_MASK ;
1958 curr_abs_state =
1959 instance->instancet->read_fw_status_reg(instance->reg_set);
1630 1960
1631 if (fw_state == cur_state) { 1961 if (abs_state == curr_abs_state) {
1632 msleep(1); 1962 msleep(1);
1633 } else 1963 } else
1634 break; 1964 break;
@@ -1637,7 +1967,7 @@ megasas_transition_to_ready(struct megasas_instance* instance)
1637 /* 1967 /*
1638 * Return error if fw_state hasn't changed after max_wait 1968 * Return error if fw_state hasn't changed after max_wait
1639 */ 1969 */
1640 if (fw_state == cur_state) { 1970 if (curr_abs_state == abs_state) {
1641 printk(KERN_DEBUG "FW state [%d] hasn't changed " 1971 printk(KERN_DEBUG "FW state [%d] hasn't changed "
1642 "in %d secs\n", fw_state, max_wait); 1972 "in %d secs\n", fw_state, max_wait);
1643 return -ENODEV; 1973 return -ENODEV;
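The wait loop now compares the whole scratch-pad word rather than only the MFI_STATE_MASK bits, so any change the firmware reports, including sub-state bits below the mask, ends the wait. A sketch of the predicate being polled, under the assumption that read_fw_status_reg returns that raw word:

/* Sketch: true once the raw FW status word differs from the
 * snapshot taken when the wait began.
 */
static int fw_state_changed(struct megasas_instance *instance, u32 abs_state)
{
        return instance->instancet->read_fw_status_reg(instance->reg_set)
                != abs_state;
}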
@@ -1715,6 +2045,10 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
1715 sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) : 2045 sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
1716 sizeof(struct megasas_sge32); 2046 sizeof(struct megasas_sge32);
1717 2047
2048 if (instance->flag_ieee) {
2049 sge_sz = sizeof(struct megasas_sge_skinny);
2050 }
2051
1718 /* 2052 /*
1719 * Calculated the number of 64byte frames required for SGL 2053 * Calculated the number of 64byte frames required for SGL
1720 */ 2054 */
@@ -1777,6 +2111,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
1777 } 2111 }
1778 2112
1779 cmd->frame->io.context = cmd->index; 2113 cmd->frame->io.context = cmd->index;
2114 cmd->frame->io.pad_0 = 0;
1780 } 2115 }
1781 2116
1782 return 0; 2117 return 0;
@@ -1882,6 +2217,178 @@ static int megasas_alloc_cmds(struct megasas_instance *instance)
1882 return 0; 2217 return 0;
1883} 2218}
1884 2219
2220/*
 2221 * megasas_get_pd_list - Returns FW's pd_list structure
2222 * @instance: Adapter soft state
2223 * @pd_list: pd_list structure
2224 *
2225 * Issues an internal command (DCMD) to get the FW's controller PD
 2226 * list structure. This information is mainly used to find out the
 2227 * SYSTEM PDs exposed by the FW.
2228 */
2229static int
2230megasas_get_pd_list(struct megasas_instance *instance)
2231{
2232 int ret = 0, pd_index = 0;
2233 struct megasas_cmd *cmd;
2234 struct megasas_dcmd_frame *dcmd;
2235 struct MR_PD_LIST *ci;
2236 struct MR_PD_ADDRESS *pd_addr;
2237 dma_addr_t ci_h = 0;
2238
2239 cmd = megasas_get_cmd(instance);
2240
2241 if (!cmd) {
2242 printk(KERN_DEBUG "megasas (get_pd_list): Failed to get cmd\n");
2243 return -ENOMEM;
2244 }
2245
2246 dcmd = &cmd->frame->dcmd;
2247
2248 ci = pci_alloc_consistent(instance->pdev,
2249 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), &ci_h);
2250
2251 if (!ci) {
2252 printk(KERN_DEBUG "Failed to alloc mem for pd_list\n");
2253 megasas_return_cmd(instance, cmd);
2254 return -ENOMEM;
2255 }
2256
2257 memset(ci, 0, sizeof(*ci));
2258 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2259
2260 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
2261 dcmd->mbox.b[1] = 0;
2262 dcmd->cmd = MFI_CMD_DCMD;
2263 dcmd->cmd_status = 0xFF;
2264 dcmd->sge_count = 1;
2265 dcmd->flags = MFI_FRAME_DIR_READ;
2266 dcmd->timeout = 0;
2267 dcmd->pad_0 = 0;
2268 dcmd->data_xfer_len = MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST);
2269 dcmd->opcode = MR_DCMD_PD_LIST_QUERY;
2270 dcmd->sgl.sge32[0].phys_addr = ci_h;
2271 dcmd->sgl.sge32[0].length = MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST);
2272
2273 if (!megasas_issue_polled(instance, cmd)) {
2274 ret = 0;
2275 } else {
2276 ret = -1;
2277 }
2278
2279 /*
 2280 * the following loop populates the instance PD list.
2281 */
2282
2283 pd_addr = ci->addr;
2284
 2285 if (ret == 0 &&
2286 (ci->count <
2287 (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) {
2288
2289 memset(instance->pd_list, 0,
2290 MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
2291
2292 for (pd_index = 0; pd_index < ci->count; pd_index++) {
2293
2294 instance->pd_list[pd_addr->deviceId].tid =
2295 pd_addr->deviceId;
2296 instance->pd_list[pd_addr->deviceId].driveType =
2297 pd_addr->scsiDevType;
2298 instance->pd_list[pd_addr->deviceId].driveState =
2299 MR_PD_STATE_SYSTEM;
2300 pd_addr++;
2301 }
2302 }
2303
2304 pci_free_consistent(instance->pdev,
2305 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
2306 ci, ci_h);
2307 megasas_return_cmd(instance, cmd);
2308
2309 return ret;
2310}
2311
2312/*
 2313 * megasas_get_ld_list - Returns FW's ld_list structure
2314 * @instance: Adapter soft state
2315 * @ld_list: ld_list structure
2316 *
 2317 * Issues an internal command (DCMD) to get the FW's controller LD
 2318 * list structure. This information is mainly used to find out the
 2319 * logical drives exposed by the FW.
2320 */
2321static int
2322megasas_get_ld_list(struct megasas_instance *instance)
2323{
2324 int ret = 0, ld_index = 0, ids = 0;
2325 struct megasas_cmd *cmd;
2326 struct megasas_dcmd_frame *dcmd;
2327 struct MR_LD_LIST *ci;
2328 dma_addr_t ci_h = 0;
2329
2330 cmd = megasas_get_cmd(instance);
2331
2332 if (!cmd) {
2333 printk(KERN_DEBUG "megasas_get_ld_list: Failed to get cmd\n");
2334 return -ENOMEM;
2335 }
2336
2337 dcmd = &cmd->frame->dcmd;
2338
2339 ci = pci_alloc_consistent(instance->pdev,
2340 sizeof(struct MR_LD_LIST),
2341 &ci_h);
2342
2343 if (!ci) {
2344 printk(KERN_DEBUG "Failed to alloc mem in get_ld_list\n");
2345 megasas_return_cmd(instance, cmd);
2346 return -ENOMEM;
2347 }
2348
2349 memset(ci, 0, sizeof(*ci));
2350 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2351
2352 dcmd->cmd = MFI_CMD_DCMD;
2353 dcmd->cmd_status = 0xFF;
2354 dcmd->sge_count = 1;
2355 dcmd->flags = MFI_FRAME_DIR_READ;
2356 dcmd->timeout = 0;
2357 dcmd->data_xfer_len = sizeof(struct MR_LD_LIST);
2358 dcmd->opcode = MR_DCMD_LD_GET_LIST;
2359 dcmd->sgl.sge32[0].phys_addr = ci_h;
2360 dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
2361 dcmd->pad_0 = 0;
2362
2363 if (!megasas_issue_polled(instance, cmd)) {
2364 ret = 0;
2365 } else {
2366 ret = -1;
2367 }
2368
 2369 /* the following loop populates the instance LD list */
2370
2371 if ((ret == 0) && (ci->ldCount < MAX_LOGICAL_DRIVES)) {
2372 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
2373
2374 for (ld_index = 0; ld_index < ci->ldCount; ld_index++) {
2375 if (ci->ldList[ld_index].state != 0) {
2376 ids = ci->ldList[ld_index].ref.targetId;
2377 instance->ld_ids[ids] =
2378 ci->ldList[ld_index].ref.targetId;
2379 }
2380 }
2381 }
2382
2383 pci_free_consistent(instance->pdev,
2384 sizeof(struct MR_LD_LIST),
2385 ci,
2386 ci_h);
2387
2388 megasas_return_cmd(instance, cmd);
2389 return ret;
2390}
2391
1885/** 2392/**
1886 * megasas_get_controller_info - Returns FW's controller structure 2393 * megasas_get_controller_info - Returns FW's controller structure
1887 * @instance: Adapter soft state 2394 * @instance: Adapter soft state
@@ -1927,6 +2434,7 @@ megasas_get_ctrl_info(struct megasas_instance *instance,
1927 dcmd->sge_count = 1; 2434 dcmd->sge_count = 1;
1928 dcmd->flags = MFI_FRAME_DIR_READ; 2435 dcmd->flags = MFI_FRAME_DIR_READ;
1929 dcmd->timeout = 0; 2436 dcmd->timeout = 0;
2437 dcmd->pad_0 = 0;
1930 dcmd->data_xfer_len = sizeof(struct megasas_ctrl_info); 2438 dcmd->data_xfer_len = sizeof(struct megasas_ctrl_info);
1931 dcmd->opcode = MR_DCMD_CTRL_GET_INFO; 2439 dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
1932 dcmd->sgl.sge32[0].phys_addr = ci_h; 2440 dcmd->sgl.sge32[0].phys_addr = ci_h;
@@ -2081,13 +2589,17 @@ static int megasas_init_mfi(struct megasas_instance *instance)
2081 * Map the message registers 2589 * Map the message registers
2082 */ 2590 */
2083 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1078GEN2) || 2591 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
2592 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
2593 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
2084 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0079GEN2)) { 2594 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0079GEN2)) {
2085 instance->base_addr = pci_resource_start(instance->pdev, 1); 2595 instance->base_addr = pci_resource_start(instance->pdev, 1);
2086 } else { 2596 } else {
2087 instance->base_addr = pci_resource_start(instance->pdev, 0); 2597 instance->base_addr = pci_resource_start(instance->pdev, 0);
2088 } 2598 }
2089 2599
2090 if (pci_request_regions(instance->pdev, "megasas: LSI")) { 2600 if (pci_request_selected_regions(instance->pdev,
2601 pci_select_bars(instance->pdev, IORESOURCE_MEM),
2602 "megasas: LSI")) {
2091 printk(KERN_DEBUG "megasas: IO memory region busy!\n"); 2603 printk(KERN_DEBUG "megasas: IO memory region busy!\n");
2092 return -EBUSY; 2604 return -EBUSY;
2093 } 2605 }
@@ -2111,6 +2623,10 @@ static int megasas_init_mfi(struct megasas_instance *instance)
2111 case PCI_DEVICE_ID_LSI_SAS0079GEN2: 2623 case PCI_DEVICE_ID_LSI_SAS0079GEN2:
2112 instance->instancet = &megasas_instance_template_gen2; 2624 instance->instancet = &megasas_instance_template_gen2;
2113 break; 2625 break;
2626 case PCI_DEVICE_ID_LSI_SAS0073SKINNY:
2627 case PCI_DEVICE_ID_LSI_SAS0071SKINNY:
2628 instance->instancet = &megasas_instance_template_skinny;
2629 break;
2114 case PCI_DEVICE_ID_LSI_SAS1064R: 2630 case PCI_DEVICE_ID_LSI_SAS1064R:
2115 case PCI_DEVICE_ID_DELL_PERC5: 2631 case PCI_DEVICE_ID_DELL_PERC5:
2116 default: 2632 default:
@@ -2166,6 +2682,13 @@ static int megasas_init_mfi(struct megasas_instance *instance)
2166 if (megasas_issue_init_mfi(instance)) 2682 if (megasas_issue_init_mfi(instance))
2167 goto fail_fw_init; 2683 goto fail_fw_init;
2168 2684
 2685 memset(instance->pd_list, 0,
2686 (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
2687 megasas_get_pd_list(instance);
2688
2689 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
2690 megasas_get_ld_list(instance);
2691
2169 ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL); 2692 ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL);
2170 2693
2171 /* 2694 /*
@@ -2220,7 +2743,8 @@ static int megasas_init_mfi(struct megasas_instance *instance)
2220 iounmap(instance->reg_set); 2743 iounmap(instance->reg_set);
2221 2744
2222 fail_ioremap: 2745 fail_ioremap:
2223 pci_release_regions(instance->pdev); 2746 pci_release_selected_regions(instance->pdev,
2747 pci_select_bars(instance->pdev, IORESOURCE_MEM));
2224 2748
2225 return -EINVAL; 2749 return -EINVAL;
2226} 2750}
@@ -2240,7 +2764,8 @@ static void megasas_release_mfi(struct megasas_instance *instance)
2240 2764
2241 iounmap(instance->reg_set); 2765 iounmap(instance->reg_set);
2242 2766
2243 pci_release_regions(instance->pdev); 2767 pci_release_selected_regions(instance->pdev,
2768 pci_select_bars(instance->pdev, IORESOURCE_MEM));
2244} 2769}
2245 2770
2246/** 2771/**
@@ -2288,6 +2813,7 @@ megasas_get_seq_num(struct megasas_instance *instance,
2288 dcmd->sge_count = 1; 2813 dcmd->sge_count = 1;
2289 dcmd->flags = MFI_FRAME_DIR_READ; 2814 dcmd->flags = MFI_FRAME_DIR_READ;
2290 dcmd->timeout = 0; 2815 dcmd->timeout = 0;
2816 dcmd->pad_0 = 0;
2291 dcmd->data_xfer_len = sizeof(struct megasas_evt_log_info); 2817 dcmd->data_xfer_len = sizeof(struct megasas_evt_log_info);
2292 dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO; 2818 dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
2293 dcmd->sgl.sge32[0].phys_addr = el_info_h; 2819 dcmd->sgl.sge32[0].phys_addr = el_info_h;
@@ -2402,6 +2928,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
2402 dcmd->sge_count = 1; 2928 dcmd->sge_count = 1;
2403 dcmd->flags = MFI_FRAME_DIR_READ; 2929 dcmd->flags = MFI_FRAME_DIR_READ;
2404 dcmd->timeout = 0; 2930 dcmd->timeout = 0;
2931 dcmd->pad_0 = 0;
2405 dcmd->data_xfer_len = sizeof(struct megasas_evt_detail); 2932 dcmd->data_xfer_len = sizeof(struct megasas_evt_detail);
2406 dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT; 2933 dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
2407 dcmd->mbox.w[0] = seq_num; 2934 dcmd->mbox.w[0] = seq_num;
@@ -2409,6 +2936,11 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
2409 dcmd->sgl.sge32[0].phys_addr = (u32) instance->evt_detail_h; 2936 dcmd->sgl.sge32[0].phys_addr = (u32) instance->evt_detail_h;
2410 dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_detail); 2937 dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_detail);
2411 2938
2939 if (instance->aen_cmd != NULL) {
2940 megasas_return_cmd(instance, cmd);
2941 return 0;
2942 }
2943
2412 /* 2944 /*
2413 * Store reference to the cmd used to register for AEN. When an 2945 * Store reference to the cmd used to register for AEN. When an
2414 * application wants us to register for AEN, we have to abort this 2946 * application wants us to register for AEN, we have to abort this
@@ -2419,7 +2951,8 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
2419 /* 2951 /*
2420 * Issue the aen registration frame 2952 * Issue the aen registration frame
2421 */ 2953 */
2422 instance->instancet->fire_cmd(cmd->frame_phys_addr ,0,instance->reg_set); 2954 instance->instancet->fire_cmd(instance,
2955 cmd->frame_phys_addr, 0, instance->reg_set);
2423 2956
2424 return 0; 2957 return 0;
2425} 2958}
@@ -2465,7 +2998,13 @@ static int megasas_io_attach(struct megasas_instance *instance)
2465 */ 2998 */
2466 host->irq = instance->pdev->irq; 2999 host->irq = instance->pdev->irq;
2467 host->unique_id = instance->unique_id; 3000 host->unique_id = instance->unique_id;
2468 host->can_queue = instance->max_fw_cmds - MEGASAS_INT_CMDS; 3001 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3002 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
3003 host->can_queue =
3004 instance->max_fw_cmds - MEGASAS_SKINNY_INT_CMDS;
3005 } else
3006 host->can_queue =
3007 instance->max_fw_cmds - MEGASAS_INT_CMDS;
2469 host->this_id = instance->init_id; 3008 host->this_id = instance->init_id;
2470 host->sg_tablesize = instance->max_num_sge; 3009 host->sg_tablesize = instance->max_num_sge;
2471 host->max_sectors = instance->max_sectors_per_req; 3010 host->max_sectors = instance->max_sectors_per_req;
@@ -2537,7 +3076,7 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2537 /* 3076 /*
2538 * PCI prepping: enable device set bus mastering and dma mask 3077 * PCI prepping: enable device set bus mastering and dma mask
2539 */ 3078 */
2540 rval = pci_enable_device(pdev); 3079 rval = pci_enable_device_mem(pdev);
2541 3080
2542 if (rval) { 3081 if (rval) {
2543 return rval; 3082 return rval;
@@ -2572,6 +3111,9 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2572 3111
2573 *instance->producer = 0; 3112 *instance->producer = 0;
2574 *instance->consumer = 0; 3113 *instance->consumer = 0;
3114 megasas_poll_wait_aen = 0;
3115 instance->flag_ieee = 0;
3116 instance->ev = NULL;
2575 3117
2576 instance->evt_detail = pci_alloc_consistent(pdev, 3118 instance->evt_detail = pci_alloc_consistent(pdev,
2577 sizeof(struct 3119 sizeof(struct
@@ -2595,10 +3137,11 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2595 init_waitqueue_head(&instance->abort_cmd_wait_q); 3137 init_waitqueue_head(&instance->abort_cmd_wait_q);
2596 3138
2597 spin_lock_init(&instance->cmd_pool_lock); 3139 spin_lock_init(&instance->cmd_pool_lock);
3140 spin_lock_init(&instance->fire_lock);
2598 spin_lock_init(&instance->completion_lock); 3141 spin_lock_init(&instance->completion_lock);
3142 spin_lock_init(&poll_aen_lock);
2599 3143
2600 mutex_init(&instance->aen_mutex); 3144 mutex_init(&instance->aen_mutex);
2601 sema_init(&instance->ioctl_sem, MEGASAS_INT_CMDS);
2602 3145
2603 /* 3146 /*
2604 * Initialize PCI related and misc parameters 3147 * Initialize PCI related and misc parameters
@@ -2608,8 +3151,16 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2608 instance->unique_id = pdev->bus->number << 8 | pdev->devfn; 3151 instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
2609 instance->init_id = MEGASAS_DEFAULT_INIT_ID; 3152 instance->init_id = MEGASAS_DEFAULT_INIT_ID;
2610 3153
3154 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3155 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
3156 instance->flag_ieee = 1;
3157 sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
3158 } else
3159 sema_init(&instance->ioctl_sem, MEGASAS_INT_CMDS);
3160
2611 megasas_dbg_lvl = 0; 3161 megasas_dbg_lvl = 0;
2612 instance->flag = 0; 3162 instance->flag = 0;
3163 instance->unload = 1;
2613 instance->last_time = 0; 3164 instance->last_time = 0;
2614 3165
2615 /* 3166 /*
@@ -2655,6 +3206,7 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2655 if (megasas_io_attach(instance)) 3206 if (megasas_io_attach(instance))
2656 goto fail_io_attach; 3207 goto fail_io_attach;
2657 3208
3209 instance->unload = 0;
2658 return 0; 3210 return 0;
2659 3211
2660 fail_start_aen: 3212 fail_start_aen:
@@ -2715,6 +3267,7 @@ static void megasas_flush_cache(struct megasas_instance *instance)
2715 dcmd->sge_count = 0; 3267 dcmd->sge_count = 0;
2716 dcmd->flags = MFI_FRAME_DIR_NONE; 3268 dcmd->flags = MFI_FRAME_DIR_NONE;
2717 dcmd->timeout = 0; 3269 dcmd->timeout = 0;
3270 dcmd->pad_0 = 0;
2718 dcmd->data_xfer_len = 0; 3271 dcmd->data_xfer_len = 0;
2719 dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH; 3272 dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
2720 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; 3273 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
@@ -2754,6 +3307,7 @@ static void megasas_shutdown_controller(struct megasas_instance *instance,
2754 dcmd->sge_count = 0; 3307 dcmd->sge_count = 0;
2755 dcmd->flags = MFI_FRAME_DIR_NONE; 3308 dcmd->flags = MFI_FRAME_DIR_NONE;
2756 dcmd->timeout = 0; 3309 dcmd->timeout = 0;
3310 dcmd->pad_0 = 0;
2757 dcmd->data_xfer_len = 0; 3311 dcmd->data_xfer_len = 0;
2758 dcmd->opcode = opcode; 3312 dcmd->opcode = opcode;
2759 3313
@@ -2778,12 +3332,23 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
2778 3332
2779 instance = pci_get_drvdata(pdev); 3333 instance = pci_get_drvdata(pdev);
2780 host = instance->host; 3334 host = instance->host;
3335 instance->unload = 1;
2781 3336
2782 if (poll_mode_io) 3337 if (poll_mode_io)
2783 del_timer_sync(&instance->io_completion_timer); 3338 del_timer_sync(&instance->io_completion_timer);
2784 3339
2785 megasas_flush_cache(instance); 3340 megasas_flush_cache(instance);
2786 megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN); 3341 megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
3342
 3343 /* cancel the delayed work if it is still queued */
3344 if (instance->ev != NULL) {
3345 struct megasas_aen_event *ev = instance->ev;
3346 cancel_delayed_work(
3347 (struct delayed_work *)&ev->hotplug_work);
3348 flush_scheduled_work();
3349 instance->ev = NULL;
3350 }
3351
2787 tasklet_kill(&instance->isr_tasklet); 3352 tasklet_kill(&instance->isr_tasklet);
2788 3353
2789 pci_set_drvdata(instance->pdev, instance); 3354 pci_set_drvdata(instance->pdev, instance);
@@ -2818,7 +3383,7 @@ megasas_resume(struct pci_dev *pdev)
2818 /* 3383 /*
2819 * PCI prepping: enable device set bus mastering and dma mask 3384 * PCI prepping: enable device set bus mastering and dma mask
2820 */ 3385 */
2821 rval = pci_enable_device(pdev); 3386 rval = pci_enable_device_mem(pdev);
2822 3387
2823 if (rval) { 3388 if (rval) {
2824 printk(KERN_ERR "megasas: Enable device failed\n"); 3389 printk(KERN_ERR "megasas: Enable device failed\n");
@@ -2873,6 +3438,8 @@ megasas_resume(struct pci_dev *pdev)
2873 megasas_start_timer(instance, &instance->io_completion_timer, 3438 megasas_start_timer(instance, &instance->io_completion_timer,
2874 megasas_io_completion_timer, 3439 megasas_io_completion_timer,
2875 MEGASAS_COMPLETION_TIMER_INTERVAL); 3440 MEGASAS_COMPLETION_TIMER_INTERVAL);
3441 instance->unload = 0;
3442
2876 return 0; 3443 return 0;
2877 3444
2878fail_irq: 3445fail_irq:
@@ -2913,6 +3480,7 @@ static void __devexit megasas_detach_one(struct pci_dev *pdev)
2913 struct megasas_instance *instance; 3480 struct megasas_instance *instance;
2914 3481
2915 instance = pci_get_drvdata(pdev); 3482 instance = pci_get_drvdata(pdev);
3483 instance->unload = 1;
2916 host = instance->host; 3484 host = instance->host;
2917 3485
2918 if (poll_mode_io) 3486 if (poll_mode_io)
@@ -2921,6 +3489,16 @@ static void __devexit megasas_detach_one(struct pci_dev *pdev)
2921 scsi_remove_host(instance->host); 3489 scsi_remove_host(instance->host);
2922 megasas_flush_cache(instance); 3490 megasas_flush_cache(instance);
2923 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); 3491 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
3492
3493 /* cancel the delayed work if it is still queued */
3494 if (instance->ev != NULL) {
3495 struct megasas_aen_event *ev = instance->ev;
3496 cancel_delayed_work(
3497 (struct delayed_work *)&ev->hotplug_work);
3498 flush_scheduled_work();
3499 instance->ev = NULL;
3500 }
3501
2924 tasklet_kill(&instance->isr_tasklet); 3502 tasklet_kill(&instance->isr_tasklet);
2925 3503
2926 /* 3504 /*
@@ -2969,6 +3547,7 @@ static void __devexit megasas_detach_one(struct pci_dev *pdev)
2969static void megasas_shutdown(struct pci_dev *pdev) 3547static void megasas_shutdown(struct pci_dev *pdev)
2970{ 3548{
2971 struct megasas_instance *instance = pci_get_drvdata(pdev); 3549 struct megasas_instance *instance = pci_get_drvdata(pdev);
3550 instance->unload = 1;
2972 megasas_flush_cache(instance); 3551 megasas_flush_cache(instance);
2973 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); 3552 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
2974} 3553}
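The teardown paths above all set instance->unload so the ioctl handlers further down start refusing work, and suspend and detach additionally cancel any queued AEN hotplug work before killing the ISR tasklet. A minimal sketch of that cancellation step, assuming the 2.6.3x workqueue API the patch itself uses (cancel_delayed_work() plus flush_scheduled_work()); the struct is a hypothetical stand-in for struct megasas_aen_event:

    #include <linux/workqueue.h>

    /* Hypothetical stand-in for struct megasas_aen_event. The patch
     * stores a plain work_struct and casts it; this sketch declares
     * the delayed_work that cancel_delayed_work() actually expects. */
    struct demo_aen_event {
            struct delayed_work hotplug_work;
    };

    static void demo_cancel_hotplug(struct demo_aen_event **evp)
    {
            struct demo_aen_event *ev = *evp;

            if (ev != NULL) {
                    /* Drop the item if it has not started running... */
                    cancel_delayed_work(&ev->hotplug_work);
                    /* ...and wait out one that already has (on this
                     * era's API this flushes the whole shared kernel
                     * workqueue). */
                    flush_scheduled_work();
                    *evp = NULL;
            }
    }
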
@@ -3016,6 +3595,23 @@ static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
3016} 3595}
3017 3596
3018/** 3597/**
3598 * megasas_mgmt_poll - char node "poll" entry point
3599 */
3600static unsigned int megasas_mgmt_poll(struct file *file, poll_table *wait)
3601{
3602 unsigned int mask;
3603 unsigned long flags;
3604 poll_wait(file, &megasas_poll_wait, wait);
3605 spin_lock_irqsave(&poll_aen_lock, flags);
3606 if (megasas_poll_wait_aen)
3607 mask = (POLLIN | POLLRDNORM);
3608 else
3609 mask = 0;
3610 spin_unlock_irqrestore(&poll_aen_lock, flags);
3611 return mask;
3612}
3613
3614/**
3019 * megasas_mgmt_fw_ioctl - Issues management ioctls to FW 3615 * megasas_mgmt_fw_ioctl - Issues management ioctls to FW
3020 * @instance: Adapter soft state 3616 * @instance: Adapter soft state
3021 * @argp: User's ioctl packet 3617 * @argp: User's ioctl packet
@@ -3032,7 +3628,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
3032 int error = 0, i; 3628 int error = 0, i;
3033 void *sense = NULL; 3629 void *sense = NULL;
3034 dma_addr_t sense_handle; 3630 dma_addr_t sense_handle;
3035 u32 *sense_ptr; 3631 unsigned long *sense_ptr;
3036 3632
3037 memset(kbuff_arr, 0, sizeof(kbuff_arr)); 3633 memset(kbuff_arr, 0, sizeof(kbuff_arr));
3038 3634
@@ -3056,6 +3652,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
3056 */ 3652 */
3057 memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE); 3653 memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
3058 cmd->frame->hdr.context = cmd->index; 3654 cmd->frame->hdr.context = cmd->index;
3655 cmd->frame->hdr.pad_0 = 0;
3059 3656
3060 /* 3657 /*
3061 * The management interface between applications and the fw uses 3658 * The management interface between applications and the fw uses
@@ -3109,7 +3706,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
3109 } 3706 }
3110 3707
3111 sense_ptr = 3708 sense_ptr =
3112 (u32 *) ((unsigned long)cmd->frame + ioc->sense_off); 3709 (unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
3113 *sense_ptr = sense_handle; 3710 *sense_ptr = sense_handle;
3114 } 3711 }
3115 3712
@@ -3140,8 +3737,8 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
3140 * sense_ptr points to the location that has the user 3737 * sense_ptr points to the location that has the user
3141 * sense buffer address 3738 * sense buffer address
3142 */ 3739 */
3143 sense_ptr = (u32 *) ((unsigned long)ioc->frame.raw + 3740 sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw +
3144 ioc->sense_off); 3741 ioc->sense_off);
3145 3742
3146 if (copy_to_user((void __user *)((unsigned long)(*sense_ptr)), 3743 if (copy_to_user((void __user *)((unsigned long)(*sense_ptr)),
3147 sense, ioc->sense_len)) { 3744 sense, ioc->sense_len)) {
@@ -3177,20 +3774,6 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
3177 return error; 3774 return error;
3178} 3775}
3179 3776
3180static struct megasas_instance *megasas_lookup_instance(u16 host_no)
3181{
3182 int i;
3183
3184 for (i = 0; i < megasas_mgmt_info.max_index; i++) {
3185
3186 if ((megasas_mgmt_info.instance[i]) &&
3187 (megasas_mgmt_info.instance[i]->host->host_no == host_no))
3188 return megasas_mgmt_info.instance[i];
3189 }
3190
3191 return NULL;
3192}
3193
3194static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg) 3777static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
3195{ 3778{
3196 struct megasas_iocpacket __user *user_ioc = 3779 struct megasas_iocpacket __user *user_ioc =
@@ -3214,6 +3797,17 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
3214 goto out_kfree_ioc; 3797 goto out_kfree_ioc;
3215 } 3798 }
3216 3799
3800 if (instance->hw_crit_error == 1) {
3801 printk(KERN_DEBUG "Controller in critical error\n");
3802 error = -ENODEV;
3803 goto out_kfree_ioc;
3804 }
3805
3806 if (instance->unload == 1) {
3807 error = -ENODEV;
3808 goto out_kfree_ioc;
3809 }
3810
3217 /* 3811 /*
3218 * We will allow only MEGASAS_INT_CMDS number of parallel ioctl cmds 3812 * We will allow only MEGASAS_INT_CMDS number of parallel ioctl cmds
3219 */ 3813 */
@@ -3249,6 +3843,14 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
3249 if (!instance) 3843 if (!instance)
3250 return -ENODEV; 3844 return -ENODEV;
3251 3845
3846 if (instance->hw_crit_error == 1) {
3847 return -ENODEV;
3848 }
3849
3850 if (instance->unload == 1) {
3851 return -ENODEV;
3852 }
3853
3252 mutex_lock(&instance->aen_mutex); 3854 mutex_lock(&instance->aen_mutex);
3253 error = megasas_register_aen(instance, aen.seq_num, 3855 error = megasas_register_aen(instance, aen.seq_num,
3254 aen.class_locale_word); 3856 aen.class_locale_word);
@@ -3282,6 +3884,7 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
3282 compat_alloc_user_space(sizeof(struct megasas_iocpacket)); 3884 compat_alloc_user_space(sizeof(struct megasas_iocpacket));
3283 int i; 3885 int i;
3284 int error = 0; 3886 int error = 0;
3887 compat_uptr_t ptr;
3285 3888
3286 if (clear_user(ioc, sizeof(*ioc))) 3889 if (clear_user(ioc, sizeof(*ioc)))
3287 return -EFAULT; 3890 return -EFAULT;
@@ -3294,9 +3897,22 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
3294 copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32))) 3897 copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32)))
3295 return -EFAULT; 3898 return -EFAULT;
3296 3899
3297 for (i = 0; i < MAX_IOCTL_SGE; i++) { 3900 /*
3298 compat_uptr_t ptr; 3901 * The sense_ptr is used in megasas_mgmt_fw_ioctl only when
3902 * sense_len is non-zero, so prepare the 64-bit value under
3903 * the same condition.
3904 */
3905 if (ioc->sense_len) {
3906 void __user **sense_ioc_ptr =
3907 (void __user **)(ioc->frame.raw + ioc->sense_off);
3908 compat_uptr_t *sense_cioc_ptr =
3909 (compat_uptr_t *)(cioc->frame.raw + cioc->sense_off);
3910 if (get_user(ptr, sense_cioc_ptr) ||
3911 put_user(compat_ptr(ptr), sense_ioc_ptr))
3912 return -EFAULT;
3913 }
3299 3914
3915 for (i = 0; i < MAX_IOCTL_SGE; i++) {
3300 if (get_user(ptr, &cioc->sgl[i].iov_base) || 3916 if (get_user(ptr, &cioc->sgl[i].iov_base) ||
3301 put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) || 3917 put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) ||
3302 copy_in_user(&ioc->sgl[i].iov_len, 3918 copy_in_user(&ioc->sgl[i].iov_len,
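The reworked compat path above exists because the MFI frame stores the sense-buffer address as a native 64-bit pointer at sense_off, while a 32-bit caller wrote only a compat_uptr_t there; the handler therefore reads the 32-bit value out of the caller's frame and rewrites it zero-extended, and only when sense_len says a sense buffer is actually in play. A userspace-compilable sketch of just the widening step (the offset and frame size are made up, not the real MFI layout):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define SENSE_OFF 32    /* hypothetical offset, not the MFI layout */

    int main(void)
    {
            unsigned char frame[64] = {0};
            uint32_t compat_addr = 0x8040b000u; /* what a 32-bit app stored */
            uint32_t narrow;
            uint64_t wide;

            /* The 32-bit caller wrote only four bytes at sense_off... */
            memcpy(frame + SENSE_OFF, &compat_addr, sizeof(compat_addr));

            /* ...so read them back and store the zero-extended value,
             * mirroring the get_user()/compat_ptr()/put_user() trio. */
            memcpy(&narrow, frame + SENSE_OFF, sizeof(narrow));
            wide = narrow;
            memcpy(frame + SENSE_OFF, &wide, sizeof(wide));

            printf("sense pointer widened to 0x%llx\n",
                   (unsigned long long)wide);
            return 0;
    }

Pulling the fixup out of the SGE loop also tidies the old scoping: ptr is now declared once at function scope and reused for both the sense pointer and the iov_base entries.
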
@@ -3337,6 +3953,7 @@ static const struct file_operations megasas_mgmt_fops = {
3337 .open = megasas_mgmt_open, 3953 .open = megasas_mgmt_open,
3338 .fasync = megasas_mgmt_fasync, 3954 .fasync = megasas_mgmt_fasync,
3339 .unlocked_ioctl = megasas_mgmt_ioctl, 3955 .unlocked_ioctl = megasas_mgmt_ioctl,
3956 .poll = megasas_mgmt_poll,
3340#ifdef CONFIG_COMPAT 3957#ifdef CONFIG_COMPAT
3341 .compat_ioctl = megasas_mgmt_compat_ioctl, 3958 .compat_ioctl = megasas_mgmt_compat_ioctl,
3342#endif 3959#endif
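Wiring .poll into megasas_mgmt_fops means a management application no longer has to park a thread in a blocking AEN ioctl: it can sleep in poll(2) or select(2) on the management node and wake when megasas_poll_wait_aen is raised. A minimal consumer sketch; the node path is an assumption (check how udev names the misc device on a real system):

    #include <fcntl.h>
    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* Hypothetical node name; verify against /proc/misc. */
            int fd = open("/dev/megaraid_sas_ioctl_node", O_RDONLY);
            struct pollfd pfd;

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            pfd.fd = fd;
            pfd.events = POLLIN | POLLRDNORM;

            /* Sleep until the driver flags a pending AEN, 30s max. */
            if (poll(&pfd, 1, 30000) > 0 && (pfd.revents & POLLIN))
                    printf("AEN pending; fetch it via the AEN ioctl\n");

            close(fd);
            return 0;
    }

This pairs with the new support_poll_for_event sysfs attribute further down, which lets applications probe whether the loaded driver offers the poll interface before relying on it.
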
@@ -3378,6 +3995,15 @@ static DRIVER_ATTR(release_date, S_IRUGO, megasas_sysfs_show_release_date,
3378 NULL); 3995 NULL);
3379 3996
3380static ssize_t 3997static ssize_t
3998megasas_sysfs_show_support_poll_for_event(struct device_driver *dd, char *buf)
3999{
4000 return sprintf(buf, "%u\n", support_poll_for_event);
4001}
4002
4003static DRIVER_ATTR(support_poll_for_event, S_IRUGO,
4004 megasas_sysfs_show_support_poll_for_event, NULL);
4005
4006static ssize_t
3381megasas_sysfs_show_dbg_lvl(struct device_driver *dd, char *buf) 4007megasas_sysfs_show_dbg_lvl(struct device_driver *dd, char *buf)
3382{ 4008{
3383 return sprintf(buf, "%u\n", megasas_dbg_lvl); 4009 return sprintf(buf, "%u\n", megasas_dbg_lvl);
@@ -3451,7 +4077,235 @@ out:
3451 return retval; 4077 return retval;
3452} 4078}
3453 4079
3454static DRIVER_ATTR(poll_mode_io, S_IRUGO|S_IWUGO, 4080static void
4081megasas_aen_polling(struct work_struct *work)
4082{
4083 struct megasas_aen_event *ev =
4084 container_of(work, struct megasas_aen_event, hotplug_work);
4085 struct megasas_instance *instance = ev->instance;
4086 union megasas_evt_class_locale class_locale;
4087 struct Scsi_Host *host;
4088 struct scsi_device *sdev1;
4089 u16 pd_index = 0;
4090 u16 ld_index = 0;
4091 int i, j, doscan = 0;
4092 u32 seq_num;
4093 int error;
4094
4095 if (!instance) {
4096 printk(KERN_ERR "invalid instance!\n");
4097 kfree(ev);
4098 return;
4099 }
4100 instance->ev = NULL;
4101 host = instance->host;
4102 if (instance->evt_detail) {
4103
4104 switch (instance->evt_detail->code) {
4105 case MR_EVT_PD_INSERTED:
4106 if (megasas_get_pd_list(instance) == 0) {
4107 for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
4108 for (j = 0;
4109 j < MEGASAS_MAX_DEV_PER_CHANNEL;
4110 j++) {
4111
4112 pd_index =
4113 (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
4114
4115 sdev1 =
4116 scsi_device_lookup(host, i, j, 0);
4117
4118 if (instance->pd_list[pd_index].driveState
4119 == MR_PD_STATE_SYSTEM) {
4120 if (!sdev1) {
4121 scsi_add_device(host, i, j, 0);
4122 }
4123
4124 if (sdev1)
4125 scsi_device_put(sdev1);
4126 }
4127 }
4128 }
4129 }
4130 doscan = 0;
4131 break;
4132
4133 case MR_EVT_PD_REMOVED:
4134 if (megasas_get_pd_list(instance) == 0) {
4136 for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
4137 for (j = 0;
4138 j < MEGASAS_MAX_DEV_PER_CHANNEL;
4139 j++) {
4140
4141 pd_index =
4142 (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
4143
4144 sdev1 =
4145 scsi_device_lookup(host, i, j, 0);
4146
4147 if (instance->pd_list[pd_index].driveState
4148 == MR_PD_STATE_SYSTEM) {
4149 if (sdev1) {
4150 scsi_device_put(sdev1);
4151 }
4152 } else {
4153 if (sdev1) {
4154 scsi_remove_device(sdev1);
4155 scsi_device_put(sdev1);
4156 }
4157 }
4158 }
4159 }
4160 }
4161 doscan = 0;
4162 break;
4163
4164 case MR_EVT_LD_OFFLINE:
4165 case MR_EVT_LD_DELETED:
4166 megasas_get_ld_list(instance);
4167 for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
4168 for (j = 0;
4169 j < MEGASAS_MAX_DEV_PER_CHANNEL;
4170 j++) {
4171
4172 ld_index =
4173 (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
4174
4175 sdev1 = scsi_device_lookup(host,
4176 i + MEGASAS_MAX_LD_CHANNELS,
4177 j,
4178 0);
4179
4180 if (instance->ld_ids[ld_index] != 0xff) {
4181 if (sdev1) {
4182 scsi_device_put(sdev1);
4183 }
4184 } else {
4185 if (sdev1) {
4186 scsi_remove_device(sdev1);
4187 scsi_device_put(sdev1);
4188 }
4189 }
4190 }
4191 }
4192 doscan = 0;
4193 break;
4194 case MR_EVT_LD_CREATED:
4195 megasas_get_ld_list(instance);
4196 for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
4197 for (j = 0;
4198 j < MEGASAS_MAX_DEV_PER_CHANNEL;
4199 j++) {
4200 ld_index =
4201 (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
4202
4203 sdev1 = scsi_device_lookup(host,
4204 i+MEGASAS_MAX_LD_CHANNELS,
4205 j, 0);
4206
4207 if (instance->ld_ids[ld_index] !=
4208 0xff) {
4209 if (!sdev1) {
4210 scsi_add_device(host,
4211 i + 2,
4212 j, 0);
4213 }
4214 }
4215 if (sdev1) {
4216 scsi_device_put(sdev1);
4217 }
4218 }
4219 }
4220 doscan = 0;
4221 break;
4222 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
4223 case MR_EVT_FOREIGN_CFG_IMPORTED:
4224 doscan = 1;
4225 break;
4226 default:
4227 doscan = 0;
4228 break;
4229 }
4230 } else {
4231 printk(KERN_ERR "invalid evt_detail!\n");
4232 kfree(ev);
4233 return;
4234 }
4235
4236 if (doscan) {
4237 printk(KERN_INFO "scanning ...\n");
4238 megasas_get_pd_list(instance);
4239 for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
4240 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
4241 pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
4242 sdev1 = scsi_device_lookup(host, i, j, 0);
4243 if (instance->pd_list[pd_index].driveState ==
4244 MR_PD_STATE_SYSTEM) {
4245 if (!sdev1) {
4246 scsi_add_device(host, i, j, 0);
4247 }
4248 if (sdev1)
4249 scsi_device_put(sdev1);
4250 } else {
4251 if (sdev1) {
4252 scsi_remove_device(sdev1);
4253 scsi_device_put(sdev1);
4254 }
4255 }
4256 }
4257 }
4258
4259 megasas_get_ld_list(instance);
4260 for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
4261 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
4262 ld_index =
4263 (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
4264
4265 sdev1 = scsi_device_lookup(host,
4266 i+MEGASAS_MAX_LD_CHANNELS, j, 0);
4267 if (instance->ld_ids[ld_index] != 0xff) {
4268 if (!sdev1) {
4269 scsi_add_device(host,
4270 i+2,
4271 j, 0);
4272 } else {
4273 scsi_device_put(sdev1);
4274 }
4275 } else {
4276 if (sdev1) {
4277 scsi_remove_device(sdev1);
4278 scsi_device_put(sdev1);
4279 }
4280 }
4281 }
4282 }
4283 }
4284
4285 if (instance->aen_cmd != NULL) {
4286 kfree(ev);
4287 return;
4288 }
4289
4290 seq_num = instance->evt_detail->seq_num + 1;
4291
4292 /* Register AEN with FW for latest sequence number plus 1 */
4293 class_locale.members.reserved = 0;
4294 class_locale.members.locale = MR_EVT_LOCALE_ALL;
4295 class_locale.members.class = MR_EVT_CLASS_DEBUG;
4296 mutex_lock(&instance->aen_mutex);
4297 error = megasas_register_aen(instance, seq_num,
4298 class_locale.word);
4299 mutex_unlock(&instance->aen_mutex);
4300
4301 if (error)
4302 printk(KERN_ERR "register aen failed error %x\n", error);
4303
4304 kfree(ev);
4305}
4306
4307
4308static DRIVER_ATTR(poll_mode_io, S_IRUGO|S_IWUSR,
3455 megasas_sysfs_show_poll_mode_io, 4309 megasas_sysfs_show_poll_mode_io,
3456 megasas_sysfs_set_poll_mode_io); 4310 megasas_sysfs_set_poll_mode_io);
3457 4311
@@ -3468,6 +4322,8 @@ static int __init megasas_init(void)
3468 printk(KERN_INFO "megasas: %s %s\n", MEGASAS_VERSION, 4322 printk(KERN_INFO "megasas: %s %s\n", MEGASAS_VERSION,
3469 MEGASAS_EXT_VERSION); 4323 MEGASAS_EXT_VERSION);
3470 4324
4325 support_poll_for_event = 2;
4326
3471 memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info)); 4327 memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
3472 4328
3473 /* 4329 /*
@@ -3500,6 +4356,12 @@ static int __init megasas_init(void)
3500 &driver_attr_release_date); 4356 &driver_attr_release_date);
3501 if (rval) 4357 if (rval)
3502 goto err_dcf_rel_date; 4358 goto err_dcf_rel_date;
4359
4360 rval = driver_create_file(&megasas_pci_driver.driver,
4361 &driver_attr_support_poll_for_event);
4362 if (rval)
4363 goto err_dcf_support_poll_for_event;
4364
3503 rval = driver_create_file(&megasas_pci_driver.driver, 4365 rval = driver_create_file(&megasas_pci_driver.driver,
3504 &driver_attr_dbg_lvl); 4366 &driver_attr_dbg_lvl);
3505 if (rval) 4367 if (rval)
@@ -3516,7 +4378,12 @@ err_dcf_poll_mode_io:
3516 &driver_attr_dbg_lvl); 4378 &driver_attr_dbg_lvl);
3517err_dcf_dbg_lvl: 4379err_dcf_dbg_lvl:
3518 driver_remove_file(&megasas_pci_driver.driver, 4380 driver_remove_file(&megasas_pci_driver.driver,
4381 &driver_attr_support_poll_for_event);
4382
4383err_dcf_support_poll_for_event:
4384 driver_remove_file(&megasas_pci_driver.driver,
3519 &driver_attr_release_date); 4385 &driver_attr_release_date);
4386
3520err_dcf_rel_date: 4387err_dcf_rel_date:
3521 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version); 4388 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
3522err_dcf_attr_ver: 4389err_dcf_attr_ver:
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 0d033248fdf1..9d8b6bf605aa 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -18,9 +18,9 @@
18/* 18/*
19 * MegaRAID SAS Driver meta data 19 * MegaRAID SAS Driver meta data
20 */ 20 */
21#define MEGASAS_VERSION "00.00.04.01" 21#define MEGASAS_VERSION "00.00.04.17.1-rc1"
22#define MEGASAS_RELDATE "July 24, 2008" 22#define MEGASAS_RELDATE "Oct. 29, 2009"
23#define MEGASAS_EXT_VERSION "Thu July 24 11:41:51 PST 2008" 23#define MEGASAS_EXT_VERSION "Thu. Oct. 29, 11:41:51 PST 2009"
24 24
25/* 25/*
26 * Device IDs 26 * Device IDs
@@ -30,6 +30,8 @@
30#define PCI_DEVICE_ID_LSI_VERDE_ZCR 0x0413 30#define PCI_DEVICE_ID_LSI_VERDE_ZCR 0x0413
31#define PCI_DEVICE_ID_LSI_SAS1078GEN2 0x0078 31#define PCI_DEVICE_ID_LSI_SAS1078GEN2 0x0078
32#define PCI_DEVICE_ID_LSI_SAS0079GEN2 0x0079 32#define PCI_DEVICE_ID_LSI_SAS0079GEN2 0x0079
33#define PCI_DEVICE_ID_LSI_SAS0073SKINNY 0x0073
34#define PCI_DEVICE_ID_LSI_SAS0071SKINNY 0x0071
33 35
34/* 36/*
35 * ===================================== 37 * =====================================
@@ -94,6 +96,7 @@
94#define MFI_FRAME_DIR_WRITE 0x0008 96#define MFI_FRAME_DIR_WRITE 0x0008
95#define MFI_FRAME_DIR_READ 0x0010 97#define MFI_FRAME_DIR_READ 0x0010
96#define MFI_FRAME_DIR_BOTH 0x0018 98#define MFI_FRAME_DIR_BOTH 0x0018
99#define MFI_FRAME_IEEE 0x0020
97 100
98/* 101/*
99 * Definition for cmd_status 102 * Definition for cmd_status
@@ -114,6 +117,7 @@
114#define MFI_CMD_STP 0x08 117#define MFI_CMD_STP 0x08
115 118
116#define MR_DCMD_CTRL_GET_INFO 0x01010000 119#define MR_DCMD_CTRL_GET_INFO 0x01010000
120#define MR_DCMD_LD_GET_LIST 0x03010000
117 121
118#define MR_DCMD_CTRL_CACHE_FLUSH 0x01101000 122#define MR_DCMD_CTRL_CACHE_FLUSH 0x01101000
119#define MR_FLUSH_CTRL_CACHE 0x01 123#define MR_FLUSH_CTRL_CACHE 0x01
@@ -131,6 +135,7 @@
131#define MR_DCMD_CLUSTER 0x08000000 135#define MR_DCMD_CLUSTER 0x08000000
132#define MR_DCMD_CLUSTER_RESET_ALL 0x08010100 136#define MR_DCMD_CLUSTER_RESET_ALL 0x08010100
133#define MR_DCMD_CLUSTER_RESET_LD 0x08010200 137#define MR_DCMD_CLUSTER_RESET_LD 0x08010200
138#define MR_DCMD_PD_LIST_QUERY 0x02010100
134 139
135/* 140/*
136 * MFI command completion codes 141 * MFI command completion codes
@@ -251,9 +256,126 @@ enum MR_EVT_ARGS {
251 MR_EVT_ARGS_STR, 256 MR_EVT_ARGS_STR,
252 MR_EVT_ARGS_TIME, 257 MR_EVT_ARGS_TIME,
253 MR_EVT_ARGS_ECC, 258 MR_EVT_ARGS_ECC,
259 MR_EVT_ARGS_LD_PROP,
260 MR_EVT_ARGS_PD_SPARE,
261 MR_EVT_ARGS_PD_INDEX,
262 MR_EVT_ARGS_DIAG_PASS,
263 MR_EVT_ARGS_DIAG_FAIL,
264 MR_EVT_ARGS_PD_LBA_LBA,
265 MR_EVT_ARGS_PORT_PHY,
266 MR_EVT_ARGS_PD_MISSING,
267 MR_EVT_ARGS_PD_ADDRESS,
268 MR_EVT_ARGS_BITMAP,
269 MR_EVT_ARGS_CONNECTOR,
270 MR_EVT_ARGS_PD_PD,
271 MR_EVT_ARGS_PD_FRU,
272 MR_EVT_ARGS_PD_PATHINFO,
273 MR_EVT_ARGS_PD_POWER_STATE,
274 MR_EVT_ARGS_GENERIC,
275};
254 276
277/*
278 * define constants for device list query options
279 */
280enum MR_PD_QUERY_TYPE {
281 MR_PD_QUERY_TYPE_ALL = 0,
282 MR_PD_QUERY_TYPE_STATE = 1,
283 MR_PD_QUERY_TYPE_POWER_STATE = 2,
284 MR_PD_QUERY_TYPE_MEDIA_TYPE = 3,
285 MR_PD_QUERY_TYPE_SPEED = 4,
286 MR_PD_QUERY_TYPE_EXPOSED_TO_HOST = 5,
255}; 287};
256 288
289#define MR_EVT_CFG_CLEARED 0x0004
290#define MR_EVT_LD_STATE_CHANGE 0x0051
291#define MR_EVT_PD_INSERTED 0x005b
292#define MR_EVT_PD_REMOVED 0x0070
293#define MR_EVT_LD_CREATED 0x008a
294#define MR_EVT_LD_DELETED 0x008b
295#define MR_EVT_FOREIGN_CFG_IMPORTED 0x00db
296#define MR_EVT_LD_OFFLINE 0x00fc
297#define MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED 0x0152
298#define MAX_LOGICAL_DRIVES 64
299
300enum MR_PD_STATE {
301 MR_PD_STATE_UNCONFIGURED_GOOD = 0x00,
302 MR_PD_STATE_UNCONFIGURED_BAD = 0x01,
303 MR_PD_STATE_HOT_SPARE = 0x02,
304 MR_PD_STATE_OFFLINE = 0x10,
305 MR_PD_STATE_FAILED = 0x11,
306 MR_PD_STATE_REBUILD = 0x14,
307 MR_PD_STATE_ONLINE = 0x18,
308 MR_PD_STATE_COPYBACK = 0x20,
309 MR_PD_STATE_SYSTEM = 0x40,
310 };
311
312
313 /*
314 * defines the physical drive address structure
315 */
316struct MR_PD_ADDRESS {
317 u16 deviceId;
318 u16 enclDeviceId;
319
320 union {
321 struct {
322 u8 enclIndex;
323 u8 slotNumber;
324 } mrPdAddress;
325 struct {
326 u8 enclPosition;
327 u8 enclConnectorIndex;
328 } mrEnclAddress;
329 };
330 u8 scsiDevType;
331 union {
332 u8 connectedPortBitmap;
333 u8 connectedPortNumbers;
334 };
335 u64 sasAddr[2];
336} __packed;
337
338/*
339 * defines the physical drive list structure
340 */
341struct MR_PD_LIST {
342 u32 size;
343 u32 count;
344 struct MR_PD_ADDRESS addr[1];
345} __packed;
346
347struct megasas_pd_list {
348 u16 tid;
349 u8 driveType;
350 u8 driveState;
351} __packed;
352
353 /*
354 * defines the logical drive reference structure
355 */
356union MR_LD_REF {
357 struct {
358 u8 targetId;
359 u8 reserved;
360 u16 seqNum;
361 };
362 u32 ref;
363} __packed;
364
365/*
366 * defines the logical drive list structure
367 */
368struct MR_LD_LIST {
369 u32 ldCount;
370 u32 reserved;
371 struct {
372 union MR_LD_REF ref;
373 u8 state;
374 u8 reserved[3];
375 u64 size;
376 } ldList[MAX_LOGICAL_DRIVES];
377} __packed;
378
257/* 379/*
258 * SAS controller properties 380 * SAS controller properties
259 */ 381 */
@@ -282,7 +404,7 @@ struct megasas_ctrl_prop {
282 u8 expose_encl_devices; 404 u8 expose_encl_devices;
283 u8 reserved[38]; 405 u8 reserved[38];
284 406
285} __attribute__ ((packed)); 407} __packed;
286 408
287/* 409/*
288 * SAS controller information 410 * SAS controller information
@@ -525,7 +647,7 @@ struct megasas_ctrl_info {
525 647
526 u8 pad[0x800 - 0x6a0]; 648 u8 pad[0x800 - 0x6a0];
527 649
528} __attribute__ ((packed)); 650} __packed;
529 651
530/* 652/*
531 * =============================== 653 * ===============================
@@ -540,6 +662,10 @@ struct megasas_ctrl_info {
540#define MEGASAS_DEFAULT_INIT_ID -1 662#define MEGASAS_DEFAULT_INIT_ID -1
541#define MEGASAS_MAX_LUN 8 663#define MEGASAS_MAX_LUN 8
542#define MEGASAS_MAX_LD 64 664#define MEGASAS_MAX_LD 64
665#define MEGASAS_MAX_PD (MEGASAS_MAX_PD_CHANNELS * \
666 MEGASAS_MAX_DEV_PER_CHANNEL)
667#define MEGASAS_MAX_LD_IDS (MEGASAS_MAX_LD_CHANNELS * \
668 MEGASAS_MAX_DEV_PER_CHANNEL)
543 669
544#define MEGASAS_DBG_LVL 1 670#define MEGASAS_DBG_LVL 1
545 671
@@ -570,6 +696,7 @@ struct megasas_ctrl_info {
570 * is shown below 696 * is shown below
571 */ 697 */
572#define MEGASAS_INT_CMDS 32 698#define MEGASAS_INT_CMDS 32
699#define MEGASAS_SKINNY_INT_CMDS 5
573 700
574/* 701/*
575 * FW can accept both 32 and 64 bit SGLs. We want to allocate 32/64 bit 702 * FW can accept both 32 and 64 bit SGLs. We want to allocate 32/64 bit
@@ -584,6 +711,8 @@ struct megasas_ctrl_info {
584#define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000 711#define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000
585#define MFI_REPLY_GEN2_MESSAGE_INTERRUPT 0x00000001 712#define MFI_REPLY_GEN2_MESSAGE_INTERRUPT 0x00000001
586#define MFI_GEN2_ENABLE_INTERRUPT_MASK (0x00000001 | 0x00000004) 713#define MFI_GEN2_ENABLE_INTERRUPT_MASK (0x00000001 | 0x00000004)
714#define MFI_REPLY_SKINNY_MESSAGE_INTERRUPT 0x40000000
715#define MFI_SKINNY_ENABLE_INTERRUPT_MASK (0x00000001)
587 716
588/* 717/*
589* register set for both 1068 and 1078 controllers 718* register set for both 1068 and 1078 controllers
@@ -644,10 +773,17 @@ struct megasas_sge64 {
644 773
645} __attribute__ ((packed)); 774} __attribute__ ((packed));
646 775
776struct megasas_sge_skinny {
777 u64 phys_addr;
778 u32 length;
779 u32 flag;
780} __packed;
781
647union megasas_sgl { 782union megasas_sgl {
648 783
649 struct megasas_sge32 sge32[1]; 784 struct megasas_sge32 sge32[1];
650 struct megasas_sge64 sge64[1]; 785 struct megasas_sge64 sge64[1];
786 struct megasas_sge_skinny sge_skinny[1];
651 787
652} __attribute__ ((packed)); 788} __attribute__ ((packed));
653 789
@@ -1061,16 +1197,10 @@ struct megasas_evt_detail {
1061 1197
1062} __attribute__ ((packed)); 1198} __attribute__ ((packed));
1063 1199
1064 struct megasas_instance_template { 1200struct megasas_aen_event {
1065 void (*fire_cmd)(dma_addr_t ,u32 ,struct megasas_register_set __iomem *); 1201 struct work_struct hotplug_work;
1066 1202 struct megasas_instance *instance;
1067 void (*enable_intr)(struct megasas_register_set __iomem *) ; 1203};
1068 void (*disable_intr)(struct megasas_register_set __iomem *);
1069
1070 int (*clear_intr)(struct megasas_register_set __iomem *);
1071
1072 u32 (*read_fw_status_reg)(struct megasas_register_set __iomem *);
1073 };
1074 1204
1075struct megasas_instance { 1205struct megasas_instance {
1076 1206
@@ -1085,17 +1215,22 @@ struct megasas_instance {
1085 unsigned long base_addr; 1215 unsigned long base_addr;
1086 struct megasas_register_set __iomem *reg_set; 1216 struct megasas_register_set __iomem *reg_set;
1087 1217
1218 struct megasas_pd_list pd_list[MEGASAS_MAX_PD];
1219 u8 ld_ids[MEGASAS_MAX_LD_IDS];
1088 s8 init_id; 1220 s8 init_id;
1089 1221
1090 u16 max_num_sge; 1222 u16 max_num_sge;
1091 u16 max_fw_cmds; 1223 u16 max_fw_cmds;
1092 u32 max_sectors_per_req; 1224 u32 max_sectors_per_req;
1225 struct megasas_aen_event *ev;
1093 1226
1094 struct megasas_cmd **cmd_list; 1227 struct megasas_cmd **cmd_list;
1095 struct list_head cmd_pool; 1228 struct list_head cmd_pool;
1096 spinlock_t cmd_pool_lock; 1229 spinlock_t cmd_pool_lock;
1097 /* used to synch producer, consumer ptrs in dpc */ 1230 /* used to synch producer, consumer ptrs in dpc */
1098 spinlock_t completion_lock; 1231 spinlock_t completion_lock;
1232 /* used to sync fire the cmd to fw */
1233 spinlock_t fire_lock;
1099 struct dma_pool *frame_dma_pool; 1234 struct dma_pool *frame_dma_pool;
1100 struct dma_pool *sense_dma_pool; 1235 struct dma_pool *sense_dma_pool;
1101 1236
@@ -1120,11 +1255,25 @@ struct megasas_instance {
1120 struct tasklet_struct isr_tasklet; 1255 struct tasklet_struct isr_tasklet;
1121 1256
1122 u8 flag; 1257 u8 flag;
1258 u8 unload;
1259 u8 flag_ieee;
1123 unsigned long last_time; 1260 unsigned long last_time;
1124 1261
1125 struct timer_list io_completion_timer; 1262 struct timer_list io_completion_timer;
1126}; 1263};
1127 1264
1265struct megasas_instance_template {
1266 void (*fire_cmd)(struct megasas_instance *, dma_addr_t, \
1267 u32, struct megasas_register_set __iomem *);
1268
1269 void (*enable_intr)(struct megasas_register_set __iomem *) ;
1270 void (*disable_intr)(struct megasas_register_set __iomem *);
1271
1272 int (*clear_intr)(struct megasas_register_set __iomem *);
1273
1274 u32 (*read_fw_status_reg)(struct megasas_register_set __iomem *);
1275};
1276
1128#define MEGASAS_IS_LOGICAL(scp) \ 1277#define MEGASAS_IS_LOGICAL(scp) \
1129 (scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1 1278 (scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1
1130 1279
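The new pd_list[MEGASAS_MAX_PD] and ld_ids[MEGASAS_MAX_LD_IDS] arrays in megasas_instance are indexed by flattening a (channel, target) pair the same way the scan loops in megasas_aen_polling() do: index = channel * MEGASAS_MAX_DEV_PER_CHANNEL + target. A standalone illustration with placeholder channel counts (the real constants live elsewhere in this header, outside the hunk):

    #include <stdio.h>

    /* Placeholder values; megaraid_sas.h defines the real ones. */
    #define MAX_PD_CHANNELS     2
    #define MAX_DEV_PER_CHANNEL 128
    #define MAX_PD (MAX_PD_CHANNELS * MAX_DEV_PER_CHANNEL)

    #define PD_STATE_SYSTEM 0x40    /* MR_PD_STATE_SYSTEM from the hunk */

    static unsigned int pd_index(unsigned int channel, unsigned int target)
    {
            return channel * MAX_DEV_PER_CHANNEL + target;
    }

    int main(void)
    {
            unsigned char drive_state[MAX_PD] = {0};
            unsigned int ch, tgt;

            drive_state[pd_index(1, 3)] = PD_STATE_SYSTEM;

            /* Same double-loop shape as the MR_EVT_PD_INSERTED handler. */
            for (ch = 0; ch < MAX_PD_CHANNELS; ch++)
                    for (tgt = 0; tgt < MAX_DEV_PER_CHANNEL; tgt++)
                            if (drive_state[pd_index(ch, tgt)] == PD_STATE_SYSTEM)
                                    printf("expose channel %u target %u\n",
                                           ch, tgt);
            return 0;
    }
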
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index 11aa917629ac..a1c97e88068a 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -23,7 +23,6 @@
23#include <linux/delay.h> 23#include <linux/delay.h>
24#include <linux/types.h> 24#include <linux/types.h>
25#include <linux/string.h> 25#include <linux/string.h>
26#include <linux/slab.h>
27#include <linux/blkdev.h> 26#include <linux/blkdev.h>
28#include <linux/proc_fs.h> 27#include <linux/proc_fs.h>
29#include <linux/stat.h> 28#include <linux/stat.h>
diff --git a/drivers/scsi/mpt2sas/Kconfig b/drivers/scsi/mpt2sas/Kconfig
index 70c4c2467dd8..ba8e128de238 100644
--- a/drivers/scsi/mpt2sas/Kconfig
+++ b/drivers/scsi/mpt2sas/Kconfig
@@ -44,6 +44,7 @@ config SCSI_MPT2SAS
44 tristate "LSI MPT Fusion SAS 2.0 Device Driver" 44 tristate "LSI MPT Fusion SAS 2.0 Device Driver"
45 depends on PCI && SCSI 45 depends on PCI && SCSI
46 select SCSI_SAS_ATTRS 46 select SCSI_SAS_ATTRS
47 select RAID_ATTRS
47 ---help--- 48 ---help---
48 This driver supports PCI-Express SAS 6Gb/s Host Adapters. 49 This driver supports PCI-Express SAS 6Gb/s Host Adapters.
49 50
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h
index f9f6c0839276..9958d847a88d 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2.h
@@ -8,7 +8,7 @@
8 * scatter/gather formats. 8 * scatter/gather formats.
9 * Creation Date: June 21, 2006 9 * Creation Date: June 21, 2006
10 * 10 *
11 * mpi2.h Version: 02.00.12 11 * mpi2.h Version: 02.00.14
12 * 12 *
13 * Version History 13 * Version History
14 * --------------- 14 * ---------------
@@ -52,6 +52,11 @@
52 * MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR and made those 52 * MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR and made those
53 * bytes reserved. 53 * bytes reserved.
54 * Added RAID Accelerator functionality. 54 * Added RAID Accelerator functionality.
55 * 07-30-09 02.00.13 Bumped MPI2_HEADER_VERSION_UNIT.
56 * 10-28-09 02.00.14 Bumped MPI2_HEADER_VERSION_UNIT.
57 * Added MSI-x index mask and shift for Reply Post Host
58 * Index register.
59 * Added function code for Host Based Discovery Action.
55 * -------------------------------------------------------------------------- 60 * --------------------------------------------------------------------------
56 */ 61 */
57 62
@@ -77,7 +82,7 @@
77#define MPI2_VERSION_02_00 (0x0200) 82#define MPI2_VERSION_02_00 (0x0200)
78 83
79/* versioning for this MPI header set */ 84/* versioning for this MPI header set */
80#define MPI2_HEADER_VERSION_UNIT (0x0C) 85#define MPI2_HEADER_VERSION_UNIT (0x0E)
81#define MPI2_HEADER_VERSION_DEV (0x00) 86#define MPI2_HEADER_VERSION_DEV (0x00)
82#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) 87#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
83#define MPI2_HEADER_VERSION_UNIT_SHIFT (8) 88#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
@@ -231,9 +236,12 @@ typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS
231#define MPI2_REPLY_FREE_HOST_INDEX_OFFSET (0x00000048) 236#define MPI2_REPLY_FREE_HOST_INDEX_OFFSET (0x00000048)
232 237
233/* 238/*
234 * Offset for the Reply Descriptor Post Queue 239 * Defines for the Reply Descriptor Post Queue
235 */ 240 */
236#define MPI2_REPLY_POST_HOST_INDEX_OFFSET (0x0000006C) 241#define MPI2_REPLY_POST_HOST_INDEX_OFFSET (0x0000006C)
242#define MPI2_REPLY_POST_HOST_INDEX_MASK (0x00FFFFFF)
243#define MPI2_RPHI_MSIX_INDEX_MASK (0xFF000000)
244#define MPI2_RPHI_MSIX_INDEX_SHIFT (24)
237 245
238/* 246/*
239 * Defines for the HCBSize and address 247 * Defines for the HCBSize and address
@@ -496,12 +504,13 @@ typedef union _MPI2_REPLY_DESCRIPTORS_UNION
496#define MPI2_FUNCTION_TARGET_CMD_BUF_BASE_POST (0x24) /* Target Command Buffer Post Base */ 504#define MPI2_FUNCTION_TARGET_CMD_BUF_BASE_POST (0x24) /* Target Command Buffer Post Base */
497#define MPI2_FUNCTION_TARGET_CMD_BUF_LIST_POST (0x25) /* Target Command Buffer Post List */ 505#define MPI2_FUNCTION_TARGET_CMD_BUF_LIST_POST (0x25) /* Target Command Buffer Post List */
498#define MPI2_FUNCTION_RAID_ACCELERATOR (0x2C) /* RAID Accelerator*/ 506#define MPI2_FUNCTION_RAID_ACCELERATOR (0x2C) /* RAID Accelerator*/
507/* Host Based Discovery Action */
508#define MPI2_FUNCTION_HOST_BASED_DISCOVERY_ACTION (0x2F)
499 509
500 510
501 511
502/* Doorbell functions */ 512/* Doorbell functions */
503#define MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET (0x40) 513#define MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET (0x40)
504/* #define MPI2_FUNCTION_IO_UNIT_RESET (0x41) */
505#define MPI2_FUNCTION_HANDSHAKE (0x42) 514#define MPI2_FUNCTION_HANDSHAKE (0x42)
506 515
507 516
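The new MSI-x defines for the Reply Post Host Index register carve one 32-bit register into a 24-bit host index and an 8-bit MSI-x index. Composing and decoding the value is plain mask-and-shift work; the constants below are copied from the hunk, the example values are arbitrary:

    #include <stdint.h>
    #include <stdio.h>

    #define MPI2_REPLY_POST_HOST_INDEX_MASK (0x00FFFFFF)
    #define MPI2_RPHI_MSIX_INDEX_MASK       (0xFF000000)
    #define MPI2_RPHI_MSIX_INDEX_SHIFT      (24)

    int main(void)
    {
            uint32_t host_index = 0x1234, msix_index = 3;

            /* Compose the register value... */
            uint32_t reg = (host_index & MPI2_REPLY_POST_HOST_INDEX_MASK) |
                           (msix_index << MPI2_RPHI_MSIX_INDEX_SHIFT);

            /* ...and split it back apart. */
            printf("host index 0x%x, msi-x index %u\n",
                   reg & MPI2_REPLY_POST_HOST_INDEX_MASK,
                   (reg & MPI2_RPHI_MSIX_INDEX_MASK) >>
                            MPI2_RPHI_MSIX_INDEX_SHIFT);
            return 0;
    }
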
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
index ab47c4679640..cf0ac9f40c97 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
@@ -6,7 +6,7 @@
6 * Title: MPI Configuration messages and pages 6 * Title: MPI Configuration messages and pages
7 * Creation Date: November 10, 2006 7 * Creation Date: November 10, 2006
8 * 8 *
9 * mpi2_cnfg.h Version: 02.00.11 9 * mpi2_cnfg.h Version: 02.00.13
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -100,6 +100,15 @@
100 * Added expander reduced functionality data to SAS 100 * Added expander reduced functionality data to SAS
101 * Expander Page 0. 101 * Expander Page 0.
102 * Added SAS PHY Page 2 and SAS PHY Page 3. 102 * Added SAS PHY Page 2 and SAS PHY Page 3.
103 * 07-30-09 02.00.12 Added IO Unit Page 7.
104 * Added new device ids.
105 * Added SAS IO Unit Page 5.
106 * Added partial and slumber power management capable flags
107 * to SAS Device Page 0 Flags field.
108 * Added PhyInfo defines for power condition.
109 * Added Ethernet configuration pages.
110 * 10-28-09 02.00.13 Added MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY.
111 * Added SAS PHY Page 4 structure and defines.
103 * -------------------------------------------------------------------------- 112 * --------------------------------------------------------------------------
104 */ 113 */
105 114
@@ -182,6 +191,7 @@ typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION
182#define MPI2_CONFIG_EXTPAGETYPE_RAID_CONFIG (0x16) 191#define MPI2_CONFIG_EXTPAGETYPE_RAID_CONFIG (0x16)
183#define MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING (0x17) 192#define MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING (0x17)
184#define MPI2_CONFIG_EXTPAGETYPE_SAS_PORT (0x18) 193#define MPI2_CONFIG_EXTPAGETYPE_SAS_PORT (0x18)
194#define MPI2_CONFIG_EXTPAGETYPE_ETHERNET (0x19)
185 195
186 196
187/***************************************************************************** 197/*****************************************************************************
@@ -268,6 +278,14 @@ typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION
268#define MPI2_DPM_PGAD_START_ENTRY_MASK (0x0000FFFF) 278#define MPI2_DPM_PGAD_START_ENTRY_MASK (0x0000FFFF)
269 279
270 280
281/* Ethernet PageAddress format */
282#define MPI2_ETHERNET_PGAD_FORM_MASK (0xF0000000)
283#define MPI2_ETHERNET_PGAD_FORM_IF_NUM (0x00000000)
284
285#define MPI2_ETHERNET_PGAD_IF_NUMBER_MASK (0x000000FF)
286
287
288
271/**************************************************************************** 289/****************************************************************************
272* Configuration messages 290* Configuration messages
273****************************************************************************/ 291****************************************************************************/
@@ -349,6 +367,15 @@ typedef struct _MPI2_CONFIG_REPLY
349#define MPI2_MFGPAGE_DEVID_SAS2116_1 (0x0064) 367#define MPI2_MFGPAGE_DEVID_SAS2116_1 (0x0064)
350#define MPI2_MFGPAGE_DEVID_SAS2116_2 (0x0065) 368#define MPI2_MFGPAGE_DEVID_SAS2116_2 (0x0065)
351 369
370#define MPI2_MFGPAGE_DEVID_SAS2208_1 (0x0080)
371#define MPI2_MFGPAGE_DEVID_SAS2208_2 (0x0081)
372#define MPI2_MFGPAGE_DEVID_SAS2208_3 (0x0082)
373#define MPI2_MFGPAGE_DEVID_SAS2208_4 (0x0083)
374#define MPI2_MFGPAGE_DEVID_SAS2208_5 (0x0084)
375#define MPI2_MFGPAGE_DEVID_SAS2208_6 (0x0085)
376#define MPI2_MFGPAGE_DEVID_SAS2208_7 (0x0086)
377#define MPI2_MFGPAGE_DEVID_SAS2208_8 (0x0087)
378
352 379
353/* Manufacturing Page 0 */ 380/* Manufacturing Page 0 */
354 381
@@ -687,6 +714,7 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1
687#define MPI2_IOUNITPAGE1_PAGEVERSION (0x04) 714#define MPI2_IOUNITPAGE1_PAGEVERSION (0x04)
688 715
689/* IO Unit Page 1 Flags defines */ 716/* IO Unit Page 1 Flags defines */
717#define MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY (0x00000800)
690#define MPI2_IOUNITPAGE1_MASK_SATA_WRITE_CACHE (0x00000600) 718#define MPI2_IOUNITPAGE1_MASK_SATA_WRITE_CACHE (0x00000600)
691#define MPI2_IOUNITPAGE1_ENABLE_SATA_WRITE_CACHE (0x00000000) 719#define MPI2_IOUNITPAGE1_ENABLE_SATA_WRITE_CACHE (0x00000000)
692#define MPI2_IOUNITPAGE1_DISABLE_SATA_WRITE_CACHE (0x00000200) 720#define MPI2_IOUNITPAGE1_DISABLE_SATA_WRITE_CACHE (0x00000200)
@@ -787,6 +815,56 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_6 {
787#define MPI2_IOUNITPAGE6_FLAGS_ENABLE_RAID_ACCELERATOR (0x0001) 815#define MPI2_IOUNITPAGE6_FLAGS_ENABLE_RAID_ACCELERATOR (0x0001)
788 816
789 817
818/* IO Unit Page 7 */
819
820typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 {
821 MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
822 U16 Reserved1; /* 0x04 */
823 U8 PCIeWidth; /* 0x06 */
824 U8 PCIeSpeed; /* 0x07 */
825 U32 ProcessorState; /* 0x08 */
826 U32 Reserved2; /* 0x0C */
827 U16 IOCTemperature; /* 0x10 */
828 U8 IOCTemperatureUnits; /* 0x12 */
829 U8 IOCSpeed; /* 0x13 */
830 U32 Reserved3; /* 0x14 */
831} MPI2_CONFIG_PAGE_IO_UNIT_7, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_7,
832 Mpi2IOUnitPage7_t, MPI2_POINTER pMpi2IOUnitPage7_t;
833
834#define MPI2_IOUNITPAGE7_PAGEVERSION (0x00)
835
836/* defines for IO Unit Page 7 PCIeWidth field */
837#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X1 (0x01)
838#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X2 (0x02)
839#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X4 (0x04)
840#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X8 (0x08)
841
842/* defines for IO Unit Page 7 PCIeSpeed field */
843#define MPI2_IOUNITPAGE7_PCIE_SPEED_2_5_GBPS (0x00)
844#define MPI2_IOUNITPAGE7_PCIE_SPEED_5_0_GBPS (0x01)
845#define MPI2_IOUNITPAGE7_PCIE_SPEED_8_0_GBPS (0x02)
846
847/* defines for IO Unit Page 7 ProcessorState field */
848#define MPI2_IOUNITPAGE7_PSTATE_MASK_SECOND (0x0000000F)
849#define MPI2_IOUNITPAGE7_PSTATE_SHIFT_SECOND (0)
850
851#define MPI2_IOUNITPAGE7_PSTATE_NOT_PRESENT (0x00)
852#define MPI2_IOUNITPAGE7_PSTATE_DISABLED (0x01)
853#define MPI2_IOUNITPAGE7_PSTATE_ENABLED (0x02)
854
855/* defines for IO Unit Page 7 IOCTemperatureUnits field */
856#define MPI2_IOUNITPAGE7_IOC_TEMP_NOT_PRESENT (0x00)
857#define MPI2_IOUNITPAGE7_IOC_TEMP_FAHRENHEIT (0x01)
858#define MPI2_IOUNITPAGE7_IOC_TEMP_CELSIUS (0x02)
859
860/* defines for IO Unit Page 7 IOCSpeed field */
861#define MPI2_IOUNITPAGE7_IOC_SPEED_FULL (0x01)
862#define MPI2_IOUNITPAGE7_IOC_SPEED_HALF (0x02)
863#define MPI2_IOUNITPAGE7_IOC_SPEED_QUARTER (0x04)
864#define MPI2_IOUNITPAGE7_IOC_SPEED_EIGHTH (0x08)
865
866
867
790/**************************************************************************** 868/****************************************************************************
791* IOC Config Pages 869* IOC Config Pages
792****************************************************************************/ 870****************************************************************************/
@@ -1470,6 +1548,12 @@ typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1
1470 1548
1471/* values for PhyInfo fields */ 1549/* values for PhyInfo fields */
1472#define MPI2_SAS_PHYINFO_PHY_VACANT (0x80000000) 1550#define MPI2_SAS_PHYINFO_PHY_VACANT (0x80000000)
1551
1552#define MPI2_SAS_PHYINFO_PHY_POWER_CONDITION_MASK (0x18000000)
1553#define MPI2_SAS_PHYINFO_PHY_POWER_ACTIVE (0x00000000)
1554#define MPI2_SAS_PHYINFO_PHY_POWER_PARTIAL (0x08000000)
1555#define MPI2_SAS_PHYINFO_PHY_POWER_SLUMBER (0x10000000)
1556
1473#define MPI2_SAS_PHYINFO_CHANGED_REQ_INSIDE_ZPSDS (0x04000000) 1557#define MPI2_SAS_PHYINFO_CHANGED_REQ_INSIDE_ZPSDS (0x04000000)
1474#define MPI2_SAS_PHYINFO_INSIDE_ZPSDS_PERSISTENT (0x02000000) 1558#define MPI2_SAS_PHYINFO_INSIDE_ZPSDS_PERSISTENT (0x02000000)
1475#define MPI2_SAS_PHYINFO_REQ_INSIDE_ZPSDS (0x01000000) 1559#define MPI2_SAS_PHYINFO_REQ_INSIDE_ZPSDS (0x01000000)
@@ -1682,11 +1766,11 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_1
1682/* values for SAS IO Unit Page 1 PortFlags */ 1766/* values for SAS IO Unit Page 1 PortFlags */
1683#define MPI2_SASIOUNIT1_PORT_FLAGS_AUTO_PORT_CONFIG (0x01) 1767#define MPI2_SASIOUNIT1_PORT_FLAGS_AUTO_PORT_CONFIG (0x01)
1684 1768
1685/* values for SAS IO Unit Page 2 PhyFlags */ 1769/* values for SAS IO Unit Page 1 PhyFlags */
1686#define MPI2_SASIOUNIT1_PHYFLAGS_ZONING_ENABLE (0x10) 1770#define MPI2_SASIOUNIT1_PHYFLAGS_ZONING_ENABLE (0x10)
1687#define MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08) 1771#define MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08)
1688 1772
1689/* values for SAS IO Unit Page 0 MaxMinLinkRate */ 1773/* values for SAS IO Unit Page 1 MaxMinLinkRate */
1690#define MPI2_SASIOUNIT1_MAX_RATE_MASK (0xF0) 1774#define MPI2_SASIOUNIT1_MAX_RATE_MASK (0xF0)
1691#define MPI2_SASIOUNIT1_MAX_RATE_1_5 (0x80) 1775#define MPI2_SASIOUNIT1_MAX_RATE_1_5 (0x80)
1692#define MPI2_SASIOUNIT1_MAX_RATE_3_0 (0x90) 1776#define MPI2_SASIOUNIT1_MAX_RATE_3_0 (0x90)
@@ -1745,6 +1829,74 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_4
1745#define MPI2_SASIOUNIT4_PHY_SPINUP_GROUP_MASK (0x03) 1829#define MPI2_SASIOUNIT4_PHY_SPINUP_GROUP_MASK (0x03)
1746 1830
1747 1831
1832/* SAS IO Unit Page 5 */
1833
1834typedef struct _MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS {
1835 U8 ControlFlags; /* 0x00 */
1836 U8 Reserved1; /* 0x01 */
1837 U16 InactivityTimerExponent; /* 0x02 */
1838 U8 SATAPartialTimeout; /* 0x04 */
1839 U8 Reserved2; /* 0x05 */
1840 U8 SATASlumberTimeout; /* 0x06 */
1841 U8 Reserved3; /* 0x07 */
1842 U8 SASPartialTimeout; /* 0x08 */
1843 U8 Reserved4; /* 0x09 */
1844 U8 SASSlumberTimeout; /* 0x0A */
1845 U8 Reserved5; /* 0x0B */
1846} MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS,
1847 MPI2_POINTER PTR_MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS,
1848 Mpi2SasIOUnit5PhyPmSettings_t, MPI2_POINTER pMpi2SasIOUnit5PhyPmSettings_t;
1849
1850/* defines for ControlFlags field */
1851#define MPI2_SASIOUNIT5_CONTROL_SAS_SLUMBER_ENABLE (0x08)
1852#define MPI2_SASIOUNIT5_CONTROL_SAS_PARTIAL_ENABLE (0x04)
1853#define MPI2_SASIOUNIT5_CONTROL_SATA_SLUMBER_ENABLE (0x02)
1854#define MPI2_SASIOUNIT5_CONTROL_SATA_PARTIAL_ENABLE (0x01)
1855
1856/* defines for InactivityTimerExponent field */
1857#define MPI2_SASIOUNIT5_ITE_MASK_SAS_SLUMBER (0x7000)
1858#define MPI2_SASIOUNIT5_ITE_SHIFT_SAS_SLUMBER (12)
1859#define MPI2_SASIOUNIT5_ITE_MASK_SAS_PARTIAL (0x0700)
1860#define MPI2_SASIOUNIT5_ITE_SHIFT_SAS_PARTIAL (8)
1861#define MPI2_SASIOUNIT5_ITE_MASK_SATA_SLUMBER (0x0070)
1862#define MPI2_SASIOUNIT5_ITE_SHIFT_SATA_SLUMBER (4)
1863#define MPI2_SASIOUNIT5_ITE_MASK_SATA_PARTIAL (0x0007)
1864#define MPI2_SASIOUNIT5_ITE_SHIFT_SATA_PARTIAL (0)
1865
1866#define MPI2_SASIOUNIT5_ITE_TEN_SECONDS (7)
1867#define MPI2_SASIOUNIT5_ITE_ONE_SECOND (6)
1868#define MPI2_SASIOUNIT5_ITE_HUNDRED_MILLISECONDS (5)
1869#define MPI2_SASIOUNIT5_ITE_TEN_MILLISECONDS (4)
1870#define MPI2_SASIOUNIT5_ITE_ONE_MILLISECOND (3)
1871#define MPI2_SASIOUNIT5_ITE_HUNDRED_MICROSECONDS (2)
1872#define MPI2_SASIOUNIT5_ITE_TEN_MICROSECONDS (1)
1873#define MPI2_SASIOUNIT5_ITE_ONE_MICROSECOND (0)
1874
1875/*
1876 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
1877 * one and check Header.ExtPageLength or NumPhys at runtime.
1878 */
1879#ifndef MPI2_SAS_IOUNIT5_PHY_MAX
1880#define MPI2_SAS_IOUNIT5_PHY_MAX (1)
1881#endif
1882
1883typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_5 {
1884 MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
1885 U8 NumPhys; /* 0x08 */
1886 U8 Reserved1; /* 0x09 */
1887 U16 Reserved2; /* 0x0A */
1888 U32 Reserved3; /* 0x0C */
1889 MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS SASPhyPowerManagementSettings
1890 [MPI2_SAS_IOUNIT5_PHY_MAX]; /* 0x10 */
1891} MPI2_CONFIG_PAGE_SASIOUNIT_5,
1892 MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_5,
1893 Mpi2SasIOUnitPage5_t, MPI2_POINTER pMpi2SasIOUnitPage5_t;
1894
1895#define MPI2_SASIOUNITPAGE5_PAGEVERSION (0x00)
1896
1897
1898
1899
1748/**************************************************************************** 1900/****************************************************************************
1749* SAS Expander Config Pages 1901* SAS Expander Config Pages
1750****************************************************************************/ 1902****************************************************************************/
@@ -1927,6 +2079,8 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_0
1927/* see mpi2_sas.h for values for SAS Device Page 0 DeviceInfo values */ 2079/* see mpi2_sas.h for values for SAS Device Page 0 DeviceInfo values */
1928 2080
1929/* values for SAS Device Page 0 Flags field */ 2081/* values for SAS Device Page 0 Flags field */
2082#define MPI2_SAS_DEVICE0_FLAGS_SLUMBER_PM_CAPABLE (0x1000)
2083#define MPI2_SAS_DEVICE0_FLAGS_PARTIAL_PM_CAPABLE (0x0800)
1930#define MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY (0x0400) 2084#define MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY (0x0400)
1931#define MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE (0x0200) 2085#define MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE (0x0200)
1932#define MPI2_SAS_DEVICE0_FLAGS_UNSUPPORTED_DEVICE (0x0100) 2086#define MPI2_SAS_DEVICE0_FLAGS_UNSUPPORTED_DEVICE (0x0100)
@@ -2140,6 +2294,26 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_3 {
2140#define MPI2_SASPHY3_PAGEVERSION (0x00) 2294#define MPI2_SASPHY3_PAGEVERSION (0x00)
2141 2295
2142 2296
2297/* SAS PHY Page 4 */
2298
2299typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_4 {
2300 MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
2301 U16 Reserved1; /* 0x08 */
2302 U8 Reserved2; /* 0x0A */
2303 U8 Flags; /* 0x0B */
2304 U8 InitialFrame[28]; /* 0x0C */
2305} MPI2_CONFIG_PAGE_SAS_PHY_4, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_PHY_4,
2306 Mpi2SasPhyPage4_t, MPI2_POINTER pMpi2SasPhyPage4_t;
2307
2308#define MPI2_SASPHY4_PAGEVERSION (0x00)
2309
2310/* values for the Flags field */
2311#define MPI2_SASPHY4_FLAGS_FRAME_VALID (0x02)
2312#define MPI2_SASPHY4_FLAGS_SATA_FRAME (0x01)
2313
2314
2315
2316
2143/**************************************************************************** 2317/****************************************************************************
2144* SAS Port Config Pages 2318* SAS Port Config Pages
2145****************************************************************************/ 2319****************************************************************************/
@@ -2343,5 +2517,122 @@ typedef struct _MPI2_CONFIG_PAGE_DRIVER_MAPPING_0
2343#define MPI2_DRVMAP0_MAPINFO_MISSING_MASK (0x000F) 2517#define MPI2_DRVMAP0_MAPINFO_MISSING_MASK (0x000F)
2344 2518
2345 2519
2520/****************************************************************************
2521* Ethernet Config Pages
2522****************************************************************************/
2523
2524/* Ethernet Page 0 */
2525
2526/* IP address (union of IPv4 and IPv6) */
2527typedef union _MPI2_ETHERNET_IP_ADDR {
2528 U32 IPv4Addr;
2529 U32 IPv6Addr[4];
2530} MPI2_ETHERNET_IP_ADDR, MPI2_POINTER PTR_MPI2_ETHERNET_IP_ADDR,
2531 Mpi2EthernetIpAddr_t, MPI2_POINTER pMpi2EthernetIpAddr_t;
2532
2533#define MPI2_ETHERNET_HOST_NAME_LENGTH (32)
2534
2535typedef struct _MPI2_CONFIG_PAGE_ETHERNET_0 {
2536 MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
2537 U8 NumInterfaces; /* 0x08 */
2538 U8 Reserved0; /* 0x09 */
2539 U16 Reserved1; /* 0x0A */
2540 U32 Status; /* 0x0C */
2541 U8 MediaState; /* 0x10 */
2542 U8 Reserved2; /* 0x11 */
2543 U16 Reserved3; /* 0x12 */
2544 U8 MacAddress[6]; /* 0x14 */
2545 U8 Reserved4; /* 0x1A */
2546 U8 Reserved5; /* 0x1B */
2547 MPI2_ETHERNET_IP_ADDR IpAddress; /* 0x1C */
2548 MPI2_ETHERNET_IP_ADDR SubnetMask; /* 0x2C */
2549 MPI2_ETHERNET_IP_ADDR GatewayIpAddress; /* 0x3C */
2550 MPI2_ETHERNET_IP_ADDR DNS1IpAddress; /* 0x4C */
2551 MPI2_ETHERNET_IP_ADDR DNS2IpAddress; /* 0x5C */
2552 MPI2_ETHERNET_IP_ADDR DhcpIpAddress; /* 0x6C */
2553 U8 HostName
2554 [MPI2_ETHERNET_HOST_NAME_LENGTH];/* 0x7C */
2555} MPI2_CONFIG_PAGE_ETHERNET_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_ETHERNET_0,
2556 Mpi2EthernetPage0_t, MPI2_POINTER pMpi2EthernetPage0_t;
2557
2558#define MPI2_ETHERNETPAGE0_PAGEVERSION (0x00)
2559
2560/* values for Ethernet Page 0 Status field */
2561#define MPI2_ETHPG0_STATUS_IPV6_CAPABLE (0x80000000)
2562#define MPI2_ETHPG0_STATUS_IPV4_CAPABLE (0x40000000)
2563#define MPI2_ETHPG0_STATUS_CONSOLE_CONNECTED (0x20000000)
2564#define MPI2_ETHPG0_STATUS_DEFAULT_IF (0x00000100)
2565#define MPI2_ETHPG0_STATUS_FW_DWNLD_ENABLED (0x00000080)
2566#define MPI2_ETHPG0_STATUS_TELNET_ENABLED (0x00000040)
2567#define MPI2_ETHPG0_STATUS_SSH2_ENABLED (0x00000020)
2568#define MPI2_ETHPG0_STATUS_DHCP_CLIENT_ENABLED (0x00000010)
2569#define MPI2_ETHPG0_STATUS_IPV6_ENABLED (0x00000008)
2570#define MPI2_ETHPG0_STATUS_IPV4_ENABLED (0x00000004)
2571#define MPI2_ETHPG0_STATUS_IPV6_ADDRESSES (0x00000002)
2572#define MPI2_ETHPG0_STATUS_ETH_IF_ENABLED (0x00000001)
2573
2574/* values for Ethernet Page 0 MediaState field */
2575#define MPI2_ETHPG0_MS_DUPLEX_MASK (0x80)
2576#define MPI2_ETHPG0_MS_HALF_DUPLEX (0x00)
2577#define MPI2_ETHPG0_MS_FULL_DUPLEX (0x80)
2578
2579#define MPI2_ETHPG0_MS_CONNECT_SPEED_MASK (0x07)
2580#define MPI2_ETHPG0_MS_NOT_CONNECTED (0x00)
2581#define MPI2_ETHPG0_MS_10MBIT (0x01)
2582#define MPI2_ETHPG0_MS_100MBIT (0x02)
2583#define MPI2_ETHPG0_MS_1GBIT (0x03)
2584
2585
2586/* Ethernet Page 1 */
2587
2588typedef struct _MPI2_CONFIG_PAGE_ETHERNET_1 {
2589 MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
2590 U32 Reserved0; /* 0x08 */
2591 U32 Flags; /* 0x0C */
2592 U8 MediaState; /* 0x10 */
2593 U8 Reserved1; /* 0x11 */
2594 U16 Reserved2; /* 0x12 */
2595 U8 MacAddress[6]; /* 0x14 */
2596 U8 Reserved3; /* 0x1A */
2597 U8 Reserved4; /* 0x1B */
2598 MPI2_ETHERNET_IP_ADDR StaticIpAddress; /* 0x1C */
2599 MPI2_ETHERNET_IP_ADDR StaticSubnetMask; /* 0x2C */
2600 MPI2_ETHERNET_IP_ADDR StaticGatewayIpAddress; /* 0x3C */
2601 MPI2_ETHERNET_IP_ADDR StaticDNS1IpAddress; /* 0x4C */
2602 MPI2_ETHERNET_IP_ADDR StaticDNS2IpAddress; /* 0x5C */
2603 U32 Reserved5; /* 0x6C */
2604 U32 Reserved6; /* 0x70 */
2605 U32 Reserved7; /* 0x74 */
2606 U32 Reserved8; /* 0x78 */
2607 U8 HostName
2608 [MPI2_ETHERNET_HOST_NAME_LENGTH];/* 0x7C */
2609} MPI2_CONFIG_PAGE_ETHERNET_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_ETHERNET_1,
2610 Mpi2EthernetPage1_t, MPI2_POINTER pMpi2EthernetPage1_t;
2611
2612#define MPI2_ETHERNETPAGE1_PAGEVERSION (0x00)
2613
2614/* values for Ethernet Page 1 Flags field */
2615#define MPI2_ETHPG1_FLAG_SET_DEFAULT_IF (0x00000100)
2616#define MPI2_ETHPG1_FLAG_ENABLE_FW_DOWNLOAD (0x00000080)
2617#define MPI2_ETHPG1_FLAG_ENABLE_TELNET (0x00000040)
2618#define MPI2_ETHPG1_FLAG_ENABLE_SSH2 (0x00000020)
2619#define MPI2_ETHPG1_FLAG_ENABLE_DHCP_CLIENT (0x00000010)
2620#define MPI2_ETHPG1_FLAG_ENABLE_IPV6 (0x00000008)
2621#define MPI2_ETHPG1_FLAG_ENABLE_IPV4 (0x00000004)
2622#define MPI2_ETHPG1_FLAG_USE_IPV6_ADDRESSES (0x00000002)
2623#define MPI2_ETHPG1_FLAG_ENABLE_ETH_IF (0x00000001)
2624
2625/* values for Ethernet Page 1 MediaState field */
2626#define MPI2_ETHPG1_MS_DUPLEX_MASK (0x80)
2627#define MPI2_ETHPG1_MS_HALF_DUPLEX (0x00)
2628#define MPI2_ETHPG1_MS_FULL_DUPLEX (0x80)
2629
2630#define MPI2_ETHPG1_MS_DATA_RATE_MASK (0x07)
2631#define MPI2_ETHPG1_MS_DATA_RATE_AUTO (0x00)
2632#define MPI2_ETHPG1_MS_DATA_RATE_10MBIT (0x01)
2633#define MPI2_ETHPG1_MS_DATA_RATE_100MBIT (0x02)
2634#define MPI2_ETHPG1_MS_DATA_RATE_1GBIT (0x03)
2635
2636
2346#endif 2637#endif
2347 2638
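Among the additions above, SAS IO Unit Page 5 packs four 3-bit inactivity-timer exponents into its 16-bit InactivityTimerExponent word; each ITE_MASK/ITE_SHIFT pair selects one field, and the decoded value indexes a time scale running from ONE_MICROSECOND (0) to TEN_SECONDS (7). A small decoder built from the hunk's constants (the register value is invented for the demo):

    #include <stdint.h>
    #include <stdio.h>

    #define ITE_MASK_SAS_SLUMBER    (0x7000)
    #define ITE_SHIFT_SAS_SLUMBER   (12)
    #define ITE_MASK_SATA_PARTIAL   (0x0007)
    #define ITE_SHIFT_SATA_PARTIAL  (0)

    /* Index 0..7 = MPI2_SASIOUNIT5_ITE_ONE_MICROSECOND..TEN_SECONDS. */
    static const char *const ite_name[8] = {
            "1us", "10us", "100us", "1ms", "10ms", "100ms", "1s", "10s"
    };

    int main(void)
    {
            uint16_t ite = 0x6003;  /* arbitrary example contents */

            printf("SAS slumber timer base: %s\n",
                   ite_name[(ite & ITE_MASK_SAS_SLUMBER) >>
                            ITE_SHIFT_SAS_SLUMBER]);
            printf("SATA partial timer base: %s\n",
                   ite_name[(ite & ITE_MASK_SATA_PARTIAL) >>
                            ITE_SHIFT_SATA_PARTIAL]);
            return 0;
    }
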
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_history.txt b/drivers/scsi/mpt2sas/mpi/mpi2_history.txt
index 65fcaa31cb30..c4adf76b49d9 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_history.txt
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_history.txt
@@ -5,23 +5,24 @@
5 Copyright (c) 2000-2009 LSI Corporation. 5 Copyright (c) 2000-2009 LSI Corporation.
6 6
7 --------------------------------------- 7 ---------------------------------------
8 Header Set Release Version: 02.00.12 8 Header Set Release Version: 02.00.14
9 Header Set Release Date: 05-06-09 9 Header Set Release Date: 10-28-09
10 --------------------------------------- 10 ---------------------------------------
11 11
12 Filename Current version Prior version 12 Filename Current version Prior version
13 ---------- --------------- ------------- 13 ---------- --------------- -------------
14 mpi2.h 02.00.12 02.00.11 14 mpi2.h 02.00.14 02.00.13
15 mpi2_cnfg.h 02.00.11 02.00.10 15 mpi2_cnfg.h 02.00.13 02.00.12
16 mpi2_init.h 02.00.07 02.00.06 16 mpi2_init.h 02.00.08 02.00.07
17 mpi2_ioc.h 02.00.11 02.00.10 17 mpi2_ioc.h 02.00.13 02.00.12
18 mpi2_raid.h 02.00.03 02.00.03 18 mpi2_raid.h 02.00.04 02.00.04
19 mpi2_sas.h 02.00.02 02.00.02 19 mpi2_sas.h 02.00.03 02.00.02
20 mpi2_targ.h 02.00.03 02.00.03 20 mpi2_targ.h 02.00.03 02.00.03
21 mpi2_tool.h 02.00.03 02.00.02 21 mpi2_tool.h 02.00.04 02.00.04
22 mpi2_type.h 02.00.00 02.00.00 22 mpi2_type.h 02.00.00 02.00.00
23 mpi2_ra.h 02.00.00 23 mpi2_ra.h 02.00.00 02.00.00
24 mpi2_history.txt 02.00.11 02.00.12 24 mpi2_hbd.h 02.00.00
25 mpi2_history.txt 02.00.14 02.00.13
25 26
26 27
27 * Date Version Description 28 * Date Version Description
@@ -65,6 +66,11 @@ mpi2.h
65 * MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR and made those 66 * MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR and made those
66 * bytes reserved. 67 * bytes reserved.
67 * Added RAID Accelerator functionality. 68 * Added RAID Accelerator functionality.
69 * 07-30-09 02.00.13 Bumped MPI2_HEADER_VERSION_UNIT.
70 * 10-28-09 02.00.14 Bumped MPI2_HEADER_VERSION_UNIT.
71 * Added MSI-x index mask and shift for Reply Post Host
72 * Index register.
73 * Added function code for Host Based Discovery Action.
68 * -------------------------------------------------------------------------- 74 * --------------------------------------------------------------------------
69 75
70mpi2_cnfg.h 76mpi2_cnfg.h
@@ -155,6 +161,15 @@ mpi2_cnfg.h
155 * Added expander reduced functionality data to SAS 161 * Added expander reduced functionality data to SAS
156 * Expander Page 0. 162 * Expander Page 0.
157 * Added SAS PHY Page 2 and SAS PHY Page 3. 163 * Added SAS PHY Page 2 and SAS PHY Page 3.
164 * 07-30-09 02.00.12 Added IO Unit Page 7.
165 * Added new device ids.
166 * Added SAS IO Unit Page 5.
167 * Added partial and slumber power management capable flags
168 * to SAS Device Page 0 Flags field.
169 * Added PhyInfo defines for power condition.
170 * Added Ethernet configuration pages.
171 * 10-28-09 02.00.13 Added MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY.
172 * Added SAS PHY Page 4 structure and defines.
158 * -------------------------------------------------------------------------- 173 * --------------------------------------------------------------------------
159 174
160mpi2_init.h 175mpi2_init.h
@@ -172,6 +187,10 @@ mpi2_init.h
172 * Query Asynchronous Event. 187 * Query Asynchronous Event.
173 * Defined two new bits in the SlotStatus field of the SCSI 188 * Defined two new bits in the SlotStatus field of the SCSI
174 * Enclosure Processor Request and Reply. 189 * Enclosure Processor Request and Reply.
190 * 10-28-09 02.00.08 Added defines for decoding the ResponseInfo bytes for
191 * both SCSI IO Error Reply and SCSI Task Management Reply.
192 * Added ResponseInfo field to MPI2_SCSI_TASK_MANAGE_REPLY.
193 * Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define.
175 * -------------------------------------------------------------------------- 194 * --------------------------------------------------------------------------
176 195
177mpi2_ioc.h 196mpi2_ioc.h
@@ -246,6 +265,20 @@ mpi2_ioc.h
246 * Added two new reason codes for SAS Device Status Change 265 * Added two new reason codes for SAS Device Status Change
247 * Event. 266 * Event.
248 * Added new event: SAS PHY Counter. 267 * Added new event: SAS PHY Counter.
268 * 07-30-09 02.00.12 Added GPIO Interrupt event define and structure.
269 * Added MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define.
270 * Added new product id family for 2208.
271 * 10-28-09 02.00.13 Added HostMSIxVectors field to MPI2_IOC_INIT_REQUEST.
272 * Added MaxMSIxVectors field to MPI2_IOC_FACTS_REPLY.
273 * Added MinDevHandle field to MPI2_IOC_FACTS_REPLY.
274 * Added MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY.
275 * Added MPI2_EVENT_HOST_BASED_DISCOVERY_PHY define.
276 * Added MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER define.
277 * Added Host Based Discovery Phy Event data.
278 * Added defines for ProductID Product field
279 * (MPI2_FW_HEADER_PID_).
280 * Modified values for SAS ProductID Family
281 * (MPI2_FW_HEADER_PID_FAMILY_).
249 * -------------------------------------------------------------------------- 282 * --------------------------------------------------------------------------
250 283
251mpi2_raid.h 284mpi2_raid.h
@@ -256,6 +289,8 @@ mpi2_raid.h
256 * 05-21-08 02.00.03 Added MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS so that 289 * 05-21-08 02.00.03 Added MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS so that
257 * the PhysDisk array in MPI2_RAID_VOLUME_CREATION_STRUCT 290 * the PhysDisk array in MPI2_RAID_VOLUME_CREATION_STRUCT
258 * can be sized by the build environment. 291 * can be sized by the build environment.
292 * 07-30-09 02.00.04 Added proper define for the Use Default Settings bit of
293 * VolumeCreationFlags and marked the old one as obsolete.
259 * -------------------------------------------------------------------------- 294 * --------------------------------------------------------------------------
260 295
261mpi2_sas.h 296mpi2_sas.h
@@ -264,6 +299,8 @@ mpi2_sas.h
264 * Control Request. 299 * Control Request.
265 * 10-02-08 02.00.02 Added Set IOC Parameter Operation to SAS IO Unit Control 300 * 10-02-08 02.00.02 Added Set IOC Parameter Operation to SAS IO Unit Control
266 * Request. 301 * Request.
302 * 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST
303 * to MPI2_SGE_IO_UNION since it supports chained SGLs.
267 * -------------------------------------------------------------------------- 304 * --------------------------------------------------------------------------
268 305
269mpi2_targ.h 306mpi2_targ.h
@@ -283,6 +320,10 @@ mpi2_tool.h
283 * structures and defines. 320 * structures and defines.
284 * 02-29-08 02.00.02 Modified various names to make them 32-character unique. 321 * 02-29-08 02.00.02 Modified various names to make them 32-character unique.
285 * 05-06-09 02.00.03 Added ISTWI Read Write Tool and Diagnostic CLI Tool. 322 * 05-06-09 02.00.03 Added ISTWI Read Write Tool and Diagnostic CLI Tool.
323 * 07-30-09 02.00.04 Added ExtendedType field to DiagnosticBufferPost request
324 * and reply messages.
325 * Added MPI2_DIAG_BUF_TYPE_EXTENDED.
326 * Incremented MPI2_DIAG_BUF_TYPE_COUNT.
286 * -------------------------------------------------------------------------- 327 * --------------------------------------------------------------------------
287 328
288mpi2_type.h 329mpi2_type.h
@@ -293,20 +334,26 @@ mpi2_ra.h
293 * 05-06-09 02.00.00 Initial version. 334 * 05-06-09 02.00.00 Initial version.
294 * -------------------------------------------------------------------------- 335 * --------------------------------------------------------------------------
295 336
337mpi2_hbd.h
338 * 10-28-09 02.00.00 Initial version.
339 * --------------------------------------------------------------------------
340
341
296mpi2_history.txt Parts list history 342mpi2_history.txt Parts list history
297 343
298Filename 02.00.12 344Filename 02.00.14 02.00.13 02.00.12
299---------- -------- 345---------- -------- -------- --------
300mpi2.h 02.00.12 346mpi2.h 02.00.14 02.00.13 02.00.12
301mpi2_cnfg.h 02.00.11 347mpi2_cnfg.h 02.00.13 02.00.12 02.00.11
302mpi2_init.h 02.00.07 348mpi2_init.h 02.00.08 02.00.07 02.00.07
303mpi2_ioc.h 02.00.11 349mpi2_ioc.h 02.00.13 02.00.12 02.00.11
304mpi2_raid.h 02.00.03 350mpi2_raid.h 02.00.04 02.00.04 02.00.03
305mpi2_sas.h 02.00.02 351mpi2_sas.h 02.00.03 02.00.02 02.00.02
306mpi2_targ.h 02.00.03 352mpi2_targ.h 02.00.03 02.00.03 02.00.03
307mpi2_tool.h 02.00.03 353mpi2_tool.h 02.00.04 02.00.04 02.00.03
308mpi2_type.h 02.00.00 354mpi2_type.h 02.00.00 02.00.00 02.00.00
309mpi2_ra.h 02.00.00 355mpi2_ra.h 02.00.00 02.00.00 02.00.00
356mpi2_hbd.h 02.00.00
310 357
311Filename 02.00.11 02.00.10 02.00.09 02.00.08 02.00.07 02.00.06 358Filename 02.00.11 02.00.10 02.00.09 02.00.08 02.00.07 02.00.06
312---------- -------- -------- -------- -------- -------- -------- 359---------- -------- -------- -------- -------- -------- --------
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_init.h b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
index 563e56d2e945..6541945e97c3 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_init.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
@@ -6,7 +6,7 @@
6 * Title: MPI SCSI initiator mode messages and structures 6 * Title: MPI SCSI initiator mode messages and structures
7 * Creation Date: June 23, 2006 7 * Creation Date: June 23, 2006
8 * 8 *
9 * mpi2_init.h Version: 02.00.07 9 * mpi2_init.h Version: 02.00.08
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -27,6 +27,10 @@
27 * Query Asynchronous Event. 27 * Query Asynchronous Event.
28 * Defined two new bits in the SlotStatus field of the SCSI 28 * Defined two new bits in the SlotStatus field of the SCSI
29 * Enclosure Processor Request and Reply. 29 * Enclosure Processor Request and Reply.
30 * 10-28-09 02.00.08 Added defines for decoding the ResponseInfo bytes for
31 * both SCSI IO Error Reply and SCSI Task Management Reply.
32 * Added ResponseInfo field to MPI2_SCSI_TASK_MANAGE_REPLY.
33 * Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define.
30 * -------------------------------------------------------------------------- 34 * --------------------------------------------------------------------------
31 */ 35 */
32 36
@@ -254,6 +258,11 @@ typedef struct _MPI2_SCSI_IO_REPLY
254#define MPI2_SCSI_STATE_AUTOSENSE_FAILED (0x02) 258#define MPI2_SCSI_STATE_AUTOSENSE_FAILED (0x02)
255#define MPI2_SCSI_STATE_AUTOSENSE_VALID (0x01) 259#define MPI2_SCSI_STATE_AUTOSENSE_VALID (0x01)
256 260
261/* masks and shifts for the ResponseInfo field */
262
263#define MPI2_SCSI_RI_MASK_REASONCODE (0x000000FF)
264#define MPI2_SCSI_RI_SHIFT_REASONCODE (0)
265
257#define MPI2_SCSI_TASKTAG_UNKNOWN (0xFFFF) 266#define MPI2_SCSI_TASKTAG_UNKNOWN (0xFFFF)
258 267
259 268
@@ -327,6 +336,7 @@ typedef struct _MPI2_SCSI_TASK_MANAGE_REPLY
327 U16 IOCStatus; /* 0x0E */ 336 U16 IOCStatus; /* 0x0E */
328 U32 IOCLogInfo; /* 0x10 */ 337 U32 IOCLogInfo; /* 0x10 */
329 U32 TerminationCount; /* 0x14 */ 338 U32 TerminationCount; /* 0x14 */
339 U32 ResponseInfo; /* 0x18 */
330} MPI2_SCSI_TASK_MANAGE_REPLY, 340} MPI2_SCSI_TASK_MANAGE_REPLY,
331 MPI2_POINTER PTR_MPI2_SCSI_TASK_MANAGE_REPLY, 341 MPI2_POINTER PTR_MPI2_SCSI_TASK_MANAGE_REPLY,
332 Mpi2SCSITaskManagementReply_t, MPI2_POINTER pMpi2SCSIManagementReply_t; 342 Mpi2SCSITaskManagementReply_t, MPI2_POINTER pMpi2SCSIManagementReply_t;
@@ -339,8 +349,20 @@ typedef struct _MPI2_SCSI_TASK_MANAGE_REPLY
339#define MPI2_SCSITASKMGMT_RSP_TM_FAILED (0x05) 349#define MPI2_SCSITASKMGMT_RSP_TM_FAILED (0x05)
340#define MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED (0x08) 350#define MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED (0x08)
341#define MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN (0x09) 351#define MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN (0x09)
352#define MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG (0x0A)
342#define MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC (0x80) 353#define MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC (0x80)
343 354
355/* masks and shifts for the ResponseInfo field */
356
357#define MPI2_SCSITASKMGMT_RI_MASK_REASONCODE (0x000000FF)
358#define MPI2_SCSITASKMGMT_RI_SHIFT_REASONCODE (0)
359#define MPI2_SCSITASKMGMT_RI_MASK_ARI2 (0x0000FF00)
360#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI2 (8)
361#define MPI2_SCSITASKMGMT_RI_MASK_ARI1 (0x00FF0000)
362#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI1 (16)
363#define MPI2_SCSITASKMGMT_RI_MASK_ARI0 (0xFF000000)
364#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI0 (24)
365
344 366
345/**************************************************************************** 367/****************************************************************************
346* SCSI Enclosure Processor messages 368* SCSI Enclosure Processor messages
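
[Editor's note: the MPI2_SCSITASKMGMT_RI_* masks and shifts added above make ResponseInfo decoding mechanical. A minimal sketch, assuming the usual le32_to_cpu() conversion at the reply boundary; the helper name is illustrative, not part of the header:]

```c
/* Sketch: extract the reason code from a task management reply using
 * the MPI2_SCSITASKMGMT_RI_* defines added in this hunk. */
static u8 tm_reply_reason_code(Mpi2SCSITaskManagementReply_t *reply)
{
	u32 info = le32_to_cpu(reply->ResponseInfo);

	return (info & MPI2_SCSITASKMGMT_RI_MASK_REASONCODE) >>
	    MPI2_SCSITASKMGMT_RI_SHIFT_REASONCODE;
}
```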
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
index c294128bdeb4..754938422f6a 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
@@ -6,7 +6,7 @@
6 * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages 6 * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
7 * Creation Date: October 11, 2006 7 * Creation Date: October 11, 2006
8 * 8 *
9 * mpi2_ioc.h Version: 02.00.11 9 * mpi2_ioc.h Version: 02.00.13
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -84,6 +84,20 @@
84 * Added two new reason codes for SAS Device Status Change 84 * Added two new reason codes for SAS Device Status Change
85 * Event. 85 * Event.
86 * Added new event: SAS PHY Counter. 86 * Added new event: SAS PHY Counter.
87 * 07-30-09 02.00.12 Added GPIO Interrupt event define and structure.
88 * Added MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define.
89 * Added new product id family for 2208.
90 * 10-28-09 02.00.13 Added HostMSIxVectors field to MPI2_IOC_INIT_REQUEST.
91 * Added MaxMSIxVectors field to MPI2_IOC_FACTS_REPLY.
92 * Added MinDevHandle field to MPI2_IOC_FACTS_REPLY.
93 * Added MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY.
94 * Added MPI2_EVENT_HOST_BASED_DISCOVERY_PHY define.
95 * Added MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER define.
96 * Added Host Based Discovery Phy Event data.
97 * Added defines for ProductID Product field
98 * (MPI2_FW_HEADER_PID_).
99 * Modified values for SAS ProductID Family
100 * (MPI2_FW_HEADER_PID_FAMILY_).
87 * -------------------------------------------------------------------------- 101 * --------------------------------------------------------------------------
88 */ 102 */
89 103
@@ -116,8 +130,10 @@ typedef struct _MPI2_IOC_INIT_REQUEST
116 U16 MsgVersion; /* 0x0C */ 130 U16 MsgVersion; /* 0x0C */
117 U16 HeaderVersion; /* 0x0E */ 131 U16 HeaderVersion; /* 0x0E */
118 U32 Reserved5; /* 0x10 */ 132 U32 Reserved5; /* 0x10 */
119 U32 Reserved6; /* 0x14 */ 133 U16 Reserved6; /* 0x14 */
120 U16 Reserved7; /* 0x18 */ 134 U8 Reserved7; /* 0x16 */
135 U8 HostMSIxVectors; /* 0x17 */
136 U16 Reserved8; /* 0x18 */
121 U16 SystemRequestFrameSize; /* 0x1A */ 137 U16 SystemRequestFrameSize; /* 0x1A */
122 U16 ReplyDescriptorPostQueueDepth; /* 0x1C */ 138 U16 ReplyDescriptorPostQueueDepth; /* 0x1C */
123 U16 ReplyFreeQueueDepth; /* 0x1E */ 139 U16 ReplyFreeQueueDepth; /* 0x1E */
@@ -212,7 +228,7 @@ typedef struct _MPI2_IOC_FACTS_REPLY
212 U8 MaxChainDepth; /* 0x14 */ 228 U8 MaxChainDepth; /* 0x14 */
213 U8 WhoInit; /* 0x15 */ 229 U8 WhoInit; /* 0x15 */
214 U8 NumberOfPorts; /* 0x16 */ 230 U8 NumberOfPorts; /* 0x16 */
215 U8 Reserved2; /* 0x17 */ 231 U8 MaxMSIxVectors; /* 0x17 */
216 U16 RequestCredit; /* 0x18 */ 232 U16 RequestCredit; /* 0x18 */
217 U16 ProductID; /* 0x1A */ 233 U16 ProductID; /* 0x1A */
218 U32 IOCCapabilities; /* 0x1C */ 234 U32 IOCCapabilities; /* 0x1C */
@@ -230,7 +246,8 @@ typedef struct _MPI2_IOC_FACTS_REPLY
230 U8 MaxVolumes; /* 0x37 */ 246 U8 MaxVolumes; /* 0x37 */
231 U16 MaxDevHandle; /* 0x38 */ 247 U16 MaxDevHandle; /* 0x38 */
232 U16 MaxPersistentEntries; /* 0x3A */ 248 U16 MaxPersistentEntries; /* 0x3A */
233 U32 Reserved4; /* 0x3C */ 249 U16 MinDevHandle; /* 0x3C */
250 U16 Reserved4; /* 0x3E */
234} MPI2_IOC_FACTS_REPLY, MPI2_POINTER PTR_MPI2_IOC_FACTS_REPLY, 251} MPI2_IOC_FACTS_REPLY, MPI2_POINTER PTR_MPI2_IOC_FACTS_REPLY,
235 Mpi2IOCFactsReply_t, MPI2_POINTER pMpi2IOCFactsReply_t; 252 Mpi2IOCFactsReply_t, MPI2_POINTER pMpi2IOCFactsReply_t;
236 253
@@ -266,6 +283,7 @@ typedef struct _MPI2_IOC_FACTS_REPLY
266/* ProductID field uses MPI2_FW_HEADER_PID_ */ 283/* ProductID field uses MPI2_FW_HEADER_PID_ */
267 284
268/* IOCCapabilities */ 285/* IOCCapabilities */
286#define MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY (0x00010000)
269#define MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX (0x00008000) 287#define MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX (0x00008000)
270#define MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR (0x00004000) 288#define MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR (0x00004000)
271#define MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY (0x00002000) 289#define MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY (0x00002000)
@@ -274,6 +292,7 @@ typedef struct _MPI2_IOC_FACTS_REPLY
274#define MPI2_IOCFACTS_CAPABILITY_MULTICAST (0x00000100) 292#define MPI2_IOCFACTS_CAPABILITY_MULTICAST (0x00000100)
275#define MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET (0x00000080) 293#define MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET (0x00000080)
276#define MPI2_IOCFACTS_CAPABILITY_EEDP (0x00000040) 294#define MPI2_IOCFACTS_CAPABILITY_EEDP (0x00000040)
295#define MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER (0x00000020)
277#define MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER (0x00000010) 296#define MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER (0x00000010)
278#define MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER (0x00000008) 297#define MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER (0x00000008)
279#define MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING (0x00000004) 298#define MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING (0x00000004)
@@ -448,6 +467,8 @@ typedef struct _MPI2_EVENT_NOTIFICATION_REPLY
448#define MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST (0x0020) 467#define MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST (0x0020)
449#define MPI2_EVENT_LOG_ENTRY_ADDED (0x0021) 468#define MPI2_EVENT_LOG_ENTRY_ADDED (0x0021)
450#define MPI2_EVENT_SAS_PHY_COUNTER (0x0022) 469#define MPI2_EVENT_SAS_PHY_COUNTER (0x0022)
470#define MPI2_EVENT_GPIO_INTERRUPT (0x0023)
471#define MPI2_EVENT_HOST_BASED_DISCOVERY_PHY (0x0024)
451 472
452 473
453/* Log Entry Added Event data */ 474/* Log Entry Added Event data */
@@ -469,6 +490,16 @@ typedef struct _MPI2_EVENT_DATA_LOG_ENTRY_ADDED
469 MPI2_POINTER PTR_MPI2_EVENT_DATA_LOG_ENTRY_ADDED, 490 MPI2_POINTER PTR_MPI2_EVENT_DATA_LOG_ENTRY_ADDED,
470 Mpi2EventDataLogEntryAdded_t, MPI2_POINTER pMpi2EventDataLogEntryAdded_t; 491 Mpi2EventDataLogEntryAdded_t, MPI2_POINTER pMpi2EventDataLogEntryAdded_t;
471 492
493/* GPIO Interrupt Event data */
494
495typedef struct _MPI2_EVENT_DATA_GPIO_INTERRUPT {
496 U8 GPIONum; /* 0x00 */
497 U8 Reserved1; /* 0x01 */
498 U16 Reserved2; /* 0x02 */
499} MPI2_EVENT_DATA_GPIO_INTERRUPT,
500 MPI2_POINTER PTR_MPI2_EVENT_DATA_GPIO_INTERRUPT,
501 Mpi2EventDataGpioInterrupt_t, MPI2_POINTER pMpi2EventDataGpioInterrupt_t;
502
472/* Hard Reset Received Event data */ 503/* Hard Reset Received Event data */
473 504
474typedef struct _MPI2_EVENT_DATA_HARD_RESET_RECEIVED 505typedef struct _MPI2_EVENT_DATA_HARD_RESET_RECEIVED
@@ -778,6 +809,7 @@ typedef struct _MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST
778 MPI2_POINTER pMpi2EventDataSasTopologyChangeList_t; 809 MPI2_POINTER pMpi2EventDataSasTopologyChangeList_t;
779 810
780/* values for the ExpStatus field */ 811/* values for the ExpStatus field */
812#define MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER (0x00)
781#define MPI2_EVENT_SAS_TOPO_ES_ADDED (0x01) 813#define MPI2_EVENT_SAS_TOPO_ES_ADDED (0x01)
782#define MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING (0x02) 814#define MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING (0x02)
783#define MPI2_EVENT_SAS_TOPO_ES_RESPONDING (0x03) 815#define MPI2_EVENT_SAS_TOPO_ES_RESPONDING (0x03)
@@ -863,6 +895,44 @@ typedef struct _MPI2_EVENT_DATA_SAS_PHY_COUNTER {
863 * */ 895 * */
864 896
865 897
898/* Host Based Discovery Phy Event data */
899
900typedef struct _MPI2_EVENT_HBD_PHY_SAS {
901 U8 Flags; /* 0x00 */
902 U8 NegotiatedLinkRate; /* 0x01 */
903 U8 PhyNum; /* 0x02 */
904 U8 PhysicalPort; /* 0x03 */
905 U32 Reserved1; /* 0x04 */
906 U8 InitialFrame[28]; /* 0x08 */
907} MPI2_EVENT_HBD_PHY_SAS, MPI2_POINTER PTR_MPI2_EVENT_HBD_PHY_SAS,
908 Mpi2EventHbdPhySas_t, MPI2_POINTER pMpi2EventHbdPhySas_t;
909
910/* values for the Flags field */
911#define MPI2_EVENT_HBD_SAS_FLAGS_FRAME_VALID (0x02)
912#define MPI2_EVENT_HBD_SAS_FLAGS_SATA_FRAME (0x01)
913
914/* use MPI2_SAS_NEG_LINK_RATE_ defines from mpi2_cnfg.h for
915 * the NegotiatedLinkRate field */
916
917typedef union _MPI2_EVENT_HBD_DESCRIPTOR {
918 MPI2_EVENT_HBD_PHY_SAS Sas;
919} MPI2_EVENT_HBD_DESCRIPTOR, MPI2_POINTER PTR_MPI2_EVENT_HBD_DESCRIPTOR,
920 Mpi2EventHbdDescriptor_t, MPI2_POINTER pMpi2EventHbdDescriptor_t;
921
922typedef struct _MPI2_EVENT_DATA_HBD_PHY {
923 U8 DescriptorType; /* 0x00 */
924 U8 Reserved1; /* 0x01 */
925 U16 Reserved2; /* 0x02 */
926 U32 Reserved3; /* 0x04 */
927 MPI2_EVENT_HBD_DESCRIPTOR Descriptor; /* 0x08 */
928} MPI2_EVENT_DATA_HBD_PHY, MPI2_POINTER PTR_MPI2_EVENT_DATA_HBD_PHY,
929 Mpi2EventDataHbdPhy_t, MPI2_POINTER pMpi2EventDataMpi2EventDataHbdPhy_t;
930
931/* values for the DescriptorType field */
932#define MPI2_EVENT_HBD_DT_SAS (0x01)
933
934
935
866/**************************************************************************** 936/****************************************************************************
867* EventAck message 937* EventAck message
868****************************************************************************/ 938****************************************************************************/
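
[Editor's note: a consumer of the new event would switch on DescriptorType before touching the union. A hedged sketch using only the names defined in the hunk above:]

```c
/* Sketch: decoding Host Based Discovery Phy event data. */
static void handle_hbd_phy_event(Mpi2EventDataHbdPhy_t *event_data)
{
	Mpi2EventHbdPhySas_t *sas;

	if (event_data->DescriptorType != MPI2_EVENT_HBD_DT_SAS)
		return;	/* only the SAS descriptor is defined so far */
	sas = &event_data->Descriptor.Sas;
	if (sas->Flags & MPI2_EVENT_HBD_SAS_FLAGS_FRAME_VALID) {
		/* InitialFrame[] carries the attached device's initial
		 * frame; the SATA_FRAME flag distinguishes a SATA frame
		 * from a SAS IDENTIFY frame. */
	}
}
```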
@@ -1111,12 +1181,17 @@ typedef struct _MPI2_FW_IMAGE_HEADER
1111#define MPI2_FW_HEADER_PID_TYPE_MASK (0xF000) 1181#define MPI2_FW_HEADER_PID_TYPE_MASK (0xF000)
1112#define MPI2_FW_HEADER_PID_TYPE_SAS (0x2000) 1182#define MPI2_FW_HEADER_PID_TYPE_SAS (0x2000)
1113 1183
1114#define MPI2_FW_HEADER_PID_PROD_MASK (0x0F00) 1184#define MPI2_FW_HEADER_PID_PROD_MASK (0x0F00)
1115#define MPI2_FW_HEADER_PID_PROD_A (0x0000) 1185#define MPI2_FW_HEADER_PID_PROD_A (0x0000)
1186#define MPI2_FW_HEADER_PID_PROD_MASK (0x0F00)
1187#define MPI2_FW_HEADER_PID_PROD_TARGET_INITIATOR_SCSI (0x0200)
1188#define MPI2_FW_HEADER_PID_PROD_IR_SCSI (0x0700)
1189
1116 1190
1117#define MPI2_FW_HEADER_PID_FAMILY_MASK (0x00FF) 1191#define MPI2_FW_HEADER_PID_FAMILY_MASK (0x00FF)
1118/* SAS */ 1192/* SAS */
1119#define MPI2_FW_HEADER_PID_FAMILY_2108_SAS (0x0010) 1193#define MPI2_FW_HEADER_PID_FAMILY_2108_SAS (0x0013)
1194#define MPI2_FW_HEADER_PID_FAMILY_2208_SAS (0x0014)
1120 1195
1121/* use MPI2_IOCFACTS_PROTOCOL_ defines for ProtocolFlags field */ 1196/* use MPI2_IOCFACTS_PROTOCOL_ defines for ProtocolFlags field */
1122 1197
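
[Editor's note: putting the new IOCFacts fields to use is straightforward. A sketch, assuming the driver's CPU-endian facts copy (ioc->facts, used elsewhere in this patch) mirrors the new reply fields:]

```c
/* Sketch: consuming the fields introduced in 02.00.13. */
u8 max_msix;

if (ioc->facts.IOCCapabilities &
    MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY)
	printk(KERN_INFO "IOC supports host based discovery\n");

/* MaxMSIxVectors bounds what the host may report back in
 * MPI2_IOC_INIT_REQUEST.HostMSIxVectors. */
max_msix = min_t(u8, ioc->facts.MaxMSIxVectors, (u8)num_online_cpus());
```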
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_raid.h b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
index 7134816d9046..5160c33d2a00 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
@@ -6,7 +6,7 @@
6 * Title: MPI Integrated RAID messages and structures 6 * Title: MPI Integrated RAID messages and structures
7 * Creation Date: April 26, 2007 7 * Creation Date: April 26, 2007
8 * 8 *
9 * mpi2_raid.h Version: 02.00.03 9 * mpi2_raid.h Version: 02.00.04
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -20,6 +20,8 @@
20 * 05-21-08 02.00.03 Added MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS so that 20 * 05-21-08 02.00.03 Added MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS so that
21 * the PhysDisk array in MPI2_RAID_VOLUME_CREATION_STRUCT 21 * the PhysDisk array in MPI2_RAID_VOLUME_CREATION_STRUCT
22 * can be sized by the build environment. 22 * can be sized by the build environment.
23 * 07-30-09 02.00.04 Added proper define for the Use Default Settings bit of
24 * VolumeCreationFlags and marked the old one as obsolete.
23 * -------------------------------------------------------------------------- 25 * --------------------------------------------------------------------------
24 */ 26 */
25 27
@@ -217,10 +219,14 @@ typedef struct _MPI2_RAID_VOLUME_CREATION_STRUCT
217/* use MPI2_RAID_VOL_TYPE_ defines from mpi2_cnfg.h for VolumeType */ 219/* use MPI2_RAID_VOL_TYPE_ defines from mpi2_cnfg.h for VolumeType */
218 220
219/* defines for the VolumeCreationFlags field */ 221/* defines for the VolumeCreationFlags field */
222#define MPI2_RAID_VOL_CREATION_DEFAULT_SETTINGS (0x80000000)
223#define MPI2_RAID_VOL_CREATION_BACKGROUND_INIT (0x00000004)
224#define MPI2_RAID_VOL_CREATION_LOW_LEVEL_INIT (0x00000002)
225#define MPI2_RAID_VOL_CREATION_MIGRATE_DATA (0x00000001)
226/* The following is an obsolete define.
227 * It must be shifted left 24 bits in order to set the proper bit.
228 */
220#define MPI2_RAID_VOL_CREATION_USE_DEFAULT_SETTINGS (0x80) 229#define MPI2_RAID_VOL_CREATION_USE_DEFAULT_SETTINGS (0x80)
221#define MPI2_RAID_VOL_CREATION_BACKGROUND_INIT (0x04)
222#define MPI2_RAID_VOL_CREATION_LOW_LEVEL_INIT (0x02)
223#define MPI2_RAID_VOL_CREATION_MIGRATE_DATA (0x01)
224 230
225 231
226/* RAID Online Capacity Expansion Structure */ 232/* RAID Online Capacity Expansion Structure */
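
[Editor's note: the relationship between the obsolete define and its replacement can be checked at compile time; a sketch (BUILD_BUG_ON must sit inside a function body):]

```c
/* Sketch: the obsolete 8-bit define, shifted left 24 bits as the comment
 * above instructs, lands on the same bit as the new 32-bit define. */
BUILD_BUG_ON(((u32)MPI2_RAID_VOL_CREATION_USE_DEFAULT_SETTINGS << 24) !=
	     MPI2_RAID_VOL_CREATION_DEFAULT_SETTINGS);
```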
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
index 8a42b136cf53..2d8aeed51392 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
@@ -6,7 +6,7 @@
6 * Title: MPI Serial Attached SCSI structures and definitions 6 * Title: MPI Serial Attached SCSI structures and definitions
7 * Creation Date: February 9, 2007 7 * Creation Date: February 9, 2007
8 * 8 *
 9 * mpi2_sas.h Version: 02.00.02 9 * mpi2_sas.h Version: 02.00.03
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -18,6 +18,8 @@
18 * Control Request. 18 * Control Request.
19 * 10-02-08 02.00.02 Added Set IOC Parameter Operation to SAS IO Unit Control 19 * 10-02-08 02.00.02 Added Set IOC Parameter Operation to SAS IO Unit Control
20 * Request. 20 * Request.
21 * 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST
22 * to MPI2_SGE_IO_UNION since it supports chained SGLs.
21 * -------------------------------------------------------------------------- 23 * --------------------------------------------------------------------------
22 */ 24 */
23 25
@@ -160,7 +162,7 @@ typedef struct _MPI2_SATA_PASSTHROUGH_REQUEST
160 U32 Reserved4; /* 0x14 */ 162 U32 Reserved4; /* 0x14 */
161 U32 DataLength; /* 0x18 */ 163 U32 DataLength; /* 0x18 */
162 U8 CommandFIS[20]; /* 0x1C */ 164 U8 CommandFIS[20]; /* 0x1C */
163 MPI2_SIMPLE_SGE_UNION SGL; /* 0x20 */ 165 MPI2_SGE_IO_UNION SGL; /* 0x20 */
164} MPI2_SATA_PASSTHROUGH_REQUEST, MPI2_POINTER PTR_MPI2_SATA_PASSTHROUGH_REQUEST, 166} MPI2_SATA_PASSTHROUGH_REQUEST, MPI2_POINTER PTR_MPI2_SATA_PASSTHROUGH_REQUEST,
165 Mpi2SataPassthroughRequest_t, MPI2_POINTER pMpi2SataPassthroughRequest_t; 167 Mpi2SataPassthroughRequest_t, MPI2_POINTER pMpi2SataPassthroughRequest_t;
166 168
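
[Editor's note: the type change matters because MPI2_SGE_IO_UNION, defined in mpi2.h roughly as below, admits a chain element as well as a simple one, which is what lets the SATA passthrough SGL spill across chained segments:]

```c
/* From mpi2.h, shown approximately for context (not part of this diff). */
typedef union _MPI2_SGE_IO_UNION {
	MPI2_SGE_SIMPLE_UNION	Simple;
	MPI2_SGE_CHAIN_UNION	Chain;
} MPI2_SGE_IO_UNION;
```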
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
index 007e950f7bfa..73fcdbf92632 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
@@ -6,7 +6,7 @@
6 * Title: MPI diagnostic tool structures and definitions 6 * Title: MPI diagnostic tool structures and definitions
7 * Creation Date: March 26, 2007 7 * Creation Date: March 26, 2007
8 * 8 *
9 * mpi2_tool.h Version: 02.00.03 9 * mpi2_tool.h Version: 02.00.04
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -18,6 +18,10 @@
18 * structures and defines. 18 * structures and defines.
19 * 02-29-08 02.00.02 Modified various names to make them 32-character unique. 19 * 02-29-08 02.00.02 Modified various names to make them 32-character unique.
20 * 05-06-09 02.00.03 Added ISTWI Read Write Tool and Diagnostic CLI Tool. 20 * 05-06-09 02.00.03 Added ISTWI Read Write Tool and Diagnostic CLI Tool.
21 * 07-30-09 02.00.04 Added ExtendedType field to DiagnosticBufferPost request
22 * and reply messages.
23 * Added MPI2_DIAG_BUF_TYPE_EXTENDED.
24 * Incremented MPI2_DIAG_BUF_TYPE_COUNT.
21 * -------------------------------------------------------------------------- 25 * --------------------------------------------------------------------------
22 */ 26 */
23 27
@@ -282,7 +286,7 @@ typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REPLY {
282 286
283typedef struct _MPI2_DIAG_BUFFER_POST_REQUEST 287typedef struct _MPI2_DIAG_BUFFER_POST_REQUEST
284{ 288{
285 U8 Reserved1; /* 0x00 */ 289 U8 ExtendedType; /* 0x00 */
286 U8 BufferType; /* 0x01 */ 290 U8 BufferType; /* 0x01 */
287 U8 ChainOffset; /* 0x02 */ 291 U8 ChainOffset; /* 0x02 */
288 U8 Function; /* 0x03 */ 292 U8 Function; /* 0x03 */
@@ -301,11 +305,15 @@ typedef struct _MPI2_DIAG_BUFFER_POST_REQUEST
301} MPI2_DIAG_BUFFER_POST_REQUEST, MPI2_POINTER PTR_MPI2_DIAG_BUFFER_POST_REQUEST, 305} MPI2_DIAG_BUFFER_POST_REQUEST, MPI2_POINTER PTR_MPI2_DIAG_BUFFER_POST_REQUEST,
302 Mpi2DiagBufferPostRequest_t, MPI2_POINTER pMpi2DiagBufferPostRequest_t; 306 Mpi2DiagBufferPostRequest_t, MPI2_POINTER pMpi2DiagBufferPostRequest_t;
303 307
308/* values for the ExtendedType field */
309#define MPI2_DIAG_EXTENDED_TYPE_UTILIZATION (0x02)
310
304/* values for the BufferType field */ 311/* values for the BufferType field */
305#define MPI2_DIAG_BUF_TYPE_TRACE (0x00) 312#define MPI2_DIAG_BUF_TYPE_TRACE (0x00)
306#define MPI2_DIAG_BUF_TYPE_SNAPSHOT (0x01) 313#define MPI2_DIAG_BUF_TYPE_SNAPSHOT (0x01)
314#define MPI2_DIAG_BUF_TYPE_EXTENDED (0x02)
307/* count of the number of buffer types */ 315/* count of the number of buffer types */
308#define MPI2_DIAG_BUF_TYPE_COUNT (0x02) 316#define MPI2_DIAG_BUF_TYPE_COUNT (0x03)
309 317
310 318
311/**************************************************************************** 319/****************************************************************************
@@ -314,7 +322,7 @@ typedef struct _MPI2_DIAG_BUFFER_POST_REQUEST
314 322
315typedef struct _MPI2_DIAG_BUFFER_POST_REPLY 323typedef struct _MPI2_DIAG_BUFFER_POST_REPLY
316{ 324{
317 U8 Reserved1; /* 0x00 */ 325 U8 ExtendedType; /* 0x00 */
318 U8 BufferType; /* 0x01 */ 326 U8 BufferType; /* 0x01 */
319 U8 MsgLength; /* 0x02 */ 327 U8 MsgLength; /* 0x02 */
320 U8 Function; /* 0x03 */ 328 U8 Function; /* 0x03 */
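
[Editor's note: posting one of the new extended buffers means filling both type fields. A sketch of just the request setup; buffer allocation, SGE construction and submission are omitted:]

```c
/* Sketch: requesting an extended (utilization) diagnostic buffer. */
Mpi2DiagBufferPostRequest_t diag_request;

memset(&diag_request, 0, sizeof(Mpi2DiagBufferPostRequest_t));
diag_request.Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
diag_request.BufferType = MPI2_DIAG_BUF_TYPE_EXTENDED;
diag_request.ExtendedType = MPI2_DIAG_EXTENDED_TYPE_UTILIZATION;
```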
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 670241efa4b5..88e6eebc3159 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -57,6 +57,7 @@
57#include <linux/dma-mapping.h> 57#include <linux/dma-mapping.h>
58#include <linux/sort.h> 58#include <linux/sort.h>
59#include <linux/io.h> 59#include <linux/io.h>
60#include <linux/time.h>
60 61
61#include "mpt2sas_base.h" 62#include "mpt2sas_base.h"
62 63
@@ -77,6 +78,43 @@ static int msix_disable = -1;
77module_param(msix_disable, int, 0); 78module_param(msix_disable, int, 0);
78MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)"); 79MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
79 80
81/* diag_buffer_enable is bitwise
82 * bit 0 set = TRACE
83 * bit 1 set = SNAPSHOT
84 * bit 2 set = EXTENDED
85 *
 86 * Any combination of these bits can be set
87 */
88static int diag_buffer_enable;
89module_param(diag_buffer_enable, int, 0);
90MODULE_PARM_DESC(diag_buffer_enable, " post diag buffers "
91 "(TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
92
93int mpt2sas_fwfault_debug;
94MODULE_PARM_DESC(mpt2sas_fwfault_debug, " enable detection of firmware fault "
95 "and halt firmware - (default=0)");
96
97/**
98 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
99 *
100 */
101static int
102_scsih_set_fwfault_debug(const char *val, struct kernel_param *kp)
103{
104 int ret = param_set_int(val, kp);
105 struct MPT2SAS_ADAPTER *ioc;
106
107 if (ret)
108 return ret;
109
110 printk(KERN_INFO "setting fwfault_debug(%d)\n", mpt2sas_fwfault_debug);
111 list_for_each_entry(ioc, &mpt2sas_ioc_list, list)
112 ioc->fwfault_debug = mpt2sas_fwfault_debug;
113 return 0;
114}
115module_param_call(mpt2sas_fwfault_debug, _scsih_set_fwfault_debug,
116 param_get_int, &mpt2sas_fwfault_debug, 0644);
117
80/** 118/**
81 * _base_fault_reset_work - workq handling ioc fault conditions 119 * _base_fault_reset_work - workq handling ioc fault conditions
82 * @work: input argument, used to derive ioc 120 * @work: input argument, used to derive ioc
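
[Editor's note: both knobs are ordinary module parameters, so diag_buffer_enable can be given at load time (modprobe mpt2sas diag_buffer_enable=1), while mpt2sas_fwfault_debug, registered through module_param_call() with mode 0644, can also be toggled at runtime via /sys/module/mpt2sas/parameters/mpt2sas_fwfault_debug; the _scsih_set_fwfault_debug() callback then propagates the new value to every adapter on mpt2sas_ioc_list.]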
@@ -121,7 +159,7 @@ _base_fault_reset_work(struct work_struct *work)
121 159
122/** 160/**
123 * mpt2sas_base_start_watchdog - start the fault_reset_work_q 161 * mpt2sas_base_start_watchdog - start the fault_reset_work_q
124 * @ioc: pointer to scsi command object 162 * @ioc: per adapter object
125 * Context: sleep. 163 * Context: sleep.
126 * 164 *
127 * Return nothing. 165 * Return nothing.
@@ -155,7 +193,7 @@ mpt2sas_base_start_watchdog(struct MPT2SAS_ADAPTER *ioc)
155 193
156/** 194/**
157 * mpt2sas_base_stop_watchdog - stop the fault_reset_work_q 195 * mpt2sas_base_stop_watchdog - stop the fault_reset_work_q
158 * @ioc: pointer to scsi command object 196 * @ioc: per adapter object
159 * Context: sleep. 197 * Context: sleep.
160 * 198 *
161 * Return nothing. 199 * Return nothing.
@@ -177,10 +215,55 @@ mpt2sas_base_stop_watchdog(struct MPT2SAS_ADAPTER *ioc)
177 } 215 }
178} 216}
179 217
218/**
219 * mpt2sas_base_fault_info - verbose translation of firmware FAULT code
220 * @ioc: per adapter object
221 * @fault_code: fault code
222 *
223 * Return nothing.
224 */
225void
226mpt2sas_base_fault_info(struct MPT2SAS_ADAPTER *ioc , u16 fault_code)
227{
228 printk(MPT2SAS_ERR_FMT "fault_state(0x%04x)!\n",
229 ioc->name, fault_code);
230}
231
232/**
 233 * mpt2sas_halt_firmware - halts mpt controller firmware
234 * @ioc: per adapter object
235 *
 236 * For debugging timeout related issues. Writing 0xC0FFEE00
 237 * to the doorbell register will halt controller firmware. The
 238 * intent is to stop both driver and firmware so the end user can
 239 * obtain a ring buffer from the controller UART.
240 */
241void
242mpt2sas_halt_firmware(struct MPT2SAS_ADAPTER *ioc)
243{
244 u32 doorbell;
245
246 if (!ioc->fwfault_debug)
247 return;
248
249 dump_stack();
250
251 doorbell = readl(&ioc->chip->Doorbell);
252 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
253 mpt2sas_base_fault_info(ioc , doorbell);
254 else {
255 writel(0xC0FFEE00, &ioc->chip->Doorbell);
256 printk(MPT2SAS_ERR_FMT "Firmware is halted due to command "
257 "timeout\n", ioc->name);
258 }
259
260 panic("panic in %s\n", __func__);
261}
262
180#ifdef CONFIG_SCSI_MPT2SAS_LOGGING 263#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
181/** 264/**
182 * _base_sas_ioc_info - verbose translation of the ioc status 265 * _base_sas_ioc_info - verbose translation of the ioc status
183 * @ioc: pointer to scsi command object 266 * @ioc: per adapter object
184 * @mpi_reply: reply mf payload returned from firmware 267 * @mpi_reply: reply mf payload returned from firmware
185 * @request_hdr: request mf 268 * @request_hdr: request mf
186 * 269 *
@@ -394,7 +477,7 @@ _base_sas_ioc_info(struct MPT2SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
394 477
395/** 478/**
 396 * _base_display_event_data - verbose translation of firmware async events 479
397 * @ioc: pointer to scsi command object 480 * @ioc: per adapter object
398 * @mpi_reply: reply mf payload returned from firmware 481 * @mpi_reply: reply mf payload returned from firmware
399 * 482 *
400 * Return nothing. 483 * Return nothing.
@@ -474,7 +557,7 @@ _base_display_event_data(struct MPT2SAS_ADAPTER *ioc,
474 557
475/** 558/**
476 * _base_sas_log_info - verbose translation of firmware log info 559 * _base_sas_log_info - verbose translation of firmware log info
477 * @ioc: pointer to scsi command object 560 * @ioc: per adapter object
478 * @log_info: log info 561 * @log_info: log info
479 * 562 *
480 * Return nothing. 563 * Return nothing.
@@ -526,22 +609,8 @@ _base_sas_log_info(struct MPT2SAS_ADAPTER *ioc , u32 log_info)
526} 609}
527 610
528/** 611/**
529 * mpt2sas_base_fault_info - verbose translation of firmware FAULT code
530 * @ioc: pointer to scsi command object
531 * @fault_code: fault code
532 *
533 * Return nothing.
534 */
535void
536mpt2sas_base_fault_info(struct MPT2SAS_ADAPTER *ioc , u16 fault_code)
537{
538 printk(MPT2SAS_ERR_FMT "fault_state(0x%04x)!\n",
539 ioc->name, fault_code);
540}
541
542/**
543 * _base_display_reply_info - 612 * _base_display_reply_info -
544 * @ioc: pointer to scsi command object 613 * @ioc: per adapter object
545 * @smid: system request message index 614 * @smid: system request message index
546 * @msix_index: MSIX table index supplied by the OS 615 * @msix_index: MSIX table index supplied by the OS
547 * @reply: reply message frame(lower 32bit addr) 616 * @reply: reply message frame(lower 32bit addr)
@@ -570,7 +639,7 @@ _base_display_reply_info(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
570 639
571/** 640/**
572 * mpt2sas_base_done - base internal command completion routine 641 * mpt2sas_base_done - base internal command completion routine
573 * @ioc: pointer to scsi command object 642 * @ioc: per adapter object
574 * @smid: system request message index 643 * @smid: system request message index
575 * @msix_index: MSIX table index supplied by the OS 644 * @msix_index: MSIX table index supplied by the OS
576 * @reply: reply message frame(lower 32bit addr) 645 * @reply: reply message frame(lower 32bit addr)
@@ -603,7 +672,7 @@ mpt2sas_base_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
603 672
604/** 673/**
 605 * _base_async_event - main callback handler for firmware async events 674
606 * @ioc: pointer to scsi command object 675 * @ioc: per adapter object
607 * @msix_index: MSIX table index supplied by the OS 676 * @msix_index: MSIX table index supplied by the OS
608 * @reply: reply message frame(lower 32bit addr) 677 * @reply: reply message frame(lower 32bit addr)
609 * 678 *
@@ -684,7 +753,7 @@ _base_get_cb_idx(struct MPT2SAS_ADAPTER *ioc, u16 smid)
684 753
685/** 754/**
686 * _base_mask_interrupts - disable interrupts 755 * _base_mask_interrupts - disable interrupts
687 * @ioc: pointer to scsi command object 756 * @ioc: per adapter object
688 * 757 *
689 * Disabling ResetIRQ, Reply and Doorbell Interrupts 758 * Disabling ResetIRQ, Reply and Doorbell Interrupts
690 * 759 *
@@ -704,7 +773,7 @@ _base_mask_interrupts(struct MPT2SAS_ADAPTER *ioc)
704 773
705/** 774/**
706 * _base_unmask_interrupts - enable interrupts 775 * _base_unmask_interrupts - enable interrupts
707 * @ioc: pointer to scsi command object 776 * @ioc: per adapter object
708 * 777 *
709 * Enabling only Reply Interrupts 778 * Enabling only Reply Interrupts
710 * 779 *
@@ -1152,6 +1221,8 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
1152 u32 memap_sz; 1221 u32 memap_sz;
1153 u32 pio_sz; 1222 u32 pio_sz;
1154 int i, r = 0; 1223 int i, r = 0;
1224 u64 pio_chip = 0;
1225 u64 chip_phys = 0;
1155 1226
1156 dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", 1227 dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n",
1157 ioc->name, __func__)); 1228 ioc->name, __func__));
@@ -1185,12 +1256,13 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
1185 if (pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO) { 1256 if (pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO) {
1186 if (pio_sz) 1257 if (pio_sz)
1187 continue; 1258 continue;
1188 ioc->pio_chip = pci_resource_start(pdev, i); 1259 pio_chip = (u64)pci_resource_start(pdev, i);
1189 pio_sz = pci_resource_len(pdev, i); 1260 pio_sz = pci_resource_len(pdev, i);
1190 } else { 1261 } else {
1191 if (memap_sz) 1262 if (memap_sz)
1192 continue; 1263 continue;
1193 ioc->chip_phys = pci_resource_start(pdev, i); 1264 ioc->chip_phys = pci_resource_start(pdev, i);
1265 chip_phys = (u64)ioc->chip_phys;
1194 memap_sz = pci_resource_len(pdev, i); 1266 memap_sz = pci_resource_len(pdev, i);
1195 ioc->chip = ioremap(ioc->chip_phys, memap_sz); 1267 ioc->chip = ioremap(ioc->chip_phys, memap_sz);
1196 if (ioc->chip == NULL) { 1268 if (ioc->chip == NULL) {
@@ -1210,10 +1282,10 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
1210 printk(MPT2SAS_INFO_FMT "%s: IRQ %d\n", 1282 printk(MPT2SAS_INFO_FMT "%s: IRQ %d\n",
1211 ioc->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" : 1283 ioc->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
1212 "IO-APIC enabled"), ioc->pci_irq); 1284 "IO-APIC enabled"), ioc->pci_irq);
1213 printk(MPT2SAS_INFO_FMT "iomem(0x%lx), mapped(0x%p), size(%d)\n", 1285 printk(MPT2SAS_INFO_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
1214 ioc->name, ioc->chip_phys, ioc->chip, memap_sz); 1286 ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz);
1215 printk(MPT2SAS_INFO_FMT "ioport(0x%lx), size(%d)\n", 1287 printk(MPT2SAS_INFO_FMT "ioport(0x%016llx), size(%d)\n",
1216 ioc->name, ioc->pio_chip, pio_sz); 1288 ioc->name, (unsigned long long)pio_chip, pio_sz);
1217 1289
1218 return 0; 1290 return 0;
1219 1291
@@ -1258,12 +1330,13 @@ mpt2sas_base_get_sense_buffer(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1258 * @ioc: per adapter object 1330 * @ioc: per adapter object
1259 * @smid: system request message index 1331 * @smid: system request message index
1260 * 1332 *
1261 * Returns phys pointer to sense buffer. 1333 * Returns phys pointer to the low 32bit address of the sense buffer.
1262 */ 1334 */
1263dma_addr_t 1335__le32
1264mpt2sas_base_get_sense_buffer_dma(struct MPT2SAS_ADAPTER *ioc, u16 smid) 1336mpt2sas_base_get_sense_buffer_dma(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1265{ 1337{
1266 return ioc->sense_dma + ((smid - 1) * SCSI_SENSE_BUFFERSIZE); 1338 return cpu_to_le32(ioc->sense_dma +
1339 ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
1267} 1340}
1268 1341
1269/** 1342/**
@@ -1697,6 +1770,12 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
1697 } 1770 }
1698 1771
1699 if (ioc->facts.IOCCapabilities & 1772 if (ioc->facts.IOCCapabilities &
1773 MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
1774 printk(KERN_INFO "%sDiag Extended Buffer", i ? "," : "");
1775 i++;
1776 }
1777
1778 if (ioc->facts.IOCCapabilities &
1700 MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) { 1779 MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
1701 printk("%sTask Set Full", i ? "," : ""); 1780 printk("%sTask Set Full", i ? "," : "");
1702 i++; 1781 i++;
@@ -2871,6 +2950,8 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
2871 Mpi2IOCInitRequest_t mpi_request; 2950 Mpi2IOCInitRequest_t mpi_request;
2872 Mpi2IOCInitReply_t mpi_reply; 2951 Mpi2IOCInitReply_t mpi_reply;
2873 int r; 2952 int r;
2953 struct timeval current_time;
2954 u16 ioc_status;
2874 2955
2875 dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name, 2956 dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name,
2876 __func__)); 2957 __func__));
@@ -2921,6 +3002,13 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
2921 cpu_to_le32(ioc->reply_post_free_dma); 3002 cpu_to_le32(ioc->reply_post_free_dma);
2922#endif 3003#endif
2923 3004
3005 /* This time stamp specifies number of milliseconds
3006 * since epoch ~ midnight January 1, 1970.
3007 */
3008 do_gettimeofday(&current_time);
3009 mpi_request.TimeStamp = (current_time.tv_sec * 1000) +
3010 (current_time.tv_usec >> 3);
3011
2924 if (ioc->logging_level & MPT_DEBUG_INIT) { 3012 if (ioc->logging_level & MPT_DEBUG_INIT) {
2925 u32 *mfp; 3013 u32 *mfp;
2926 int i; 3014 int i;
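
[Editor's note: one caveat on the sub-second term above: tv_usec >> 3 divides by 8, not 1000, so the millisecond part of the stamp is only approximate (cheap, and presumably sufficient for the firmware's coarse clock). The exact conversion, for comparison:]

```c
/* Sketch: exact milliseconds since the Unix epoch from a struct timeval. */
u64 msecs = (u64)current_time.tv_sec * 1000 +
	    current_time.tv_usec / 1000;
```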
@@ -2943,7 +3031,8 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
2943 return r; 3031 return r;
2944 } 3032 }
2945 3033
2946 if (mpi_reply.IOCStatus != MPI2_IOCSTATUS_SUCCESS || 3034 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
3035 if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
2947 mpi_reply.IOCLogInfo) { 3036 mpi_reply.IOCLogInfo) {
2948 printk(MPT2SAS_ERR_FMT "%s: failed\n", ioc->name, __func__); 3037 printk(MPT2SAS_ERR_FMT "%s: failed\n", ioc->name, __func__);
2949 r = -EIO; 3038 r = -EIO;
@@ -3461,11 +3550,11 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
3461 return r; 3550 return r;
3462 3551
3463 pci_set_drvdata(ioc->pdev, ioc->shost); 3552 pci_set_drvdata(ioc->pdev, ioc->shost);
3464 r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET); 3553 r = _base_get_ioc_facts(ioc, CAN_SLEEP);
3465 if (r) 3554 if (r)
3466 goto out_free_resources; 3555 goto out_free_resources;
3467 3556
3468 r = _base_get_ioc_facts(ioc, CAN_SLEEP); 3557 r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
3469 if (r) 3558 if (r)
3470 goto out_free_resources; 3559 goto out_free_resources;
3471 3560
@@ -3486,6 +3575,8 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
3486 3575
3487 init_waitqueue_head(&ioc->reset_wq); 3576 init_waitqueue_head(&ioc->reset_wq);
3488 3577
3578 ioc->fwfault_debug = mpt2sas_fwfault_debug;
3579
3489 /* base internal command bits */ 3580 /* base internal command bits */
3490 mutex_init(&ioc->base_cmds.mutex); 3581 mutex_init(&ioc->base_cmds.mutex);
3491 ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); 3582 ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
@@ -3496,6 +3587,11 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
3496 ioc->transport_cmds.status = MPT2_CMD_NOT_USED; 3587 ioc->transport_cmds.status = MPT2_CMD_NOT_USED;
3497 mutex_init(&ioc->transport_cmds.mutex); 3588 mutex_init(&ioc->transport_cmds.mutex);
3498 3589
3590 /* scsih internal command bits */
3591 ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
3592 ioc->scsih_cmds.status = MPT2_CMD_NOT_USED;
3593 mutex_init(&ioc->scsih_cmds.mutex);
3594
3499 /* task management internal command bits */ 3595 /* task management internal command bits */
3500 ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); 3596 ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
3501 ioc->tm_cmds.status = MPT2_CMD_NOT_USED; 3597 ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
@@ -3531,6 +3627,8 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
3531 goto out_free_resources; 3627 goto out_free_resources;
3532 3628
3533 mpt2sas_base_start_watchdog(ioc); 3629 mpt2sas_base_start_watchdog(ioc);
3630 if (diag_buffer_enable != 0)
3631 mpt2sas_enable_diag_buffer(ioc, diag_buffer_enable);
3534 return 0; 3632 return 0;
3535 3633
3536 out_free_resources: 3634 out_free_resources:
@@ -3684,6 +3782,9 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
3684 dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: enter\n", ioc->name, 3782 dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: enter\n", ioc->name,
3685 __func__)); 3783 __func__));
3686 3784
3785 if (mpt2sas_fwfault_debug)
3786 mpt2sas_halt_firmware(ioc);
3787
3687 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); 3788 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
3688 if (ioc->shost_recovery) { 3789 if (ioc->shost_recovery) {
3689 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); 3790 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index 0cf6bc236e4d..e18b0544c38f 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -69,10 +69,10 @@
69#define MPT2SAS_DRIVER_NAME "mpt2sas" 69#define MPT2SAS_DRIVER_NAME "mpt2sas"
70#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>" 70#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
71#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" 71#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
72#define MPT2SAS_DRIVER_VERSION "02.100.03.00" 72#define MPT2SAS_DRIVER_VERSION "04.100.01.00"
73#define MPT2SAS_MAJOR_VERSION 02 73#define MPT2SAS_MAJOR_VERSION 04
74#define MPT2SAS_MINOR_VERSION 100 74#define MPT2SAS_MINOR_VERSION 100
75#define MPT2SAS_BUILD_VERSION 03 75#define MPT2SAS_BUILD_VERSION 01
76#define MPT2SAS_RELEASE_VERSION 00 76#define MPT2SAS_RELEASE_VERSION 00
77 77
78/* 78/*
@@ -278,7 +278,7 @@ struct _internal_cmd {
278 * @sas_address: device sas address 278 * @sas_address: device sas address
279 * @device_name: retrieved from the SAS IDENTIFY frame. 279 * @device_name: retrieved from the SAS IDENTIFY frame.
280 * @handle: device handle 280 * @handle: device handle
281 * @parent_handle: handle to parent device 281 * @sas_address_parent: sas address of parent expander or sas host
282 * @enclosure_handle: enclosure handle 282 * @enclosure_handle: enclosure handle
283 * @enclosure_logical_id: enclosure logical identifier 283 * @enclosure_logical_id: enclosure logical identifier
284 * @volume_handle: volume handle (valid when hidden raid member) 284 * @volume_handle: volume handle (valid when hidden raid member)
@@ -296,7 +296,7 @@ struct _sas_device {
296 u64 sas_address; 296 u64 sas_address;
297 u64 device_name; 297 u64 device_name;
298 u16 handle; 298 u16 handle;
299 u16 parent_handle; 299 u64 sas_address_parent;
300 u16 enclosure_handle; 300 u16 enclosure_handle;
301 u64 enclosure_logical_id; 301 u64 enclosure_logical_id;
302 u16 volume_handle; 302 u16 volume_handle;
@@ -323,6 +323,7 @@ struct _sas_device {
323 * @device_info: bitfield provides detailed info about the hidden components 323 * @device_info: bitfield provides detailed info about the hidden components
324 * @num_pds: number of hidden raid components 324 * @num_pds: number of hidden raid components
325 * @responding: used in _scsih_raid_device_mark_responding 325 * @responding: used in _scsih_raid_device_mark_responding
326 * @percent_complete: resync percent complete
326 */ 327 */
327struct _raid_device { 328struct _raid_device {
328 struct list_head list; 329 struct list_head list;
@@ -336,6 +337,7 @@ struct _raid_device {
336 u32 device_info; 337 u32 device_info;
337 u8 num_pds; 338 u8 num_pds;
338 u8 responding; 339 u8 responding;
340 u8 percent_complete;
339}; 341};
340 342
341/** 343/**
@@ -352,8 +354,6 @@ struct _boot_device {
352/** 354/**
353 * struct _sas_port - wide/narrow sas port information 355 * struct _sas_port - wide/narrow sas port information
354 * @port_list: list of ports belonging to expander 356 * @port_list: list of ports belonging to expander
355 * @handle: device handle for this port
356 * @sas_address: sas address of this port
357 * @num_phys: number of phys belonging to this port 357 * @num_phys: number of phys belonging to this port
358 * @remote_identify: attached device identification 358 * @remote_identify: attached device identification
359 * @rphy: sas transport rphy object 359 * @rphy: sas transport rphy object
@@ -362,8 +362,6 @@ struct _boot_device {
362 */ 362 */
363struct _sas_port { 363struct _sas_port {
364 struct list_head port_list; 364 struct list_head port_list;
365 u16 handle;
366 u64 sas_address;
367 u8 num_phys; 365 u8 num_phys;
368 struct sas_identify remote_identify; 366 struct sas_identify remote_identify;
369 struct sas_rphy *rphy; 367 struct sas_rphy *rphy;
@@ -398,7 +396,7 @@ struct _sas_phy {
398 * @num_phys: number phys belonging to this sas_host/expander 396 * @num_phys: number phys belonging to this sas_host/expander
399 * @sas_address: sas address of this sas_host/expander 397 * @sas_address: sas address of this sas_host/expander
400 * @handle: handle for this sas_host/expander 398 * @handle: handle for this sas_host/expander
401 * @parent_handle: parent handle 399 * @sas_address_parent: sas address of parent expander or sas host
402 * @enclosure_handle: handle for this a member of an enclosure 400 * @enclosure_handle: handle for this a member of an enclosure
403 * @device_info: bitwise defining capabilities of this sas_host/expander 401 * @device_info: bitwise defining capabilities of this sas_host/expander
404 * @responding: used in _scsih_expander_device_mark_responding 402 * @responding: used in _scsih_expander_device_mark_responding
@@ -411,7 +409,7 @@ struct _sas_node {
411 u8 num_phys; 409 u8 num_phys;
412 u64 sas_address; 410 u64 sas_address;
413 u16 handle; 411 u16 handle;
414 u16 parent_handle; 412 u64 sas_address_parent;
415 u16 enclosure_handle; 413 u16 enclosure_handle;
416 u64 enclosure_logical_id; 414 u64 enclosure_logical_id;
417 u8 responding; 415 u8 responding;
@@ -468,8 +466,8 @@ typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr);
468 * @pdev: pci pdev object 466 * @pdev: pci pdev object
469 * @chip: memory mapped register space 467 * @chip: memory mapped register space
 470 * @chip_phys: physical address prior to mapping 468
471 * @pio_chip: I/O mapped register space
472 * @logging_level: see mpt2sas_debug.h 469 * @logging_level: see mpt2sas_debug.h
 470 * @fwfault_debug: debugging FW timeouts
473 * @ir_firmware: IR firmware present 471 * @ir_firmware: IR firmware present
474 * @bars: bitmask of BAR's that must be configured 472 * @bars: bitmask of BAR's that must be configured
475 * @mask_interrupts: ignore interrupt 473 * @mask_interrupts: ignore interrupt
@@ -495,12 +493,14 @@ typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr);
495 * @msix_table_backup: backup msix table 493 * @msix_table_backup: backup msix table
496 * @scsi_io_cb_idx: shost generated commands 494 * @scsi_io_cb_idx: shost generated commands
497 * @tm_cb_idx: task management commands 495 * @tm_cb_idx: task management commands
496 * @scsih_cb_idx: scsih internal commands
498 * @transport_cb_idx: transport internal commands 497 * @transport_cb_idx: transport internal commands
 499 * @ctl_cb_idx: ctl internal commands 498
500 * @base_cb_idx: base internal commands 499 * @base_cb_idx: base internal commands
 501 * @config_cb_idx: config internal commands 500
502 * @base_cmds: 501 * @base_cmds:
503 * @transport_cmds: 502 * @transport_cmds:
503 * @scsih_cmds:
504 * @tm_cmds: 504 * @tm_cmds:
505 * @ctl_cmds: 505 * @ctl_cmds:
506 * @config_cmds: 506 * @config_cmds:
@@ -588,9 +588,9 @@ struct MPT2SAS_ADAPTER {
588 char tmp_string[MPT_STRING_LENGTH]; 588 char tmp_string[MPT_STRING_LENGTH];
589 struct pci_dev *pdev; 589 struct pci_dev *pdev;
590 Mpi2SystemInterfaceRegs_t __iomem *chip; 590 Mpi2SystemInterfaceRegs_t __iomem *chip;
591 unsigned long chip_phys; 591 resource_size_t chip_phys;
592 unsigned long pio_chip;
593 int logging_level; 592 int logging_level;
593 int fwfault_debug;
594 u8 ir_firmware; 594 u8 ir_firmware;
595 int bars; 595 int bars;
596 u8 mask_interrupts; 596 u8 mask_interrupts;
@@ -626,6 +626,7 @@ struct MPT2SAS_ADAPTER {
626 u8 scsi_io_cb_idx; 626 u8 scsi_io_cb_idx;
627 u8 tm_cb_idx; 627 u8 tm_cb_idx;
628 u8 transport_cb_idx; 628 u8 transport_cb_idx;
629 u8 scsih_cb_idx;
629 u8 ctl_cb_idx; 630 u8 ctl_cb_idx;
630 u8 base_cb_idx; 631 u8 base_cb_idx;
631 u8 config_cb_idx; 632 u8 config_cb_idx;
@@ -633,6 +634,7 @@ struct MPT2SAS_ADAPTER {
633 u8 tm_sas_control_cb_idx; 634 u8 tm_sas_control_cb_idx;
634 struct _internal_cmd base_cmds; 635 struct _internal_cmd base_cmds;
635 struct _internal_cmd transport_cmds; 636 struct _internal_cmd transport_cmds;
637 struct _internal_cmd scsih_cmds;
636 struct _internal_cmd tm_cmds; 638 struct _internal_cmd tm_cmds;
637 struct _internal_cmd ctl_cmds; 639 struct _internal_cmd ctl_cmds;
638 struct _internal_cmd config_cmds; 640 struct _internal_cmd config_cmds;
@@ -773,7 +775,7 @@ int mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
773void *mpt2sas_base_get_msg_frame(struct MPT2SAS_ADAPTER *ioc, u16 smid); 775void *mpt2sas_base_get_msg_frame(struct MPT2SAS_ADAPTER *ioc, u16 smid);
774void *mpt2sas_base_get_sense_buffer(struct MPT2SAS_ADAPTER *ioc, u16 smid); 776void *mpt2sas_base_get_sense_buffer(struct MPT2SAS_ADAPTER *ioc, u16 smid);
775void mpt2sas_base_build_zero_len_sge(struct MPT2SAS_ADAPTER *ioc, void *paddr); 777void mpt2sas_base_build_zero_len_sge(struct MPT2SAS_ADAPTER *ioc, void *paddr);
776dma_addr_t mpt2sas_base_get_sense_buffer_dma(struct MPT2SAS_ADAPTER *ioc, 778__le32 mpt2sas_base_get_sense_buffer_dma(struct MPT2SAS_ADAPTER *ioc,
777 u16 smid); 779 u16 smid);
778 780
779/* hi-priority queue */ 781/* hi-priority queue */
@@ -807,6 +809,8 @@ int mpt2sas_base_scsi_enclosure_processor(struct MPT2SAS_ADAPTER *ioc,
807 Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request); 809 Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request);
808void mpt2sas_base_validate_event_type(struct MPT2SAS_ADAPTER *ioc, u32 *event_type); 810void mpt2sas_base_validate_event_type(struct MPT2SAS_ADAPTER *ioc, u32 *event_type);
809 811
812void mpt2sas_halt_firmware(struct MPT2SAS_ADAPTER *ioc);
813
810/* scsih shared API */ 814/* scsih shared API */
811u8 mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, 815u8 mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
812 u32 reply); 816 u32 reply);
@@ -849,6 +853,8 @@ int mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
849 *mpi_reply, Mpi2IOUnitPage1_t *config_page); 853 *mpi_reply, Mpi2IOUnitPage1_t *config_page);
850int mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t 854int mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
851 *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, u16 sz); 855 *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, u16 sz);
856int mpt2sas_config_set_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc,
857 Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, u16 sz);
852int mpt2sas_config_get_ioc_pg8(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t 858int mpt2sas_config_get_ioc_pg8(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
853 *mpi_reply, Mpi2IOCPage8_t *config_page); 859 *mpi_reply, Mpi2IOCPage8_t *config_page);
854int mpt2sas_config_get_expander_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t 860int mpt2sas_config_get_expander_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
@@ -886,19 +892,22 @@ u8 mpt2sas_ctl_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
886void mpt2sas_ctl_add_to_event_log(struct MPT2SAS_ADAPTER *ioc, 892void mpt2sas_ctl_add_to_event_log(struct MPT2SAS_ADAPTER *ioc,
887 Mpi2EventNotificationReply_t *mpi_reply); 893 Mpi2EventNotificationReply_t *mpi_reply);
888 894
895void mpt2sas_enable_diag_buffer(struct MPT2SAS_ADAPTER *ioc,
 896 u8 bits_to_register);
897
889/* transport shared API */ 898/* transport shared API */
890u8 mpt2sas_transport_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, 899u8 mpt2sas_transport_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
891 u32 reply); 900 u32 reply);
892struct _sas_port *mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, 901struct _sas_port *mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc,
893 u16 handle, u16 parent_handle); 902 u16 handle, u64 sas_address);
894void mpt2sas_transport_port_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address, 903void mpt2sas_transport_port_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
895 u16 parent_handle); 904 u64 sas_address_parent);
896int mpt2sas_transport_add_host_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_phy 905int mpt2sas_transport_add_host_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_phy
897 *mpt2sas_phy, Mpi2SasPhyPage0_t phy_pg0, struct device *parent_dev); 906 *mpt2sas_phy, Mpi2SasPhyPage0_t phy_pg0, struct device *parent_dev);
898int mpt2sas_transport_add_expander_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_phy 907int mpt2sas_transport_add_expander_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_phy
899 *mpt2sas_phy, Mpi2ExpanderPage1_t expander_pg1, struct device *parent_dev); 908 *mpt2sas_phy, Mpi2ExpanderPage1_t expander_pg1, struct device *parent_dev);
900void mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc, u16 handle, 909void mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc,
901 u16 attached_handle, u8 phy_number, u8 link_rate); 910 u64 sas_address, u16 handle, u8 phy_number, u8 link_rate);
902extern struct sas_function_template mpt2sas_transport_functions; 911extern struct sas_function_template mpt2sas_transport_functions;
903extern struct scsi_transport_template *mpt2sas_transport_template; 912extern struct scsi_transport_template *mpt2sas_transport_template;
904extern int scsi_internal_device_block(struct scsi_device *sdev); 913extern int scsi_internal_device_block(struct scsi_device *sdev);
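
[Editor's note: the net effect of this header rework is that the transport layer is now keyed by SAS address rather than by device handle; handles can be reassigned across resets, addresses cannot. A hedged sketch of the updated call pattern, using the _sas_device fields documented above:]

```c
/* Sketch: port add/remove against the new prototypes. */
mpt2sas_transport_port_add(ioc, sas_device->handle,
    sas_device->sas_address_parent);
/* ... later, on removal ... */
mpt2sas_transport_port_remove(ioc, sas_device->sas_address,
    sas_device->sas_address_parent);
```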
diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c
index 594a389c6526..cf44b355bc97 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_config.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_config.c
@@ -51,6 +51,7 @@
 #include <linux/workqueue.h>
 #include <linux/delay.h>
 #include <linux/pci.h>
+#include <linux/slab.h>
 
 #include "mpt2sas_base.h"
 
@@ -324,7 +325,9 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
 	if (r != 0)
 		goto out;
 	if (mpi_request->Action ==
-	    MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT) {
+	    MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT ||
+	    mpi_request->Action ==
+	    MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM) {
 		ioc->base_add_sg_single(&mpi_request->PageBufferSGE,
 		    MPT2_CONFIG_COMMON_WRITE_SGLFLAGS | mem.sz,
 		    mem.page_dma);
@@ -882,7 +885,7 @@ mpt2sas_config_get_sas_iounit_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
 }
 
 /**
- * mpt2sas_config_get_sas_iounit_pg1 - obtain sas iounit page 0
+ * mpt2sas_config_get_sas_iounit_pg1 - obtain sas iounit page 1
  * @ioc: per adapter object
  * @mpi_reply: reply mf payload returned from firmware
  * @config_page: contents of the config page
@@ -907,7 +910,7 @@ mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
 	mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
 	mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
 	mpi_request.Header.PageNumber = 1;
-	mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE0_PAGEVERSION;
+	mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE1_PAGEVERSION;
 	mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
 	r = _config_request(ioc, &mpi_request, mpi_reply,
 	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
@@ -922,6 +925,49 @@ mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
 }
 
 /**
+ * mpt2sas_config_set_sas_iounit_pg1 - send sas iounit page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @sz: size of buffer passed in config_page
+ * Context: sleep.
+ *
+ * Calling function should call config_get_number_hba_phys prior to
+ * this function, so enough memory is allocated for config_page.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt2sas_config_set_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+    *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, u16 sz)
+{
+	Mpi2ConfigRequest_t mpi_request;
+	int r;
+
+	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+	mpi_request.Function = MPI2_FUNCTION_CONFIG;
+	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+	mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+	mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
+	mpi_request.Header.PageNumber = 1;
+	mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE1_PAGEVERSION;
+	mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
+	r = _config_request(ioc, &mpi_request, mpi_reply,
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+	if (r)
+		goto out;
+
+	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+	_config_request(ioc, &mpi_request, mpi_reply,
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
+	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM;
+	r = _config_request(ioc, &mpi_request, mpi_reply,
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
+ out:
+	return r;
+}
+
+/**
  * mpt2sas_config_get_expander_pg0 - obtain expander page 0
  * @ioc: per adapter object
  * @mpi_reply: reply mf payload returned from firmware
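Two related fixes in the file above: _config_request() now attaches a data SGE for both write actions (previously a WRITE_NVRAM request went out with a zero-length SGE and carried no page data), and the new mpt2sas_config_set_sas_iounit_pg1() issues WRITE_CURRENT followed by WRITE_NVRAM, so the setting takes effect immediately and also persists across reboots. A minimal userspace sketch of the both-actions check, using hypothetical stand-ins for the MPI2_CONFIG_ACTION_* constants:

#include <stdbool.h>
#include <stdio.h>

/* illustrative stand-ins for the MPI2_CONFIG_ACTION_* constants */
enum cfg_action {
	ACTION_PAGE_HEADER,
	ACTION_PAGE_READ_CURRENT,
	ACTION_PAGE_WRITE_CURRENT,
	ACTION_PAGE_WRITE_NVRAM,
};

/* both write actions carry page data to the controller, so both
 * need a real scatter-gather entry, not a zero-length one */
static bool action_needs_write_sge(enum cfg_action action)
{
	return action == ACTION_PAGE_WRITE_CURRENT ||
	       action == ACTION_PAGE_WRITE_NVRAM;
}

int main(void)
{
	printf("WRITE_CURRENT needs data SGE: %d\n",
	    action_needs_write_sge(ACTION_PAGE_WRITE_CURRENT));
	printf("WRITE_NVRAM needs data SGE: %d\n",
	    action_needs_write_sge(ACTION_PAGE_WRITE_NVRAM));
	return 0;
}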
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index 57d724633906..fa9bf83819d5 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -740,7 +740,7 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
 		Mpi2SCSIIORequest_t *scsiio_request =
 		    (Mpi2SCSIIORequest_t *)mpi_request;
 		scsiio_request->SenseBufferLowAddress =
-		    (u32)mpt2sas_base_get_sense_buffer_dma(ioc, smid);
+		    mpt2sas_base_get_sense_buffer_dma(ioc, smid);
 		priv_sense = mpt2sas_base_get_sense_buffer(ioc, smid);
 		memset(priv_sense, 0, SCSI_SENSE_BUFFERSIZE);
 		mpt2sas_base_put_smid_scsi_io(ioc, smid,
@@ -848,8 +848,9 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
 			printk(MPT2SAS_DEBUG_FMT "TASK_MGMT: "
 			    "IOCStatus(0x%04x), IOCLogInfo(0x%08x), "
 			    "TerminationCount(0x%08x)\n", ioc->name,
-			    tm_reply->IOCStatus, tm_reply->IOCLogInfo,
-			    tm_reply->TerminationCount);
+			    le16_to_cpu(tm_reply->IOCStatus),
+			    le32_to_cpu(tm_reply->IOCLogInfo),
+			    le32_to_cpu(tm_reply->TerminationCount));
 		}
 #endif
 	/* copy out xdata to user */
@@ -890,12 +891,14 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
 
  issue_host_reset:
 	if (issue_reset) {
+		ret = -ENODATA;
 		if ((mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
 		    mpi_request->Function ==
 		    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
 			printk(MPT2SAS_INFO_FMT "issue target reset: handle "
 			    "= (0x%04x)\n", ioc->name,
 			    mpi_request->FunctionDependent1);
+			mpt2sas_halt_firmware(ioc);
 			mutex_lock(&ioc->tm_cmds.mutex);
 			mpt2sas_scsih_issue_tm(ioc,
 			    mpi_request->FunctionDependent1, 0,
@@ -1229,7 +1232,7 @@ _ctl_btdh_mapping(void __user *arg)
 /**
  * _ctl_diag_capability - return diag buffer capability
  * @ioc: per adapter object
- * @buffer_type: specifies either TRACE or SNAPSHOT
+ * @buffer_type: specifies either TRACE, SNAPSHOT, or EXTENDED
 *
 * returns 1 when diag buffer support is enabled in firmware
 */
@@ -1249,24 +1252,25 @@ _ctl_diag_capability(struct MPT2SAS_ADAPTER *ioc, u8 buffer_type)
 		    MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER)
 			rc = 1;
 		break;
+	case MPI2_DIAG_BUF_TYPE_EXTENDED:
+		if (ioc->facts.IOCCapabilities &
+		    MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER)
+			rc = 1;
 	}
 
 	return rc;
 }
 
 /**
- * _ctl_diag_register - application register with driver
- * @arg - user space buffer containing ioctl content
- * @state - NON_BLOCKING or BLOCKING
+ * _ctl_diag_register_2 - wrapper for registering diag buffer support
+ * @ioc: per adapter object
+ * @diag_register: the diag_register struct passed in from user space
 *
- * This will allow the driver to setup any required buffers that will be
- * needed by firmware to communicate with the driver.
 */
 static long
-_ctl_diag_register(void __user *arg, enum block_state state)
+_ctl_diag_register_2(struct MPT2SAS_ADAPTER *ioc,
+    struct mpt2_diag_register *diag_register)
 {
-	struct mpt2_diag_register karg;
-	struct MPT2SAS_ADAPTER *ioc;
 	int rc, i;
 	void *request_data = NULL;
 	dma_addr_t request_data_dma;
@@ -1279,18 +1283,17 @@ _ctl_diag_register(void __user *arg, enum block_state state)
 	u16 ioc_status;
 	u8 issue_reset = 0;
 
-	if (copy_from_user(&karg, arg, sizeof(karg))) {
-		printk(KERN_ERR "failure at %s:%d/%s()!\n",
-		    __FILE__, __LINE__, __func__);
-		return -EFAULT;
-	}
-	if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
-		return -ENODEV;
-
 	dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name,
 	    __func__));
 
-	buffer_type = karg.buffer_type;
+	if (ioc->ctl_cmds.status != MPT2_CMD_NOT_USED) {
+		printk(MPT2SAS_ERR_FMT "%s: ctl_cmd in use\n",
+		    ioc->name, __func__);
+		rc = -EAGAIN;
+		goto out;
+	}
+
+	buffer_type = diag_register->buffer_type;
 	if (!_ctl_diag_capability(ioc, buffer_type)) {
 		printk(MPT2SAS_ERR_FMT "%s: doesn't have capability for "
 		    "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type);
@@ -1305,24 +1308,12 @@ _ctl_diag_register(void __user *arg, enum block_state state)
 		return -EINVAL;
 	}
 
-	if (karg.requested_buffer_size % 4) {
+	if (diag_register->requested_buffer_size % 4) {
 		printk(MPT2SAS_ERR_FMT "%s: the requested_buffer_size "
 		    "is not 4 byte aligned\n", ioc->name, __func__);
 		return -EINVAL;
 	}
 
-	if (state == NON_BLOCKING && !mutex_trylock(&ioc->ctl_cmds.mutex))
-		return -EAGAIN;
-	else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex))
-		return -ERESTARTSYS;
-
-	if (ioc->ctl_cmds.status != MPT2_CMD_NOT_USED) {
-		printk(MPT2SAS_ERR_FMT "%s: ctl_cmd in use\n",
-		    ioc->name, __func__);
-		rc = -EAGAIN;
-		goto out;
-	}
-
 	smid = mpt2sas_base_get_smid(ioc, ioc->ctl_cb_idx);
 	if (!smid) {
 		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
@@ -1338,12 +1329,12 @@ _ctl_diag_register(void __user *arg, enum block_state state)
 	ioc->ctl_cmds.smid = smid;
 
 	request_data = ioc->diag_buffer[buffer_type];
-	request_data_sz = karg.requested_buffer_size;
-	ioc->unique_id[buffer_type] = karg.unique_id;
+	request_data_sz = diag_register->requested_buffer_size;
+	ioc->unique_id[buffer_type] = diag_register->unique_id;
 	ioc->diag_buffer_status[buffer_type] = 0;
-	memcpy(ioc->product_specific[buffer_type], karg.product_specific,
-	    MPT2_PRODUCT_SPECIFIC_DWORDS);
-	ioc->diagnostic_flags[buffer_type] = karg.diagnostic_flags;
+	memcpy(ioc->product_specific[buffer_type],
+	    diag_register->product_specific, MPT2_PRODUCT_SPECIFIC_DWORDS);
+	ioc->diagnostic_flags[buffer_type] = diag_register->diagnostic_flags;
 
 	if (request_data) {
 		request_data_dma = ioc->diag_buffer_dma[buffer_type];
@@ -1373,8 +1364,8 @@ _ctl_diag_register(void __user *arg, enum block_state state)
 	}
 
 	mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
-	mpi_request->BufferType = karg.buffer_type;
-	mpi_request->Flags = cpu_to_le32(karg.diagnostic_flags);
+	mpi_request->BufferType = diag_register->buffer_type;
+	mpi_request->Flags = cpu_to_le32(diag_register->diagnostic_flags);
 	mpi_request->BufferAddress = cpu_to_le64(request_data_dma);
 	mpi_request->BufferLength = cpu_to_le32(request_data_sz);
 	mpi_request->VF_ID = 0; /* TODO */
@@ -1422,7 +1413,7 @@ _ctl_diag_register(void __user *arg, enum block_state state)
 	} else {
 		printk(MPT2SAS_DEBUG_FMT "%s: ioc_status(0x%04x) "
 		    "log_info(0x%08x)\n", ioc->name, __func__,
-		    ioc_status, mpi_reply->IOCLogInfo);
+		    ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
 		rc = -EFAULT;
 	}
 
@@ -1438,6 +1429,83 @@ _ctl_diag_register(void __user *arg, enum block_state state)
 	    request_data, request_data_dma);
 
 	ioc->ctl_cmds.status = MPT2_CMD_NOT_USED;
+	return rc;
+}
+
+/**
+ * mpt2sas_enable_diag_buffer - enabling diag_buffers support driver load time
+ * @ioc: per adapter object
+ * @bits_to_register: bitwise field where trace is bit 0, and snapshot is bit 1
+ *
+ * This is called when command line option diag_buffer_enable is enabled
+ * at driver load time.
+ */
+void
+mpt2sas_enable_diag_buffer(struct MPT2SAS_ADAPTER *ioc, u8 bits_to_register)
+{
+	struct mpt2_diag_register diag_register;
+
+	memset(&diag_register, 0, sizeof(struct mpt2_diag_register));
+
+	if (bits_to_register & 1) {
+		printk(MPT2SAS_INFO_FMT "registering trace buffer support\n",
+		    ioc->name);
+		diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
+		/* register for 1MB buffers  */
+		diag_register.requested_buffer_size = (1024 * 1024);
+		diag_register.unique_id = 0x7075900;
+		_ctl_diag_register_2(ioc, &diag_register);
+	}
+
+	if (bits_to_register & 2) {
+		printk(MPT2SAS_INFO_FMT "registering snapshot buffer support\n",
+		    ioc->name);
+		diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_SNAPSHOT;
+		/* register for 2MB buffers  */
+		diag_register.requested_buffer_size = 2 * (1024 * 1024);
+		diag_register.unique_id = 0x7075901;
+		_ctl_diag_register_2(ioc, &diag_register);
+	}
+
+	if (bits_to_register & 4) {
+		printk(MPT2SAS_INFO_FMT "registering extended buffer support\n",
+		    ioc->name);
+		diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_EXTENDED;
+		/* register for 2MB buffers  */
+		diag_register.requested_buffer_size = 2 * (1024 * 1024);
+		diag_register.unique_id = 0x7075901;
+		_ctl_diag_register_2(ioc, &diag_register);
+	}
+}
+
+/**
+ * _ctl_diag_register - application register with driver
+ * @arg - user space buffer containing ioctl content
+ * @state - NON_BLOCKING or BLOCKING
+ *
+ * This will allow the driver to setup any required buffers that will be
+ * needed by firmware to communicate with the driver.
+ */
+static long
+_ctl_diag_register(void __user *arg, enum block_state state)
+{
+	struct mpt2_diag_register karg;
+	struct MPT2SAS_ADAPTER *ioc;
+	long rc;
+
+	if (copy_from_user(&karg, arg, sizeof(karg))) {
+		printk(KERN_ERR "failure at %s:%d/%s()!\n",
+		    __FILE__, __LINE__, __func__);
+		return -EFAULT;
+	}
+	if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
+		return -ENODEV;
+
+	if (state == NON_BLOCKING && !mutex_trylock(&ioc->ctl_cmds.mutex))
+		return -EAGAIN;
+	else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex))
+		return -ERESTARTSYS;
+	rc = _ctl_diag_register_2(ioc, &karg);
 	mutex_unlock(&ioc->ctl_cmds.mutex);
 	return rc;
 }
@@ -1600,7 +1668,7 @@ _ctl_diag_query(void __user *arg)
 /**
  * _ctl_send_release - Diag Release Message
  * @ioc: per adapter object
- * @buffer_type - specifies either TRACE or SNAPSHOT
+ * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED
  * @issue_reset - specifies whether host reset is required.
 *
 */
@@ -1690,7 +1758,7 @@ _ctl_send_release(struct MPT2SAS_ADAPTER *ioc, u8 buffer_type, u8 *issue_reset)
 	} else {
 		printk(MPT2SAS_DEBUG_FMT "%s: ioc_status(0x%04x) "
 		    "log_info(0x%08x)\n", ioc->name, __func__,
-		    ioc_status, mpi_reply->IOCLogInfo);
+		    ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
 		rc = -EFAULT;
 	}
 
@@ -1951,7 +2019,7 @@ _ctl_diag_read_buffer(void __user *arg, enum block_state state)
 	} else {
 		printk(MPT2SAS_DEBUG_FMT "%s: ioc_status(0x%04x) "
 		    "log_info(0x%08x)\n", ioc->name, __func__,
-		    ioc_status, mpi_reply->IOCLogInfo);
+		    ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
 		rc = -EFAULT;
 	}
 
@@ -2135,14 +2203,10 @@ _ctl_compat_mpt_command(struct file *file, unsigned cmd, unsigned long arg)
 	karg.data_out_size = karg32.data_out_size;
 	karg.max_sense_bytes = karg32.max_sense_bytes;
 	karg.data_sge_offset = karg32.data_sge_offset;
-	memcpy(&karg.reply_frame_buf_ptr, &karg32.reply_frame_buf_ptr,
-	    sizeof(uint32_t));
-	memcpy(&karg.data_in_buf_ptr, &karg32.data_in_buf_ptr,
-	    sizeof(uint32_t));
-	memcpy(&karg.data_out_buf_ptr, &karg32.data_out_buf_ptr,
-	    sizeof(uint32_t));
-	memcpy(&karg.sense_data_ptr, &karg32.sense_data_ptr,
-	    sizeof(uint32_t));
+	karg.reply_frame_buf_ptr = compat_ptr(karg32.reply_frame_buf_ptr);
+	karg.data_in_buf_ptr = compat_ptr(karg32.data_in_buf_ptr);
+	karg.data_out_buf_ptr = compat_ptr(karg32.data_out_buf_ptr);
+	karg.sense_data_ptr = compat_ptr(karg32.sense_data_ptr);
 	state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING : BLOCKING;
 	return _ctl_do_mpt_command(ioc, karg, &uarg->mf, state);
 }
@@ -2474,6 +2538,43 @@ _ctl_logging_level_store(struct device *cdev, struct device_attribute *attr,
 static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR,
     _ctl_logging_level_show, _ctl_logging_level_store);
 
+/* device attributes */
+/*
+ * _ctl_fwfault_debug_show - show/store fwfault_debug
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * mpt2sas_fwfault_debug is command line option
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_fwfault_debug_show(struct device *cdev,
+    struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", ioc->fwfault_debug);
+}
+static ssize_t
+_ctl_fwfault_debug_store(struct device *cdev,
+    struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+	int val = 0;
+
+	if (sscanf(buf, "%d", &val) != 1)
+		return -EINVAL;
+
+	ioc->fwfault_debug = val;
+	printk(MPT2SAS_INFO_FMT "fwfault_debug=%d\n", ioc->name,
+	    ioc->fwfault_debug);
+	return strlen(buf);
+}
+static DEVICE_ATTR(fwfault_debug, S_IRUGO | S_IWUSR,
+    _ctl_fwfault_debug_show, _ctl_fwfault_debug_store);
+
 struct device_attribute *mpt2sas_host_attrs[] = {
 	&dev_attr_version_fw,
 	&dev_attr_version_bios,
@@ -2487,13 +2588,12 @@ struct device_attribute *mpt2sas_host_attrs[] = {
 	&dev_attr_io_delay,
 	&dev_attr_device_delay,
 	&dev_attr_logging_level,
+	&dev_attr_fwfault_debug,
 	&dev_attr_fw_queue_depth,
 	&dev_attr_host_sas_address,
 	NULL,
 };
 
-/* device attributes */
-
 /**
  * _ctl_device_sas_address_show - sas address
  * @cdev - pointer to embedded class device
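A recurring fix in the file above is wrapping MPI reply fields in le16_to_cpu()/le32_to_cpu() before printing: the IOC produces little-endian data, so reading the raw fields is only correct on little-endian hosts and prints byte-swapped garbage on big-endian ones. A compilable userspace sketch of the conversion; the kernel provides le16_to_cpu/le32_to_cpu, and the my_* versions below are stand-ins written only for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* userspace stand-ins for the kernel's le16_to_cpu/le32_to_cpu */
static uint16_t my_le16_to_cpu(uint16_t v)
{
	uint8_t b[2];

	memcpy(b, &v, sizeof(b));
	return (uint16_t)(b[0] | (uint16_t)b[1] << 8);
}

static uint32_t my_le32_to_cpu(uint32_t v)
{
	uint8_t b[4];

	memcpy(b, &v, sizeof(b));
	return b[0] | (uint32_t)b[1] << 8 |
	       (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}

int main(void)
{
	/* bytes as they would arrive from the IOC (little-endian) */
	uint8_t status_wire[2] = { 0x04, 0x00 };		/* 0x0004 */
	uint8_t loginfo_wire[4] = { 0x78, 0x56, 0x34, 0x12 };	/* 0x12345678 */
	uint16_t status_raw;
	uint32_t loginfo_raw;

	memcpy(&status_raw, status_wire, sizeof(status_raw));
	memcpy(&loginfo_raw, loginfo_wire, sizeof(loginfo_raw));
	/* correct on a host of any endianness */
	printf("IOCStatus = 0x%04x, IOCLogInfo = 0x%08x\n",
	    my_le16_to_cpu(status_raw), my_le32_to_cpu(loginfo_raw));
	return 0;
}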
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.h b/drivers/scsi/mpt2sas/mpt2sas_ctl.h
index 211f296dd191..8a5eeb1a5c84 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.h
@@ -313,7 +313,7 @@ struct mpt2_ioctl_btdh_mapping {
  * struct mpt2_diag_register - application register with driver
  * @hdr - generic header
  * @reserved -
- * @buffer_type - specifies either TRACE or SNAPSHOT
+ * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED
  * @application_flags - misc flags
  * @diagnostic_flags - specifies flags affecting command processing
  * @product_specific - product specific information
@@ -352,7 +352,7 @@ struct mpt2_diag_unregister {
  * struct mpt2_diag_query - query relevant info associated with diag buffers
  * @hdr - generic header
  * @reserved -
- * @buffer_type - specifies either TRACE or SNAPSHOT
+ * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED
  * @application_flags - misc flags
  * @diagnostic_flags - specifies flags affecting command processing
  * @product_specific - product specific information
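Tying the header change back to the ctl.c changes: the new load-time path (mpt2sas_enable_diag_buffer() above) treats its argument as a bitmask, with bit 0 requesting a TRACE buffer, bit 1 SNAPSHOT, and bit 2 EXTENDED, and any combination may be set at once. A small sketch of that decoding, with invented names standing in for the driver's; the module-parameter example in the comment is an assumption based on the docblock's mention of the diag_buffer_enable command line option:

#include <stdint.h>
#include <stdio.h>

/* illustrative mirror of the bit layout used above:
 * bit 0 = trace, bit 1 = snapshot, bit 2 = extended */
#define DIAG_TRACE	(1 << 0)
#define DIAG_SNAPSHOT	(1 << 1)
#define DIAG_EXTENDED	(1 << 2)

static void show_requested_buffers(uint8_t bits_to_register)
{
	if (bits_to_register & DIAG_TRACE)
		printf("would register TRACE buffer (1MB)\n");
	if (bits_to_register & DIAG_SNAPSHOT)
		printf("would register SNAPSHOT buffer (2MB)\n");
	if (bits_to_register & DIAG_EXTENDED)
		printf("would register EXTENDED buffer (2MB)\n");
}

int main(void)
{
	/* e.g. a hypothetical diag_buffer_enable=3 -> trace + snapshot */
	show_requested_buffers(3);
	return 0;
}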
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 86ab32d7ab15..be171ed682e0 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -52,6 +52,8 @@
 #include <linux/delay.h>
 #include <linux/pci.h>
 #include <linux/interrupt.h>
+#include <linux/raid_class.h>
+#include <linux/slab.h>
 
 #include "mpt2sas_base.h"
 
@@ -76,6 +78,7 @@ static u8 tm_cb_idx = -1;
 static u8 ctl_cb_idx = -1;
 static u8 base_cb_idx = -1;
 static u8 transport_cb_idx = -1;
+static u8 scsih_cb_idx = -1;
 static u8 config_cb_idx = -1;
 static int mpt_ids;
 
@@ -132,6 +135,9 @@ struct fw_event_work {
 	void *event_data;
 };
 
+/* raid transport support */
+static struct raid_template *mpt2sas_raid_template;
+
 /**
  * struct _scsi_io_transfer - scsi io transfer
  * @handle: sas device handle (assigned by firmware)
@@ -196,10 +202,28 @@ static struct pci_device_id scsih_pci_table[] = {
 		PCI_ANY_ID, PCI_ANY_ID },
 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3,
 		PCI_ANY_ID, PCI_ANY_ID },
+	/* Meteor ~ 2116 */
 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1,
 		PCI_ANY_ID, PCI_ANY_ID },
 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2,
 		PCI_ANY_ID, PCI_ANY_ID },
+	/* Thunderbolt ~ 2208 */
+	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1,
+		PCI_ANY_ID, PCI_ANY_ID },
+	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2,
+		PCI_ANY_ID, PCI_ANY_ID },
+	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3,
+		PCI_ANY_ID, PCI_ANY_ID },
+	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4,
+		PCI_ANY_ID, PCI_ANY_ID },
+	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5,
+		PCI_ANY_ID, PCI_ANY_ID },
+	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6,
+		PCI_ANY_ID, PCI_ANY_ID },
+	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_7,
+		PCI_ANY_ID, PCI_ANY_ID },
+	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_8,
+		PCI_ANY_ID, PCI_ANY_ID },
 	{0}	/* Terminating entry */
 };
 MODULE_DEVICE_TABLE(pci, scsih_pci_table);
@@ -317,6 +341,47 @@ _scsih_is_boot_device(u64 sas_address, u64 device_name,
 }
 
 /**
+ * _scsih_get_sas_address - set the sas_address for given device handle
+ * @handle: device handle
+ * @sas_address: sas address
+ *
+ * Returns 0 success, non-zero when failure
+ */
+static int
+_scsih_get_sas_address(struct MPT2SAS_ADAPTER *ioc, u16 handle,
+    u64 *sas_address)
+{
+	Mpi2SasDevicePage0_t sas_device_pg0;
+	Mpi2ConfigReply_t mpi_reply;
+	u32 ioc_status;
+
+	if (handle <= ioc->sas_hba.num_phys) {
+		*sas_address = ioc->sas_hba.sas_address;
+		return 0;
+	} else
+		*sas_address = 0;
+
+	if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
+		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+		    ioc->name, __FILE__, __LINE__, __func__);
+		return -ENXIO;
+	}
+
+	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+	    MPI2_IOCSTATUS_MASK;
+	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+		printk(MPT2SAS_ERR_FMT "handle(0x%04x), ioc_status(0x%04x)"
+		    "\nfailure at %s:%d/%s()!\n", ioc->name, handle, ioc_status,
+		    __FILE__, __LINE__, __func__);
+		return -EIO;
+	}
+
+	*sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+	return 0;
+}
+
+/**
  * _scsih_determine_boot_device - determine boot device.
  * @ioc: per adapter object
  * @device: either sas_device or raid_device object
@@ -510,8 +575,6 @@ _scsih_sas_device_add(struct MPT2SAS_ADAPTER *ioc,
 	struct _sas_device *sas_device)
 {
 	unsigned long flags;
-	u16 handle, parent_handle;
-	u64 sas_address;
 
 	dewtprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: handle"
 	    "(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__,
@@ -521,10 +584,8 @@ _scsih_sas_device_add(struct MPT2SAS_ADAPTER *ioc,
 	list_add_tail(&sas_device->list, &ioc->sas_device_list);
 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 
-	handle = sas_device->handle;
-	parent_handle = sas_device->parent_handle;
-	sas_address = sas_device->sas_address;
-	if (!mpt2sas_transport_port_add(ioc, handle, parent_handle))
+	if (!mpt2sas_transport_port_add(ioc, sas_device->handle,
+	    sas_device->sas_address_parent))
 		_scsih_sas_device_remove(ioc, sas_device);
 }
 
@@ -553,31 +614,6 @@ _scsih_sas_device_init_add(struct MPT2SAS_ADAPTER *ioc,
 }
 
 /**
- * mpt2sas_scsih_expander_find_by_handle - expander device search
- * @ioc: per adapter object
- * @handle: expander handle (assigned by firmware)
- * Context: Calling function should acquire ioc->sas_device_lock
- *
- * This searches for expander device based on handle, then returns the
- * sas_node object.
- */
-struct _sas_node *
-mpt2sas_scsih_expander_find_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle)
-{
-	struct _sas_node *sas_expander, *r;
-
-	r = NULL;
-	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
-		if (sas_expander->handle != handle)
-			continue;
-		r = sas_expander;
-		goto out;
-	}
- out:
-	return r;
-}
-
-/**
  * _scsih_raid_device_find_by_id - raid device search
  * @ioc: per adapter object
  * @id: sas device target id
@@ -699,6 +735,31 @@ _scsih_raid_device_remove(struct MPT2SAS_ADAPTER *ioc,
 }
 
 /**
+ * mpt2sas_scsih_expander_find_by_handle - expander device search
+ * @ioc: per adapter object
+ * @handle: expander handle (assigned by firmware)
+ * Context: Calling function should acquire ioc->sas_device_lock
+ *
+ * This searches for expander device based on handle, then returns the
+ * sas_node object.
+ */
+struct _sas_node *
+mpt2sas_scsih_expander_find_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+{
+	struct _sas_node *sas_expander, *r;
+
+	r = NULL;
+	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
+		if (sas_expander->handle != handle)
+			continue;
+		r = sas_expander;
+		goto out;
+	}
+ out:
+	return r;
+}
+
+/**
  * mpt2sas_scsih_expander_find_by_sas_address - expander device search
  * @ioc: per adapter object
  * @sas_address: sas address
@@ -1043,17 +1104,46 @@ _scsih_build_scatter_gather(struct MPT2SAS_ADAPTER *ioc,
  * _scsih_change_queue_depth - setting device queue depth
  * @sdev: scsi device struct
  * @qdepth: requested queue depth
+ * @reason: calling context
 *
 * Returns queue depth.
 */
 static int
-_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
+_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
 {
 	struct Scsi_Host *shost = sdev->host;
 	int max_depth;
 	int tag_type;
+	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+	struct MPT2SAS_DEVICE *sas_device_priv_data;
+	struct MPT2SAS_TARGET *sas_target_priv_data;
+	struct _sas_device *sas_device;
+	unsigned long flags;
+
+	if (reason != SCSI_QDEPTH_DEFAULT)
+		return -EOPNOTSUPP;
 
 	max_depth = shost->can_queue;
+
+	/* limit max device queue for SATA to 32 */
+	sas_device_priv_data = sdev->hostdata;
+	if (!sas_device_priv_data)
+		goto not_sata;
+	sas_target_priv_data = sas_device_priv_data->sas_target;
+	if (!sas_target_priv_data)
+		goto not_sata;
+	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
+		goto not_sata;
+	spin_lock_irqsave(&ioc->sas_device_lock, flags);
+	sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+	   sas_device_priv_data->sas_target->sas_address);
+	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+	if (sas_device && sas_device->device_info &
+	    MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
+		max_depth = MPT2SAS_SATA_QUEUE_DEPTH;
+
+ not_sata:
+
 	if (!sdev->tagged_supported)
 		max_depth = 1;
 	if (qdepth > max_depth)
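The reworked _scsih_change_queue_depth() above adds two behaviors: it rejects any reason other than SCSI_QDEPTH_DEFAULT with -EOPNOTSUPP, and it caps the effective depth for SATA end devices at MPT2SAS_SATA_QUEUE_DEPTH before the existing clamping runs, since NCQ allows at most 32 outstanding commands. The clamp itself reduces to a small pure function, sketched here in standalone form; the constant and names below are illustrative, not the driver's:

#include <stdbool.h>
#include <stdio.h>

/* illustrative stand-in; the driver's value lives in its headers */
#define SATA_QUEUE_DEPTH	32	/* NCQ limit */

static int clamp_queue_depth(int requested, int can_queue,
    bool is_sata, bool tagged_supported)
{
	int max_depth = can_queue;

	if (is_sata)
		max_depth = SATA_QUEUE_DEPTH;
	if (!tagged_supported)
		max_depth = 1;
	if (requested > max_depth)
		requested = max_depth;
	return requested;
}

int main(void)
{
	/* a SATA disk asking for 254 ends up at 32 */
	printf("depth = %d\n", clamp_queue_depth(254, 1024, true, true));
	return 0;
}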
@@ -1220,7 +1310,6 @@ _scsih_slave_alloc(struct scsi_device *sdev)
 	struct MPT2SAS_DEVICE *sas_device_priv_data;
 	struct scsi_target *starget;
 	struct _raid_device *raid_device;
-	struct _sas_device *sas_device;
 	unsigned long flags;
 
 	sas_device_priv_data = kzalloc(sizeof(struct scsi_device), GFP_KERNEL);
@@ -1247,21 +1336,8 @@ _scsih_slave_alloc(struct scsi_device *sdev)
 		if (raid_device)
 			raid_device->sdev = sdev; /* raid is single lun */
 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
-	} else {
-		/* set TLR bit for SSP devices */
-		if (!(ioc->facts.IOCCapabilities &
-		    MPI2_IOCFACTS_CAPABILITY_TLR))
-			goto out;
-		spin_lock_irqsave(&ioc->sas_device_lock, flags);
-		sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
-		   sas_device_priv_data->sas_target->sas_address);
-		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-		if (sas_device && sas_device->device_info &
-		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)
-			sas_device_priv_data->flags |= MPT_DEVICE_TLR_ON;
 	}
 
- out:
 	return 0;
 }
 
@@ -1334,6 +1410,140 @@ _scsih_display_sata_capabilities(struct MPT2SAS_ADAPTER *ioc,
 }
 
 /**
+ * _scsih_is_raid - return boolean indicating device is raid volume
+ * @dev the device struct object
+ */
+static int
+_scsih_is_raid(struct device *dev)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+
+	return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
+}
+
+/**
+ * _scsih_get_resync - get raid volume resync percent complete
+ * @dev the device struct object
+ */
+static void
+_scsih_get_resync(struct device *dev)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
+	static struct _raid_device *raid_device;
+	unsigned long flags;
+	Mpi2RaidVolPage0_t vol_pg0;
+	Mpi2ConfigReply_t mpi_reply;
+	u32 volume_status_flags;
+	u8 percent_complete = 0;
+
+	spin_lock_irqsave(&ioc->raid_device_lock, flags);
+	raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
+	    sdev->channel);
+	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+
+	if (!raid_device)
+		goto out;
+
+	if (mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
+	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle,
+	     sizeof(Mpi2RaidVolPage0_t))) {
+		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+		    ioc->name, __FILE__, __LINE__, __func__);
+		goto out;
+	}
+
+	volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
+	if (volume_status_flags & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS)
+		percent_complete = raid_device->percent_complete;
+ out:
+	raid_set_resync(mpt2sas_raid_template, dev, percent_complete);
+}
+
+/**
+ * _scsih_get_state - get raid volume level
+ * @dev the device struct object
+ */
+static void
+_scsih_get_state(struct device *dev)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
+	static struct _raid_device *raid_device;
+	unsigned long flags;
+	Mpi2RaidVolPage0_t vol_pg0;
+	Mpi2ConfigReply_t mpi_reply;
+	u32 volstate;
+	enum raid_state state = RAID_STATE_UNKNOWN;
+
+	spin_lock_irqsave(&ioc->raid_device_lock, flags);
+	raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
+	    sdev->channel);
+	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+
+	if (!raid_device)
+		goto out;
+
+	if (mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
+	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle,
+	     sizeof(Mpi2RaidVolPage0_t))) {
+		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+		    ioc->name, __FILE__, __LINE__, __func__);
+		goto out;
+	}
+
+	volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags);
+	if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
+		state = RAID_STATE_RESYNCING;
+		goto out;
+	}
+
+	switch (vol_pg0.VolumeState) {
+	case MPI2_RAID_VOL_STATE_OPTIMAL:
+	case MPI2_RAID_VOL_STATE_ONLINE:
+		state = RAID_STATE_ACTIVE;
+		break;
+	case MPI2_RAID_VOL_STATE_DEGRADED:
+		state = RAID_STATE_DEGRADED;
+		break;
+	case MPI2_RAID_VOL_STATE_FAILED:
+	case MPI2_RAID_VOL_STATE_MISSING:
+		state = RAID_STATE_OFFLINE;
+		break;
+	}
+ out:
+	raid_set_state(mpt2sas_raid_template, dev, state);
+}
+
+/**
+ * _scsih_set_level - set raid level
+ * @sdev: scsi device struct
+ * @raid_device: raid_device object
+ */
+static void
+_scsih_set_level(struct scsi_device *sdev, struct _raid_device *raid_device)
+{
+	enum raid_level level = RAID_LEVEL_UNKNOWN;
+
+	switch (raid_device->volume_type) {
+	case MPI2_RAID_VOL_TYPE_RAID0:
+		level = RAID_LEVEL_0;
+		break;
+	case MPI2_RAID_VOL_TYPE_RAID10:
+		level = RAID_LEVEL_10;
+		break;
+	case MPI2_RAID_VOL_TYPE_RAID1E:
+		level = RAID_LEVEL_1E;
+		break;
+	case MPI2_RAID_VOL_TYPE_RAID1:
+		level = RAID_LEVEL_1;
+		break;
+	}
+
+	raid_set_level(mpt2sas_raid_template, &sdev->sdev_gendev, level);
+}
+
+/**
  * _scsih_get_volume_capabilities - volume capabilities
  * @ioc: per adapter object
  * @sas_device: the raid_device object
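The raid_class hooks added above translate firmware volume state into the generic enum raid_state: a volume with RESYNC_IN_PROGRESS set reports RAID_STATE_RESYNCING regardless of its VolumeState; otherwise OPTIMAL/ONLINE map to ACTIVE, DEGRADED to DEGRADED, FAILED/MISSING to OFFLINE, and anything else stays UNKNOWN. The mapping can be exercised in isolation; the sketch below mirrors it with invented constants in place of the MPI2 and raid_class ones:

#include <stdio.h>

/* invented mirrors of the MPI2 volume states and raid_class states */
enum vol_state { VOL_OPTIMAL, VOL_ONLINE, VOL_DEGRADED, VOL_FAILED,
		 VOL_MISSING, VOL_INITIALIZING };
enum raid_state { RS_UNKNOWN, RS_ACTIVE, RS_DEGRADED, RS_RESYNCING,
		  RS_OFFLINE };

static enum raid_state map_state(enum vol_state vs, int resync_in_progress)
{
	/* resync wins over the steady-state value, as in _scsih_get_state() */
	if (resync_in_progress)
		return RS_RESYNCING;

	switch (vs) {
	case VOL_OPTIMAL:
	case VOL_ONLINE:
		return RS_ACTIVE;
	case VOL_DEGRADED:
		return RS_DEGRADED;
	case VOL_FAILED:
	case VOL_MISSING:
		return RS_OFFLINE;
	default:
		return RS_UNKNOWN;
	}
}

int main(void)
{
	printf("degraded+resync -> %d (RS_RESYNCING)\n",
	    map_state(VOL_DEGRADED, 1));
	printf("degraded        -> %d (RS_DEGRADED)\n",
	    map_state(VOL_DEGRADED, 0));
	return 0;
}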
@@ -1394,6 +1604,32 @@ _scsih_get_volume_capabilities(struct MPT2SAS_ADAPTER *ioc,
1394} 1604}
1395 1605
1396/** 1606/**
1607 * _scsih_enable_tlr - setting TLR flags
1608 * @ioc: per adapter object
1609 * @sdev: scsi device struct
1610 *
1611 * Enabling Transaction Layer Retries for tape devices when
1612 * vpd page 0x90 is present
1613 *
1614 */
1615static void
1616_scsih_enable_tlr(struct MPT2SAS_ADAPTER *ioc, struct scsi_device *sdev)
1617{
1618 /* only for TAPE */
1619 if (sdev->type != TYPE_TAPE)
1620 return;
1621
1622 if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR))
1623 return;
1624
1625 sas_enable_tlr(sdev);
1626 sdev_printk(KERN_INFO, sdev, "TLR %s\n",
1627 sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled");
1628 return;
1629
1630}
1631
1632/**
1397 * _scsih_slave_configure - device configure routine. 1633 * _scsih_slave_configure - device configure routine.
1398 * @sdev: scsi device struct 1634 * @sdev: scsi device struct
1399 * 1635 *
@@ -1488,7 +1724,9 @@ _scsih_slave_configure(struct scsi_device *sdev)
1488 r_level, raid_device->handle, 1724 r_level, raid_device->handle,
1489 (unsigned long long)raid_device->wwid, 1725 (unsigned long long)raid_device->wwid,
1490 raid_device->num_pds, ds); 1726 raid_device->num_pds, ds);
1491 _scsih_change_queue_depth(sdev, qdepth); 1727 _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT);
1728 /* raid transport support */
1729 _scsih_set_level(sdev, raid_device);
1492 return 0; 1730 return 0;
1493 } 1731 }
1494 1732
@@ -1534,10 +1772,12 @@ _scsih_slave_configure(struct scsi_device *sdev)
1534 _scsih_display_sata_capabilities(ioc, sas_device, sdev); 1772 _scsih_display_sata_capabilities(ioc, sas_device, sdev);
1535 } 1773 }
1536 1774
1537 _scsih_change_queue_depth(sdev, qdepth); 1775 _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT);
1538 1776
1539 if (ssp_target) 1777 if (ssp_target) {
1540 sas_read_port_mode_page(sdev); 1778 sas_read_port_mode_page(sdev);
1779 _scsih_enable_tlr(ioc, sdev);
1780 }
1541 return 0; 1781 return 0;
1542} 1782}
1543 1783
@@ -1874,6 +2114,8 @@ _scsih_abort(struct scsi_cmnd *scmd)
1874 goto out; 2114 goto out;
1875 } 2115 }
1876 2116
2117 mpt2sas_halt_firmware(ioc);
2118
1877 mutex_lock(&ioc->tm_cmds.mutex); 2119 mutex_lock(&ioc->tm_cmds.mutex);
1878 handle = sas_device_priv_data->sas_target->handle; 2120 handle = sas_device_priv_data->sas_target->handle;
1879 mpt2sas_scsih_issue_tm(ioc, handle, sas_device_priv_data->lun, 2121 mpt2sas_scsih_issue_tm(ioc, handle, sas_device_priv_data->lun,
@@ -2297,7 +2539,6 @@ _scsih_block_io_to_children_attached_directly(struct MPT2SAS_ADAPTER *ioc,
2297 u16 handle; 2539 u16 handle;
2298 u16 reason_code; 2540 u16 reason_code;
2299 u8 phy_number; 2541 u8 phy_number;
2300 u8 link_rate;
2301 2542
2302 for (i = 0; i < event_data->NumEntries; i++) { 2543 for (i = 0; i < event_data->NumEntries; i++) {
2303 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); 2544 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
@@ -2308,11 +2549,6 @@ _scsih_block_io_to_children_attached_directly(struct MPT2SAS_ADAPTER *ioc,
2308 MPI2_EVENT_SAS_TOPO_RC_MASK; 2549 MPI2_EVENT_SAS_TOPO_RC_MASK;
2309 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING) 2550 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
2310 _scsih_block_io_device(ioc, handle); 2551 _scsih_block_io_device(ioc, handle);
2311 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED) {
2312 link_rate = event_data->PHY[i].LinkRate >> 4;
2313 if (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)
2314 _scsih_ublock_io_device(ioc, handle);
2315 }
2316 } 2552 }
2317} 2553}
2318 2554
@@ -2349,16 +2585,10 @@ _scsih_tm_tr_send(struct MPT2SAS_ADAPTER *ioc, u16 handle)
2349 2585
2350 spin_lock_irqsave(&ioc->sas_device_lock, flags); 2586 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2351 sas_device = _scsih_sas_device_find_by_handle(ioc, handle); 2587 sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
2352 if (!sas_device) {
2353 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2354 printk(MPT2SAS_ERR_FMT "%s: failed finding sas_device\n",
2355 ioc->name, __func__);
2356 return;
2357 }
2358 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 2588 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2359 2589
2360 /* skip is hidden raid component */ 2590 /* skip is hidden raid component */
2361 if (sas_device->hidden_raid_component) 2591 if (sas_device && sas_device->hidden_raid_component)
2362 return; 2592 return;
2363 2593
2364 smid = mpt2sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx); 2594 smid = mpt2sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
@@ -2371,18 +2601,31 @@ _scsih_tm_tr_send(struct MPT2SAS_ADAPTER *ioc, u16 handle)
2371 delayed_tr->state = MPT2SAS_REQ_SAS_CNTRL; 2601 delayed_tr->state = MPT2SAS_REQ_SAS_CNTRL;
2372 list_add_tail(&delayed_tr->list, 2602 list_add_tail(&delayed_tr->list,
2373 &ioc->delayed_tr_list); 2603 &ioc->delayed_tr_list);
2374 if (sas_device->starget) 2604 if (sas_device && sas_device->starget) {
2375 dewtprintk(ioc, starget_printk(KERN_INFO, 2605 dewtprintk(ioc, starget_printk(KERN_INFO,
2376 sas_device->starget, "DELAYED:tr:handle(0x%04x), " 2606 sas_device->starget, "DELAYED:tr:handle(0x%04x), "
2377 "(open)\n", sas_device->handle)); 2607 "(open)\n", handle));
2608 } else {
2609 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
2610 "DELAYED:tr:handle(0x%04x), (open)\n",
2611 ioc->name, handle));
2612 }
2378 return; 2613 return;
2379 } 2614 }
2380 2615
2381 if (sas_device->starget && sas_device->starget->hostdata) { 2616 if (sas_device) {
2382 sas_target_priv_data = sas_device->starget->hostdata; 2617 sas_device->state |= MPTSAS_STATE_TR_SEND;
2383 sas_target_priv_data->tm_busy = 1; 2618 sas_device->state |= MPT2SAS_REQ_SAS_CNTRL;
2384 dewtprintk(ioc, starget_printk(KERN_INFO, sas_device->starget, 2619 if (sas_device->starget && sas_device->starget->hostdata) {
2385 "tr:handle(0x%04x), (open)\n", sas_device->handle)); 2620 sas_target_priv_data = sas_device->starget->hostdata;
2621 sas_target_priv_data->tm_busy = 1;
2622 dewtprintk(ioc, starget_printk(KERN_INFO,
2623 sas_device->starget, "tr:handle(0x%04x), (open)\n",
2624 handle));
2625 }
2626 } else {
2627 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
2628 "tr:handle(0x%04x), (open)\n", ioc->name, handle));
2386 } 2629 }
2387 2630
2388 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid); 2631 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
@@ -2390,8 +2633,6 @@ _scsih_tm_tr_send(struct MPT2SAS_ADAPTER *ioc, u16 handle)
2390 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; 2633 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
2391 mpi_request->DevHandle = cpu_to_le16(handle); 2634 mpi_request->DevHandle = cpu_to_le16(handle);
2392 mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; 2635 mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
2393 sas_device->state |= MPTSAS_STATE_TR_SEND;
2394 sas_device->state |= MPT2SAS_REQ_SAS_CNTRL;
2395 mpt2sas_base_put_smid_hi_priority(ioc, smid); 2636 mpt2sas_base_put_smid_hi_priority(ioc, smid);
2396} 2637}
2397 2638
@@ -2426,21 +2667,25 @@ _scsih_sas_control_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid,
2426 2667
2427 spin_lock_irqsave(&ioc->sas_device_lock, flags); 2668 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2428 sas_device = _scsih_sas_device_find_by_handle(ioc, handle); 2669 sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
2429 if (!sas_device) {
2430 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2431 printk(MPT2SAS_ERR_FMT "%s: failed finding sas_device\n",
2432 ioc->name, __func__);
2433 return 1;
2434 }
2435 sas_device->state |= MPTSAS_STATE_CNTRL_COMPLETE;
2436 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 2670 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2437 2671
2438 if (sas_device->starget) 2672 if (sas_device) {
2439 dewtprintk(ioc, starget_printk(KERN_INFO, sas_device->starget, 2673 sas_device->state |= MPTSAS_STATE_CNTRL_COMPLETE;
2674 if (sas_device->starget)
2675 dewtprintk(ioc, starget_printk(KERN_INFO,
2676 sas_device->starget,
2677 "sc_complete:handle(0x%04x), "
2678 "ioc_status(0x%04x), loginfo(0x%08x)\n",
2679 handle, le16_to_cpu(mpi_reply->IOCStatus),
2680 le32_to_cpu(mpi_reply->IOCLogInfo)));
2681 } else {
2682 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
2440 "sc_complete:handle(0x%04x), " 2683 "sc_complete:handle(0x%04x), "
2441 "ioc_status(0x%04x), loginfo(0x%08x)\n", 2684 "ioc_status(0x%04x), loginfo(0x%08x)\n",
2442 handle, le16_to_cpu(mpi_reply->IOCStatus), 2685 ioc->name, handle, le16_to_cpu(mpi_reply->IOCStatus),
2443 le32_to_cpu(mpi_reply->IOCLogInfo))); 2686 le32_to_cpu(mpi_reply->IOCLogInfo)));
2687 }
2688
2444 return 1; 2689 return 1;
2445} 2690}
2446 2691
@@ -2478,28 +2723,33 @@ _scsih_tm_tr_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
2478 handle = le16_to_cpu(mpi_reply->DevHandle); 2723 handle = le16_to_cpu(mpi_reply->DevHandle);
2479 spin_lock_irqsave(&ioc->sas_device_lock, flags); 2724 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2480 sas_device = _scsih_sas_device_find_by_handle(ioc, handle); 2725 sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
2481 if (!sas_device) {
2482 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2483 printk(MPT2SAS_ERR_FMT "%s: failed finding sas_device\n",
2484 ioc->name, __func__);
2485 return 1;
2486 }
2487 sas_device->state |= MPTSAS_STATE_TR_COMPLETE;
2488 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 2726 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2489 2727
2490 if (sas_device->starget) 2728 if (sas_device) {
2491 dewtprintk(ioc, starget_printk(KERN_INFO, sas_device->starget, 2729 sas_device->state |= MPTSAS_STATE_TR_COMPLETE;
2492 "tr_complete:handle(0x%04x), (%s) ioc_status(0x%04x), " 2730 if (sas_device->starget) {
2493 "loginfo(0x%08x), completed(%d)\n", 2731 dewtprintk(ioc, starget_printk(KERN_INFO,
2494 sas_device->handle, (sas_device->state & 2732 sas_device->starget, "tr_complete:handle(0x%04x), "
2495 MPT2SAS_REQ_SAS_CNTRL) ? "open" : "active", 2733 "(%s) ioc_status(0x%04x), loginfo(0x%08x), "
2496 le16_to_cpu(mpi_reply->IOCStatus), 2734 "completed(%d)\n", sas_device->handle,
2735 (sas_device->state & MPT2SAS_REQ_SAS_CNTRL) ?
2736 "open" : "active",
2737 le16_to_cpu(mpi_reply->IOCStatus),
2738 le32_to_cpu(mpi_reply->IOCLogInfo),
2739 le32_to_cpu(mpi_reply->TerminationCount)));
2740 if (sas_device->starget->hostdata) {
2741 sas_target_priv_data =
2742 sas_device->starget->hostdata;
2743 sas_target_priv_data->tm_busy = 0;
2744 }
2745 }
2746 } else {
2747 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
2748 "tr_complete:handle(0x%04x), (open) ioc_status(0x%04x), "
2749 "loginfo(0x%08x), completed(%d)\n", ioc->name,
2750 handle, le16_to_cpu(mpi_reply->IOCStatus),
2497 le32_to_cpu(mpi_reply->IOCLogInfo), 2751 le32_to_cpu(mpi_reply->IOCLogInfo),
2498 le32_to_cpu(mpi_reply->TerminationCount))); 2752 le32_to_cpu(mpi_reply->TerminationCount)));
2499
2500 if (sas_device->starget && sas_device->starget->hostdata) {
2501 sas_target_priv_data = sas_device->starget->hostdata;
2502 sas_target_priv_data->tm_busy = 0;
2503 } 2753 }
2504 2754
2505 if (!list_empty(&ioc->delayed_tr_list)) { 2755 if (!list_empty(&ioc->delayed_tr_list)) {
@@ -2514,8 +2764,7 @@ _scsih_tm_tr_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
2514 } else 2764 } else
2515 rc = 1; 2765 rc = 1;
2516 2766
2517 2767 if (sas_device && !(sas_device->state & MPT2SAS_REQ_SAS_CNTRL))
2518 if (!(sas_device->state & MPT2SAS_REQ_SAS_CNTRL))
2519 return rc; 2768 return rc;
2520 2769
2521 if (ioc->shost_recovery) { 2770 if (ioc->shost_recovery) {
@@ -2531,12 +2780,14 @@ _scsih_tm_tr_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
2531 return rc; 2780 return rc;
2532 } 2781 }
2533 2782
2783 if (sas_device)
2784 sas_device->state |= MPTSAS_STATE_CNTRL_SEND;
2785
2534 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid_sas_ctrl); 2786 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid_sas_ctrl);
2535 memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t)); 2787 memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
2536 mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL; 2788 mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
2537 mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE; 2789 mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
2538 mpi_request->DevHandle = mpi_reply->DevHandle; 2790 mpi_request->DevHandle = mpi_reply->DevHandle;
2539 sas_device->state |= MPTSAS_STATE_CNTRL_SEND;
2540 mpt2sas_base_put_smid_default(ioc, smid_sas_ctrl); 2791 mpt2sas_base_put_smid_default(ioc, smid_sas_ctrl);
2541 return rc; 2792 return rc;
2542} 2793}
@@ -2678,8 +2929,6 @@ _scsih_setup_eedp(struct scsi_cmnd *scmd, Mpi2SCSIIORequest_t *mpi_request)
2678 else 2929 else
2679 return; 2930 return;
2680 2931
2681 mpi_request->EEDPBlockSize = scmd->device->sector_size;
2682
2683 switch (prot_type) { 2932 switch (prot_type) {
2684 case SCSI_PROT_DIF_TYPE1: 2933 case SCSI_PROT_DIF_TYPE1:
2685 2934
@@ -2687,8 +2936,7 @@ _scsih_setup_eedp(struct scsi_cmnd *scmd, Mpi2SCSIIORequest_t *mpi_request)
2687 * enable ref/guard checking 2936 * enable ref/guard checking
2688 * auto increment ref tag 2937 * auto increment ref tag
2689 */ 2938 */
2690 mpi_request->EEDPFlags = eedp_flags | 2939 eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
2691 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
2692 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | 2940 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
2693 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; 2941 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
2694 mpi_request->CDB.EEDP32.PrimaryReferenceTag = 2942 mpi_request->CDB.EEDP32.PrimaryReferenceTag =
@@ -2701,11 +2949,11 @@ _scsih_setup_eedp(struct scsi_cmnd *scmd, Mpi2SCSIIORequest_t *mpi_request)
2701 /* 2949 /*
2702 * enable guard checking 2950 * enable guard checking
2703 */ 2951 */
2704 mpi_request->EEDPFlags = eedp_flags | 2952 eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
2705 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
2706
2707 break; 2953 break;
2708 } 2954 }
2955 mpi_request->EEDPBlockSize = cpu_to_le32(scmd->device->sector_size);
2956 mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
2709} 2957}
2710 2958
2711/** 2959/**
@@ -2788,7 +3036,7 @@ _scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
2788 } 3036 }
2789 3037
2790 /* see if we are busy with task managment stuff */ 3038 /* see if we are busy with task managment stuff */
2791 if (sas_target_priv_data->tm_busy) 3039 if (sas_device_priv_data->block || sas_target_priv_data->tm_busy)
2792 return SCSI_MLQUEUE_DEVICE_BUSY; 3040 return SCSI_MLQUEUE_DEVICE_BUSY;
2793 else if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) 3041 else if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress)
2794 return SCSI_MLQUEUE_HOST_BUSY; 3042 return SCSI_MLQUEUE_HOST_BUSY;
@@ -2815,8 +3063,9 @@ _scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
2815 3063
2816 } else 3064 } else
2817 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; 3065 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
2818 3066 /* Make sure Device is not raid volume */
2819 if ((sas_device_priv_data->flags & MPT_DEVICE_TLR_ON)) 3067 if (!_scsih_is_raid(&scmd->device->sdev_gendev) &&
3068 sas_is_tlr_enabled(scmd->device))
2820 mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON; 3069 mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
2821 3070
2822 smid = mpt2sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd); 3071 smid = mpt2sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
@@ -2842,7 +3091,7 @@ _scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
2842 mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR; 3091 mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR;
2843 mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE; 3092 mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
2844 mpi_request->SenseBufferLowAddress = 3093 mpi_request->SenseBufferLowAddress =
2845 (u32)mpt2sas_base_get_sense_buffer_dma(ioc, smid); 3094 mpt2sas_base_get_sense_buffer_dma(ioc, smid);
2846 mpi_request->SGLOffset0 = offsetof(Mpi2SCSIIORequest_t, SGL) / 4; 3095 mpi_request->SGLOffset0 = offsetof(Mpi2SCSIIORequest_t, SGL) / 4;
2847 mpi_request->SGLFlags = cpu_to_le16(MPI2_SCSIIO_SGLFLAGS_TYPE_MPI + 3096 mpi_request->SGLFlags = cpu_to_le16(MPI2_SCSIIO_SGLFLAGS_TYPE_MPI +
2848 MPI2_SCSIIO_SGLFLAGS_SYSTEM_ADDR); 3097 MPI2_SCSIIO_SGLFLAGS_SYSTEM_ADDR);
@@ -2894,7 +3143,7 @@ _scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
2894 3143
2895#ifdef CONFIG_SCSI_MPT2SAS_LOGGING 3144#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
2896/** 3145/**
2897 * _scsih_scsi_ioc_info - translated non-succesfull SCSI_IO request 3146 * _scsih_scsi_ioc_info - translated non-successfull SCSI_IO request
2898 * @ioc: per adapter object 3147 * @ioc: per adapter object
2899 * @scmd: pointer to scsi command object 3148 * @scmd: pointer to scsi command object
2900 * @mpi_reply: reply mf payload returned from firmware 3149 * @mpi_reply: reply mf payload returned from firmware
@@ -3059,7 +3308,7 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
3059 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) { 3308 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
3060 response_info = le32_to_cpu(mpi_reply->ResponseInfo); 3309 response_info = le32_to_cpu(mpi_reply->ResponseInfo);
3061 response_bytes = (u8 *)&response_info; 3310 response_bytes = (u8 *)&response_info;
3062 _scsih_response_code(ioc, response_bytes[3]); 3311 _scsih_response_code(ioc, response_bytes[0]);
3063 } 3312 }
3064} 3313}
3065#endif 3314#endif
@@ -3177,7 +3426,7 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
3177 u8 scsi_status; 3426 u8 scsi_status;
3178 u32 log_info; 3427 u32 log_info;
3179 struct MPT2SAS_DEVICE *sas_device_priv_data; 3428 struct MPT2SAS_DEVICE *sas_device_priv_data;
3180 u32 response_code; 3429 u32 response_code = 0;
3181 3430
3182 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply); 3431 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
3183 scmd = _scsih_scsi_lookup_get(ioc, smid); 3432 scmd = _scsih_scsi_lookup_get(ioc, smid);
@@ -3199,15 +3448,17 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
3199 } 3448 }
3200 3449
3201 /* turning off TLR */ 3450 /* turning off TLR */
3451 scsi_state = mpi_reply->SCSIState;
3452 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
3453 response_code =
3454 le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
3202 if (!sas_device_priv_data->tlr_snoop_check) { 3455 if (!sas_device_priv_data->tlr_snoop_check) {
3203 sas_device_priv_data->tlr_snoop_check++; 3456 sas_device_priv_data->tlr_snoop_check++;
3204 if (sas_device_priv_data->flags & MPT_DEVICE_TLR_ON) { 3457 if (!_scsih_is_raid(&scmd->device->sdev_gendev) &&
3205 response_code = (le32_to_cpu(mpi_reply->ResponseInfo) 3458 sas_is_tlr_enabled(scmd->device) &&
3206 >> 24); 3459 response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
3207 if (response_code == 3460 sas_disable_tlr(scmd->device);
3208 MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) 3461 sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n");
3209 sas_device_priv_data->flags &=
3210 ~MPT_DEVICE_TLR_ON;
3211 } 3462 }
3212 } 3463 }
3213 3464
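
The rewritten snoop reads the response code once up front and routes the TLR decision through the transport-layer helpers (sas_is_tlr_enabled()/sas_disable_tlr()) instead of a private flag, and now skips RAID members. A shape sketch with stand-in types and a stand-in constant value:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RSP_INVALID_FRAME 0x02  /* stand-in for MPI2_SCSITASKMGMT_RSP_INVALID_FRAME */

struct dev_priv {
        int tlr_snoop_check;    /* run the snoop only once per device */
        bool tlr_enabled;
};

static void tlr_snoop(struct dev_priv *priv, bool is_raid,
                      uint32_t response_info)
{
        uint8_t response_code = response_info & 0xff;

        if (priv->tlr_snoop_check++)
                return;                       /* already checked once */
        if (!is_raid && priv->tlr_enabled &&
            response_code == RSP_INVALID_FRAME)
                priv->tlr_enabled = false;    /* device rejected TLR */
}

int main(void)
{
        struct dev_priv priv = { 0, true };

        tlr_snoop(&priv, false, 0x00000002);  /* hypothetical reply word */
        printf("TLR %s\n", priv.tlr_enabled ? "enabled" : "disabled");
        return 0;
}
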
@@ -3219,7 +3470,6 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
3219 else 3470 else
3220 log_info = 0; 3471 log_info = 0;
3221 ioc_status &= MPI2_IOCSTATUS_MASK; 3472 ioc_status &= MPI2_IOCSTATUS_MASK;
3222 scsi_state = mpi_reply->SCSIState;
3223 scsi_status = mpi_reply->SCSIStatus; 3473 scsi_status = mpi_reply->SCSIStatus;
3224 3474
3225 if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 && 3475 if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
@@ -3255,10 +3505,9 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
3255 3505
3256 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED: 3506 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
3257 if (sas_device_priv_data->block) { 3507 if (sas_device_priv_data->block) {
3258 scmd->result = (DID_BUS_BUSY << 16); 3508 scmd->result = DID_TRANSPORT_DISRUPTED << 16;
3259 break; 3509 goto out;
3260 } 3510 }
3261
3262 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED: 3511 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
3263 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED: 3512 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
3264 scmd->result = DID_RESET << 16; 3513 scmd->result = DID_RESET << 16;
@@ -3304,8 +3553,10 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
3304 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR: 3553 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
3305 case MPI2_IOCSTATUS_SUCCESS: 3554 case MPI2_IOCSTATUS_SUCCESS:
3306 scmd->result = (DID_OK << 16) | scsi_status; 3555 scmd->result = (DID_OK << 16) | scsi_status;
3307 if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED | 3556 if (response_code ==
3308 MPI2_SCSI_STATE_NO_SCSI_STATUS)) 3557 MPI2_SCSITASKMGMT_RSP_INVALID_FRAME ||
3558 (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
3559 MPI2_SCSI_STATE_NO_SCSI_STATUS)))
3309 scmd->result = DID_SOFT_ERROR << 16; 3560 scmd->result = DID_SOFT_ERROR << 16;
3310 else if (scsi_state & MPI2_SCSI_STATE_TERMINATED) 3561 else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
3311 scmd->result = DID_RESET << 16; 3562 scmd->result = DID_RESET << 16;
@@ -3344,7 +3595,6 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
3344/** 3595/**
3345 * _scsih_sas_host_refresh - refreshing sas host object contents 3596 * _scsih_sas_host_refresh - refreshing sas host object contents
3346 * @ioc: per adapter object 3597 * @ioc: per adapter object
3347 * @update: update link information
3348 * Context: user 3598 * Context: user
3349 * 3599 *
3350 * During port enable, fw will send topology events for every device. It's 3600 * During port enable, fw will send topology events for every device. It's
@@ -3354,13 +3604,14 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
3354 * Return nothing. 3604 * Return nothing.
3355 */ 3605 */
3356static void 3606static void
3357_scsih_sas_host_refresh(struct MPT2SAS_ADAPTER *ioc, u8 update) 3607_scsih_sas_host_refresh(struct MPT2SAS_ADAPTER *ioc)
3358{ 3608{
3359 u16 sz; 3609 u16 sz;
3360 u16 ioc_status; 3610 u16 ioc_status;
3361 int i; 3611 int i;
3362 Mpi2ConfigReply_t mpi_reply; 3612 Mpi2ConfigReply_t mpi_reply;
3363 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL; 3613 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
3614 u16 attached_handle;
3364 3615
3365 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT 3616 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT
3366 "updating handles for sas_host(0x%016llx)\n", 3617 "updating handles for sas_host(0x%016llx)\n",
@@ -3374,27 +3625,24 @@ _scsih_sas_host_refresh(struct MPT2SAS_ADAPTER *ioc, u8 update)
3374 ioc->name, __FILE__, __LINE__, __func__); 3625 ioc->name, __FILE__, __LINE__, __func__);
3375 return; 3626 return;
3376 } 3627 }
3377 if (!(mpt2sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
3378 sas_iounit_pg0, sz))) {
3379 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
3380 MPI2_IOCSTATUS_MASK;
3381 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
3382 goto out;
3383 for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
3384 ioc->sas_hba.phy[i].handle =
3385 le16_to_cpu(sas_iounit_pg0->PhyData[i].
3386 ControllerDevHandle);
3387 if (update)
3388 mpt2sas_transport_update_links(
3389 ioc,
3390 ioc->sas_hba.phy[i].handle,
3391 le16_to_cpu(sas_iounit_pg0->PhyData[i].
3392 AttachedDevHandle), i,
3393 sas_iounit_pg0->PhyData[i].
3394 NegotiatedLinkRate >> 4);
3395 }
3396 }
3397 3628
3629 if ((mpt2sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
3630 sas_iounit_pg0, sz)) != 0)
3631 goto out;
3632 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
3633 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
3634 goto out;
3635 for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
3636 if (i == 0)
3637 ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
3638 PhyData[0].ControllerDevHandle);
3639 ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
3640 attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
3641 AttachedDevHandle);
3642 mpt2sas_transport_update_links(ioc, ioc->sas_hba.sas_address,
3643 attached_handle, i, sas_iounit_pg0->PhyData[i].
3644 NegotiatedLinkRate >> 4);
3645 }
3398 out: 3646 out:
3399 kfree(sas_iounit_pg0); 3647 kfree(sas_iounit_pg0);
3400} 3648}
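
_scsih_sas_host_refresh() loses its `update` parameter, flattens the nested error handling into early `goto out` exits with a single kfree, caches the host device handle once from phy 0, and keys the link update on the host SAS address rather than per-phy handles. A compilable sketch of that control-flow shape (all names are stand-ins):

#include <stdint.h>
#include <stdlib.h>

/* read_config_page() is a stand-in for the firmware config read. */
static int read_config_page(uint16_t *buf, size_t n)
{
        for (size_t i = 0; i < n; i++)
                buf[i] = (uint16_t)(0x100 + i);   /* fake handles */
        return 0;
}

int refresh(uint16_t *hba_handle, size_t num_phys)
{
        int rc = -1;
        uint16_t *pg = calloc(num_phys, sizeof(*pg));

        if (!pg)
                goto out;                 /* single cleanup label */
        if (read_config_page(pg, num_phys) != 0)
                goto out;
        for (size_t i = 0; i < num_phys; i++) {
                if (i == 0)
                        *hba_handle = pg[0];  /* cache the host handle once */
                /* per-phy link update happens here in the driver */
        }
        rc = 0;
out:
        free(pg);                         /* free(NULL) is a no-op */
        return rc;
}

int main(void)
{
        uint16_t handle = 0;
        return refresh(&handle, 8);
}
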
@@ -3507,19 +3755,21 @@ _scsih_sas_host_add(struct MPT2SAS_ADAPTER *ioc)
3507 ioc->name, __FILE__, __LINE__, __func__); 3755 ioc->name, __FILE__, __LINE__, __func__);
3508 goto out; 3756 goto out;
3509 } 3757 }
3510 ioc->sas_hba.phy[i].handle = 3758
3511 le16_to_cpu(sas_iounit_pg0->PhyData[i].ControllerDevHandle); 3759 if (i == 0)
3760 ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
3761 PhyData[0].ControllerDevHandle);
3762 ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
3512 ioc->sas_hba.phy[i].phy_id = i; 3763 ioc->sas_hba.phy[i].phy_id = i;
3513 mpt2sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i], 3764 mpt2sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i],
3514 phy_pg0, ioc->sas_hba.parent_dev); 3765 phy_pg0, ioc->sas_hba.parent_dev);
3515 } 3766 }
3516 if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, 3767 if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
3517 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.phy[0].handle))) { 3768 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
3518 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", 3769 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
3519 ioc->name, __FILE__, __LINE__, __func__); 3770 ioc->name, __FILE__, __LINE__, __func__);
3520 goto out; 3771 goto out;
3521 } 3772 }
3522 ioc->sas_hba.handle = le16_to_cpu(sas_device_pg0.DevHandle);
3523 ioc->sas_hba.enclosure_handle = 3773 ioc->sas_hba.enclosure_handle =
3524 le16_to_cpu(sas_device_pg0.EnclosureHandle); 3774 le16_to_cpu(sas_device_pg0.EnclosureHandle);
3525 ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress); 3775 ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
@@ -3562,7 +3812,7 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle)
3562 Mpi2SasEnclosurePage0_t enclosure_pg0; 3812 Mpi2SasEnclosurePage0_t enclosure_pg0;
3563 u32 ioc_status; 3813 u32 ioc_status;
3564 u16 parent_handle; 3814 u16 parent_handle;
3565 __le64 sas_address; 3815 __le64 sas_address, sas_address_parent = 0;
3566 int i; 3816 int i;
3567 unsigned long flags; 3817 unsigned long flags;
3568 struct _sas_port *mpt2sas_port = NULL; 3818 struct _sas_port *mpt2sas_port = NULL;
@@ -3591,10 +3841,16 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle)
3591 3841
3592 /* handle out of order topology events */ 3842 /* handle out of order topology events */
3593 parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle); 3843 parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
3594 if (parent_handle >= ioc->sas_hba.num_phys) { 3844 if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent)
3845 != 0) {
3846 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
3847 ioc->name, __FILE__, __LINE__, __func__);
3848 return -1;
3849 }
3850 if (sas_address_parent != ioc->sas_hba.sas_address) {
3595 spin_lock_irqsave(&ioc->sas_node_lock, flags); 3851 spin_lock_irqsave(&ioc->sas_node_lock, flags);
3596 sas_expander = mpt2sas_scsih_expander_find_by_handle(ioc, 3852 sas_expander = mpt2sas_scsih_expander_find_by_sas_address(ioc,
3597 parent_handle); 3853 sas_address_parent);
3598 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 3854 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
3599 if (!sas_expander) { 3855 if (!sas_expander) {
3600 rc = _scsih_expander_add(ioc, parent_handle); 3856 rc = _scsih_expander_add(ioc, parent_handle);
@@ -3622,14 +3878,12 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle)
3622 3878
3623 sas_expander->handle = handle; 3879 sas_expander->handle = handle;
3624 sas_expander->num_phys = expander_pg0.NumPhys; 3880 sas_expander->num_phys = expander_pg0.NumPhys;
3625 sas_expander->parent_handle = parent_handle; 3881 sas_expander->sas_address_parent = sas_address_parent;
3626 sas_expander->enclosure_handle =
3627 le16_to_cpu(expander_pg0.EnclosureHandle);
3628 sas_expander->sas_address = sas_address; 3882 sas_expander->sas_address = sas_address;
3629 3883
3630 printk(MPT2SAS_INFO_FMT "expander_add: handle(0x%04x)," 3884 printk(MPT2SAS_INFO_FMT "expander_add: handle(0x%04x),"
3631 " parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n", ioc->name, 3885 " parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n", ioc->name,
3632 handle, sas_expander->parent_handle, (unsigned long long) 3886 handle, parent_handle, (unsigned long long)
3633 sas_expander->sas_address, sas_expander->num_phys); 3887 sas_expander->sas_address, sas_expander->num_phys);
3634 3888
3635 if (!sas_expander->num_phys) 3889 if (!sas_expander->num_phys)
@@ -3645,7 +3899,7 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle)
3645 3899
3646 INIT_LIST_HEAD(&sas_expander->sas_port_list); 3900 INIT_LIST_HEAD(&sas_expander->sas_port_list);
3647 mpt2sas_port = mpt2sas_transport_port_add(ioc, handle, 3901 mpt2sas_port = mpt2sas_transport_port_add(ioc, handle,
3648 sas_expander->parent_handle); 3902 sas_address_parent);
3649 if (!mpt2sas_port) { 3903 if (!mpt2sas_port) {
3650 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", 3904 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
3651 ioc->name, __FILE__, __LINE__, __func__); 3905 ioc->name, __FILE__, __LINE__, __func__);
@@ -3691,20 +3945,54 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle)
3691 3945
3692 if (mpt2sas_port) 3946 if (mpt2sas_port)
3693 mpt2sas_transport_port_remove(ioc, sas_expander->sas_address, 3947 mpt2sas_transport_port_remove(ioc, sas_expander->sas_address,
3694 sas_expander->parent_handle); 3948 sas_address_parent);
3695 kfree(sas_expander); 3949 kfree(sas_expander);
3696 return rc; 3950 return rc;
3697} 3951}
3698 3952
3699/** 3953/**
3954 * _scsih_done - scsih callback handler.
3955 * @ioc: per adapter object
3956 * @smid: system request message index
3957 * @msix_index: MSIX table index supplied by the OS
3958 * @reply: reply message frame(lower 32bit addr)
3959 *
3960 * Callback handler when sending internally generated message frames.
3961 * The callback index passed is `ioc->scsih_cb_idx`
3962 *
3963 * Return 1 meaning mf should be freed from _base_interrupt
3964 * 0 means the mf is freed from this function.
3965 */
3966static u8
3967_scsih_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
3968{
3969 MPI2DefaultReply_t *mpi_reply;
3970
3971 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
3972 if (ioc->scsih_cmds.status == MPT2_CMD_NOT_USED)
3973 return 1;
3974 if (ioc->scsih_cmds.smid != smid)
3975 return 1;
3976 ioc->scsih_cmds.status |= MPT2_CMD_COMPLETE;
3977 if (mpi_reply) {
3978 memcpy(ioc->scsih_cmds.reply, mpi_reply,
3979 mpi_reply->MsgLength*4);
3980 ioc->scsih_cmds.status |= MPT2_CMD_REPLY_VALID;
3981 }
3982 ioc->scsih_cmds.status &= ~MPT2_CMD_PENDING;
3983 complete(&ioc->scsih_cmds.done);
3984 return 1;
3985}
3986
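
_scsih_done() follows the driver's usual internal-command callback contract: ignore completions for an unused slot or a mismatched smid, latch COMPLETE (plus REPLY_VALID when a reply frame arrived), clear PENDING, and wake the waiter. A standalone sketch of that bookkeeping (the flag values are stand-ins):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CMD_NOT_USED    0x8000
#define CMD_COMPLETE    0x0001
#define CMD_PENDING     0x0002
#define CMD_REPLY_VALID 0x0004

struct internal_cmd {
        uint16_t status;
        uint16_t smid;
        uint8_t reply[64];
};

/* Returns 1, meaning the caller frees the message frame, matching the
 * driver's callback contract. */
static int cmd_done(struct internal_cmd *cmd, uint16_t smid,
                    const void *reply, size_t reply_len)
{
        if (cmd->status == CMD_NOT_USED || cmd->smid != smid)
                return 1;               /* stale or unexpected completion */
        cmd->status |= CMD_COMPLETE;
        if (reply) {
                if (reply_len > sizeof(cmd->reply))
                        reply_len = sizeof(cmd->reply);
                memcpy(cmd->reply, reply, reply_len);
                cmd->status |= CMD_REPLY_VALID;
        }
        cmd->status &= ~CMD_PENDING;
        /* complete(&cmd->done) wakes the sleeping issuer here */
        return 1;
}

int main(void)
{
        struct internal_cmd cmd = { CMD_PENDING, 7, { 0 } };
        uint8_t frame[16] = { 0 };

        cmd_done(&cmd, 7, frame, sizeof(frame));
        printf("status 0x%04x\n", cmd.status);
        return 0;
}
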
3987/**
3700 * _scsih_expander_remove - removing expander object 3988 * _scsih_expander_remove - removing expander object
3701 * @ioc: per adapter object 3989 * @ioc: per adapter object
3702 * @handle: expander handle 3990 * @sas_address: expander sas_address
3703 * 3991 *
3704 * Return nothing. 3992 * Return nothing.
3705 */ 3993 */
3706static void 3994static void
3707_scsih_expander_remove(struct MPT2SAS_ADAPTER *ioc, u16 handle) 3995_scsih_expander_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address)
3708{ 3996{
3709 struct _sas_node *sas_expander; 3997 struct _sas_node *sas_expander;
3710 unsigned long flags; 3998 unsigned long flags;
@@ -3713,7 +4001,8 @@ _scsih_expander_remove(struct MPT2SAS_ADAPTER *ioc, u16 handle)
3713 return; 4001 return;
3714 4002
3715 spin_lock_irqsave(&ioc->sas_node_lock, flags); 4003 spin_lock_irqsave(&ioc->sas_node_lock, flags);
3716 sas_expander = mpt2sas_scsih_expander_find_by_handle(ioc, handle); 4004 sas_expander = mpt2sas_scsih_expander_find_by_sas_address(ioc,
4005 sas_address);
3717 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 4006 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
3718 _scsih_expander_node_remove(ioc, sas_expander); 4007 _scsih_expander_node_remove(ioc, sas_expander);
3719} 4008}
@@ -3805,8 +4094,11 @@ _scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd)
3805 } 4094 }
3806 4095
3807 sas_device->handle = handle; 4096 sas_device->handle = handle;
3808 sas_device->parent_handle = 4097 if (_scsih_get_sas_address(ioc, le16_to_cpu
3809 le16_to_cpu(sas_device_pg0.ParentDevHandle); 4098 (sas_device_pg0.ParentDevHandle),
4099 &sas_device->sas_address_parent) != 0)
4100 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
4101 ioc->name, __FILE__, __LINE__, __func__);
3810 sas_device->enclosure_handle = 4102 sas_device->enclosure_handle =
3811 le16_to_cpu(sas_device_pg0.EnclosureHandle); 4103 le16_to_cpu(sas_device_pg0.EnclosureHandle);
3812 sas_device->slot = 4104 sas_device->slot =
@@ -3836,43 +4128,39 @@ _scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd)
3836/** 4128/**
3837 * _scsih_remove_device - removing sas device object 4129 * _scsih_remove_device - removing sas device object
3838 * @ioc: per adapter object 4130 * @ioc: per adapter object
3839 * @handle: sas device handle 4131 * @sas_device: the sas_device object
3840 * 4132 *
3841 * Return nothing. 4133 * Return nothing.
3842 */ 4134 */
3843static void 4135static void
3844_scsih_remove_device(struct MPT2SAS_ADAPTER *ioc, u16 handle) 4136_scsih_remove_device(struct MPT2SAS_ADAPTER *ioc, struct _sas_device
4137 *sas_device)
3845{ 4138{
3846 struct MPT2SAS_TARGET *sas_target_priv_data; 4139 struct MPT2SAS_TARGET *sas_target_priv_data;
3847 struct _sas_device *sas_device;
3848 unsigned long flags;
3849 Mpi2SasIoUnitControlReply_t mpi_reply; 4140 Mpi2SasIoUnitControlReply_t mpi_reply;
3850 Mpi2SasIoUnitControlRequest_t mpi_request; 4141 Mpi2SasIoUnitControlRequest_t mpi_request;
3851 u16 device_handle; 4142 u16 device_handle, handle;
3852 4143
3853 /* lookup sas_device */ 4144 if (!sas_device)
3854 spin_lock_irqsave(&ioc->sas_device_lock, flags);
3855 sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
3856 if (!sas_device) {
3857 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3858 return; 4145 return;
3859 }
3860 4146
3861 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter: handle" 4147 handle = sas_device->handle;
3862 "(0x%04x)\n", ioc->name, __func__, handle)); 4148 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter: handle(0x%04x),"
4149 " sas_addr(0x%016llx)\n", ioc->name, __func__, handle,
4150 (unsigned long long) sas_device->sas_address));
3863 4151
3864 if (sas_device->starget && sas_device->starget->hostdata) { 4152 if (sas_device->starget && sas_device->starget->hostdata) {
3865 sas_target_priv_data = sas_device->starget->hostdata; 4153 sas_target_priv_data = sas_device->starget->hostdata;
3866 sas_target_priv_data->deleted = 1; 4154 sas_target_priv_data->deleted = 1;
3867 } 4155 }
3868 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3869 4156
3870 if (ioc->remove_host) 4157 if (ioc->remove_host || ioc->shost_recovery || !handle)
3871 goto out; 4158 goto out;
3872 4159
3873 if ((sas_device->state & MPTSAS_STATE_TR_COMPLETE)) { 4160 if ((sas_device->state & MPTSAS_STATE_TR_COMPLETE)) {
3874 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "\tskip " 4161 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "\tskip "
3875 "target_reset handle(0x%04x)\n", ioc->name, handle)); 4162 "target_reset handle(0x%04x)\n", ioc->name,
4163 handle));
3876 goto skip_tr; 4164 goto skip_tr;
3877 } 4165 }
3878 4166
@@ -3925,10 +4213,10 @@ _scsih_remove_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
3925 _scsih_ublock_io_device(ioc, handle); 4213 _scsih_ublock_io_device(ioc, handle);
3926 4214
3927 mpt2sas_transport_port_remove(ioc, sas_device->sas_address, 4215 mpt2sas_transport_port_remove(ioc, sas_device->sas_address,
3928 sas_device->parent_handle); 4216 sas_device->sas_address_parent);
3929 4217
3930 printk(MPT2SAS_INFO_FMT "removing handle(0x%04x), sas_addr" 4218 printk(MPT2SAS_INFO_FMT "removing handle(0x%04x), sas_addr"
3931 "(0x%016llx)\n", ioc->name, sas_device->handle, 4219 "(0x%016llx)\n", ioc->name, handle,
3932 (unsigned long long) sas_device->sas_address); 4220 (unsigned long long) sas_device->sas_address);
3933 _scsih_sas_device_remove(ioc, sas_device); 4221 _scsih_sas_device_remove(ioc, sas_device);
3934 4222
@@ -3952,7 +4240,7 @@ _scsih_sas_topology_change_event_debug(struct MPT2SAS_ADAPTER *ioc,
3952 u16 reason_code; 4240 u16 reason_code;
3953 u8 phy_number; 4241 u8 phy_number;
3954 char *status_str = NULL; 4242 char *status_str = NULL;
3955 char link_rate[25]; 4243 u8 link_rate, prev_link_rate;
3956 4244
3957 switch (event_data->ExpStatus) { 4245 switch (event_data->ExpStatus) {
3958 case MPI2_EVENT_SAS_TOPO_ES_ADDED: 4246 case MPI2_EVENT_SAS_TOPO_ES_ADDED:
@@ -3962,6 +4250,7 @@ _scsih_sas_topology_change_event_debug(struct MPT2SAS_ADAPTER *ioc,
3962 status_str = "remove"; 4250 status_str = "remove";
3963 break; 4251 break;
3964 case MPI2_EVENT_SAS_TOPO_ES_RESPONDING: 4252 case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
4253 case 0:
3965 status_str = "responding"; 4254 status_str = "responding";
3966 break; 4255 break;
3967 case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING: 4256 case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
@@ -3987,30 +4276,30 @@ _scsih_sas_topology_change_event_debug(struct MPT2SAS_ADAPTER *ioc,
3987 MPI2_EVENT_SAS_TOPO_RC_MASK; 4276 MPI2_EVENT_SAS_TOPO_RC_MASK;
3988 switch (reason_code) { 4277 switch (reason_code) {
3989 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED: 4278 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
3990 snprintf(link_rate, 25, ": add, link(0x%02x)", 4279 status_str = "target add";
3991 (event_data->PHY[i].LinkRate >> 4));
3992 status_str = link_rate;
3993 break; 4280 break;
3994 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING: 4281 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
3995 status_str = ": remove"; 4282 status_str = "target remove";
3996 break; 4283 break;
3997 case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING: 4284 case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
3998 status_str = ": remove_delay"; 4285 status_str = "delay target remove";
3999 break; 4286 break;
4000 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED: 4287 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
4001 snprintf(link_rate, 25, ": link(0x%02x)", 4288 status_str = "link rate change";
4002 (event_data->PHY[i].LinkRate >> 4));
4003 status_str = link_rate;
4004 break; 4289 break;
4005 case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE: 4290 case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
4006 status_str = ": responding"; 4291 status_str = "target responding";
4007 break; 4292 break;
4008 default: 4293 default:
4009 status_str = ": unknown"; 4294 status_str = "unknown";
4010 break; 4295 break;
4011 } 4296 }
4012 printk(KERN_DEBUG "\tphy(%02d), attached_handle(0x%04x)%s\n", 4297 link_rate = event_data->PHY[i].LinkRate >> 4;
4013 phy_number, handle, status_str); 4298 prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
4299 printk(KERN_DEBUG "\tphy(%02d), attached_handle(0x%04x): %s:"
4300 " link rate: new(0x%02x), old(0x%02x)\n", phy_number,
4301 handle, status_str, link_rate, prev_link_rate);
4302
4014 } 4303 }
4015} 4304}
4016#endif 4305#endif
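
The reworked debug print assumes the event's per-phy LinkRate byte packs the newly negotiated rate in the high nibble and the previous rate in the low nibble, which is also how the handler hunk below unpacks it:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint8_t link_rate_byte = 0x9a;                  /* hypothetical event value */
        uint8_t link_rate      = link_rate_byte >> 4;   /* new rate */
        uint8_t prev_link_rate = link_rate_byte & 0xf;  /* old rate */

        printf("link rate: new(0x%02x), old(0x%02x)\n",
               link_rate, prev_link_rate);
        return 0;
}
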
@@ -4031,8 +4320,10 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc,
4031 u16 reason_code; 4320 u16 reason_code;
4032 u8 phy_number; 4321 u8 phy_number;
4033 struct _sas_node *sas_expander; 4322 struct _sas_node *sas_expander;
4323 struct _sas_device *sas_device;
4324 u64 sas_address;
4034 unsigned long flags; 4325 unsigned long flags;
4035 u8 link_rate_; 4326 u8 link_rate, prev_link_rate;
4036 Mpi2EventDataSasTopologyChangeList_t *event_data = fw_event->event_data; 4327 Mpi2EventDataSasTopologyChangeList_t *event_data = fw_event->event_data;
4037 4328
4038#ifdef CONFIG_SCSI_MPT2SAS_LOGGING 4329#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
@@ -4040,10 +4331,13 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc,
4040 _scsih_sas_topology_change_event_debug(ioc, event_data); 4331 _scsih_sas_topology_change_event_debug(ioc, event_data);
4041#endif 4332#endif
4042 4333
4334 if (ioc->shost_recovery)
4335 return;
4336
4043 if (!ioc->sas_hba.num_phys) 4337 if (!ioc->sas_hba.num_phys)
4044 _scsih_sas_host_add(ioc); 4338 _scsih_sas_host_add(ioc);
4045 else 4339 else
4046 _scsih_sas_host_refresh(ioc, 0); 4340 _scsih_sas_host_refresh(ioc);
4047 4341
4048 if (fw_event->ignore) { 4342 if (fw_event->ignore) {
4049 dewtprintk(ioc, printk(MPT2SAS_DEBUG_FMT "ignoring expander " 4343 dewtprintk(ioc, printk(MPT2SAS_DEBUG_FMT "ignoring expander "
@@ -4058,6 +4352,17 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc,
4058 if (_scsih_expander_add(ioc, parent_handle) != 0) 4352 if (_scsih_expander_add(ioc, parent_handle) != 0)
4059 return; 4353 return;
4060 4354
4355 spin_lock_irqsave(&ioc->sas_node_lock, flags);
4356 sas_expander = mpt2sas_scsih_expander_find_by_handle(ioc,
4357 parent_handle);
4358 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
4359 if (sas_expander)
4360 sas_address = sas_expander->sas_address;
4361 else if (parent_handle < ioc->sas_hba.num_phys)
4362 sas_address = ioc->sas_hba.sas_address;
4363 else
4364 return;
4365
4061 /* handle siblings events */ 4366 /* handle siblings events */
4062 for (i = 0; i < event_data->NumEntries; i++) { 4367 for (i = 0; i < event_data->NumEntries; i++) {
4063 if (fw_event->ignore) { 4368 if (fw_event->ignore) {
@@ -4077,48 +4382,47 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc,
4077 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); 4382 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
4078 if (!handle) 4383 if (!handle)
4079 continue; 4384 continue;
4080 link_rate_ = event_data->PHY[i].LinkRate >> 4; 4385 link_rate = event_data->PHY[i].LinkRate >> 4;
4386 prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
4081 switch (reason_code) { 4387 switch (reason_code) {
4082 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED: 4388 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
4389
4390 if (link_rate == prev_link_rate)
4391 break;
4392
4393 mpt2sas_transport_update_links(ioc, sas_address,
4394 handle, phy_number, link_rate);
4395
4396 if (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)
4397 _scsih_ublock_io_device(ioc, handle);
4398 break;
4083 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED: 4399 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
4084 if (!parent_handle) { 4400
4085 if (phy_number < ioc->sas_hba.num_phys) 4401 mpt2sas_transport_update_links(ioc, sas_address,
4086 mpt2sas_transport_update_links( 4402 handle, phy_number, link_rate);
4087 ioc, 4403
4088 ioc->sas_hba.phy[phy_number].handle, 4404 _scsih_add_device(ioc, handle, phy_number, 0);
4089 handle, phy_number, link_rate_);
4090 } else {
4091 spin_lock_irqsave(&ioc->sas_node_lock, flags);
4092 sas_expander =
4093 mpt2sas_scsih_expander_find_by_handle(ioc,
4094 parent_handle);
4095 spin_unlock_irqrestore(&ioc->sas_node_lock,
4096 flags);
4097 if (sas_expander) {
4098 if (phy_number < sas_expander->num_phys)
4099 mpt2sas_transport_update_links(
4100 ioc,
4101 sas_expander->
4102 phy[phy_number].handle,
4103 handle, phy_number,
4104 link_rate_);
4105 }
4106 }
4107 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED) {
4108 if (link_rate_ < MPI2_SAS_NEG_LINK_RATE_1_5)
4109 break;
4110 _scsih_add_device(ioc, handle, phy_number, 0);
4111 }
4112 break; 4405 break;
4113 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING: 4406 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
4114 _scsih_remove_device(ioc, handle); 4407
4408 spin_lock_irqsave(&ioc->sas_device_lock, flags);
4409 sas_device = _scsih_sas_device_find_by_handle(ioc,
4410 handle);
4411 if (!sas_device) {
4412 spin_unlock_irqrestore(&ioc->sas_device_lock,
4413 flags);
4414 break;
4415 }
4416 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
4417 _scsih_remove_device(ioc, sas_device);
4115 break; 4418 break;
4116 } 4419 }
4117 } 4420 }
4118 4421
4119 /* handle expander removal */ 4422 /* handle expander removal */
4120 if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING) 4423 if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
4121 _scsih_expander_remove(ioc, parent_handle); 4424 sas_expander)
4425 _scsih_expander_remove(ioc, sas_address);
4122 4426
4123} 4427}
4124 4428
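
The handler now resolves the parent handle to a SAS address once, before the phy loop: prefer a matching expander, fall back to the host when the handle is within the HBA phy range, and bail otherwise. A compilable sketch of that fallback (types and values are stand-ins):

#include <stdint.h>
#include <stddef.h>

struct expander { uint64_t sas_address; uint16_t handle; };

static const struct expander *find_expander(const struct expander *tbl,
                                            size_t n, uint16_t handle)
{
        for (size_t i = 0; i < n; i++)
                if (tbl[i].handle == handle)
                        return &tbl[i];
        return NULL;
}

static int resolve_parent(const struct expander *tbl, size_t n,
                          uint16_t parent_handle, uint8_t hba_num_phys,
                          uint64_t hba_sas_address, uint64_t *out)
{
        const struct expander *exp = find_expander(tbl, n, parent_handle);

        if (exp)
                *out = exp->sas_address;
        else if (parent_handle < hba_num_phys)
                *out = hba_sas_address;     /* parent is the host itself */
        else
                return -1;                  /* unknown parent: bail out */
        return 0;
}

int main(void)
{
        struct expander tbl[] = { { 0x500605b000000001ULL, 0x0009 } };
        uint64_t parent = 0;

        return resolve_parent(tbl, 1, 0x0009, 8,
                              0x500605b0000000ffULL, &parent);
}
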
@@ -4170,6 +4474,12 @@ _scsih_sas_device_status_change_event_debug(struct MPT2SAS_ADAPTER *ioc,
4170 case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION: 4474 case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
4171 reason_str = "internal async notification"; 4475 reason_str = "internal async notification";
4172 break; 4476 break;
4477 case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY:
4478 reason_str = "expander reduced functionality";
4479 break;
4480 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY:
4481 reason_str = "expander reduced functionality complete";
4482 break;
4173 default: 4483 default:
4174 reason_str = "unknown reason"; 4484 reason_str = "unknown reason";
4175 break; 4485 break;
@@ -4197,11 +4507,43 @@ static void
4197_scsih_sas_device_status_change_event(struct MPT2SAS_ADAPTER *ioc, 4507_scsih_sas_device_status_change_event(struct MPT2SAS_ADAPTER *ioc,
4198 struct fw_event_work *fw_event) 4508 struct fw_event_work *fw_event)
4199{ 4509{
4510 struct MPT2SAS_TARGET *target_priv_data;
4511 struct _sas_device *sas_device;
4512 __le64 sas_address;
4513 unsigned long flags;
4514 Mpi2EventDataSasDeviceStatusChange_t *event_data =
4515 fw_event->event_data;
4516
4200#ifdef CONFIG_SCSI_MPT2SAS_LOGGING 4517#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
4201 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) 4518 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
4202 _scsih_sas_device_status_change_event_debug(ioc, 4519 _scsih_sas_device_status_change_event_debug(ioc,
4203 fw_event->event_data); 4520 event_data);
4204#endif 4521#endif
4522
4523 if (!(event_data->ReasonCode ==
4524 MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET ||
4525 event_data->ReasonCode ==
4526 MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET))
4527 return;
4528
4529 spin_lock_irqsave(&ioc->sas_device_lock, flags);
4530 sas_address = le64_to_cpu(event_data->SASAddress);
4531 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
4532 sas_address);
4533 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
4534
4535 if (!sas_device || !sas_device->starget)
4536 return;
4537
4538 target_priv_data = sas_device->starget->hostdata;
4539 if (!target_priv_data)
4540 return;
4541
4542 if (event_data->ReasonCode ==
4543 MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
4544 target_priv_data->tm_busy = 1;
4545 else
4546 target_priv_data->tm_busy = 0;
4205} 4547}
4206 4548
4207#ifdef CONFIG_SCSI_MPT2SAS_LOGGING 4549#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
@@ -4281,6 +4623,7 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
4281#ifdef CONFIG_SCSI_MPT2SAS_LOGGING 4623#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
4282 Mpi2EventDataSasBroadcastPrimitive_t *event_data = fw_event->event_data; 4624 Mpi2EventDataSasBroadcastPrimitive_t *event_data = fw_event->event_data;
4283#endif 4625#endif
4626 u16 ioc_status;
4284 dewtprintk(ioc, printk(MPT2SAS_DEBUG_FMT "broadcast primitive: " 4627 dewtprintk(ioc, printk(MPT2SAS_DEBUG_FMT "broadcast primitive: "
4285 "phy number(%d), width(%d)\n", ioc->name, event_data->PhyNum, 4628 "phy number(%d), width(%d)\n", ioc->name, event_data->PhyNum,
4286 event_data->PortWidth)); 4629 event_data->PortWidth));
@@ -4314,8 +4657,9 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
4314 mpt2sas_scsih_issue_tm(ioc, handle, lun, 4657 mpt2sas_scsih_issue_tm(ioc, handle, lun,
4315 MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30); 4658 MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30);
4316 ioc->tm_cmds.status = MPT2_CMD_NOT_USED; 4659 ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
4317 4660 ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
4318 if ((mpi_reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) && 4661 & MPI2_IOCSTATUS_MASK;
4662 if ((ioc_status == MPI2_IOCSTATUS_SUCCESS) &&
4319 (mpi_reply->ResponseCode == 4663 (mpi_reply->ResponseCode ==
4320 MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED || 4664 MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
4321 mpi_reply->ResponseCode == 4665 mpi_reply->ResponseCode ==
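
The fix in this hunk is endianness plus masking: IOCStatus arrives little-endian and carries flag bits above the status code, so the reply must be byte-swapped and masked with MPI2_IOCSTATUS_MASK before comparing, or a set log-info bit would make SUCCESS compare unequal. A portable sketch (the mask and values mirror the MPI2 ones but are stand-ins here):

#include <stdint.h>

#define IOCSTATUS_MASK    0x7fff  /* stand-in for MPI2_IOCSTATUS_MASK */
#define IOCSTATUS_SUCCESS 0x0000

static uint16_t le16_to_cpu_sketch(const uint8_t raw[2])
{
        return (uint16_t)(raw[0] | (raw[1] << 8));  /* wire is little-endian */
}

static int tm_succeeded(const uint8_t wire_ioc_status[2])
{
        /* Strip the flag bits before comparing, as the hunk above
         * now does with MPI2_IOCSTATUS_MASK. */
        uint16_t ioc_status =
            le16_to_cpu_sketch(wire_ioc_status) & IOCSTATUS_MASK;

        return ioc_status == IOCSTATUS_SUCCESS;
}

int main(void)
{
        const uint8_t wire[2] = { 0x00, 0x80 };  /* SUCCESS + bit 15 flag */

        return tm_succeeded(wire) ? 0 : 1;
}
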
@@ -4570,7 +4914,7 @@ _scsih_sas_pd_delete(struct MPT2SAS_ADAPTER *ioc,
4570 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 4914 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
4571 if (!sas_device) 4915 if (!sas_device)
4572 return; 4916 return;
4573 _scsih_remove_device(ioc, handle); 4917 _scsih_remove_device(ioc, sas_device);
4574} 4918}
4575 4919
4576/** 4920/**
@@ -4591,6 +4935,8 @@ _scsih_sas_pd_add(struct MPT2SAS_ADAPTER *ioc,
4591 Mpi2ConfigReply_t mpi_reply; 4935 Mpi2ConfigReply_t mpi_reply;
4592 Mpi2SasDevicePage0_t sas_device_pg0; 4936 Mpi2SasDevicePage0_t sas_device_pg0;
4593 u32 ioc_status; 4937 u32 ioc_status;
4938 u64 sas_address;
4939 u16 parent_handle;
4594 4940
4595 spin_lock_irqsave(&ioc->sas_device_lock, flags); 4941 spin_lock_irqsave(&ioc->sas_device_lock, flags);
4596 sas_device = _scsih_sas_device_find_by_handle(ioc, handle); 4942 sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
@@ -4615,9 +4961,10 @@ _scsih_sas_pd_add(struct MPT2SAS_ADAPTER *ioc,
4615 return; 4961 return;
4616 } 4962 }
4617 4963
4618 mpt2sas_transport_update_links(ioc, 4964 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
4619 le16_to_cpu(sas_device_pg0.ParentDevHandle), 4965 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
4620 handle, sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5); 4966 mpt2sas_transport_update_links(ioc, sas_address, handle,
4967 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
4621 4968
4622 _scsih_add_device(ioc, handle, 0, 1); 4969 _scsih_add_device(ioc, handle, 0, 1);
4623} 4970}
@@ -4857,7 +5204,7 @@ static void
4857_scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc, 5204_scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc,
4858 struct fw_event_work *fw_event) 5205 struct fw_event_work *fw_event)
4859{ 5206{
4860 u16 handle; 5207 u16 handle, parent_handle;
4861 u32 state; 5208 u32 state;
4862 struct _sas_device *sas_device; 5209 struct _sas_device *sas_device;
4863 unsigned long flags; 5210 unsigned long flags;
@@ -4865,6 +5212,7 @@ _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc,
4865 Mpi2SasDevicePage0_t sas_device_pg0; 5212 Mpi2SasDevicePage0_t sas_device_pg0;
4866 u32 ioc_status; 5213 u32 ioc_status;
4867 Mpi2EventDataIrPhysicalDisk_t *event_data = fw_event->event_data; 5214 Mpi2EventDataIrPhysicalDisk_t *event_data = fw_event->event_data;
5215 u64 sas_address;
4868 5216
4869 if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED) 5217 if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
4870 return; 5218 return;
@@ -4906,9 +5254,10 @@ _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc,
4906 return; 5254 return;
4907 } 5255 }
4908 5256
4909 mpt2sas_transport_update_links(ioc, 5257 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
4910 le16_to_cpu(sas_device_pg0.ParentDevHandle), 5258 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
4911 handle, sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5); 5259 mpt2sas_transport_update_links(ioc, sas_address, handle,
5260 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
4912 5261
4913 _scsih_add_device(ioc, handle, 0, 1); 5262 _scsih_add_device(ioc, handle, 0, 1);
4914 5263
@@ -4948,11 +5297,17 @@ _scsih_sas_ir_operation_status_event_debug(struct MPT2SAS_ADAPTER *ioc,
4948 case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK: 5297 case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
4949 reason_str = "consistency check"; 5298 reason_str = "consistency check";
4950 break; 5299 break;
4951 default: 5300 case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT:
4952 reason_str = "unknown reason"; 5301 reason_str = "background init";
5302 break;
5303 case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT:
5304 reason_str = "make data consistent";
4953 break; 5305 break;
4954 } 5306 }
4955 5307
5308 if (!reason_str)
5309 return;
5310
4956 printk(MPT2SAS_INFO_FMT "raid operational status: (%s)" 5311 printk(MPT2SAS_INFO_FMT "raid operational status: (%s)"
4957 "\thandle(0x%04x), percent complete(%d)\n", 5312 "\thandle(0x%04x), percent complete(%d)\n",
4958 ioc->name, reason_str, 5313 ioc->name, reason_str,
@@ -4973,11 +5328,33 @@ static void
4973_scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc, 5328_scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc,
4974 struct fw_event_work *fw_event) 5329 struct fw_event_work *fw_event)
4975{ 5330{
5331 Mpi2EventDataIrOperationStatus_t *event_data = fw_event->event_data;
5332 static struct _raid_device *raid_device;
5333 unsigned long flags;
5334 u16 handle;
5335
4976#ifdef CONFIG_SCSI_MPT2SAS_LOGGING 5336#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
4977 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) 5337 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
4978 _scsih_sas_ir_operation_status_event_debug(ioc, 5338 _scsih_sas_ir_operation_status_event_debug(ioc,
4979 fw_event->event_data); 5339 event_data);
4980#endif 5340#endif
5341
5342 /* code added for raid transport support */
5343 if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) {
5344
5345 handle = le16_to_cpu(event_data->VolDevHandle);
5346
5347 spin_lock_irqsave(&ioc->raid_device_lock, flags);
5348 raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
5349 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
5350
5351 if (!raid_device)
5352 return;
5353
5354 if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC)
5355 raid_device->percent_complete =
5356 event_data->PercentComplete;
5357 }
4981} 5358}
4982 5359
4983/** 5360/**
@@ -5252,18 +5629,23 @@ _scsih_mark_responding_expander(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
5252{ 5629{
5253 struct _sas_node *sas_expander; 5630 struct _sas_node *sas_expander;
5254 unsigned long flags; 5631 unsigned long flags;
5632 int i;
5255 5633
5256 spin_lock_irqsave(&ioc->sas_node_lock, flags); 5634 spin_lock_irqsave(&ioc->sas_node_lock, flags);
5257 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { 5635 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
5258 if (sas_expander->sas_address == sas_address) { 5636 if (sas_expander->sas_address != sas_address)
5259 sas_expander->responding = 1; 5637 continue;
5260 if (sas_expander->handle != handle) { 5638 sas_expander->responding = 1;
5261 printk(KERN_INFO "old handle(0x%04x)\n", 5639 if (sas_expander->handle == handle)
5262 sas_expander->handle);
5263 sas_expander->handle = handle;
5264 }
5265 goto out; 5640 goto out;
5266 } 5641 printk(KERN_INFO "\texpander(0x%016llx): handle changed"
5642 " from(0x%04x) to (0x%04x)!!!\n",
5643 (unsigned long long)sas_expander->sas_address,
5644 sas_expander->handle, handle);
5645 sas_expander->handle = handle;
5646 for (i = 0 ; i < sas_expander->num_phys ; i++)
5647 sas_expander->phy[i].handle = handle;
5648 goto out;
5267 } 5649 }
5268 out: 5650 out:
5269 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 5651 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
@@ -5340,7 +5722,9 @@ _scsih_remove_unresponding_devices(struct MPT2SAS_ADAPTER *ioc)
5340 (unsigned long long) 5722 (unsigned long long)
5341 sas_device->enclosure_logical_id, 5723 sas_device->enclosure_logical_id,
5342 sas_device->slot); 5724 sas_device->slot);
5343 _scsih_remove_device(ioc, sas_device->handle); 5725 /* invalidate the device handle */
5726 sas_device->handle = 0;
5727 _scsih_remove_device(ioc, sas_device);
5344 } 5728 }
5345 5729
5346 list_for_each_entry_safe(raid_device, raid_device_next, 5730 list_for_each_entry_safe(raid_device, raid_device_next,
@@ -5366,7 +5750,7 @@ _scsih_remove_unresponding_devices(struct MPT2SAS_ADAPTER *ioc)
5366 sas_expander->responding = 0; 5750 sas_expander->responding = 0;
5367 continue; 5751 continue;
5368 } 5752 }
5369 _scsih_expander_remove(ioc, sas_expander->handle); 5753 _scsih_expander_remove(ioc, sas_expander->sas_address);
5370 goto retry_expander_search; 5754 goto retry_expander_search;
5371 } 5755 }
5372} 5756}
@@ -5406,7 +5790,7 @@ mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
5406 case MPT2_IOC_DONE_RESET: 5790 case MPT2_IOC_DONE_RESET:
5407 dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: " 5791 dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
5408 "MPT2_IOC_DONE_RESET\n", ioc->name, __func__)); 5792 "MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
5409 _scsih_sas_host_refresh(ioc, 0); 5793 _scsih_sas_host_refresh(ioc);
5410 _scsih_search_responding_sas_devices(ioc); 5794 _scsih_search_responding_sas_devices(ioc);
5411 _scsih_search_responding_raid_devices(ioc); 5795 _scsih_search_responding_raid_devices(ioc);
5412 _scsih_search_responding_expanders(ioc); 5796 _scsih_search_responding_expanders(ioc);
@@ -5646,7 +6030,7 @@ _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc,
5646 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 6030 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5647 if (!sas_device) 6031 if (!sas_device)
5648 continue; 6032 continue;
5649 _scsih_remove_device(ioc, sas_device->handle); 6033 _scsih_remove_device(ioc, sas_device);
5650 if (ioc->shost_recovery) 6034 if (ioc->shost_recovery)
5651 return; 6035 return;
5652 goto retry_device_search; 6036 goto retry_device_search;
@@ -5669,7 +6053,8 @@ _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc,
5669 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 6053 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
5670 if (!expander_sibling) 6054 if (!expander_sibling)
5671 continue; 6055 continue;
5672 _scsih_expander_remove(ioc, expander_sibling->handle); 6056 _scsih_expander_remove(ioc,
6057 expander_sibling->sas_address);
5673 if (ioc->shost_recovery) 6058 if (ioc->shost_recovery)
5674 return; 6059 return;
5675 goto retry_expander_search; 6060 goto retry_expander_search;
@@ -5677,7 +6062,7 @@ _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc,
5677 } 6062 }
5678 6063
5679 mpt2sas_transport_port_remove(ioc, sas_expander->sas_address, 6064 mpt2sas_transport_port_remove(ioc, sas_expander->sas_address,
5680 sas_expander->parent_handle); 6065 sas_expander->sas_address_parent);
5681 6066
5682 printk(MPT2SAS_INFO_FMT "expander_remove: handle" 6067 printk(MPT2SAS_INFO_FMT "expander_remove: handle"
5683 "(0x%04x), sas_addr(0x%016llx)\n", ioc->name, 6068 "(0x%04x), sas_addr(0x%016llx)\n", ioc->name,
@@ -5690,9 +6075,99 @@ _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc,
5690} 6075}
5691 6076
5692/** 6077/**
6078 * _scsih_ir_shutdown - IR shutdown notification
6079 * @ioc: per adapter object
6080 *
6081 * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that
6082 * the host system is shutting down.
6083 *
6084 * Return nothing.
6085 */
6086static void
6087_scsih_ir_shutdown(struct MPT2SAS_ADAPTER *ioc)
6088{
6089 Mpi2RaidActionRequest_t *mpi_request;
6090 Mpi2RaidActionReply_t *mpi_reply;
6091 u16 smid;
6092
6093 /* is IR firmware build loaded ? */
6094 if (!ioc->ir_firmware)
6095 return;
6096
6097 /* are there any volumes ? */
6098 if (list_empty(&ioc->raid_device_list))
6099 return;
6100
6101 mutex_lock(&ioc->scsih_cmds.mutex);
6102
6103 if (ioc->scsih_cmds.status != MPT2_CMD_NOT_USED) {
6104 printk(MPT2SAS_ERR_FMT "%s: scsih_cmd in use\n",
6105 ioc->name, __func__);
6106 goto out;
6107 }
6108 ioc->scsih_cmds.status = MPT2_CMD_PENDING;
6109
6110 smid = mpt2sas_base_get_smid(ioc, ioc->scsih_cb_idx);
6111 if (!smid) {
6112 printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
6113 ioc->name, __func__);
6114 ioc->scsih_cmds.status = MPT2_CMD_NOT_USED;
6115 goto out;
6116 }
6117
6118 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
6119 ioc->scsih_cmds.smid = smid;
6120 memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
6121
6122 mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
6123 mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;
6124
6125 printk(MPT2SAS_INFO_FMT "IR shutdown (sending)\n", ioc->name);
6126 init_completion(&ioc->scsih_cmds.done);
6127 mpt2sas_base_put_smid_default(ioc, smid);
6128 wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
6129
6130 if (!(ioc->scsih_cmds.status & MPT2_CMD_COMPLETE)) {
6131 printk(MPT2SAS_ERR_FMT "%s: timeout\n",
6132 ioc->name, __func__);
6133 goto out;
6134 }
6135
6136 if (ioc->scsih_cmds.status & MPT2_CMD_REPLY_VALID) {
6137 mpi_reply = ioc->scsih_cmds.reply;
6138
6139 printk(MPT2SAS_INFO_FMT "IR shutdown (complete): "
6140 "ioc_status(0x%04x), loginfo(0x%08x)\n",
6141 ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
6142 le32_to_cpu(mpi_reply->IOCLogInfo));
6143 }
6144
6145 out:
6146 ioc->scsih_cmds.status = MPT2_CMD_NOT_USED;
6147 mutex_unlock(&ioc->scsih_cmds.mutex);
6148}
6149
6150/**
6151 * _scsih_shutdown - routine call during system shutdown
6152 * @pdev: PCI device struct
6153 *
6154 * Return nothing.
6155 */
6156static void
6157_scsih_shutdown(struct pci_dev *pdev)
6158{
6159 struct Scsi_Host *shost = pci_get_drvdata(pdev);
6160 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
6161
6162 _scsih_ir_shutdown(ioc);
6163 mpt2sas_base_detach(ioc);
6164}
6165
6166/**
5693 * _scsih_remove - detach and remove the host 6167 * _scsih_remove - detach and remove the host
5694 * @pdev: PCI device struct 6168 * @pdev: PCI device struct
5695 * 6169 *
6170 * Routine called when unloading the driver.
5696 * Return nothing. 6171 * Return nothing.
5697 */ 6172 */
5698static void __devexit 6173static void __devexit
@@ -5703,6 +6178,8 @@ _scsih_remove(struct pci_dev *pdev)
5703 struct _sas_port *mpt2sas_port; 6178 struct _sas_port *mpt2sas_port;
5704 struct _sas_device *sas_device; 6179 struct _sas_device *sas_device;
5705 struct _sas_node *expander_sibling; 6180 struct _sas_node *expander_sibling;
6181 struct _raid_device *raid_device, *next;
6182 struct MPT2SAS_TARGET *sas_target_priv_data;
5706 struct workqueue_struct *wq; 6183 struct workqueue_struct *wq;
5707 unsigned long flags; 6184 unsigned long flags;
5708 6185
@@ -5716,6 +6193,21 @@ _scsih_remove(struct pci_dev *pdev)
5716 if (wq) 6193 if (wq)
5717 destroy_workqueue(wq); 6194 destroy_workqueue(wq);
5718 6195
6196 /* release all the volumes */
6197 list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
6198 list) {
6199 if (raid_device->starget) {
6200 sas_target_priv_data =
6201 raid_device->starget->hostdata;
6202 sas_target_priv_data->deleted = 1;
6203 scsi_remove_target(&raid_device->starget->dev);
6204 }
6205 printk(MPT2SAS_INFO_FMT "removing handle(0x%04x), wwid"
6206 "(0x%016llx)\n", ioc->name, raid_device->handle,
6207 (unsigned long long) raid_device->wwid);
6208 _scsih_raid_device_remove(ioc, raid_device);
6209 }
6210
5719 /* free ports attached to the sas_host */ 6211 /* free ports attached to the sas_host */
5720 retry_again: 6212 retry_again:
5721 list_for_each_entry(mpt2sas_port, 6213 list_for_each_entry(mpt2sas_port,
@@ -5726,7 +6218,7 @@ _scsih_remove(struct pci_dev *pdev)
5726 mpt2sas_scsih_sas_device_find_by_sas_address(ioc, 6218 mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
5727 mpt2sas_port->remote_identify.sas_address); 6219 mpt2sas_port->remote_identify.sas_address);
5728 if (sas_device) { 6220 if (sas_device) {
5729 _scsih_remove_device(ioc, sas_device->handle); 6221 _scsih_remove_device(ioc, sas_device);
5730 goto retry_again; 6222 goto retry_again;
5731 } 6223 }
5732 } else { 6224 } else {
@@ -5735,7 +6227,7 @@ _scsih_remove(struct pci_dev *pdev)
5735 mpt2sas_port->remote_identify.sas_address); 6227 mpt2sas_port->remote_identify.sas_address);
5736 if (expander_sibling) { 6228 if (expander_sibling) {
5737 _scsih_expander_remove(ioc, 6229 _scsih_expander_remove(ioc,
5738 expander_sibling->handle); 6230 expander_sibling->sas_address);
5739 goto retry_again; 6231 goto retry_again;
5740 } 6232 }
5741 } 6233 }
@@ -5749,7 +6241,7 @@ _scsih_remove(struct pci_dev *pdev)
5749 } 6241 }
5750 6242
5751 sas_remove_host(shost); 6243 sas_remove_host(shost);
5752 mpt2sas_base_detach(ioc); 6244 _scsih_shutdown(pdev);
5753 list_del(&ioc->list); 6245 list_del(&ioc->list);
5754 scsi_remove_host(shost); 6246 scsi_remove_host(shost);
5755 scsi_host_put(shost); 6247 scsi_host_put(shost);
@@ -5770,7 +6262,8 @@ _scsih_probe_boot_devices(struct MPT2SAS_ADAPTER *ioc)
5770 void *device; 6262 void *device;
5771 struct _sas_device *sas_device; 6263 struct _sas_device *sas_device;
5772 struct _raid_device *raid_device; 6264 struct _raid_device *raid_device;
5773 u16 handle, parent_handle; 6265 u16 handle;
6266 u64 sas_address_parent;
5774 u64 sas_address; 6267 u64 sas_address;
5775 unsigned long flags; 6268 unsigned long flags;
5776 int rc; 6269 int rc;
@@ -5799,17 +6292,17 @@ _scsih_probe_boot_devices(struct MPT2SAS_ADAPTER *ioc)
5799 } else { 6292 } else {
5800 sas_device = device; 6293 sas_device = device;
5801 handle = sas_device->handle; 6294 handle = sas_device->handle;
5802 parent_handle = sas_device->parent_handle; 6295 sas_address_parent = sas_device->sas_address_parent;
5803 sas_address = sas_device->sas_address; 6296 sas_address = sas_device->sas_address;
5804 spin_lock_irqsave(&ioc->sas_device_lock, flags); 6297 spin_lock_irqsave(&ioc->sas_device_lock, flags);
5805 list_move_tail(&sas_device->list, &ioc->sas_device_list); 6298 list_move_tail(&sas_device->list, &ioc->sas_device_list);
5806 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 6299 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5807 if (!mpt2sas_transport_port_add(ioc, sas_device->handle, 6300 if (!mpt2sas_transport_port_add(ioc, sas_device->handle,
5808 sas_device->parent_handle)) { 6301 sas_device->sas_address_parent)) {
5809 _scsih_sas_device_remove(ioc, sas_device); 6302 _scsih_sas_device_remove(ioc, sas_device);
5810 } else if (!sas_device->starget) { 6303 } else if (!sas_device->starget) {
5811 mpt2sas_transport_port_remove(ioc, sas_address, 6304 mpt2sas_transport_port_remove(ioc, sas_address,
5812 parent_handle); 6305 sas_address_parent);
5813 _scsih_sas_device_remove(ioc, sas_device); 6306 _scsih_sas_device_remove(ioc, sas_device);
5814 } 6307 }
5815 } 6308 }
@@ -5849,8 +6342,6 @@ _scsih_probe_sas(struct MPT2SAS_ADAPTER *ioc)
5849{ 6342{
5850 struct _sas_device *sas_device, *next; 6343 struct _sas_device *sas_device, *next;
5851 unsigned long flags; 6344 unsigned long flags;
5852 u16 handle, parent_handle;
5853 u64 sas_address;
5854 6345
5855 /* SAS Device List */ 6346 /* SAS Device List */
5856 list_for_each_entry_safe(sas_device, next, &ioc->sas_device_init_list, 6347 list_for_each_entry_safe(sas_device, next, &ioc->sas_device_init_list,
@@ -5859,14 +6350,13 @@ _scsih_probe_sas(struct MPT2SAS_ADAPTER *ioc)
5859 list_move_tail(&sas_device->list, &ioc->sas_device_list); 6350 list_move_tail(&sas_device->list, &ioc->sas_device_list);
5860 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 6351 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5861 6352
5862 handle = sas_device->handle; 6353 if (!mpt2sas_transport_port_add(ioc, sas_device->handle,
5863 parent_handle = sas_device->parent_handle; 6354 sas_device->sas_address_parent)) {
5864 sas_address = sas_device->sas_address;
5865 if (!mpt2sas_transport_port_add(ioc, handle, parent_handle)) {
5866 _scsih_sas_device_remove(ioc, sas_device); 6355 _scsih_sas_device_remove(ioc, sas_device);
5867 } else if (!sas_device->starget) { 6356 } else if (!sas_device->starget) {
5868 mpt2sas_transport_port_remove(ioc, sas_address, 6357 mpt2sas_transport_port_remove(ioc,
5869 parent_handle); 6358 sas_device->sas_address,
6359 sas_device->sas_address_parent);
5870 _scsih_sas_device_remove(ioc, sas_device); 6360 _scsih_sas_device_remove(ioc, sas_device);
5871 } 6361 }
5872 } 6362 }
@@ -5935,6 +6425,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5935 ioc->ctl_cb_idx = ctl_cb_idx; 6425 ioc->ctl_cb_idx = ctl_cb_idx;
5936 ioc->base_cb_idx = base_cb_idx; 6426 ioc->base_cb_idx = base_cb_idx;
5937 ioc->transport_cb_idx = transport_cb_idx; 6427 ioc->transport_cb_idx = transport_cb_idx;
6428 ioc->scsih_cb_idx = scsih_cb_idx;
5938 ioc->config_cb_idx = config_cb_idx; 6429 ioc->config_cb_idx = config_cb_idx;
5939 ioc->tm_tr_cb_idx = tm_tr_cb_idx; 6430 ioc->tm_tr_cb_idx = tm_tr_cb_idx;
5940 ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx; 6431 ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
@@ -6072,12 +6563,20 @@ static struct pci_driver scsih_driver = {
6072 .id_table = scsih_pci_table, 6563 .id_table = scsih_pci_table,
6073 .probe = _scsih_probe, 6564 .probe = _scsih_probe,
6074 .remove = __devexit_p(_scsih_remove), 6565 .remove = __devexit_p(_scsih_remove),
6566 .shutdown = _scsih_shutdown,
6075#ifdef CONFIG_PM 6567#ifdef CONFIG_PM
6076 .suspend = _scsih_suspend, 6568 .suspend = _scsih_suspend,
6077 .resume = _scsih_resume, 6569 .resume = _scsih_resume,
6078#endif 6570#endif
6079}; 6571};
6080 6572
6573/* raid transport support */
6574static struct raid_function_template mpt2sas_raid_functions = {
6575 .cookie = &scsih_driver_template,
6576 .is_raid = _scsih_is_raid,
6577 .get_resync = _scsih_get_resync,
6578 .get_state = _scsih_get_state,
6579};
6081 6580
6082/** 6581/**
6083 * _scsih_init - main entry point for this driver. 6582 * _scsih_init - main entry point for this driver.
@@ -6097,6 +6596,12 @@ _scsih_init(void)
6097 sas_attach_transport(&mpt2sas_transport_functions); 6596 sas_attach_transport(&mpt2sas_transport_functions);
6098 if (!mpt2sas_transport_template) 6597 if (!mpt2sas_transport_template)
6099 return -ENODEV; 6598 return -ENODEV;
6599 /* raid transport support */
6600 mpt2sas_raid_template = raid_class_attach(&mpt2sas_raid_functions);
6601 if (!mpt2sas_raid_template) {
6602 sas_release_transport(mpt2sas_transport_template);
6603 return -ENODEV;
6604 }
6100 6605
6101 mpt2sas_base_initialize_callback_handler(); 6606 mpt2sas_base_initialize_callback_handler();
6102 6607
@@ -6113,6 +6618,9 @@ _scsih_init(void)
6113 transport_cb_idx = mpt2sas_base_register_callback_handler( 6618 transport_cb_idx = mpt2sas_base_register_callback_handler(
6114 mpt2sas_transport_done); 6619 mpt2sas_transport_done);
6115 6620
6621 /* scsih internal commands callback handler */
6622 scsih_cb_idx = mpt2sas_base_register_callback_handler(_scsih_done);
6623
6116 /* configuration page API internal commands callback handler */ 6624 /* configuration page API internal commands callback handler */
6117 config_cb_idx = mpt2sas_base_register_callback_handler( 6625 config_cb_idx = mpt2sas_base_register_callback_handler(
6118 mpt2sas_config_done); 6626 mpt2sas_config_done);
@@ -6128,8 +6636,11 @@ _scsih_init(void)
6128 mpt2sas_ctl_init(); 6636 mpt2sas_ctl_init();
6129 6637
6130 error = pci_register_driver(&scsih_driver); 6638 error = pci_register_driver(&scsih_driver);
6131 if (error) 6639 if (error) {
6640 /* raid transport support */
6641 raid_class_release(mpt2sas_raid_template);
6132 sas_release_transport(mpt2sas_transport_template); 6642 sas_release_transport(mpt2sas_transport_template);
6643 }
6133 6644
6134 return error; 6645 return error;
6135} 6646}
@@ -6147,18 +6658,23 @@ _scsih_exit(void)
6147 6658
6148 pci_unregister_driver(&scsih_driver); 6659 pci_unregister_driver(&scsih_driver);
6149 6660
6150 sas_release_transport(mpt2sas_transport_template); 6661 mpt2sas_ctl_exit();
6662
6151 mpt2sas_base_release_callback_handler(scsi_io_cb_idx); 6663 mpt2sas_base_release_callback_handler(scsi_io_cb_idx);
6152 mpt2sas_base_release_callback_handler(tm_cb_idx); 6664 mpt2sas_base_release_callback_handler(tm_cb_idx);
6153 mpt2sas_base_release_callback_handler(base_cb_idx); 6665 mpt2sas_base_release_callback_handler(base_cb_idx);
6154 mpt2sas_base_release_callback_handler(transport_cb_idx); 6666 mpt2sas_base_release_callback_handler(transport_cb_idx);
6667 mpt2sas_base_release_callback_handler(scsih_cb_idx);
6155 mpt2sas_base_release_callback_handler(config_cb_idx); 6668 mpt2sas_base_release_callback_handler(config_cb_idx);
6156 mpt2sas_base_release_callback_handler(ctl_cb_idx); 6669 mpt2sas_base_release_callback_handler(ctl_cb_idx);
6157 6670
6158 mpt2sas_base_release_callback_handler(tm_tr_cb_idx); 6671 mpt2sas_base_release_callback_handler(tm_tr_cb_idx);
6159 mpt2sas_base_release_callback_handler(tm_sas_control_cb_idx); 6672 mpt2sas_base_release_callback_handler(tm_sas_control_cb_idx);
6160 6673
6161 mpt2sas_ctl_exit(); 6674 /* raid transport support */
6675 raid_class_release(mpt2sas_raid_template);
6676 sas_release_transport(mpt2sas_transport_template);
6677
6162} 6678}
6163 6679
6164module_init(_scsih_init); 6680module_init(_scsih_init);
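
The init/exit hunks keep acquisition and release strictly mirrored: the raid class is attached after the SAS transport and released before it, and a pci_register_driver() failure unwinds only what was already attached. A small sketch of that reverse-order unwind (the functions are stand-ins; register_driver() fails on purpose to exercise the path):

#include <stdio.h>

static int attach_transport(void) { return 0; }
static int attach_raid(void)      { return 0; }
static int register_driver(void)  { return -1; }  /* force the unwind */
static void release_raid(void)      { puts("raid released"); }
static void release_transport(void) { puts("transport released"); }

int init(void)
{
        if (attach_transport())
                return -1;
        if (attach_raid())
                goto err_transport;
        if (register_driver())
                goto err_raid;                /* unwind in reverse order */
        return 0;
err_raid:
        release_raid();
err_transport:
        release_transport();
        return -1;
}

int main(void) { return init() ? 1 : 0; }
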
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index eb98188c7f3f..bd7ca2b49f81 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -49,6 +49,7 @@
49#include <linux/workqueue.h> 49#include <linux/workqueue.h>
50#include <linux/delay.h> 50#include <linux/delay.h>
51#include <linux/pci.h> 51#include <linux/pci.h>
52#include <linux/slab.h>
52 53
53#include <scsi/scsi.h> 54#include <scsi/scsi.h>
54#include <scsi/scsi_cmnd.h> 55#include <scsi/scsi_cmnd.h>
@@ -59,24 +60,23 @@
59 60
60#include "mpt2sas_base.h" 61#include "mpt2sas_base.h"
61/** 62/**
62 * _transport_sas_node_find_by_handle - sas node search 63 * _transport_sas_node_find_by_sas_address - sas node search
63 * @ioc: per adapter object 64 * @ioc: per adapter object
64 * @handle: expander or hba handle (assigned by firmware) 65 * @sas_address: sas address of expander or sas host
65 * Context: Calling function should acquire ioc->sas_node_lock. 66 * Context: Calling function should acquire ioc->sas_node_lock.
66 * 67 *
67 * Search for either hba phys or expander device based on handle, then returns 68 * Search for either hba phys or expander device based on handle, then returns
68 * the sas_node object. 69 * the sas_node object.
69 */ 70 */
70static struct _sas_node * 71static struct _sas_node *
71_transport_sas_node_find_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle) 72_transport_sas_node_find_by_sas_address(struct MPT2SAS_ADAPTER *ioc,
73 u64 sas_address)
72{ 74{
73 int i; 75 if (ioc->sas_hba.sas_address == sas_address)
74 76 return &ioc->sas_hba;
75 for (i = 0; i < ioc->sas_hba.num_phys; i++) 77 else
76 if (ioc->sas_hba.phy[i].handle == handle) 78 return mpt2sas_scsih_expander_find_by_sas_address(ioc,
77 return &ioc->sas_hba; 79 sas_address);
78
79 return mpt2sas_scsih_expander_find_by_handle(ioc, handle);
80} 80}
81 81
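
Keying the node lookup by SAS address rather than firmware handle matters because handles are reassigned across resets while addresses stay stable; the host is checked first, then the expander list. A stand-in sketch (the address value is hypothetical and the expander walk is elided):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct sas_node { uint64_t sas_address; };

static struct sas_node hba = { 0x5000155d00000000ULL };  /* hypothetical */

static struct sas_node *find_expander(uint64_t addr)
{
        (void)addr;
        return NULL;    /* expander-list walk elided in this sketch */
}

/* Host first (its address survives resets), then expanders; this
 * replaces the old per-phy handle scan. */
static struct sas_node *find_node(uint64_t sas_address)
{
        if (hba.sas_address == sas_address)
                return &hba;
        return find_expander(sas_address);
}

int main(void)
{
        printf("%s\n", find_node(0x5000155d00000000ULL) ? "hba" : "none");
        return 0;
}
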
82/** 82/**
@@ -259,8 +259,7 @@ struct rep_manu_reply{
259 u8 response_length; 259 u8 response_length;
260 u16 expander_change_count; 260 u16 expander_change_count;
261 u8 reserved0[2]; 261 u8 reserved0[2];
262 u8 sas_format:1; 262 u8 sas_format;
263 u8 reserved1:7;
264 u8 reserved2[3]; 263 u8 reserved2[3];
265 u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN]; 264 u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN];
266 u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN]; 265 u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN];
@@ -375,7 +374,8 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
375 mpi_request->VP_ID = 0; 374 mpi_request->VP_ID = 0;
376 sas_address_le = (u64 *)&mpi_request->SASAddress; 375 sas_address_le = (u64 *)&mpi_request->SASAddress;
377 *sas_address_le = cpu_to_le64(sas_address); 376 *sas_address_le = cpu_to_le64(sas_address);
378 mpi_request->RequestDataLength = sizeof(struct rep_manu_request); 377 mpi_request->RequestDataLength =
378 cpu_to_le16(sizeof(struct rep_manu_request));
379 psge = &mpi_request->SGL; 379 psge = &mpi_request->SGL;
380 380
381 /* WRITE sgel first */ 381 /* WRITE sgel first */
@@ -438,8 +438,8 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
438 SAS_EXPANDER_PRODUCT_ID_LEN); 438 SAS_EXPANDER_PRODUCT_ID_LEN);
439 strncpy(edev->product_rev, manufacture_reply->product_rev, 439 strncpy(edev->product_rev, manufacture_reply->product_rev,
440 SAS_EXPANDER_PRODUCT_REV_LEN); 440 SAS_EXPANDER_PRODUCT_REV_LEN);
441 edev->level = manufacture_reply->sas_format; 441 edev->level = manufacture_reply->sas_format & 1;
442 if (manufacture_reply->sas_format) { 442 if (edev->level) {
443 strncpy(edev->component_vendor_id, 443 strncpy(edev->component_vendor_id,
444 manufacture_reply->component_vendor_id, 444 manufacture_reply->component_vendor_id,
445 SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN); 445 SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN);
@@ -469,7 +469,7 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
469 * mpt2sas_transport_port_add - insert port to the list 469 * mpt2sas_transport_port_add - insert port to the list
470 * @ioc: per adapter object 470 * @ioc: per adapter object
471 * @handle: handle of attached device 471 * @handle: handle of attached device
472 * @parent_handle: parent handle(either hba or expander) 472 * @sas_address: sas address of parent expander or sas host
473 * Context: This function will acquire ioc->sas_node_lock. 473 * Context: This function will acquire ioc->sas_node_lock.
474 * 474 *
475 * Adding new port object to the sas_node->sas_port_list. 475 * Adding new port object to the sas_node->sas_port_list.
@@ -478,7 +478,7 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
478 */ 478 */
479struct _sas_port * 479struct _sas_port *
480mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, u16 handle, 480mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, u16 handle,
481 u16 parent_handle) 481 u64 sas_address)
482{ 482{
483 struct _sas_phy *mpt2sas_phy, *next; 483 struct _sas_phy *mpt2sas_phy, *next;
484 struct _sas_port *mpt2sas_port; 484 struct _sas_port *mpt2sas_port;
@@ -488,9 +488,6 @@ mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, u16 handle,
488 int i; 488 int i;
489 struct sas_port *port; 489 struct sas_port *port;
490 490
491 if (!parent_handle)
492 return NULL;
493
494 mpt2sas_port = kzalloc(sizeof(struct _sas_port), 491 mpt2sas_port = kzalloc(sizeof(struct _sas_port),
495 GFP_KERNEL); 492 GFP_KERNEL);
496 if (!mpt2sas_port) { 493 if (!mpt2sas_port) {
@@ -502,17 +499,16 @@ mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, u16 handle,
502 INIT_LIST_HEAD(&mpt2sas_port->port_list); 499 INIT_LIST_HEAD(&mpt2sas_port->port_list);
503 INIT_LIST_HEAD(&mpt2sas_port->phy_list); 500 INIT_LIST_HEAD(&mpt2sas_port->phy_list);
504 spin_lock_irqsave(&ioc->sas_node_lock, flags); 501 spin_lock_irqsave(&ioc->sas_node_lock, flags);
505 sas_node = _transport_sas_node_find_by_handle(ioc, parent_handle); 502 sas_node = _transport_sas_node_find_by_sas_address(ioc, sas_address);
506 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 503 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
507 504
508 if (!sas_node) { 505 if (!sas_node) {
509 printk(MPT2SAS_ERR_FMT "%s: Could not find parent(0x%04x)!\n", 506 printk(MPT2SAS_ERR_FMT "%s: Could not find "
510 ioc->name, __func__, parent_handle); 507 "parent sas_address(0x%016llx)!\n", ioc->name,
508 __func__, (unsigned long long)sas_address);
511 goto out_fail; 509 goto out_fail;
512 } 510 }
513 511
514 mpt2sas_port->handle = parent_handle;
515 mpt2sas_port->sas_address = sas_node->sas_address;
516 if ((_transport_set_identify(ioc, handle, 512 if ((_transport_set_identify(ioc, handle,
517 &mpt2sas_port->remote_identify))) { 513 &mpt2sas_port->remote_identify))) {
518 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", 514 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
@@ -604,7 +600,7 @@ mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, u16 handle,
604 * mpt2sas_transport_port_remove - remove port from the list 600 * mpt2sas_transport_port_remove - remove port from the list
605 * @ioc: per adapter object 601 * @ioc: per adapter object
606 * @sas_address: sas address of attached device 602 * @sas_address: sas address of attached device
607 * @parent_handle: handle to the upstream parent(either hba or expander) 603 * @sas_address_parent: sas address of parent expander or sas host
608 * Context: This function will acquire ioc->sas_node_lock. 604 * Context: This function will acquire ioc->sas_node_lock.
609 * 605 *
610 * Removing object and freeing associated memory from the 606 * Removing object and freeing associated memory from the
@@ -614,7 +610,7 @@ mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, u16 handle,
614 */ 610 */
615void 611void
616mpt2sas_transport_port_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address, 612mpt2sas_transport_port_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
617 u16 parent_handle) 613 u64 sas_address_parent)
618{ 614{
619 int i; 615 int i;
620 unsigned long flags; 616 unsigned long flags;
@@ -624,7 +620,8 @@ mpt2sas_transport_port_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
624 struct _sas_phy *mpt2sas_phy, *next_phy; 620 struct _sas_phy *mpt2sas_phy, *next_phy;
625 621
626 spin_lock_irqsave(&ioc->sas_node_lock, flags); 622 spin_lock_irqsave(&ioc->sas_node_lock, flags);
627 sas_node = _transport_sas_node_find_by_handle(ioc, parent_handle); 623 sas_node = _transport_sas_node_find_by_sas_address(ioc,
624 sas_address_parent);
628 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 625 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
629 if (!sas_node) 626 if (!sas_node)
630 return; 627 return;
@@ -650,8 +647,7 @@ mpt2sas_transport_port_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
650 &mpt2sas_port->phy_list, port_siblings) { 647 &mpt2sas_port->phy_list, port_siblings) {
651 if ((ioc->logging_level & MPT_DEBUG_TRANSPORT)) 648 if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
652 dev_printk(KERN_INFO, &mpt2sas_port->port->dev, 649 dev_printk(KERN_INFO, &mpt2sas_port->port->dev,
653 "remove: parent_handle(0x%04x), " 650 "remove: sas_addr(0x%016llx), phy(%d)\n",
654 "sas_addr(0x%016llx), phy(%d)\n", parent_handle,
655 (unsigned long long) 651 (unsigned long long)
656 mpt2sas_port->remote_identify.sas_address, 652 mpt2sas_port->remote_identify.sas_address,
657 mpt2sas_phy->phy_id); 653 mpt2sas_phy->phy_id);
@@ -799,8 +795,8 @@ mpt2sas_transport_add_expander_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_phy
799/** 795/**
800 * mpt2sas_transport_update_links - refreshing phy link changes 796 * mpt2sas_transport_update_links - refreshing phy link changes
801 * @ioc: per adapter object 797 * @ioc: per adapter object
802 * @handle: handle to sas_host or expander 798 * @sas_address: sas address of parent expander or sas host
803 * @attached_handle: attached device handle 799 * @handle: attached device handle
804 * @phy_number: phy number 800 * @phy_number: phy number
805 * @link_rate: new link rate 801 * @link_rate: new link rate
806 * 802 *
@@ -808,28 +804,25 @@ mpt2sas_transport_add_expander_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_phy
808 */ 804 */
809void 805void
810mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc, 806mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc,
811 u16 handle, u16 attached_handle, u8 phy_number, u8 link_rate) 807 u64 sas_address, u16 handle, u8 phy_number, u8 link_rate)
812{ 808{
813 unsigned long flags; 809 unsigned long flags;
814 struct _sas_node *sas_node; 810 struct _sas_node *sas_node;
815 struct _sas_phy *mpt2sas_phy; 811 struct _sas_phy *mpt2sas_phy;
816 812
817 if (ioc->shost_recovery) { 813 if (ioc->shost_recovery)
818 printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
819 __func__, ioc->name);
820 return; 814 return;
821 }
822 815
823 spin_lock_irqsave(&ioc->sas_node_lock, flags); 816 spin_lock_irqsave(&ioc->sas_node_lock, flags);
824 sas_node = _transport_sas_node_find_by_handle(ioc, handle); 817 sas_node = _transport_sas_node_find_by_sas_address(ioc, sas_address);
825 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 818 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
826 if (!sas_node) 819 if (!sas_node)
827 return; 820 return;
828 821
829 mpt2sas_phy = &sas_node->phy[phy_number]; 822 mpt2sas_phy = &sas_node->phy[phy_number];
830 mpt2sas_phy->attached_handle = attached_handle; 823 mpt2sas_phy->attached_handle = handle;
831 if (attached_handle && (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) 824 if (handle && (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5))
832 _transport_set_identify(ioc, mpt2sas_phy->attached_handle, 825 _transport_set_identify(ioc, handle,
833 &mpt2sas_phy->remote_identify); 826 &mpt2sas_phy->remote_identify);
834 else 827 else
835 memset(&mpt2sas_phy->remote_identify, 0 , sizeof(struct 828 memset(&mpt2sas_phy->remote_identify, 0 , sizeof(struct
@@ -841,13 +834,11 @@ mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc,
841 834
842 if ((ioc->logging_level & MPT_DEBUG_TRANSPORT)) 835 if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
843 dev_printk(KERN_INFO, &mpt2sas_phy->phy->dev, 836 dev_printk(KERN_INFO, &mpt2sas_phy->phy->dev,
844 "refresh: handle(0x%04x), sas_addr(0x%016llx),\n" 837 "refresh: parent sas_addr(0x%016llx),\n"
845 "\tlink_rate(0x%02x), phy(%d)\n" 838 "\tlink_rate(0x%02x), phy(%d)\n"
846 "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n", 839 "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n",
847 handle, (unsigned long long) 840 (unsigned long long)sas_address,
848 mpt2sas_phy->identify.sas_address, link_rate, 841 link_rate, phy_number, handle, (unsigned long long)
849 phy_number, attached_handle,
850 (unsigned long long)
851 mpt2sas_phy->remote_identify.sas_address); 842 mpt2sas_phy->remote_identify.sas_address);
852} 843}
853 844
@@ -865,6 +856,17 @@ rphy_to_ioc(struct sas_rphy *rphy)
865 return shost_priv(shost); 856 return shost_priv(shost);
866} 857}
867 858
859static struct _sas_phy *
860_transport_find_local_phy(struct MPT2SAS_ADAPTER *ioc, struct sas_phy *phy)
861{
862 int i;
863
864 for (i = 0; i < ioc->sas_hba.num_phys; i++)
865 if (ioc->sas_hba.phy[i].phy == phy)
866 return(&ioc->sas_hba.phy[i]);
867 return NULL;
868}
869
868/** 870/**
869 * _transport_get_linkerrors - 871 * _transport_get_linkerrors -
870 * @phy: The sas phy object 872 * @phy: The sas phy object
@@ -880,14 +882,8 @@ _transport_get_linkerrors(struct sas_phy *phy)
880 struct _sas_phy *mpt2sas_phy; 882 struct _sas_phy *mpt2sas_phy;
881 Mpi2ConfigReply_t mpi_reply; 883 Mpi2ConfigReply_t mpi_reply;
882 Mpi2SasPhyPage1_t phy_pg1; 884 Mpi2SasPhyPage1_t phy_pg1;
883 int i;
884 885
885 for (i = 0, mpt2sas_phy = NULL; i < ioc->sas_hba.num_phys && 886 mpt2sas_phy = _transport_find_local_phy(ioc, phy);
886 !mpt2sas_phy; i++) {
887 if (ioc->sas_hba.phy[i].phy != phy)
888 continue;
889 mpt2sas_phy = &ioc->sas_hba.phy[i];
890 }
891 887
892 if (!mpt2sas_phy) /* this phy not on sas_host */ 888 if (!mpt2sas_phy) /* this phy not on sas_host */
893 return -EINVAL; 889 return -EINVAL;
@@ -981,14 +977,8 @@ _transport_phy_reset(struct sas_phy *phy, int hard_reset)
981 struct _sas_phy *mpt2sas_phy; 977 struct _sas_phy *mpt2sas_phy;
982 Mpi2SasIoUnitControlReply_t mpi_reply; 978 Mpi2SasIoUnitControlReply_t mpi_reply;
983 Mpi2SasIoUnitControlRequest_t mpi_request; 979 Mpi2SasIoUnitControlRequest_t mpi_request;
984 int i;
985 980
986 for (i = 0, mpt2sas_phy = NULL; i < ioc->sas_hba.num_phys && 981 mpt2sas_phy = _transport_find_local_phy(ioc, phy);
987 !mpt2sas_phy; i++) {
988 if (ioc->sas_hba.phy[i].phy != phy)
989 continue;
990 mpt2sas_phy = &ioc->sas_hba.phy[i];
991 }
992 982
993 if (!mpt2sas_phy) /* this phy not on sas_host */ 983 if (!mpt2sas_phy) /* this phy not on sas_host */
994 return -EINVAL; 984 return -EINVAL;
@@ -1016,6 +1006,173 @@ _transport_phy_reset(struct sas_phy *phy, int hard_reset)
1016} 1006}
1017 1007
1018/** 1008/**
1009 * _transport_phy_enable - enable/disable phys
1010 * @phy: The sas phy object
1011 * @enable: enable phy when true
1012 *
1013 * Only support sas_host direct attached phys.
1014 * Returns 0 for success, non-zero for failure.
1015 */
1016static int
1017_transport_phy_enable(struct sas_phy *phy, int enable)
1018{
1019 struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy);
1020 struct _sas_phy *mpt2sas_phy;
1021 Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
1022 Mpi2ConfigReply_t mpi_reply;
1023 u16 ioc_status;
1024 u16 sz;
1025 int rc = 0;
1026
1027 mpt2sas_phy = _transport_find_local_phy(ioc, phy);
1028
1029 if (!mpt2sas_phy) /* this phy not on sas_host */
1030 return -EINVAL;
1031
1032 /* sas_iounit page 1 */
1033 sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
1034 sizeof(Mpi2SasIOUnit1PhyData_t));
1035 sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
1036 if (!sas_iounit_pg1) {
1037 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
1038 ioc->name, __FILE__, __LINE__, __func__);
1039 rc = -ENOMEM;
1040 goto out;
1041 }
1042 if ((mpt2sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
1043 sas_iounit_pg1, sz))) {
1044 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
1045 ioc->name, __FILE__, __LINE__, __func__);
1046 rc = -ENXIO;
1047 goto out;
1048 }
1049 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
1050 MPI2_IOCSTATUS_MASK;
1051 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
1052 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
1053 ioc->name, __FILE__, __LINE__, __func__);
1054 rc = -EIO;
1055 goto out;
1056 }
1057
1058 if (enable)
1059 sas_iounit_pg1->PhyData[mpt2sas_phy->phy_id].PhyFlags
1060 &= ~MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE;
1061 else
1062 sas_iounit_pg1->PhyData[mpt2sas_phy->phy_id].PhyFlags
1063 |= MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE;
1064
1065 mpt2sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1, sz);
1066
1067 out:
1068 kfree(sas_iounit_pg1);
1069 return rc;
1070}
1071
1072/**
1073 * _transport_phy_speed - set phy min/max link rates
1074 * @phy: The sas phy object
1075 * @rates: rates defined in sas_phy_linkrates
1076 *
1077 * Only support sas_host direct attached phys.
1078 * Returns 0 for success, non-zero for failure.
1079 */
1080static int
1081_transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
1082{
1083 struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy);
1084 struct _sas_phy *mpt2sas_phy;
1085 Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
1086 Mpi2SasPhyPage0_t phy_pg0;
1087 Mpi2ConfigReply_t mpi_reply;
1088 u16 ioc_status;
1089 u16 sz;
1090 int i;
1091 int rc = 0;
1092
1093 mpt2sas_phy = _transport_find_local_phy(ioc, phy);
1094
1095 if (!mpt2sas_phy) /* this phy not on sas_host */
1096 return -EINVAL;
1097
1098 if (!rates->minimum_linkrate)
1099 rates->minimum_linkrate = phy->minimum_linkrate;
1100 else if (rates->minimum_linkrate < phy->minimum_linkrate_hw)
1101 rates->minimum_linkrate = phy->minimum_linkrate_hw;
1102
1103 if (!rates->maximum_linkrate)
1104 rates->maximum_linkrate = phy->maximum_linkrate;
1105 else if (rates->maximum_linkrate > phy->maximum_linkrate_hw)
1106 rates->maximum_linkrate = phy->maximum_linkrate_hw;
1107
1108 /* sas_iounit page 1 */
1109 sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
1110 sizeof(Mpi2SasIOUnit1PhyData_t));
1111 sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
1112 if (!sas_iounit_pg1) {
1113 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
1114 ioc->name, __FILE__, __LINE__, __func__);
1115 rc = -ENOMEM;
1116 goto out;
1117 }
1118 if ((mpt2sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
1119 sas_iounit_pg1, sz))) {
1120 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
1121 ioc->name, __FILE__, __LINE__, __func__);
1122 rc = -ENXIO;
1123 goto out;
1124 }
1125 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
1126 MPI2_IOCSTATUS_MASK;
1127 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
1128 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
1129 ioc->name, __FILE__, __LINE__, __func__);
1130 rc = -EIO;
1131 goto out;
1132 }
1133
1134 for (i = 0; i < ioc->sas_hba.num_phys; i++) {
1135 if (mpt2sas_phy->phy_id != i) {
1136 sas_iounit_pg1->PhyData[i].MaxMinLinkRate =
1137 (ioc->sas_hba.phy[i].phy->minimum_linkrate +
1138 (ioc->sas_hba.phy[i].phy->maximum_linkrate << 4));
1139 } else {
1140 sas_iounit_pg1->PhyData[i].MaxMinLinkRate =
1141 (rates->minimum_linkrate +
1142 (rates->maximum_linkrate << 4));
1143 }
1144 }
1145
1146 if (mpt2sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
1147 sz)) {
1148 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
1149 ioc->name, __FILE__, __LINE__, __func__);
1150 rc = -ENXIO;
1151 goto out;
1152 }
1153
1154 /* link reset */
1155 _transport_phy_reset(phy, 0);
1156
1157 /* read phy page 0, then update the rates in the sas transport phy */
1158 if (!mpt2sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
1159 mpt2sas_phy->phy_id)) {
1160 phy->minimum_linkrate = _transport_convert_phy_link_rate(
1161 phy_pg0.ProgrammedLinkRate & MPI2_SAS_PRATE_MIN_RATE_MASK);
1162 phy->maximum_linkrate = _transport_convert_phy_link_rate(
1163 phy_pg0.ProgrammedLinkRate >> 4);
1164 phy->negotiated_linkrate = _transport_convert_phy_link_rate(
1165 phy_pg0.NegotiatedLinkRate &
1166 MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL);
1167 }
1168
1169 out:
1170 kfree(sas_iounit_pg1);
1171 return rc;
1172}
1173
1174
1175/**
1019 * _transport_smp_handler - transport portal for smp passthru 1176 * _transport_smp_handler - transport portal for smp passthru
1020 * @shost: shost object 1177 * @shost: shost object
1021 * @rphy: sas transport rphy object 1178 * @rphy: sas transport rphy object
@@ -1126,7 +1283,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1126 dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio), 1283 dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
1127 blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL); 1284 blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
1128 if (!dma_addr_out) { 1285 if (!dma_addr_out) {
1129 mpt2sas_base_free_smid(ioc, le16_to_cpu(smid)); 1286 mpt2sas_base_free_smid(ioc, smid);
1130 goto unmap; 1287 goto unmap;
1131 } 1288 }
1132 1289
@@ -1144,7 +1301,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1144 dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio), 1301 dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio),
1145 blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL); 1302 blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
1146 if (!dma_addr_in) { 1303 if (!dma_addr_in) {
1147 mpt2sas_base_free_smid(ioc, le16_to_cpu(smid)); 1304 mpt2sas_base_free_smid(ioc, smid);
1148 goto unmap; 1305 goto unmap;
1149 } 1306 }
1150 1307
@@ -1217,6 +1374,8 @@ struct sas_function_template mpt2sas_transport_functions = {
1217 .get_enclosure_identifier = _transport_get_enclosure_identifier, 1374 .get_enclosure_identifier = _transport_get_enclosure_identifier,
1218 .get_bay_identifier = _transport_get_bay_identifier, 1375 .get_bay_identifier = _transport_get_bay_identifier,
1219 .phy_reset = _transport_phy_reset, 1376 .phy_reset = _transport_phy_reset,
1377 .phy_enable = _transport_phy_enable,
1378 .set_phy_speed = _transport_phy_speed,
1220 .smp_handler = _transport_smp_handler, 1379 .smp_handler = _transport_smp_handler,
1221}; 1380};
1222 1381
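The mpt2sas_transport.c changes above swap firmware-assigned handles for 64-bit SAS addresses when locating a parent node: handles can be reassigned across controller resets, while a SAS address identifies the same host or expander for the life of the topology. A minimal user-space sketch of the new lookup shape — the types and list layout here are illustrative stand-ins, not the driver's real structures:

#include <stdint.h>
#include <stddef.h>

struct sas_node {
	uint64_t sas_address;
	struct sas_node *next;		/* next expander in the adapter's list */
};

struct adapter {
	struct sas_node sas_hba;	/* the SAS host node itself */
	struct sas_node *expanders;	/* attached expanders */
};

/* Mirrors _transport_sas_node_find_by_sas_address(): the host is checked
 * first, then the expander list is walked; NULL means the parent is gone. */
static struct sas_node *
find_node_by_sas_address(struct adapter *ioc, uint64_t sas_address)
{
	struct sas_node *n;

	if (ioc->sas_hba.sas_address == sas_address)
		return &ioc->sas_hba;
	for (n = ioc->expanders; n; n = n->next)
		if (n->sas_address == sas_address)
			return n;
	return NULL;
}

The same file also folds the duplicated HBA-phy scan in _transport_get_linkerrors() and _transport_phy_reset() into _transport_find_local_phy(), which the new phy_enable and set_phy_speed callbacks then reuse.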
diff --git a/drivers/scsi/mvme16x_scsi.c b/drivers/scsi/mvme16x_scsi.c
index b5fbfd6ce870..39f554f5f261 100644
--- a/drivers/scsi/mvme16x_scsi.c
+++ b/drivers/scsi/mvme16x_scsi.c
@@ -12,6 +12,7 @@
12#include <linux/platform_device.h> 12#include <linux/platform_device.h>
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/interrupt.h> 14#include <linux/interrupt.h>
15#include <linux/slab.h>
15#include <asm/mvme16xhw.h> 16#include <asm/mvme16xhw.h>
16#include <scsi/scsi_host.h> 17#include <scsi/scsi_host.h>
17#include <scsi/scsi_device.h> 18#include <scsi/scsi_device.h>
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index c790d45876c4..cae6b2cf492f 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -657,6 +657,7 @@ static struct pci_device_id __devinitdata mvs_pci_table[] = {
657 { PCI_VDEVICE(MARVELL, 0x9180), chip_9180 }, 657 { PCI_VDEVICE(MARVELL, 0x9180), chip_9180 },
658 { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 }, 658 { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 },
659 { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 }, 659 { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 },
660 { PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 },
660 661
661 { } /* terminate list */ 662 { } /* terminate list */
662}; 663};
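The mv_init.c hunk adds one PCI ID (vendor ADAPTEC2, device 0x0450, bound to chip_6440) so the PCI core will now offer that board to mvsas. A simplified model of how a zero-terminated ID table is scanned at probe time — this is not the PCI core's real matcher, and the chip constants below are stand-ins:

#include <stdint.h>

struct id_entry { uint16_t vendor, device; int chip; };

static const struct id_entry table[] = {
	{ 0x11ab, 0x9180, 1 },	/* MARVELL 0x9180 -> chip_9180 (illustrative) */
	{ 0x9005, 0x0450, 2 },	/* ADAPTEC2 0x0450 -> chip_6440 (the new entry) */
	{ 0, 0, 0 }		/* terminator, like the { } sentinel above */
};

static int lookup_chip(uint16_t vendor, uint16_t device)
{
	const struct id_entry *e;

	for (e = table; e->vendor; e++)
		if (e->vendor == vendor && e->device == device)
			return e->chip;
	return -1;		/* no match: the driver does not bind */
}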
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
index aa2270af1bac..885858bcc403 100644
--- a/drivers/scsi/mvsas/mv_sas.h
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -36,6 +36,7 @@
36#include <linux/platform_device.h> 36#include <linux/platform_device.h>
37#include <linux/interrupt.h> 37#include <linux/interrupt.h>
38#include <linux/irq.h> 38#include <linux/irq.h>
39#include <linux/slab.h>
39#include <linux/vmalloc.h> 40#include <linux/vmalloc.h>
40#include <scsi/libsas.h> 41#include <scsi/libsas.h>
41#include <scsi/scsi_tcq.h> 42#include <scsi/scsi_tcq.h>
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index e3c482aa87b5..d013a2aa2fd5 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -98,6 +98,7 @@
98#include <linux/delay.h> 98#include <linux/delay.h>
99#include <linux/dma-mapping.h> 99#include <linux/dma-mapping.h>
100#include <linux/errno.h> 100#include <linux/errno.h>
101#include <linux/gfp.h>
101#include <linux/init.h> 102#include <linux/init.h>
102#include <linux/interrupt.h> 103#include <linux/interrupt.h>
103#include <linux/ioport.h> 104#include <linux/ioport.h>
@@ -6495,7 +6496,7 @@ static void ncr_int_ma (struct ncb *np)
6495 ** we force a SIR_NEGO_PROTO interrupt (it is a hack that avoids 6496 ** we force a SIR_NEGO_PROTO interrupt (it is a hack that avoids
6496 ** bloat for such a should_not_happen situation). 6497 ** bloat for such a should_not_happen situation).
6497 ** In all other situation, we reset the BUS. 6498 ** In all other situation, we reset the BUS.
6498 ** Are these assumptions reasonnable ? (Wait and see ...) 6499 ** Are these assumptions reasonable ? (Wait and see ...)
6499 */ 6500 */
6500unexpected_phase: 6501unexpected_phase:
6501 dsp -= 8; 6502 dsp -= 8;
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index 2be7d5b018d2..4c1e54545200 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -26,7 +26,6 @@
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/init.h> 27#include <linux/init.h>
28#include <linux/kernel.h> 28#include <linux/kernel.h>
29#include <linux/slab.h>
30#include <linux/string.h> 29#include <linux/string.h>
31#include <linux/timer.h> 30#include <linux/timer.h>
32#include <linux/ioport.h> 31#include <linux/ioport.h>
@@ -1419,7 +1418,7 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
1419 nsp32_msg(KERN_ERR, "Received unexpected BMCNTERR IRQ! "); 1418 nsp32_msg(KERN_ERR, "Received unexpected BMCNTERR IRQ! ");
1420 /* 1419 /*
1421 * TODO: To be implemented improving bus master 1420 * TODO: To be implemented improving bus master
1422 * transfer reliablity when BMCNTERR is occurred in 1421 * transfer reliability when BMCNTERR is occurred in
1423 * AutoSCSI phase described in specification. 1422 * AutoSCSI phase described in specification.
1424 */ 1423 */
1425 } 1424 }
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index 7a117c18114c..ee4b6914667f 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -39,6 +39,8 @@
39 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 39 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 */ 40 */
41 41
42#include <linux/slab.h>
43
42#include <scsi/osd_initiator.h> 44#include <scsi/osd_initiator.h>
43#include <scsi/osd_sec.h> 45#include <scsi/osd_sec.h>
44#include <scsi/osd_attributes.h> 46#include <scsi/osd_attributes.h>
@@ -73,7 +75,8 @@ static const char *_osd_ver_desc(struct osd_request *or)
73 75
74#define ATTR_DEF_RI(id, len) ATTR_DEF(OSD_APAGE_ROOT_INFORMATION, id, len) 76#define ATTR_DEF_RI(id, len) ATTR_DEF(OSD_APAGE_ROOT_INFORMATION, id, len)
75 77
76static int _osd_print_system_info(struct osd_dev *od, void *caps) 78static int _osd_get_print_system_info(struct osd_dev *od,
79 void *caps, struct osd_dev_info *odi)
77{ 80{
78 struct osd_request *or; 81 struct osd_request *or;
79 struct osd_attr get_attrs[] = { 82 struct osd_attr get_attrs[] = {
@@ -137,8 +140,12 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps)
137 OSD_INFO("PRODUCT_SERIAL_NUMBER [%s]\n", 140 OSD_INFO("PRODUCT_SERIAL_NUMBER [%s]\n",
138 (char *)pFirst); 141 (char *)pFirst);
139 142
140 pFirst = get_attrs[a].val_ptr; 143 odi->osdname_len = get_attrs[a].len;
141 OSD_INFO("OSD_NAME [%s]\n", (char *)pFirst); 144 /* Avoid NULL for memcmp optimization 0-length is good enough */
145 odi->osdname = kzalloc(odi->osdname_len + 1, GFP_KERNEL);
146 if (odi->osdname_len)
147 memcpy(odi->osdname, get_attrs[a].val_ptr, odi->osdname_len);
148 OSD_INFO("OSD_NAME [%s]\n", odi->osdname);
142 a++; 149 a++;
143 150
144 pFirst = get_attrs[a++].val_ptr; 151 pFirst = get_attrs[a++].val_ptr;
@@ -171,6 +178,14 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps)
171 sid_dump, sizeof(sid_dump), true); 178 sid_dump, sizeof(sid_dump), true);
172 OSD_INFO("OSD_SYSTEM_ID(%d)\n" 179 OSD_INFO("OSD_SYSTEM_ID(%d)\n"
173 " [%s]\n", len, sid_dump); 180 " [%s]\n", len, sid_dump);
181
182 if (unlikely(len > sizeof(odi->systemid))) {
183 OSD_ERR("OSD Target error: OSD_SYSTEM_ID too long(%d). "
184 "device idetification might not work\n", len);
185 len = sizeof(odi->systemid);
186 }
187 odi->systemid_len = len;
188 memcpy(odi->systemid, get_attrs[a].val_ptr, len);
174 a++; 189 a++;
175 } 190 }
176out: 191out:
@@ -178,16 +193,17 @@ out:
178 return ret; 193 return ret;
179} 194}
180 195
181int osd_auto_detect_ver(struct osd_dev *od, void *caps) 196int osd_auto_detect_ver(struct osd_dev *od,
197 void *caps, struct osd_dev_info *odi)
182{ 198{
183 int ret; 199 int ret;
184 200
185 /* Auto-detect the osd version */ 201 /* Auto-detect the osd version */
186 ret = _osd_print_system_info(od, caps); 202 ret = _osd_get_print_system_info(od, caps, odi);
187 if (ret) { 203 if (ret) {
188 osd_dev_set_ver(od, OSD_VER1); 204 osd_dev_set_ver(od, OSD_VER1);
189 OSD_DEBUG("converting to OSD1\n"); 205 OSD_DEBUG("converting to OSD1\n");
190 ret = _osd_print_system_info(od, caps); 206 ret = _osd_get_print_system_info(od, caps, odi);
191 } 207 }
192 208
193 return ret; 209 return ret;
@@ -418,30 +434,23 @@ static void _osd_free_seg(struct osd_request *or __unused,
418 seg->alloc_size = 0; 434 seg->alloc_size = 0;
419} 435}
420 436
421static void _put_request(struct request *rq , bool is_async) 437static void _put_request(struct request *rq)
422{ 438{
423 if (is_async) { 439 /*
424 WARN_ON(rq->bio); 440 * If osd_finalize_request() was called but the request was not
425 __blk_put_request(rq->q, rq); 441 * executed through the block layer, then we must release BIOs.
426 } else { 442 * TODO: Keep error code in or->async_error. Need to audit all
427 /* 443 * code paths.
428 * If osd_finalize_request() was called but the request was not 444 */
429 * executed through the block layer, then we must release BIOs. 445 if (unlikely(rq->bio))
430 * TODO: Keep error code in or->async_error. Need to audit all 446 blk_end_request(rq, -ENOMEM, blk_rq_bytes(rq));
431 * code paths. 447 else
432 */ 448 blk_put_request(rq);
433 if (unlikely(rq->bio))
434 blk_end_request(rq, -ENOMEM, blk_rq_bytes(rq));
435 else
436 blk_put_request(rq);
437 }
438} 449}
439 450
440void osd_end_request(struct osd_request *or) 451void osd_end_request(struct osd_request *or)
441{ 452{
442 struct request *rq = or->request; 453 struct request *rq = or->request;
443 /* IMPORTANT: make sure this agrees with osd_execute_request_async */
444 bool is_async = (or->request->end_io_data == or);
445 454
446 _osd_free_seg(or, &or->set_attr); 455 _osd_free_seg(or, &or->set_attr);
447 _osd_free_seg(or, &or->enc_get_attr); 456 _osd_free_seg(or, &or->enc_get_attr);
@@ -449,19 +458,34 @@ void osd_end_request(struct osd_request *or)
449 458
450 if (rq) { 459 if (rq) {
451 if (rq->next_rq) { 460 if (rq->next_rq) {
452 _put_request(rq->next_rq, is_async); 461 _put_request(rq->next_rq);
453 rq->next_rq = NULL; 462 rq->next_rq = NULL;
454 } 463 }
455 464
456 _put_request(rq, is_async); 465 _put_request(rq);
457 } 466 }
458 _osd_request_free(or); 467 _osd_request_free(or);
459} 468}
460EXPORT_SYMBOL(osd_end_request); 469EXPORT_SYMBOL(osd_end_request);
461 470
471static void _set_error_resid(struct osd_request *or, struct request *req,
472 int error)
473{
474 or->async_error = error;
475 or->req_errors = req->errors ? : error;
476 or->sense_len = req->sense_len;
477 if (or->out.req)
478 or->out.residual = or->out.req->resid_len;
479 if (or->in.req)
480 or->in.residual = or->in.req->resid_len;
481}
482
462int osd_execute_request(struct osd_request *or) 483int osd_execute_request(struct osd_request *or)
463{ 484{
464 return blk_execute_rq(or->request->q, NULL, or->request, 0); 485 int error = blk_execute_rq(or->request->q, NULL, or->request, 0);
486
487 _set_error_resid(or, or->request, error);
488 return error;
465} 489}
466EXPORT_SYMBOL(osd_execute_request); 490EXPORT_SYMBOL(osd_execute_request);
467 491
@@ -469,10 +493,16 @@ static void osd_request_async_done(struct request *req, int error)
469{ 493{
470 struct osd_request *or = req->end_io_data; 494 struct osd_request *or = req->end_io_data;
471 495
472 or->async_error = error; 496 _set_error_resid(or, req, error);
497 if (req->next_rq) {
498 __blk_put_request(req->q, req->next_rq);
499 req->next_rq = NULL;
500 }
473 501
474 if (error) 502 __blk_put_request(req->q, req);
475 OSD_DEBUG("osd_request_async_done error recieved %d\n", error); 503 or->request = NULL;
504 or->in.req = NULL;
505 or->out.req = NULL;
476 506
477 if (or->async_done) 507 if (or->async_done)
478 or->async_done(or, or->async_private); 508 or->async_done(or, or->async_private);
@@ -1153,6 +1183,7 @@ int osd_req_decode_get_attr_list(struct osd_request *or,
1153 "c=%d r=%d n=%d\n", 1183 "c=%d r=%d n=%d\n",
1154 cur_bytes, returned_bytes, n); 1184 cur_bytes, returned_bytes, n);
1155 oa->val_ptr = NULL; 1185 oa->val_ptr = NULL;
1186 cur_bytes = returned_bytes; /* break the caller loop */
1156 break; 1187 break;
1157 } 1188 }
1158 1189
@@ -1404,6 +1435,10 @@ int osd_finalize_request(struct osd_request *or,
1404 cdbh->command_specific_options |= or->attributes_mode; 1435 cdbh->command_specific_options |= or->attributes_mode;
1405 if (or->attributes_mode == OSD_CDB_GET_ATTR_PAGE_SET_ONE) { 1436 if (or->attributes_mode == OSD_CDB_GET_ATTR_PAGE_SET_ONE) {
1406 ret = _osd_req_finalize_attr_page(or); 1437 ret = _osd_req_finalize_attr_page(or);
1438 if (ret) {
1439 OSD_DEBUG("_osd_req_finalize_attr_page failed\n");
1440 return ret;
1441 }
1407 } else { 1442 } else {
1408 /* TODO: I think that for the GET_ATTR command these 2 should 1443 /* TODO: I think that for the GET_ATTR command these 2 should
1409 * be reversed to keep them in execution order (for embeded 1444 * be reversed to keep them in execution order (for embeded
@@ -1436,6 +1471,15 @@ int osd_finalize_request(struct osd_request *or,
1436} 1471}
1437EXPORT_SYMBOL(osd_finalize_request); 1472EXPORT_SYMBOL(osd_finalize_request);
1438 1473
1474static bool _is_osd_security_code(int code)
1475{
1476 return (code == osd_security_audit_value_frozen) ||
1477 (code == osd_security_working_key_frozen) ||
1478 (code == osd_nonce_not_unique) ||
1479 (code == osd_nonce_timestamp_out_of_range) ||
1480 (code == osd_invalid_dataout_buffer_integrity_check_value);
1481}
1482
1439#define OSD_SENSE_PRINT1(fmt, a...) \ 1483#define OSD_SENSE_PRINT1(fmt, a...) \
1440 do { \ 1484 do { \
1441 if (__cur_sense_need_output) \ 1485 if (__cur_sense_need_output) \
@@ -1458,27 +1502,29 @@ int osd_req_decode_sense_full(struct osd_request *or,
1458#else 1502#else
1459 bool __cur_sense_need_output = !silent; 1503 bool __cur_sense_need_output = !silent;
1460#endif 1504#endif
1505 int ret;
1461 1506
1462 if (!or->request->errors) 1507 if (likely(!or->req_errors))
1463 return 0; 1508 return 0;
1464 1509
1465 ssdb = or->request->sense; 1510 osi = osi ? : &local_osi;
1466 sense_len = or->request->sense_len; 1511 memset(osi, 0, sizeof(*osi));
1512
1513 ssdb = (typeof(ssdb))or->sense;
1514 sense_len = or->sense_len;
1467 if ((sense_len < (int)sizeof(*ssdb) || !ssdb->sense_key)) { 1515 if ((sense_len < (int)sizeof(*ssdb) || !ssdb->sense_key)) {
1468 OSD_ERR("Block-layer returned error(0x%x) but " 1516 OSD_ERR("Block-layer returned error(0x%x) but "
1469 "sense_len(%u) || key(%d) is empty\n", 1517 "sense_len(%u) || key(%d) is empty\n",
1470 or->request->errors, sense_len, ssdb->sense_key); 1518 or->req_errors, sense_len, ssdb->sense_key);
1471 return -EIO; 1519 goto analyze;
1472 } 1520 }
1473 1521
1474 if ((ssdb->response_code != 0x72) && (ssdb->response_code != 0x73)) { 1522 if ((ssdb->response_code != 0x72) && (ssdb->response_code != 0x73)) {
1475 OSD_ERR("Unrecognized scsi sense: rcode=%x length=%d\n", 1523 OSD_ERR("Unrecognized scsi sense: rcode=%x length=%d\n",
1476 ssdb->response_code, sense_len); 1524 ssdb->response_code, sense_len);
1477 return -EIO; 1525 goto analyze;
1478 } 1526 }
1479 1527
1480 osi = osi ? : &local_osi;
1481 memset(osi, 0, sizeof(*osi));
1482 osi->key = ssdb->sense_key; 1528 osi->key = ssdb->sense_key;
1483 osi->additional_code = be16_to_cpu(ssdb->additional_sense_code); 1529 osi->additional_code = be16_to_cpu(ssdb->additional_sense_code);
1484 original_sense_len = ssdb->additional_sense_length + 8; 1530 original_sense_len = ssdb->additional_sense_length + 8;
@@ -1488,9 +1534,10 @@ int osd_req_decode_sense_full(struct osd_request *or,
1488 __cur_sense_need_output = (osi->key > scsi_sk_recovered_error); 1534 __cur_sense_need_output = (osi->key > scsi_sk_recovered_error);
1489#endif 1535#endif
1490 OSD_SENSE_PRINT1("Main Sense information key=0x%x length(%d, %d) " 1536 OSD_SENSE_PRINT1("Main Sense information key=0x%x length(%d, %d) "
1491 "additional_code=0x%x\n", 1537 "additional_code=0x%x async_error=%d errors=0x%x\n",
1492 osi->key, original_sense_len, sense_len, 1538 osi->key, original_sense_len, sense_len,
1493 osi->additional_code); 1539 osi->additional_code, or->async_error,
1540 or->req_errors);
1494 1541
1495 if (original_sense_len < sense_len) 1542 if (original_sense_len < sense_len)
1496 sense_len = original_sense_len; 1543 sense_len = original_sense_len;
@@ -1569,15 +1616,14 @@ int osd_req_decode_sense_full(struct osd_request *or,
1569 { 1616 {
1570 struct osd_sense_attributes_data_descriptor 1617 struct osd_sense_attributes_data_descriptor
1571 *osadd = cur_descriptor; 1618 *osadd = cur_descriptor;
1572 int len = min(cur_len, sense_len); 1619 unsigned len = min(cur_len, sense_len);
1573 int i = 0;
1574 struct osd_sense_attr *pattr = osadd->sense_attrs; 1620 struct osd_sense_attr *pattr = osadd->sense_attrs;
1575 1621
1576 while (len < 0) { 1622 while (len >= sizeof(*pattr)) {
1577 u32 attr_page = be32_to_cpu(pattr->attr_page); 1623 u32 attr_page = be32_to_cpu(pattr->attr_page);
1578 u32 attr_id = be32_to_cpu(pattr->attr_id); 1624 u32 attr_id = be32_to_cpu(pattr->attr_id);
1579 1625
1580 if (i++ == 0) { 1626 if (!osi->attr.attr_page) {
1581 osi->attr.attr_page = attr_page; 1627 osi->attr.attr_page = attr_page;
1582 osi->attr.attr_id = attr_id; 1628 osi->attr.attr_id = attr_id;
1583 } 1629 }
@@ -1588,6 +1634,8 @@ int osd_req_decode_sense_full(struct osd_request *or,
1588 bad_attr_list++; 1634 bad_attr_list++;
1589 max_attr--; 1635 max_attr--;
1590 } 1636 }
1637
1638 len -= sizeof(*pattr);
1591 OSD_SENSE_PRINT2( 1639 OSD_SENSE_PRINT2(
1592 "osd_sense_attribute_identification" 1640 "osd_sense_attribute_identification"
1593 "attr_page=0x%x attr_id=0x%x\n", 1641 "attr_page=0x%x attr_id=0x%x\n",
@@ -1621,7 +1669,50 @@ int osd_req_decode_sense_full(struct osd_request *or,
1621 cur_descriptor += cur_len; 1669 cur_descriptor += cur_len;
1622 } 1670 }
1623 1671
1624 return (osi->key > scsi_sk_recovered_error) ? -EIO : 0; 1672analyze:
1673 if (!osi->key) {
1674 /* scsi sense is Empty, the request was never issued to target
1675 * linux return code might tell us what happened.
1676 */
1677 if (or->async_error == -ENOMEM)
1678 osi->osd_err_pri = OSD_ERR_PRI_RESOURCE;
1679 else
1680 osi->osd_err_pri = OSD_ERR_PRI_UNREACHABLE;
1681 ret = or->async_error;
1682 } else if (osi->key <= scsi_sk_recovered_error) {
1683 osi->osd_err_pri = 0;
1684 ret = 0;
1685 } else if (osi->additional_code == scsi_invalid_field_in_cdb) {
1686 if (osi->cdb_field_offset == OSD_CFO_STARTING_BYTE) {
1687 osi->osd_err_pri = OSD_ERR_PRI_CLEAR_PAGES;
1688 ret = -EFAULT; /* caller should recover from this */
1689 } else if (osi->cdb_field_offset == OSD_CFO_OBJECT_ID) {
1690 osi->osd_err_pri = OSD_ERR_PRI_NOT_FOUND;
1691 ret = -ENOENT;
1692 } else if (osi->cdb_field_offset == OSD_CFO_PERMISSIONS) {
1693 osi->osd_err_pri = OSD_ERR_PRI_NO_ACCESS;
1694 ret = -EACCES;
1695 } else {
1696 osi->osd_err_pri = OSD_ERR_PRI_BAD_CRED;
1697 ret = -EINVAL;
1698 }
1699 } else if (osi->additional_code == osd_quota_error) {
1700 osi->osd_err_pri = OSD_ERR_PRI_NO_SPACE;
1701 ret = -ENOSPC;
1702 } else if (_is_osd_security_code(osi->additional_code)) {
1703 osi->osd_err_pri = OSD_ERR_PRI_BAD_CRED;
1704 ret = -EINVAL;
1705 } else {
1706 osi->osd_err_pri = OSD_ERR_PRI_EIO;
1707 ret = -EIO;
1708 }
1709
1710 if (!or->out.residual)
1711 or->out.residual = or->out.total_bytes;
1712 if (!or->in.residual)
1713 or->in.residual = or->in.total_bytes;
1714
1715 return ret;
1625} 1716}
1626EXPORT_SYMBOL(osd_req_decode_sense_full); 1717EXPORT_SYMBOL(osd_req_decode_sense_full);
1627 1718
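The largest osd_initiator.c change is the analyze: tail of osd_req_decode_sense_full(), which turns raw sense data into an (osd_err_pri, errno) pair instead of a flat -EIO. A compilable user-space condensation of that decision table — the OSD_CFO_* offsets, the quota ASC, and the security ASC values below are simplified stand-ins, not the spec constants:

#include <errno.h>

enum err_pri {
	PRI_OK, PRI_CLEAR_PAGES, PRI_RESOURCE, PRI_UNREACHABLE,
	PRI_NOT_FOUND, PRI_NO_ACCESS, PRI_BAD_CRED, PRI_NO_SPACE, PRI_EIO
};

#define SK_RECOVERED_ERROR	0x01	/* recovered-error sense key */
#define ASC_INVALID_FIELD_IN_CDB 0x2400	/* standard SCSI ASC/ASCQ */
#define ASC_QUOTA_ERROR		0x5507	/* stand-in for osd_quota_error */
#define CFO_STARTING_BYTE	1	/* stand-ins for OSD_CFO_* offsets */
#define CFO_OBJECT_ID		2
#define CFO_PERMISSIONS		3

static int is_security_asc(int asc)
{
	/* stand-in for _is_osd_security_code(): frozen keys, bad nonces... */
	return asc == 0x2406 || asc == 0x2407;	/* illustrative values */
}

/* Returns the errno the caller sees; *pri tells it how to recover. */
static int analyze_sense(int key, int asc, int cdb_off, int async_error,
			 enum err_pri *pri)
{
	if (!key) {			/* empty sense: never reached target */
		*pri = (async_error == -ENOMEM) ?
			PRI_RESOURCE : PRI_UNREACHABLE;
		return async_error;
	}
	if (key <= SK_RECOVERED_ERROR) {
		*pri = PRI_OK;		/* recovered or informational */
		return 0;
	}
	if (asc == ASC_INVALID_FIELD_IN_CDB) {
		switch (cdb_off) {	/* which CDB field was rejected? */
		case CFO_STARTING_BYTE:	*pri = PRI_CLEAR_PAGES;	return -EFAULT;
		case CFO_OBJECT_ID:	*pri = PRI_NOT_FOUND;	return -ENOENT;
		case CFO_PERMISSIONS:	*pri = PRI_NO_ACCESS;	return -EACCES;
		default:		*pri = PRI_BAD_CRED;	return -EINVAL;
		}
	}
	if (asc == ASC_QUOTA_ERROR) {
		*pri = PRI_NO_SPACE;
		return -ENOSPC;
	}
	if (is_security_asc(asc)) {
		*pri = PRI_BAD_CRED;
		return -EINVAL;
	}
	*pri = PRI_EIO;			/* anything else: plain I/O error */
	return -EIO;
}

A caller can then branch on the priority (for example, treat PRI_NOT_FOUND as a soft miss and PRI_CLEAR_PAGES as retry-after-cleanup) instead of parsing sense buffers itself.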
diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c
index 0bdef3390902..ffdd9fdb9995 100644
--- a/drivers/scsi/osd/osd_uld.c
+++ b/drivers/scsi/osd/osd_uld.c
@@ -50,6 +50,7 @@
50#include <linux/idr.h> 50#include <linux/idr.h>
51#include <linux/major.h> 51#include <linux/major.h>
52#include <linux/file.h> 52#include <linux/file.h>
53#include <linux/slab.h>
53 54
54#include <scsi/scsi.h> 55#include <scsi/scsi.h>
55#include <scsi/scsi_driver.h> 56#include <scsi/scsi_driver.h>
@@ -71,8 +72,7 @@
71#define SCSI_OSD_MAX_MINOR 64 72#define SCSI_OSD_MAX_MINOR 64
72 73
73static const char osd_name[] = "osd"; 74static const char osd_name[] = "osd";
74static const char *osd_version_string = "open-osd 0.1.0"; 75static const char *osd_version_string = "open-osd 0.2.0";
75const char osd_symlink[] = "scsi_osd";
76 76
77MODULE_AUTHOR("Boaz Harrosh <bharrosh@panasas.com>"); 77MODULE_AUTHOR("Boaz Harrosh <bharrosh@panasas.com>");
78MODULE_DESCRIPTION("open-osd Upper-Layer-Driver osd.ko"); 78MODULE_DESCRIPTION("open-osd Upper-Layer-Driver osd.ko");
@@ -82,15 +82,25 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_OSD);
82 82
83struct osd_uld_device { 83struct osd_uld_device {
84 int minor; 84 int minor;
85 struct kref kref; 85 struct device class_dev;
86 struct cdev cdev; 86 struct cdev cdev;
87 struct osd_dev od; 87 struct osd_dev od;
88 struct osd_dev_info odi;
88 struct gendisk *disk; 89 struct gendisk *disk;
89 struct device *class_member;
90}; 90};
91 91
92static void __uld_get(struct osd_uld_device *oud); 92struct osd_dev_handle {
93static void __uld_put(struct osd_uld_device *oud); 93 struct osd_dev od;
94 struct file *file;
95 struct osd_uld_device *oud;
96} ;
97
98static DEFINE_IDA(osd_minor_ida);
99
100static struct class osd_uld_class = {
101 .owner = THIS_MODULE,
102 .name = "scsi_osd",
103};
94 104
95/* 105/*
96 * Char Device operations 106 * Char Device operations
@@ -101,7 +111,7 @@ static int osd_uld_open(struct inode *inode, struct file *file)
101 struct osd_uld_device *oud = container_of(inode->i_cdev, 111 struct osd_uld_device *oud = container_of(inode->i_cdev,
102 struct osd_uld_device, cdev); 112 struct osd_uld_device, cdev);
103 113
104 __uld_get(oud); 114 get_device(&oud->class_dev);
105 /* cache osd_uld_device on file handle */ 115 /* cache osd_uld_device on file handle */
106 file->private_data = oud; 116 file->private_data = oud;
107 OSD_DEBUG("osd_uld_open %p\n", oud); 117 OSD_DEBUG("osd_uld_open %p\n", oud);
@@ -114,7 +124,7 @@ static int osd_uld_release(struct inode *inode, struct file *file)
114 124
115 OSD_DEBUG("osd_uld_release %p\n", file->private_data); 125 OSD_DEBUG("osd_uld_release %p\n", file->private_data);
116 file->private_data = NULL; 126 file->private_data = NULL;
117 __uld_put(oud); 127 put_device(&oud->class_dev);
118 return 0; 128 return 0;
119} 129}
120 130
@@ -177,7 +187,7 @@ static const struct file_operations osd_fops = {
177struct osd_dev *osduld_path_lookup(const char *name) 187struct osd_dev *osduld_path_lookup(const char *name)
178{ 188{
179 struct osd_uld_device *oud; 189 struct osd_uld_device *oud;
180 struct osd_dev *od; 190 struct osd_dev_handle *odh;
181 struct file *file; 191 struct file *file;
182 int error; 192 int error;
183 193
@@ -186,8 +196,8 @@ struct osd_dev *osduld_path_lookup(const char *name)
186 return ERR_PTR(-EINVAL); 196 return ERR_PTR(-EINVAL);
187 } 197 }
188 198
189 od = kzalloc(sizeof(*od), GFP_KERNEL); 199 odh = kzalloc(sizeof(*odh), GFP_KERNEL);
190 if (!od) 200 if (unlikely(!odh))
191 return ERR_PTR(-ENOMEM); 201 return ERR_PTR(-ENOMEM);
192 202
193 file = filp_open(name, O_RDWR, 0); 203 file = filp_open(name, O_RDWR, 0);
@@ -203,33 +213,134 @@ struct osd_dev *osduld_path_lookup(const char *name)
203 213
204 oud = file->private_data; 214 oud = file->private_data;
205 215
206 *od = oud->od; 216 odh->od = oud->od;
207 od->file = file; 217 odh->file = file;
218 odh->oud = oud;
208 219
209 return od; 220 return &odh->od;
210 221
211close_file: 222close_file:
212 fput(file); 223 fput(file);
213free_od: 224free_od:
214 kfree(od); 225 kfree(odh);
215 return ERR_PTR(error); 226 return ERR_PTR(error);
216} 227}
217EXPORT_SYMBOL(osduld_path_lookup); 228EXPORT_SYMBOL(osduld_path_lookup);
218 229
219void osduld_put_device(struct osd_dev *od) 230static inline bool _the_same_or_null(const u8 *a1, unsigned a1_len,
231 const u8 *a2, unsigned a2_len)
220{ 232{
233 if (!a2_len) /* User string is Empty means don't care */
234 return true;
235
236 if (a1_len != a2_len)
237 return false;
238
239 return 0 == memcmp(a1, a2, a1_len);
240}
241
242struct find_oud_t {
243 const struct osd_dev_info *odi;
244 struct device *dev;
245 struct osd_uld_device *oud;
246} ;
247
248int _mach_odi(struct device *dev, void *find_data)
249{
250 struct osd_uld_device *oud = container_of(dev, struct osd_uld_device,
251 class_dev);
252 struct find_oud_t *fot = find_data;
253 const struct osd_dev_info *odi = fot->odi;
254
255 if (_the_same_or_null(oud->odi.systemid, oud->odi.systemid_len,
256 odi->systemid, odi->systemid_len) &&
257 _the_same_or_null(oud->odi.osdname, oud->odi.osdname_len,
258 odi->osdname, odi->osdname_len)) {
259 OSD_DEBUG("found device sysid_len=%d osdname=%d\n",
260 odi->systemid_len, odi->osdname_len);
261 fot->oud = oud;
262 return 1;
263 } else {
264 return 0;
265 }
266}
267
268/* osduld_info_lookup - Loop through all devices, return the requested osd_dev.
269 *
270 * if @odi->systemid_len and/or @odi->osdname_len are zero, they act as a don't
271 * care, e.g. if they're both zero /dev/osd0 is returned.
272 */
273struct osd_dev *osduld_info_lookup(const struct osd_dev_info *odi)
274{
275 struct find_oud_t find = {.odi = odi};
276
277 find.dev = class_find_device(&osd_uld_class, NULL, &find, _mach_odi);
278 if (likely(find.dev)) {
279 struct osd_dev_handle *odh = kzalloc(sizeof(*odh), GFP_KERNEL);
280
281 if (unlikely(!odh)) {
282 put_device(find.dev);
283 return ERR_PTR(-ENOMEM);
284 }
221 285
286 odh->od = find.oud->od;
287 odh->oud = find.oud;
288
289 return &odh->od;
290 }
291
292 return ERR_PTR(-ENODEV);
293}
294EXPORT_SYMBOL(osduld_info_lookup);
295
296void osduld_put_device(struct osd_dev *od)
297{
222 if (od && !IS_ERR(od)) { 298 if (od && !IS_ERR(od)) {
223 struct osd_uld_device *oud = od->file->private_data; 299 struct osd_dev_handle *odh =
300 container_of(od, struct osd_dev_handle, od);
301 struct osd_uld_device *oud = odh->oud;
224 302
225 BUG_ON(od->scsi_device != oud->od.scsi_device); 303 BUG_ON(od->scsi_device != oud->od.scsi_device);
226 304
227 fput(od->file); 305 /* If scsi has released the device (logout), and exofs has last
228 kfree(od); 306 * reference on oud it will be freed by above osd_uld_release
307 * within fput below. But this will oops in cdev_release which
308 * is called after the fops->release. A get_/put_ pair makes
309 * sure we have a cdev for the duration of fput
310 */
311 if (odh->file) {
312 get_device(&oud->class_dev);
313 fput(odh->file);
314 }
315 put_device(&oud->class_dev);
316 kfree(odh);
229 } 317 }
230} 318}
231EXPORT_SYMBOL(osduld_put_device); 319EXPORT_SYMBOL(osduld_put_device);
232 320
321const struct osd_dev_info *osduld_device_info(struct osd_dev *od)
322{
323 struct osd_dev_handle *odh =
324 container_of(od, struct osd_dev_handle, od);
325 return &odh->oud->odi;
326}
327EXPORT_SYMBOL(osduld_device_info);
328
329bool osduld_device_same(struct osd_dev *od, const struct osd_dev_info *odi)
330{
331 struct osd_dev_handle *odh =
332 container_of(od, struct osd_dev_handle, od);
333 struct osd_uld_device *oud = odh->oud;
334
335 return (oud->odi.systemid_len == odi->systemid_len) &&
336 _the_same_or_null(oud->odi.systemid, oud->odi.systemid_len,
337 odi->systemid, odi->systemid_len) &&
338 (oud->odi.osdname_len == odi->osdname_len) &&
339 _the_same_or_null(oud->odi.osdname, oud->odi.osdname_len,
340 odi->osdname, odi->osdname_len);
341}
342EXPORT_SYMBOL(osduld_device_same);
343
233/* 344/*
234 * Scsi Device operations 345 * Scsi Device operations
235 */ 346 */
@@ -250,14 +361,35 @@ static int __detect_osd(struct osd_uld_device *oud)
250 OSD_ERR("warning: scsi_test_unit_ready failed\n"); 361 OSD_ERR("warning: scsi_test_unit_ready failed\n");
251 362
252 osd_sec_init_nosec_doall_caps(caps, &osd_root_object, false, true); 363 osd_sec_init_nosec_doall_caps(caps, &osd_root_object, false, true);
253 if (osd_auto_detect_ver(&oud->od, caps)) 364 if (osd_auto_detect_ver(&oud->od, caps, &oud->odi))
254 return -ENODEV; 365 return -ENODEV;
255 366
256 return 0; 367 return 0;
257} 368}
258 369
259static struct class *osd_sysfs_class; 370static void __remove(struct device *dev)
260static DEFINE_IDA(osd_minor_ida); 371{
372 struct osd_uld_device *oud = container_of(dev, struct osd_uld_device,
373 class_dev);
374 struct scsi_device *scsi_device = oud->od.scsi_device;
375
376 kfree(oud->odi.osdname);
377
378 if (oud->cdev.owner)
379 cdev_del(&oud->cdev);
380
381 osd_dev_fini(&oud->od);
382 scsi_device_put(scsi_device);
383
384 OSD_INFO("osd_remove %s\n",
385 oud->disk ? oud->disk->disk_name : NULL);
386
387 if (oud->disk)
388 put_disk(oud->disk);
389 ida_remove(&osd_minor_ida, oud->minor);
390
391 kfree(oud);
392}
261 393
262static int osd_probe(struct device *dev) 394static int osd_probe(struct device *dev)
263{ 395{
@@ -289,7 +421,6 @@ static int osd_probe(struct device *dev)
289 if (NULL == oud) 421 if (NULL == oud)
290 goto err_retract_minor; 422 goto err_retract_minor;
291 423
292 kref_init(&oud->kref);
293 dev_set_drvdata(dev, oud); 424 dev_set_drvdata(dev, oud);
294 oud->minor = minor; 425 oud->minor = minor;
295 426
@@ -327,18 +458,25 @@ static int osd_probe(struct device *dev)
327 OSD_ERR("cdev_add failed\n"); 458 OSD_ERR("cdev_add failed\n");
328 goto err_put_disk; 459 goto err_put_disk;
329 } 460 }
330 kobject_get(&oud->cdev.kobj); /* 2nd ref see osd_remove() */ 461
331 462 /* class device member */
332 /* class_member */ 463 oud->class_dev.devt = oud->cdev.dev;
333 oud->class_member = device_create(osd_sysfs_class, dev, 464 oud->class_dev.class = &osd_uld_class;
334 MKDEV(SCSI_OSD_MAJOR, oud->minor), "%s", disk->disk_name); 465 oud->class_dev.parent = dev;
335 if (IS_ERR(oud->class_member)) { 466 oud->class_dev.release = __remove;
336 OSD_ERR("class_device_create failed\n"); 467 error = dev_set_name(&oud->class_dev, disk->disk_name);
337 error = PTR_ERR(oud->class_member); 468 if (error) {
469 OSD_ERR("dev_set_name failed => %d\n", error);
470 goto err_put_cdev;
471 }
472
473 error = device_register(&oud->class_dev);
474 if (error) {
475 OSD_ERR("device_register failed => %d\n", error);
338 goto err_put_cdev; 476 goto err_put_cdev;
339 } 477 }
340 478
341 dev_set_drvdata(oud->class_member, oud); 479 get_device(&oud->class_dev);
342 480
343 OSD_INFO("osd_probe %s\n", disk->disk_name); 481 OSD_INFO("osd_probe %s\n", disk->disk_name);
344 return 0; 482 return 0;
@@ -367,54 +505,12 @@ static int osd_remove(struct device *dev)
367 scsi_device); 505 scsi_device);
368 } 506 }
369 507
370 if (oud->class_member) 508 device_unregister(&oud->class_dev);
371 device_destroy(osd_sysfs_class,
372 MKDEV(SCSI_OSD_MAJOR, oud->minor));
373 509
374 /* We have 2 references to the cdev. One is released here 510 put_device(&oud->class_dev);
375 * and also takes down the /dev/osdX mapping. The second
376 * Will be released in __remove() after all users have released
377 * the osd_uld_device.
378 */
379 if (oud->cdev.owner)
380 cdev_del(&oud->cdev);
381
382 __uld_put(oud);
383 return 0; 511 return 0;
384} 512}
385 513
386static void __remove(struct kref *kref)
387{
388 struct osd_uld_device *oud = container_of(kref,
389 struct osd_uld_device, kref);
390 struct scsi_device *scsi_device = oud->od.scsi_device;
391
392 /* now let delete the char_dev */
393 kobject_put(&oud->cdev.kobj);
394
395 osd_dev_fini(&oud->od);
396 scsi_device_put(scsi_device);
397
398 OSD_INFO("osd_remove %s\n",
399 oud->disk ? oud->disk->disk_name : NULL);
400
401 if (oud->disk)
402 put_disk(oud->disk);
403
404 ida_remove(&osd_minor_ida, oud->minor);
405 kfree(oud);
406}
407
408static void __uld_get(struct osd_uld_device *oud)
409{
410 kref_get(&oud->kref);
411}
412
413static void __uld_put(struct osd_uld_device *oud)
414{
415 kref_put(&oud->kref, __remove);
416}
417
418/* 514/*
419 * Global driver and scsi registration 515 * Global driver and scsi registration
420 */ 516 */
@@ -432,11 +528,10 @@ static int __init osd_uld_init(void)
432{ 528{
433 int err; 529 int err;
434 530
435 osd_sysfs_class = class_create(THIS_MODULE, osd_symlink); 531 err = class_register(&osd_uld_class);
436 if (IS_ERR(osd_sysfs_class)) { 532 if (err) {
437 OSD_ERR("Unable to register sysfs class => %ld\n", 533 OSD_ERR("Unable to register sysfs class => %d\n", err);
438 PTR_ERR(osd_sysfs_class)); 534 return err;
439 return PTR_ERR(osd_sysfs_class);
440 } 535 }
441 536
442 err = register_chrdev_region(MKDEV(SCSI_OSD_MAJOR, 0), 537 err = register_chrdev_region(MKDEV(SCSI_OSD_MAJOR, 0),
@@ -459,7 +554,7 @@ static int __init osd_uld_init(void)
459err_out_chrdev: 554err_out_chrdev:
460 unregister_chrdev_region(MKDEV(SCSI_OSD_MAJOR, 0), SCSI_OSD_MAX_MINOR); 555 unregister_chrdev_region(MKDEV(SCSI_OSD_MAJOR, 0), SCSI_OSD_MAX_MINOR);
461err_out: 556err_out:
462 class_destroy(osd_sysfs_class); 557 class_unregister(&osd_uld_class);
463 return err; 558 return err;
464} 559}
465 560
@@ -467,7 +562,7 @@ static void __exit osd_uld_exit(void)
467{ 562{
468 scsi_unregister_driver(&osd_driver.gendrv); 563 scsi_unregister_driver(&osd_driver.gendrv);
469 unregister_chrdev_region(MKDEV(SCSI_OSD_MAJOR, 0), SCSI_OSD_MAX_MINOR); 564 unregister_chrdev_region(MKDEV(SCSI_OSD_MAJOR, 0), SCSI_OSD_MAX_MINOR);
470 class_destroy(osd_sysfs_class); 565 class_unregister(&osd_uld_class);
471 OSD_INFO("UNLOADED %s\n", osd_version_string); 566 OSD_INFO("UNLOADED %s\n", osd_version_string);
472} 567}
473 568
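Two ideas drive the osd_uld.c rework above: device lifetime moves from an open-coded kref to an embedded struct device whose release callback (__remove) runs when the last reference drops, and osduld_info_lookup() finds a device via class_find_device() with a wildcard-friendly comparison. A small sketch of that comparison rule, mirroring _the_same_or_null() in plain C outside the kernel:

#include <stdbool.h>
#include <string.h>

/* An empty query id means "don't care"; otherwise length and bytes must
 * both match.  With both ids empty the first registered device wins,
 * which is how "both zero returns /dev/osd0" works in the lookup. */
static bool same_or_null(const unsigned char *dev_id, unsigned dev_len,
			 const unsigned char *query, unsigned query_len)
{
	if (!query_len)
		return true;
	if (dev_len != query_len)
		return false;
	return memcmp(dev_id, query, dev_len) == 0;
}

Note that osduld_device_same() additionally requires the lengths to be equal, so it behaves as an exact-match variant rather than a wildcard one.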
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index acb835837eec..b219118f8bd6 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -38,6 +38,7 @@ static const char * osst_version = "0.99.4";
38#include <linux/sched.h> 38#include <linux/sched.h>
39#include <linux/proc_fs.h> 39#include <linux/proc_fs.h>
40#include <linux/mm.h> 40#include <linux/mm.h>
41#include <linux/slab.h>
41#include <linux/init.h> 42#include <linux/init.h>
42#include <linux/string.h> 43#include <linux/string.h>
43#include <linux/errno.h> 44#include <linux/errno.h>
diff --git a/drivers/scsi/pcmcia/aha152x_stub.c b/drivers/scsi/pcmcia/aha152x_stub.c
index 67cde0138061..528733b4a392 100644
--- a/drivers/scsi/pcmcia/aha152x_stub.c
+++ b/drivers/scsi/pcmcia/aha152x_stub.c
@@ -54,15 +54,6 @@
54#include <pcmcia/cistpl.h> 54#include <pcmcia/cistpl.h>
55#include <pcmcia/ds.h> 55#include <pcmcia/ds.h>
56 56
57#ifdef PCMCIA_DEBUG
58static int pc_debug = PCMCIA_DEBUG;
59module_param(pc_debug, int, 0644);
60#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
61static char *version =
62"aha152x_cs.c 1.54 2000/06/12 21:27:25 (David Hinds)";
63#else
64#define DEBUG(n, args...)
65#endif
66 57
67/*====================================================================*/ 58/*====================================================================*/
68 59
@@ -103,7 +94,7 @@ static int aha152x_probe(struct pcmcia_device *link)
103{ 94{
104 scsi_info_t *info; 95 scsi_info_t *info;
105 96
106 DEBUG(0, "aha152x_attach()\n"); 97 dev_dbg(&link->dev, "aha152x_attach()\n");
107 98
108 /* Create new SCSI device */ 99 /* Create new SCSI device */
109 info = kzalloc(sizeof(*info), GFP_KERNEL); 100 info = kzalloc(sizeof(*info), GFP_KERNEL);
@@ -115,7 +106,6 @@ static int aha152x_probe(struct pcmcia_device *link)
115 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; 106 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
116 link->io.IOAddrLines = 10; 107 link->io.IOAddrLines = 10;
117 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; 108 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
118 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
119 link->conf.Attributes = CONF_ENABLE_IRQ; 109 link->conf.Attributes = CONF_ENABLE_IRQ;
120 link->conf.IntType = INT_MEMORY_AND_IO; 110 link->conf.IntType = INT_MEMORY_AND_IO;
121 link->conf.Present = PRESENT_OPTION; 111 link->conf.Present = PRESENT_OPTION;
@@ -127,7 +117,7 @@ static int aha152x_probe(struct pcmcia_device *link)
127 117
128static void aha152x_detach(struct pcmcia_device *link) 118static void aha152x_detach(struct pcmcia_device *link)
129{ 119{
130 DEBUG(0, "aha152x_detach(0x%p)\n", link); 120 dev_dbg(&link->dev, "aha152x_detach\n");
131 121
132 aha152x_release_cs(link); 122 aha152x_release_cs(link);
133 123
@@ -137,9 +127,6 @@ static void aha152x_detach(struct pcmcia_device *link)
137 127
138/*====================================================================*/ 128/*====================================================================*/
139 129
140#define CS_CHECK(fn, ret) \
141do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
142
143static int aha152x_config_check(struct pcmcia_device *p_dev, 130static int aha152x_config_check(struct pcmcia_device *p_dev,
144 cistpl_cftable_entry_t *cfg, 131 cistpl_cftable_entry_t *cfg,
145 cistpl_cftable_entry_t *dflt, 132 cistpl_cftable_entry_t *dflt,
@@ -164,19 +151,22 @@ static int aha152x_config_cs(struct pcmcia_device *link)
164{ 151{
165 scsi_info_t *info = link->priv; 152 scsi_info_t *info = link->priv;
166 struct aha152x_setup s; 153 struct aha152x_setup s;
167 int last_ret, last_fn; 154 int ret;
168 struct Scsi_Host *host; 155 struct Scsi_Host *host;
169 156
170 DEBUG(0, "aha152x_config(0x%p)\n", link); 157 dev_dbg(&link->dev, "aha152x_config\n");
171 158
172 last_ret = pcmcia_loop_config(link, aha152x_config_check, NULL); 159 ret = pcmcia_loop_config(link, aha152x_config_check, NULL);
173 if (last_ret) { 160 if (ret)
174 cs_error(link, RequestIO, last_ret); 161 goto failed;
175 goto failed;
176 }
177 162
178 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 163 ret = pcmcia_request_irq(link, &link->irq);
179 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); 164 if (ret)
165 goto failed;
166
167 ret = pcmcia_request_configuration(link, &link->conf);
168 if (ret)
169 goto failed;
180 170
181 /* Set configuration options for the aha152x driver */ 171 /* Set configuration options for the aha152x driver */
182 memset(&s, 0, sizeof(s)); 172 memset(&s, 0, sizeof(s));
@@ -194,7 +184,7 @@ static int aha152x_config_cs(struct pcmcia_device *link)
194 host = aha152x_probe_one(&s); 184 host = aha152x_probe_one(&s);
195 if (host == NULL) { 185 if (host == NULL) {
196 printk(KERN_INFO "aha152x_cs: no SCSI devices found\n"); 186 printk(KERN_INFO "aha152x_cs: no SCSI devices found\n");
197 goto cs_failed; 187 goto failed;
198 } 188 }
199 189
200 sprintf(info->node.dev_name, "scsi%d", host->host_no); 190 sprintf(info->node.dev_name, "scsi%d", host->host_no);
@@ -203,8 +193,6 @@ static int aha152x_config_cs(struct pcmcia_device *link)
203 193
204 return 0; 194 return 0;
205 195
206cs_failed:
207 cs_error(link, last_fn, last_ret);
208failed: 196failed:
209 aha152x_release_cs(link); 197 aha152x_release_cs(link);
210 return -ENODEV; 198 return -ENODEV;
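
The hunks above show the pattern this series applies to all four PCMCIA SCSI stubs (aha152x, fdomain, qlogic, sym53c500): the CS_CHECK() macro, which stashed the failing function code and jumped to a cs_failed label that called cs_error(), gives way to plain return-code checks sharing a single failed label. A condensed before/after sketch, using the real calls from the diff:

    /* Before: the macro hides both the bookkeeping and the jump */
    #define CS_CHECK(fn, ret) \
            do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)

    CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
    CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
    ...
    cs_failed:
            cs_error(link, last_fn, last_ret);
    failed:
            aha152x_release_cs(link);
            return -ENODEV;

    /* After: explicit checks, one cleanup label, no cs_error() reporting */
    ret = pcmcia_request_irq(link, &link->irq);
    if (ret)
            goto failed;
    ret = pcmcia_request_configuration(link, &link->conf);
    if (ret)
            goto failed;
    ...
    failed:
            aha152x_release_cs(link);
            return -ENODEV;
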
diff --git a/drivers/scsi/pcmcia/fdomain_stub.c b/drivers/scsi/pcmcia/fdomain_stub.c
index 06254f46a0dd..914040684079 100644
--- a/drivers/scsi/pcmcia/fdomain_stub.c
+++ b/drivers/scsi/pcmcia/fdomain_stub.c
@@ -59,16 +59,6 @@ MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
59MODULE_DESCRIPTION("Future Domain PCMCIA SCSI driver"); 59MODULE_DESCRIPTION("Future Domain PCMCIA SCSI driver");
60MODULE_LICENSE("Dual MPL/GPL"); 60MODULE_LICENSE("Dual MPL/GPL");
61 61
62#ifdef PCMCIA_DEBUG
63static int pc_debug = PCMCIA_DEBUG;
64module_param(pc_debug, int, 0);
65#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
66static char *version =
67"fdomain_cs.c 1.47 2001/10/13 00:08:52 (David Hinds)";
68#else
69#define DEBUG(n, args...)
70#endif
71
72/*====================================================================*/ 62/*====================================================================*/
73 63
74typedef struct scsi_info_t { 64typedef struct scsi_info_t {
@@ -86,7 +76,7 @@ static int fdomain_probe(struct pcmcia_device *link)
86{ 76{
87 scsi_info_t *info; 77 scsi_info_t *info;
88 78
89 DEBUG(0, "fdomain_attach()\n"); 79 dev_dbg(&link->dev, "fdomain_attach()\n");
90 80
91 /* Create new SCSI device */ 81 /* Create new SCSI device */
92 info = kzalloc(sizeof(*info), GFP_KERNEL); 82 info = kzalloc(sizeof(*info), GFP_KERNEL);
@@ -99,7 +89,6 @@ static int fdomain_probe(struct pcmcia_device *link)
99 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; 89 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
100 link->io.IOAddrLines = 10; 90 link->io.IOAddrLines = 10;
101 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; 91 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
102 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
103 link->conf.Attributes = CONF_ENABLE_IRQ; 92 link->conf.Attributes = CONF_ENABLE_IRQ;
104 link->conf.IntType = INT_MEMORY_AND_IO; 93 link->conf.IntType = INT_MEMORY_AND_IO;
105 link->conf.Present = PRESENT_OPTION; 94 link->conf.Present = PRESENT_OPTION;
@@ -111,7 +100,7 @@ static int fdomain_probe(struct pcmcia_device *link)
111 100
112static void fdomain_detach(struct pcmcia_device *link) 101static void fdomain_detach(struct pcmcia_device *link)
113{ 102{
114 DEBUG(0, "fdomain_detach(0x%p)\n", link); 103 dev_dbg(&link->dev, "fdomain_detach\n");
115 104
116 fdomain_release(link); 105 fdomain_release(link);
117 106
@@ -120,9 +109,6 @@ static void fdomain_detach(struct pcmcia_device *link)
120 109
121/*====================================================================*/ 110/*====================================================================*/
122 111
123#define CS_CHECK(fn, ret) \
124do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
125
126static int fdomain_config_check(struct pcmcia_device *p_dev, 112static int fdomain_config_check(struct pcmcia_device *p_dev,
127 cistpl_cftable_entry_t *cfg, 113 cistpl_cftable_entry_t *cfg,
128 cistpl_cftable_entry_t *dflt, 114 cistpl_cftable_entry_t *dflt,
@@ -137,20 +123,22 @@ static int fdomain_config_check(struct pcmcia_device *p_dev,
137static int fdomain_config(struct pcmcia_device *link) 123static int fdomain_config(struct pcmcia_device *link)
138{ 124{
139 scsi_info_t *info = link->priv; 125 scsi_info_t *info = link->priv;
140 int last_ret, last_fn; 126 int ret;
141 char str[22]; 127 char str[22];
142 struct Scsi_Host *host; 128 struct Scsi_Host *host;
143 129
144 DEBUG(0, "fdomain_config(0x%p)\n", link); 130 dev_dbg(&link->dev, "fdomain_config\n");
145 131
146 last_ret = pcmcia_loop_config(link, fdomain_config_check, NULL); 132 ret = pcmcia_loop_config(link, fdomain_config_check, NULL);
147 if (last_ret) { 133 if (ret)
148 cs_error(link, RequestIO, last_ret);
149 goto failed; 134 goto failed;
150 }
151 135
152 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 136 ret = pcmcia_request_irq(link, &link->irq);
153 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); 137 if (ret)
138 goto failed;
139 ret = pcmcia_request_configuration(link, &link->conf);
140 if (ret)
141 goto failed;
154 142
155 /* A bad hack... */ 143 /* A bad hack... */
156 release_region(link->io.BasePort1, link->io.NumPorts1); 144 release_region(link->io.BasePort1, link->io.NumPorts1);
@@ -162,11 +150,11 @@ static int fdomain_config(struct pcmcia_device *link)
162 host = __fdomain_16x0_detect(&fdomain_driver_template); 150 host = __fdomain_16x0_detect(&fdomain_driver_template);
163 if (!host) { 151 if (!host) {
164 printk(KERN_INFO "fdomain_cs: no SCSI devices found\n"); 152 printk(KERN_INFO "fdomain_cs: no SCSI devices found\n");
165 goto cs_failed; 153 goto failed;
166 } 154 }
167 155
168 if (scsi_add_host(host, NULL)) 156 if (scsi_add_host(host, NULL))
169 goto cs_failed; 157 goto failed;
170 scsi_scan_host(host); 158 scsi_scan_host(host);
171 159
172 sprintf(info->node.dev_name, "scsi%d", host->host_no); 160 sprintf(info->node.dev_name, "scsi%d", host->host_no);
@@ -175,8 +163,6 @@ static int fdomain_config(struct pcmcia_device *link)
175 163
176 return 0; 164 return 0;
177 165
178cs_failed:
179 cs_error(link, last_fn, last_ret);
180failed: 166failed:
181 fdomain_release(link); 167 fdomain_release(link);
182 return -ENODEV; 168 return -ENODEV;
@@ -188,7 +174,7 @@ static void fdomain_release(struct pcmcia_device *link)
188{ 174{
189 scsi_info_t *info = link->priv; 175 scsi_info_t *info = link->priv;
190 176
191 DEBUG(0, "fdomain_release(0x%p)\n", link); 177 dev_dbg(&link->dev, "fdomain_release\n");
192 178
193 scsi_remove_host(info->host); 179 scsi_remove_host(info->host);
194 pcmcia_disable_device(link); 180 pcmcia_disable_device(link);
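
The same conversion removes each stub's private PCMCIA_DEBUG/DEBUG() plumbing, a compile-time printk gated by a pc_debug module parameter, in favor of the kernel's generic dev_dbg() helper, which ties the message to the PCMCIA device and cooperates with dynamic debug. Condensed from the fdomain hunks above:

    /* Before: per-driver debug macro, dead code unless PCMCIA_DEBUG is set */
    #ifdef PCMCIA_DEBUG
    static int pc_debug = PCMCIA_DEBUG;
    module_param(pc_debug, int, 0);
    #define DEBUG(n, args...) if (pc_debug > (n)) printk(KERN_DEBUG args)
    #else
    #define DEBUG(n, args...)
    #endif

    DEBUG(0, "fdomain_config(0x%p)\n", link);

    /* After: one line, no driver-local state */
    dev_dbg(&link->dev, "fdomain_config\n");
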
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index e32c344d7ad8..021246454872 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -1564,12 +1564,10 @@ static int nsp_cs_probe(struct pcmcia_device *link)
1564 link->io.IOAddrLines = 10; /* not used */ 1564 link->io.IOAddrLines = 10; /* not used */
1565 1565
1566 /* Interrupt setup */ 1566 /* Interrupt setup */
1567 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT; 1567 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
1568 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
1569 1568
1570 /* Interrupt handler */ 1569 /* Interrupt handler */
1571 link->irq.Handler = &nspintr; 1570 link->irq.Handler = &nspintr;
1572 link->irq.Instance = info;
1573 link->irq.Attributes |= IRQF_SHARED; 1571 link->irq.Attributes |= IRQF_SHARED;
1574 1572
1575 /* General socket configuration */ 1573 /* General socket configuration */
@@ -1684,10 +1682,10 @@ static int nsp_cs_config_check(struct pcmcia_device *p_dev,
1684 if (cfg_mem->req.Size < 0x1000) 1682 if (cfg_mem->req.Size < 0x1000)
1685 cfg_mem->req.Size = 0x1000; 1683 cfg_mem->req.Size = 0x1000;
1686 cfg_mem->req.AccessSpeed = 0; 1684 cfg_mem->req.AccessSpeed = 0;
1687 if (pcmcia_request_window(&p_dev, &cfg_mem->req, &p_dev->win) != 0) 1685 if (pcmcia_request_window(p_dev, &cfg_mem->req, &p_dev->win) != 0)
1688 goto next_entry; 1686 goto next_entry;
1689 map.Page = 0; map.CardOffset = mem->win[0].card_addr; 1687 map.Page = 0; map.CardOffset = mem->win[0].card_addr;
1690 if (pcmcia_map_mem_page(p_dev->win, &map) != 0) 1688 if (pcmcia_map_mem_page(p_dev, p_dev->win, &map) != 0)
1691 goto next_entry; 1689 goto next_entry;
1692 1690
1693 cfg_mem->data->MmioAddress = (unsigned long) ioremap_nocache(cfg_mem->req.Base, cfg_mem->req.Size); 1691 cfg_mem->data->MmioAddress = (unsigned long) ioremap_nocache(cfg_mem->req.Base, cfg_mem->req.Size);
@@ -1719,6 +1717,7 @@ static int nsp_cs_config(struct pcmcia_device *link)
1719 cfg_mem->data = data; 1717 cfg_mem->data = data;
1720 1718
1721 ret = pcmcia_loop_config(link, nsp_cs_config_check, cfg_mem); 1719 ret = pcmcia_loop_config(link, nsp_cs_config_check, cfg_mem);
1720 if (ret)
1722 goto cs_failed; 1721 goto cs_failed;
1723 1722
1724 if (link->conf.Attributes & CONF_ENABLE_IRQ) { 1723 if (link->conf.Attributes & CONF_ENABLE_IRQ) {
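
The nsp_cs hunk above also picks up a pcmcia core API change: pcmcia_request_window() now takes the struct pcmcia_device pointer by value instead of by reference, and pcmcia_map_mem_page() gains the device as a new first argument. The call shapes, lifted straight from the two sides of the hunk:

    /* Old ('-' side) */
    pcmcia_request_window(&p_dev, &cfg_mem->req, &p_dev->win);
    pcmcia_map_mem_page(p_dev->win, &map);

    /* New ('+' side) */
    pcmcia_request_window(p_dev, &cfg_mem->req, &p_dev->win);
    pcmcia_map_mem_page(p_dev, p_dev->win, &map);
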
diff --git a/drivers/scsi/pcmcia/nsp_cs.h b/drivers/scsi/pcmcia/nsp_cs.h
index 7db28cd49446..8c61a4fe1db9 100644
--- a/drivers/scsi/pcmcia/nsp_cs.h
+++ b/drivers/scsi/pcmcia/nsp_cs.h
@@ -187,7 +187,7 @@
187#define S_IO BIT(1) /* Input/Output line from SCSI bus */ 187#define S_IO BIT(1) /* Input/Output line from SCSI bus */
188#define S_CD BIT(2) /* Command/Data line from SCSI bus */ 188#define S_CD BIT(2) /* Command/Data line from SCSI bus */
189#define S_BUSY BIT(3) /* Busy line from SCSI bus */ 189#define S_BUSY BIT(3) /* Busy line from SCSI bus */
190#define S_ACK BIT(4) /* Acknowlege line from SCSI bus */ 190#define S_ACK BIT(4) /* Acknowledge line from SCSI bus */
191#define S_REQUEST BIT(5) /* Request line from SCSI bus */ 191#define S_REQUEST BIT(5) /* Request line from SCSI bus */
192#define S_SELECT BIT(6) /* */ 192#define S_SELECT BIT(6) /* */
193#define S_ATN BIT(7) /* */ 193#define S_ATN BIT(7) /* */
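
The S_* masks above decode the SCSI bus signal lines from the card's bus-monitor register. A hypothetical phase check, assuming `sig` holds a sampled copy of that register (the accessor itself is not part of this hunk):

    /* Hypothetical: target asserted REQ; classify the information phase */
    if (sig & S_REQUEST) {
            if ((sig & (S_CD | S_IO)) == (S_CD | S_IO))
                    ; /* C/D and I/O both asserted: STATUS or MESSAGE-IN */
            else if (sig & S_IO)
                    ; /* DATA-IN: the target drives the bus */
    }
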
diff --git a/drivers/scsi/pcmcia/qlogic_stub.c b/drivers/scsi/pcmcia/qlogic_stub.c
index 20c3e5e6d88a..f85f094870b4 100644
--- a/drivers/scsi/pcmcia/qlogic_stub.c
+++ b/drivers/scsi/pcmcia/qlogic_stub.c
@@ -62,15 +62,6 @@
62 62
63static char qlogic_name[] = "qlogic_cs"; 63static char qlogic_name[] = "qlogic_cs";
64 64
65#ifdef PCMCIA_DEBUG
66static int pc_debug = PCMCIA_DEBUG;
67module_param(pc_debug, int, 0644);
68#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
69static char *version = "qlogic_cs.c 1.79-ac 2002/10/26 (David Hinds)";
70#else
71#define DEBUG(n, args...)
72#endif
73
74static struct scsi_host_template qlogicfas_driver_template = { 65static struct scsi_host_template qlogicfas_driver_template = {
75 .module = THIS_MODULE, 66 .module = THIS_MODULE,
76 .name = qlogic_name, 67 .name = qlogic_name,
@@ -159,7 +150,7 @@ static int qlogic_probe(struct pcmcia_device *link)
159{ 150{
160 scsi_info_t *info; 151 scsi_info_t *info;
161 152
162 DEBUG(0, "qlogic_attach()\n"); 153 dev_dbg(&link->dev, "qlogic_attach()\n");
163 154
164 /* Create new SCSI device */ 155 /* Create new SCSI device */
165 info = kzalloc(sizeof(*info), GFP_KERNEL); 156 info = kzalloc(sizeof(*info), GFP_KERNEL);
@@ -171,7 +162,6 @@ static int qlogic_probe(struct pcmcia_device *link)
171 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; 162 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
172 link->io.IOAddrLines = 10; 163 link->io.IOAddrLines = 10;
173 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; 164 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
174 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
175 link->conf.Attributes = CONF_ENABLE_IRQ; 165 link->conf.Attributes = CONF_ENABLE_IRQ;
176 link->conf.IntType = INT_MEMORY_AND_IO; 166 link->conf.IntType = INT_MEMORY_AND_IO;
177 link->conf.Present = PRESENT_OPTION; 167 link->conf.Present = PRESENT_OPTION;
@@ -183,7 +173,7 @@ static int qlogic_probe(struct pcmcia_device *link)
183 173
184static void qlogic_detach(struct pcmcia_device *link) 174static void qlogic_detach(struct pcmcia_device *link)
185{ 175{
186 DEBUG(0, "qlogic_detach(0x%p)\n", link); 176 dev_dbg(&link->dev, "qlogic_detach\n");
187 177
188 qlogic_release(link); 178 qlogic_release(link);
189 kfree(link->priv); 179 kfree(link->priv);
@@ -192,9 +182,6 @@ static void qlogic_detach(struct pcmcia_device *link)
192 182
193/*====================================================================*/ 183/*====================================================================*/
194 184
195#define CS_CHECK(fn, ret) \
196do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
197
198static int qlogic_config_check(struct pcmcia_device *p_dev, 185static int qlogic_config_check(struct pcmcia_device *p_dev,
199 cistpl_cftable_entry_t *cfg, 186 cistpl_cftable_entry_t *cfg,
200 cistpl_cftable_entry_t *dflt, 187 cistpl_cftable_entry_t *dflt,
@@ -213,19 +200,22 @@ static int qlogic_config_check(struct pcmcia_device *p_dev,
213static int qlogic_config(struct pcmcia_device * link) 200static int qlogic_config(struct pcmcia_device * link)
214{ 201{
215 scsi_info_t *info = link->priv; 202 scsi_info_t *info = link->priv;
216 int last_ret, last_fn; 203 int ret;
217 struct Scsi_Host *host; 204 struct Scsi_Host *host;
218 205
219 DEBUG(0, "qlogic_config(0x%p)\n", link); 206 dev_dbg(&link->dev, "qlogic_config\n");
220 207
221 last_ret = pcmcia_loop_config(link, qlogic_config_check, NULL); 208 ret = pcmcia_loop_config(link, qlogic_config_check, NULL);
222 if (last_ret) { 209 if (ret)
223 cs_error(link, RequestIO, last_ret); 210 goto failed;
211
212 ret = pcmcia_request_irq(link, &link->irq);
213 if (ret)
224 goto failed; 214 goto failed;
225 }
226 215
227 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 216 ret = pcmcia_request_configuration(link, &link->conf);
228 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); 217 if (ret)
218 goto failed;
229 219
230 if ((info->manf_id == MANFID_MACNICA) || (info->manf_id == MANFID_PIONEER) || (info->manf_id == 0x0098)) { 220 if ((info->manf_id == MANFID_MACNICA) || (info->manf_id == MANFID_PIONEER) || (info->manf_id == 0x0098)) {
231 /* set ATAcmd */ 221 /* set ATAcmd */
@@ -244,7 +234,7 @@ static int qlogic_config(struct pcmcia_device * link)
244 234
245 if (!host) { 235 if (!host) {
246 printk(KERN_INFO "%s: no SCSI devices found\n", qlogic_name); 236 printk(KERN_INFO "%s: no SCSI devices found\n", qlogic_name);
247 goto cs_failed; 237 goto failed;
248 } 238 }
249 239
250 sprintf(info->node.dev_name, "scsi%d", host->host_no); 240 sprintf(info->node.dev_name, "scsi%d", host->host_no);
@@ -253,12 +243,9 @@ static int qlogic_config(struct pcmcia_device * link)
253 243
254 return 0; 244 return 0;
255 245
256cs_failed:
257 cs_error(link, last_fn, last_ret);
258 pcmcia_disable_device(link);
259failed: 246failed:
247 pcmcia_disable_device(link);
260 return -ENODEV; 248 return -ENODEV;
261
262} /* qlogic_config */ 249} /* qlogic_config */
263 250
264/*====================================================================*/ 251/*====================================================================*/
@@ -267,7 +254,7 @@ static void qlogic_release(struct pcmcia_device *link)
267{ 254{
268 scsi_info_t *info = link->priv; 255 scsi_info_t *info = link->priv;
269 256
270 DEBUG(0, "qlogic_release(0x%p)\n", link); 257 dev_dbg(&link->dev, "qlogic_release\n");
271 258
272 scsi_remove_host(info->host); 259 scsi_remove_host(info->host);
273 260
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c
index b330c11a1752..e7564d8f0cbf 100644
--- a/drivers/scsi/pcmcia/sym53c500_cs.c
+++ b/drivers/scsi/pcmcia/sym53c500_cs.c
@@ -77,17 +77,6 @@
77#include <pcmcia/ds.h> 77#include <pcmcia/ds.h>
78#include <pcmcia/ciscode.h> 78#include <pcmcia/ciscode.h>
79 79
80/* ================================================================== */
81
82#ifdef PCMCIA_DEBUG
83static int pc_debug = PCMCIA_DEBUG;
84module_param(pc_debug, int, 0);
85#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
86static char *version =
87"sym53c500_cs.c 0.9c 2004/10/27 (Bob Tracy)";
88#else
89#define DEBUG(n, args...)
90#endif
91 80
92/* ================================================================== */ 81/* ================================================================== */
93 82
@@ -525,7 +514,7 @@ SYM53C500_release(struct pcmcia_device *link)
525 struct scsi_info_t *info = link->priv; 514 struct scsi_info_t *info = link->priv;
526 struct Scsi_Host *shost = info->host; 515 struct Scsi_Host *shost = info->host;
527 516
528 DEBUG(0, "SYM53C500_release(0x%p)\n", link); 517 dev_dbg(&link->dev, "SYM53C500_release\n");
529 518
530 /* 519 /*
531 * Do this before releasing/freeing resources. 520 * Do this before releasing/freeing resources.
@@ -697,9 +686,6 @@ static struct scsi_host_template sym53c500_driver_template = {
697 .shost_attrs = SYM53C500_shost_attrs 686 .shost_attrs = SYM53C500_shost_attrs
698}; 687};
699 688
700#define CS_CHECK(fn, ret) \
701do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
702
703static int SYM53C500_config_check(struct pcmcia_device *p_dev, 689static int SYM53C500_config_check(struct pcmcia_device *p_dev,
704 cistpl_cftable_entry_t *cfg, 690 cistpl_cftable_entry_t *cfg,
705 cistpl_cftable_entry_t *dflt, 691 cistpl_cftable_entry_t *dflt,
@@ -719,24 +705,27 @@ static int
719SYM53C500_config(struct pcmcia_device *link) 705SYM53C500_config(struct pcmcia_device *link)
720{ 706{
721 struct scsi_info_t *info = link->priv; 707 struct scsi_info_t *info = link->priv;
722 int last_ret, last_fn; 708 int ret;
723 int irq_level, port_base; 709 int irq_level, port_base;
724 struct Scsi_Host *host; 710 struct Scsi_Host *host;
725 struct scsi_host_template *tpnt = &sym53c500_driver_template; 711 struct scsi_host_template *tpnt = &sym53c500_driver_template;
726 struct sym53c500_data *data; 712 struct sym53c500_data *data;
727 713
728 DEBUG(0, "SYM53C500_config(0x%p)\n", link); 714 dev_dbg(&link->dev, "SYM53C500_config\n");
729 715
730 info->manf_id = link->manf_id; 716 info->manf_id = link->manf_id;
731 717
732 last_ret = pcmcia_loop_config(link, SYM53C500_config_check, NULL); 718 ret = pcmcia_loop_config(link, SYM53C500_config_check, NULL);
733 if (last_ret) { 719 if (ret)
734 cs_error(link, RequestIO, last_ret); 720 goto failed;
721
722 ret = pcmcia_request_irq(link, &link->irq);
723 if (ret)
735 goto failed; 724 goto failed;
736 }
737 725
738 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 726 ret = pcmcia_request_configuration(link, &link->conf);
739 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); 727 if (ret)
728 goto failed;
740 729
741 /* 730 /*
742 * That's the trouble with copying liberally from another driver. 731 * That's the trouble with copying liberally from another driver.
@@ -824,8 +813,6 @@ err_release:
824 printk(KERN_INFO "sym53c500_cs: no SCSI devices found\n"); 813 printk(KERN_INFO "sym53c500_cs: no SCSI devices found\n");
825 return -ENODEV; 814 return -ENODEV;
826 815
827cs_failed:
828 cs_error(link, last_fn, last_ret);
829failed: 816failed:
830 SYM53C500_release(link); 817 SYM53C500_release(link);
831 return -ENODEV; 818 return -ENODEV;
@@ -855,7 +842,7 @@ static int sym53c500_resume(struct pcmcia_device *link)
855static void 842static void
856SYM53C500_detach(struct pcmcia_device *link) 843SYM53C500_detach(struct pcmcia_device *link)
857{ 844{
858 DEBUG(0, "SYM53C500_detach(0x%p)\n", link); 845 dev_dbg(&link->dev, "SYM53C500_detach\n");
859 846
860 SYM53C500_release(link); 847 SYM53C500_release(link);
861 848
@@ -868,7 +855,7 @@ SYM53C500_probe(struct pcmcia_device *link)
868{ 855{
869 struct scsi_info_t *info; 856 struct scsi_info_t *info;
870 857
871 DEBUG(0, "SYM53C500_attach()\n"); 858 dev_dbg(&link->dev, "SYM53C500_attach()\n");
872 859
873 /* Create new SCSI device */ 860 /* Create new SCSI device */
874 info = kzalloc(sizeof(*info), GFP_KERNEL); 861 info = kzalloc(sizeof(*info), GFP_KERNEL);
@@ -880,7 +867,6 @@ SYM53C500_probe(struct pcmcia_device *link)
880 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; 867 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
881 link->io.IOAddrLines = 10; 868 link->io.IOAddrLines = 10;
882 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; 869 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
883 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
884 link->conf.Attributes = CONF_ENABLE_IRQ; 870 link->conf.Attributes = CONF_ENABLE_IRQ;
885 link->conf.IntType = INT_MEMORY_AND_IO; 871 link->conf.IntType = INT_MEMORY_AND_IO;
886 872
diff --git a/drivers/scsi/pm8001/Makefile b/drivers/scsi/pm8001/Makefile
new file mode 100644
index 000000000000..52f04296171c
--- /dev/null
+++ b/drivers/scsi/pm8001/Makefile
@@ -0,0 +1,12 @@
1#
2# Kernel Makefile for the PM8001 SAS/SATA 8x6G based HBA driver
3#
4# Copyright (C) 2008-2009 USI Co., Ltd.
5
6
7obj-$(CONFIG_SCSI_PM8001) += pm8001.o
8pm8001-y += pm8001_init.o \
9 pm8001_sas.o \
10 pm8001_ctl.o \
11 pm8001_hwi.o
12
diff --git a/drivers/scsi/pm8001/pm8001_chips.h b/drivers/scsi/pm8001/pm8001_chips.h
new file mode 100644
index 000000000000..4efa4d0950e5
--- /dev/null
+++ b/drivers/scsi/pm8001/pm8001_chips.h
@@ -0,0 +1,89 @@
1/*
2 * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
3 *
4 * Copyright (c) 2008-2009 USI Co., Ltd.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification.
13 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14 * substantially similar to the "NO WARRANTY" disclaimer below
15 * ("Disclaimer") and any redistribution must be conditioned upon
16 * including a substantially similar Disclaimer requirement for further
17 * binary redistribution.
18 * 3. Neither the names of the above-listed copyright holders nor the names
19 * of any contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * Alternatively, this software may be distributed under the terms of the
23 * GNU General Public License ("GPL") version 2 as published by the Free
24 * Software Foundation.
25 *
26 * NO WARRANTY
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
35 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
36 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGES.
38 *
39 */
40
41#ifndef _PM8001_CHIPS_H_
42#define _PM8001_CHIPS_H_
43
44static inline u32 pm8001_read_32(void *virt_addr)
45{
46 return *((u32 *)virt_addr);
47}
48
49static inline void pm8001_write_32(void *addr, u32 offset, u32 val)
50{
51 *((u32 *)(addr + offset)) = val;
52}
53
54static inline u32 pm8001_cr32(struct pm8001_hba_info *pm8001_ha, u32 bar,
55 u32 offset)
56{
57 return readl(pm8001_ha->io_mem[bar].memvirtaddr + offset);
58}
59
60static inline void pm8001_cw32(struct pm8001_hba_info *pm8001_ha, u32 bar,
61 u32 addr, u32 val)
62{
63 writel(val, pm8001_ha->io_mem[bar].memvirtaddr + addr);
64}
65static inline u32 pm8001_mr32(void __iomem *addr, u32 offset)
66{
67 return readl(addr + offset);
68}
69static inline void pm8001_mw32(void __iomem *addr, u32 offset, u32 val)
70{
71 writel(val, addr + offset);
72}
73static inline u32 get_pci_bar_index(u32 pcibar)
74{
75 switch (pcibar) {
76 case 0x18:
77 case 0x1C:
78 return 1;
79 case 0x20:
80 return 2;
81 case 0x24:
82 return 3;
83 default:
84 return 0;
85 }
86}
87
88#endif /* _PM8001_CHIPS_H_ */
89
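
get_pci_bar_index() maps a PCI config-space BAR register offset, as the firmware tables report it, onto an index into the driver's io_mem[] array: 0x18/0x1C yield 1, 0x20 yields 2, 0x24 yields 3, and everything else (including BAR0 at 0x10) falls back to 0. A hypothetical use together with the accessors above, assuming an initialized pm8001_ha:

    u32 bar = get_pci_bar_index(0x20);            /* BAR4 offset -> io_mem index 2 */
    u32 val = pm8001_cr32(pm8001_ha, bar, 0x80);  /* read the dword at offset 0x80 */
    pm8001_cw32(pm8001_ha, bar, 0x80, val | 1);   /* write it back with bit 0 set */
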
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
new file mode 100644
index 000000000000..45bc197bc22f
--- /dev/null
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -0,0 +1,574 @@
1/*
2 * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
3 *
4 * Copyright (c) 2008-2009 USI Co., Ltd.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification.
13 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14 * substantially similar to the "NO WARRANTY" disclaimer below
15 * ("Disclaimer") and any redistribution must be conditioned upon
16 * including a substantially similar Disclaimer requirement for further
17 * binary redistribution.
18 * 3. Neither the names of the above-listed copyright holders nor the names
19 * of any contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * Alternatively, this software may be distributed under the terms of the
23 * GNU General Public License ("GPL") version 2 as published by the Free
24 * Software Foundation.
25 *
26 * NO WARRANTY
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
35 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
36 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGES.
38 *
39 */
40#include <linux/firmware.h>
41#include <linux/slab.h>
42#include "pm8001_sas.h"
43#include "pm8001_ctl.h"
44
45/* scsi host attributes */
46
47/**
48 * pm8001_ctl_mpi_interface_rev_show - MPI interface revision number
49 * @cdev: pointer to embedded class device
50 * @buf: the buffer returned
51 *
52 * A sysfs 'read-only' shost attribute.
53 */
54static ssize_t pm8001_ctl_mpi_interface_rev_show(struct device *cdev,
55 struct device_attribute *attr, char *buf)
56{
57 struct Scsi_Host *shost = class_to_shost(cdev);
58 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
59 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
60
61 return snprintf(buf, PAGE_SIZE, "%d\n",
62 pm8001_ha->main_cfg_tbl.interface_rev);
63}
64static
65DEVICE_ATTR(interface_rev, S_IRUGO, pm8001_ctl_mpi_interface_rev_show, NULL);
66
67/**
68 * pm8001_ctl_fw_version_show - firmware version
69 * @cdev: pointer to embedded class device
70 * @buf: the buffer returned
71 *
72 * A sysfs 'read-only' shost attribute.
73 */
74static ssize_t pm8001_ctl_fw_version_show(struct device *cdev,
75 struct device_attribute *attr, char *buf)
76{
77 struct Scsi_Host *shost = class_to_shost(cdev);
78 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
79 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
80
81 return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
82 (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 24),
83 (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 16),
84 (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 8),
85 (u8)(pm8001_ha->main_cfg_tbl.firmware_rev));
86}
87static DEVICE_ATTR(fw_version, S_IRUGO, pm8001_ctl_fw_version_show, NULL);
88/**
89 * pm8001_ctl_max_out_io_show - max outstanding io supported
90 * @cdev: pointer to embedded class device
91 * @buf: the buffer returned
92 *
93 * A sysfs 'read-only' shost attribute.
94 */
95static ssize_t pm8001_ctl_max_out_io_show(struct device *cdev,
96 struct device_attribute *attr, char *buf)
97{
98 struct Scsi_Host *shost = class_to_shost(cdev);
99 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
100 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
101
102 return snprintf(buf, PAGE_SIZE, "%d\n",
103 pm8001_ha->main_cfg_tbl.max_out_io);
104}
105static DEVICE_ATTR(max_out_io, S_IRUGO, pm8001_ctl_max_out_io_show, NULL);
106/**
107 * pm8001_ctl_max_devices_show - max devices support
108 * @cdev: pointer to embedded class device
109 * @buf: the buffer returned
110 *
111 * A sysfs 'read-only' shost attribute.
112 */
113static ssize_t pm8001_ctl_max_devices_show(struct device *cdev,
114 struct device_attribute *attr, char *buf)
115{
116 struct Scsi_Host *shost = class_to_shost(cdev);
117 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
118 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
119
120 return snprintf(buf, PAGE_SIZE, "%04d\n",
121 (u16)(pm8001_ha->main_cfg_tbl.max_sgl >> 16));
122}
123static DEVICE_ATTR(max_devices, S_IRUGO, pm8001_ctl_max_devices_show, NULL);
124/**
 125 * pm8001_ctl_max_sg_list_show - maximum supported scatter-gather list
 126 * length; zero means no hardware limitation
127 * @cdev: pointer to embedded class device
128 * @buf: the buffer returned
129 *
130 * A sysfs 'read-only' shost attribute.
131 */
132static ssize_t pm8001_ctl_max_sg_list_show(struct device *cdev,
133 struct device_attribute *attr, char *buf)
134{
135 struct Scsi_Host *shost = class_to_shost(cdev);
136 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
137 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
138
139 return snprintf(buf, PAGE_SIZE, "%04d\n",
140 pm8001_ha->main_cfg_tbl.max_sgl & 0x0000FFFF);
141}
142static DEVICE_ATTR(max_sg_list, S_IRUGO, pm8001_ctl_max_sg_list_show, NULL);
143
144#define SAS_1_0 0x1
145#define SAS_1_1 0x2
146#define SAS_2_0 0x4
147
148static ssize_t
149show_sas_spec_support_status(unsigned int mode, char *buf)
150{
151 ssize_t len = 0;
152
153 if (mode & SAS_1_1)
154 len = sprintf(buf, "%s", "SAS1.1");
155 if (mode & SAS_2_0)
156 len += sprintf(buf + len, "%s%s", len ? ", " : "", "SAS2.0");
157 len += sprintf(buf + len, "\n");
158
159 return len;
160}
161
162/**
163 * pm8001_ctl_sas_spec_support_show - sas spec supported
164 * @cdev: pointer to embedded class device
165 * @buf: the buffer returned
166 *
167 * A sysfs 'read-only' shost attribute.
168 */
169static ssize_t pm8001_ctl_sas_spec_support_show(struct device *cdev,
170 struct device_attribute *attr, char *buf)
171{
172 unsigned int mode;
173 struct Scsi_Host *shost = class_to_shost(cdev);
174 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
175 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
176 mode = (pm8001_ha->main_cfg_tbl.ctrl_cap_flag & 0xfe000000)>>25;
177 return show_sas_spec_support_status(mode, buf);
178}
179static DEVICE_ATTR(sas_spec_support, S_IRUGO,
180 pm8001_ctl_sas_spec_support_show, NULL);
181
182/**
183 * pm8001_ctl_sas_address_show - sas address
184 * @cdev: pointer to embedded class device
185 * @buf: the buffer returned
186 *
187 * This is the controller sas address
188 *
189 * A sysfs 'read-only' shost attribute.
190 */
191static ssize_t pm8001_ctl_host_sas_address_show(struct device *cdev,
192 struct device_attribute *attr, char *buf)
193{
194 struct Scsi_Host *shost = class_to_shost(cdev);
195 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
196 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
197 return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
198 be64_to_cpu(*(__be64 *)pm8001_ha->sas_addr));
199}
200static DEVICE_ATTR(host_sas_address, S_IRUGO,
201 pm8001_ctl_host_sas_address_show, NULL);
202
203/**
204 * pm8001_ctl_logging_level_show - logging level
205 * @cdev: pointer to embedded class device
206 * @buf: the buffer returned
207 *
208 * A sysfs 'read/write' shost attribute.
209 */
210static ssize_t pm8001_ctl_logging_level_show(struct device *cdev,
211 struct device_attribute *attr, char *buf)
212{
213 struct Scsi_Host *shost = class_to_shost(cdev);
214 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
215 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
216
217 return snprintf(buf, PAGE_SIZE, "%08xh\n", pm8001_ha->logging_level);
218}
219static ssize_t pm8001_ctl_logging_level_store(struct device *cdev,
220 struct device_attribute *attr, const char *buf, size_t count)
221{
222 struct Scsi_Host *shost = class_to_shost(cdev);
223 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
224 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
225 int val = 0;
226
227 if (sscanf(buf, "%x", &val) != 1)
228 return -EINVAL;
229
230 pm8001_ha->logging_level = val;
231 return strlen(buf);
232}
233
234static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR,
235 pm8001_ctl_logging_level_show, pm8001_ctl_logging_level_store);
236/**
237 * pm8001_ctl_aap_log_show - aap1 event log
238 * @cdev: pointer to embedded class device
239 * @buf: the buffer returned
240 *
241 * A sysfs 'read-only' shost attribute.
242 */
243static ssize_t pm8001_ctl_aap_log_show(struct device *cdev,
244 struct device_attribute *attr, char *buf)
245{
246 struct Scsi_Host *shost = class_to_shost(cdev);
247 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
248 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
249 int i;
250#define AAP1_MEMMAP(r, c) \
251 (*(u32 *)((u8*)pm8001_ha->memoryMap.region[AAP1].virt_ptr + (r) * 32 \
252 + (c)))
253
254 char *str = buf;
255 int max = 2;
256 for (i = 0; i < max; i++) {
257 str += sprintf(str, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x"
 258 " 0x%08x 0x%08x\n",
259 AAP1_MEMMAP(i, 0),
260 AAP1_MEMMAP(i, 4),
261 AAP1_MEMMAP(i, 8),
262 AAP1_MEMMAP(i, 12),
263 AAP1_MEMMAP(i, 16),
264 AAP1_MEMMAP(i, 20),
265 AAP1_MEMMAP(i, 24),
266 AAP1_MEMMAP(i, 28));
267 }
268
269 return str - buf;
270}
271static DEVICE_ATTR(aap_log, S_IRUGO, pm8001_ctl_aap_log_show, NULL);
272/**
 273 * pm8001_ctl_iop_log_show - IOP event log
274 * @cdev: pointer to embedded class device
275 * @buf: the buffer returned
276 *
277 * A sysfs 'read-only' shost attribute.
278 */
279static ssize_t pm8001_ctl_iop_log_show(struct device *cdev,
280 struct device_attribute *attr, char *buf)
281{
282 struct Scsi_Host *shost = class_to_shost(cdev);
283 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
284 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
285#define IOP_MEMMAP(r, c) \
286 (*(u32 *)((u8*)pm8001_ha->memoryMap.region[IOP].virt_ptr + (r) * 32 \
287 + (c)))
288 int i;
289 char *str = buf;
290 int max = 2;
291 for (i = 0; i < max; i++) {
292 str += sprintf(str, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x"
 293 " 0x%08x 0x%08x\n",
294 IOP_MEMMAP(i, 0),
295 IOP_MEMMAP(i, 4),
296 IOP_MEMMAP(i, 8),
297 IOP_MEMMAP(i, 12),
298 IOP_MEMMAP(i, 16),
299 IOP_MEMMAP(i, 20),
300 IOP_MEMMAP(i, 24),
301 IOP_MEMMAP(i, 28));
302 }
303
304 return str - buf;
305}
306static DEVICE_ATTR(iop_log, S_IRUGO, pm8001_ctl_iop_log_show, NULL);
307
308#define FLASH_CMD_NONE 0x00
309#define FLASH_CMD_UPDATE 0x01
310#define FLASH_CMD_SET_NVMD 0x02
311
312struct flash_command {
313 u8 command[8];
314 int code;
315};
316
317static struct flash_command flash_command_table[] =
318{
319 {"set_nvmd", FLASH_CMD_SET_NVMD},
320 {"update", FLASH_CMD_UPDATE},
 321 {"", FLASH_CMD_NONE} /* Sentinel: the empty command string ends the table. */
322};
323
324struct error_fw {
325 char *reason;
326 int err_code;
327};
328
329static struct error_fw flash_error_table[] =
330{
331 {"Failed to open fw image file", FAIL_OPEN_BIOS_FILE},
332 {"image header mismatch", FLASH_UPDATE_HDR_ERR},
333 {"image offset mismatch", FLASH_UPDATE_OFFSET_ERR},
334 {"image CRC Error", FLASH_UPDATE_CRC_ERR},
335 {"image length Error.", FLASH_UPDATE_LENGTH_ERR},
336 {"Failed to program flash chip", FLASH_UPDATE_HW_ERR},
337 {"Flash chip not supported.", FLASH_UPDATE_DNLD_NOT_SUPPORTED},
338 {"Flash update disabled.", FLASH_UPDATE_DISABLED},
339 {"Flash in progress", FLASH_IN_PROGRESS},
340 {"Image file size Error", FAIL_FILE_SIZE},
341 {"Input parameter error", FAIL_PARAMETERS},
342 {"Out of memory", FAIL_OUT_MEMORY},
343 {"OK", 0} /* Last entry err_code = 0. */
344};
345
346static int pm8001_set_nvmd(struct pm8001_hba_info *pm8001_ha)
347{
348 struct pm8001_ioctl_payload *payload;
349 DECLARE_COMPLETION_ONSTACK(completion);
350 u8 *ioctlbuffer = NULL;
351 u32 length = 0;
352 u32 ret = 0;
353
354 length = 1024 * 5 + sizeof(*payload) - 1;
355 ioctlbuffer = kzalloc(length, GFP_KERNEL);
356 if (!ioctlbuffer)
357 return -ENOMEM;
358 if ((pm8001_ha->fw_image->size <= 0) ||
359 (pm8001_ha->fw_image->size > 4096)) {
360 ret = FAIL_FILE_SIZE;
361 goto out;
362 }
363 payload = (struct pm8001_ioctl_payload *)ioctlbuffer;
364 memcpy((u8 *)payload->func_specific, (u8 *)pm8001_ha->fw_image->data,
365 pm8001_ha->fw_image->size);
366 payload->length = pm8001_ha->fw_image->size;
367 payload->id = 0;
368 pm8001_ha->nvmd_completion = &completion;
369 ret = PM8001_CHIP_DISP->set_nvmd_req(pm8001_ha, payload);
370 wait_for_completion(&completion);
371out:
372 kfree(ioctlbuffer);
373 return ret;
374}
375
376static int pm8001_update_flash(struct pm8001_hba_info *pm8001_ha)
377{
378 struct pm8001_ioctl_payload *payload;
379 DECLARE_COMPLETION_ONSTACK(completion);
380 u8 *ioctlbuffer = NULL;
381 u32 length = 0;
382 struct fw_control_info *fwControl;
383 u32 loopNumber, loopcount = 0;
384 u32 sizeRead = 0;
385 u32 partitionSize, partitionSizeTmp;
386 u32 ret = 0;
387 u32 partitionNumber = 0;
388 struct pm8001_fw_image_header *image_hdr;
389
390 length = 1024 * 16 + sizeof(*payload) - 1;
391 ioctlbuffer = kzalloc(length, GFP_KERNEL);
392 image_hdr = (struct pm8001_fw_image_header *)pm8001_ha->fw_image->data;
393 if (!ioctlbuffer)
394 return -ENOMEM;
395 if (pm8001_ha->fw_image->size < 28) {
396 ret = FAIL_FILE_SIZE;
397 goto out;
398 }
399
400 while (sizeRead < pm8001_ha->fw_image->size) {
401 partitionSizeTmp =
402 *(u32 *)((u8 *)&image_hdr->image_length + sizeRead);
403 partitionSize = be32_to_cpu(partitionSizeTmp);
404 loopcount = (partitionSize + HEADER_LEN)/IOCTL_BUF_SIZE;
 405 if ((partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE)
406 loopcount++;
407 if (loopcount == 0)
408 loopcount++;
409 for (loopNumber = 0; loopNumber < loopcount; loopNumber++) {
410 payload = (struct pm8001_ioctl_payload *)ioctlbuffer;
411 payload->length = 1024*16;
412 payload->id = 0;
413 fwControl =
414 (struct fw_control_info *)payload->func_specific;
415 fwControl->len = IOCTL_BUF_SIZE; /* IN */
416 fwControl->size = partitionSize + HEADER_LEN;/* IN */
417 fwControl->retcode = 0;/* OUT */
418 fwControl->offset = loopNumber * IOCTL_BUF_SIZE;/*OUT */
419
 420 /* for the last chunk, when the file size is not a multiple of
 421 4k, load only the remainder */
422 if (((loopcount-loopNumber) == 1) &&
423 ((partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE)) {
424 fwControl->len =
425 (partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE;
426 memcpy((u8 *)fwControl->buffer,
427 (u8 *)pm8001_ha->fw_image->data + sizeRead,
428 (partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE);
429 sizeRead +=
430 (partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE;
431 } else {
432 memcpy((u8 *)fwControl->buffer,
433 (u8 *)pm8001_ha->fw_image->data + sizeRead,
434 IOCTL_BUF_SIZE);
435 sizeRead += IOCTL_BUF_SIZE;
436 }
437
438 pm8001_ha->nvmd_completion = &completion;
439 ret = PM8001_CHIP_DISP->fw_flash_update_req(pm8001_ha, payload);
440 wait_for_completion(&completion);
441 if (ret || (fwControl->retcode > FLASH_UPDATE_IN_PROGRESS)) {
442 ret = fwControl->retcode;
443 kfree(ioctlbuffer);
444 ioctlbuffer = NULL;
445 break;
446 }
447 }
448 if (ret)
449 break;
450 partitionNumber++;
 451 }
452out:
453 kfree(ioctlbuffer);
454 return ret;
455}
456static ssize_t pm8001_store_update_fw(struct device *cdev,
457 struct device_attribute *attr,
458 const char *buf, size_t count)
459{
460 struct Scsi_Host *shost = class_to_shost(cdev);
461 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
462 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
463 char *cmd_ptr, *filename_ptr;
464 int res, i;
465 int flash_command = FLASH_CMD_NONE;
466 int err = 0;
467 if (!capable(CAP_SYS_ADMIN))
468 return -EACCES;
469
470 cmd_ptr = kzalloc(count*2, GFP_KERNEL);
471
472 if (!cmd_ptr) {
473 err = FAIL_OUT_MEMORY;
474 goto out;
475 }
476
477 filename_ptr = cmd_ptr + count;
478 res = sscanf(buf, "%s %s", cmd_ptr, filename_ptr);
479 if (res != 2) {
480 err = FAIL_PARAMETERS;
481 goto out1;
482 }
483
484 for (i = 0; flash_command_table[i].code != FLASH_CMD_NONE; i++) {
485 if (!memcmp(flash_command_table[i].command,
486 cmd_ptr, strlen(cmd_ptr))) {
487 flash_command = flash_command_table[i].code;
488 break;
489 }
490 }
491 if (flash_command == FLASH_CMD_NONE) {
492 err = FAIL_PARAMETERS;
493 goto out1;
494 }
495
496 if (pm8001_ha->fw_status == FLASH_IN_PROGRESS) {
497 err = FLASH_IN_PROGRESS;
498 goto out1;
499 }
500 err = request_firmware(&pm8001_ha->fw_image,
501 filename_ptr,
502 pm8001_ha->dev);
503
504 if (err) {
505 PM8001_FAIL_DBG(pm8001_ha,
506 pm8001_printk("Failed to load firmware image file %s,"
507 " error %d\n", filename_ptr, err));
508 err = FAIL_OPEN_BIOS_FILE;
509 goto out1;
510 }
511
512 switch (flash_command) {
513 case FLASH_CMD_UPDATE:
514 pm8001_ha->fw_status = FLASH_IN_PROGRESS;
515 err = pm8001_update_flash(pm8001_ha);
516 break;
517 case FLASH_CMD_SET_NVMD:
518 pm8001_ha->fw_status = FLASH_IN_PROGRESS;
519 err = pm8001_set_nvmd(pm8001_ha);
520 break;
521 default:
522 pm8001_ha->fw_status = FAIL_PARAMETERS;
523 err = FAIL_PARAMETERS;
524 break;
525 }
526 release_firmware(pm8001_ha->fw_image);
527out1:
528 kfree(cmd_ptr);
529out:
530 pm8001_ha->fw_status = err;
531
532 if (!err)
533 return count;
534 else
535 return -err;
536}
537
538static ssize_t pm8001_show_update_fw(struct device *cdev,
539 struct device_attribute *attr, char *buf)
540{
541 int i;
542 struct Scsi_Host *shost = class_to_shost(cdev);
543 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
544 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
545
546 for (i = 0; flash_error_table[i].err_code != 0; i++) {
547 if (flash_error_table[i].err_code == pm8001_ha->fw_status)
548 break;
549 }
550 if (pm8001_ha->fw_status != FLASH_IN_PROGRESS)
551 pm8001_ha->fw_status = FLASH_OK;
552
553 return snprintf(buf, PAGE_SIZE, "status=%x %s\n",
554 flash_error_table[i].err_code,
555 flash_error_table[i].reason);
556}
557
558static DEVICE_ATTR(update_fw, S_IRUGO|S_IWUGO,
559 pm8001_show_update_fw, pm8001_store_update_fw);
560struct device_attribute *pm8001_host_attrs[] = {
561 &dev_attr_interface_rev,
562 &dev_attr_fw_version,
563 &dev_attr_update_fw,
564 &dev_attr_aap_log,
565 &dev_attr_iop_log,
566 &dev_attr_max_out_io,
567 &dev_attr_max_devices,
568 &dev_attr_max_sg_list,
569 &dev_attr_sas_spec_support,
570 &dev_attr_logging_level,
571 &dev_attr_host_sas_address,
572 NULL,
573};
574
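
pm8001_host_attrs is the NULL-terminated array the SCSI midlayer exposes under /sys/class/scsi_host/hostN/. A sketch of how such an array is typically hooked up through the host template's shost_attrs field; the driver's real template lives in pm8001_init.c, outside this excerpt:

    static struct scsi_host_template pm8001_sht = {  /* illustrative only */
            .module      = THIS_MODULE,
            .name        = "pm8001",
            .shost_attrs = pm8001_host_attrs,
            /* remaining ops elided */
    };
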
diff --git a/drivers/scsi/pm8001/pm8001_ctl.h b/drivers/scsi/pm8001/pm8001_ctl.h
new file mode 100644
index 000000000000..63ad4aa0c422
--- /dev/null
+++ b/drivers/scsi/pm8001/pm8001_ctl.h
@@ -0,0 +1,57 @@
1/*
2 * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
3 *
4 * Copyright (c) 2008-2009 USI Co., Ltd.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification.
13 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14 * substantially similar to the "NO WARRANTY" disclaimer below
15 * ("Disclaimer") and any redistribution must be conditioned upon
16 * including a substantially similar Disclaimer requirement for further
17 * binary redistribution.
18 * 3. Neither the names of the above-listed copyright holders nor the names
19 * of any contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * Alternatively, this software may be distributed under the terms of the
23 * GNU General Public License ("GPL") version 2 as published by the Free
24 * Software Foundation.
25 *
26 * NO WARRANTY
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
35 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
36 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGES.
38 *
39 */
40
41#ifndef PM8001_CTL_H_INCLUDED
42#define PM8001_CTL_H_INCLUDED
43
44#define IOCTL_BUF_SIZE 4096
45#define HEADER_LEN 28
46#define SIZE_OFFSET 16
47
48
49#define FLASH_OK 0x000000
50#define FAIL_OPEN_BIOS_FILE 0x000100
51#define FAIL_FILE_SIZE 0x000a00
52#define FAIL_PARAMETERS 0x000b00
53#define FAIL_OUT_MEMORY 0x000c00
54#define FLASH_IN_PROGRESS 0x001000
55
56#endif /* PM8001_CTL_H_INCLUDED */
57
diff --git a/drivers/scsi/pm8001/pm8001_defs.h b/drivers/scsi/pm8001/pm8001_defs.h
new file mode 100644
index 000000000000..944afada61ee
--- /dev/null
+++ b/drivers/scsi/pm8001/pm8001_defs.h
@@ -0,0 +1,112 @@
1/*
2 * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
3 *
4 * Copyright (c) 2008-2009 USI Co., Ltd.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification.
13 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14 * substantially similar to the "NO WARRANTY" disclaimer below
15 * ("Disclaimer") and any redistribution must be conditioned upon
16 * including a substantially similar Disclaimer requirement for further
17 * binary redistribution.
18 * 3. Neither the names of the above-listed copyright holders nor the names
19 * of any contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * Alternatively, this software may be distributed under the terms of the
23 * GNU General Public License ("GPL") version 2 as published by the Free
24 * Software Foundation.
25 *
26 * NO WARRANTY
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
35 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
36 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGES.
38 *
39 */
40
41#ifndef _PM8001_DEFS_H_
42#define _PM8001_DEFS_H_
43
44enum chip_flavors {
45 chip_8001,
46};
47#define USI_MAX_MEMCNT 9
48#define PM8001_MAX_DMA_SG SG_ALL
49enum phy_speed {
50 PHY_SPEED_15 = 0x01,
51 PHY_SPEED_30 = 0x02,
52 PHY_SPEED_60 = 0x04,
53};
54
55enum data_direction {
56 DATA_DIR_NONE = 0x0, /* NO TRANSFER */
57 DATA_DIR_IN = 0x01, /* INBOUND */
58 DATA_DIR_OUT = 0x02, /* OUTBOUND */
59 DATA_DIR_BYRECIPIENT = 0x04, /* UNSPECIFIED */
60};
61
62enum port_type {
63 PORT_TYPE_SAS = (1L << 1),
64 PORT_TYPE_SATA = (1L << 0),
65};
66
67/* driver compile-time configuration */
68#define PM8001_MAX_CCB 512 /* max ccbs supported */
69#define PM8001_MAX_INB_NUM 1
70#define PM8001_MAX_OUTB_NUM 1
71#define PM8001_CAN_QUEUE 128 /* SCSI Queue depth */
72
73/* unchangeable hardware details */
74#define PM8001_MAX_PHYS 8 /* max. possible phys */
75#define PM8001_MAX_PORTS 8 /* max. possible ports */
76#define PM8001_MAX_DEVICES 1024 /* max supported device */
77
78enum memory_region_num {
79 AAP1 = 0x0, /* application acceleration processor */
80 IOP, /* IO processor */
81 CI, /* consumer index */
82 PI, /* producer index */
83 IB, /* inbound queue */
84 OB, /* outbound queue */
85 NVMD, /* NVM device */
86 DEV_MEM, /* memory for devices */
87 CCB_MEM, /* memory for command control block */
88};
89#define PM8001_EVENT_LOG_SIZE (128 * 1024)
90
91/*error code*/
92enum mpi_err {
93 MPI_IO_STATUS_SUCCESS = 0x0,
94 MPI_IO_STATUS_BUSY = 0x01,
95 MPI_IO_STATUS_FAIL = 0x02,
96};
97
98/**
99 * Phy Control constants
100 */
101enum phy_control_type {
102 PHY_LINK_RESET = 0x01,
103 PHY_HARD_RESET = 0x02,
104 PHY_NOTIFY_ENABLE_SPINUP = 0x10,
105};
106
107enum pm8001_hba_info_flags {
108 PM8001F_INIT_TIME = (1U << 0),
109 PM8001F_RUN_TIME = (1U << 1),
110};
111
112#endif
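
The memory_region_num values index the driver's memoryMap, so queue indices kept in host memory can be read with the pm8001_chips.h helpers. A hypothetical sketch, assuming an initialized pm8001_ha (inbnd_q_tbl is filled in by the table readers in pm8001_hwi.c below):

    /* sample inbound queue 0: the consumer index lives in the CI host region,
     * the producer index behind the PCI BAR recorded in the MPI table */
    u32 ci = pm8001_read_32(pm8001_ha->memoryMap.region[CI].virt_ptr);
    u32 pi = pm8001_cr32(pm8001_ha, pm8001_ha->inbnd_q_tbl[0].pi_pci_bar,
                         pm8001_ha->inbnd_q_tbl[0].pi_offset);
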
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
new file mode 100644
index 000000000000..909c00ec044f
--- /dev/null
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -0,0 +1,4494 @@
1/*
2 * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
3 *
4 * Copyright (c) 2008-2009 USI Co., Ltd.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification.
13 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14 * substantially similar to the "NO WARRANTY" disclaimer below
15 * ("Disclaimer") and any redistribution must be conditioned upon
16 * including a substantially similar Disclaimer requirement for further
17 * binary redistribution.
18 * 3. Neither the names of the above-listed copyright holders nor the names
19 * of any contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * Alternatively, this software may be distributed under the terms of the
23 * GNU General Public License ("GPL") version 2 as published by the Free
24 * Software Foundation.
25 *
26 * NO WARRANTY
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
35 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
36 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGES.
38 *
39 */
40#include <linux/slab.h>
41#include "pm8001_sas.h"
42#include "pm8001_hwi.h"
43#include "pm8001_chips.h"
44#include "pm8001_ctl.h"
45
46/**
47 * read_main_config_table - read the configure table and save it.
48 * @pm8001_ha: our hba card information
49 */
50static void __devinit read_main_config_table(struct pm8001_hba_info *pm8001_ha)
51{
52 void __iomem *address = pm8001_ha->main_cfg_tbl_addr;
53 pm8001_ha->main_cfg_tbl.signature = pm8001_mr32(address, 0x00);
54 pm8001_ha->main_cfg_tbl.interface_rev = pm8001_mr32(address, 0x04);
55 pm8001_ha->main_cfg_tbl.firmware_rev = pm8001_mr32(address, 0x08);
56 pm8001_ha->main_cfg_tbl.max_out_io = pm8001_mr32(address, 0x0C);
57 pm8001_ha->main_cfg_tbl.max_sgl = pm8001_mr32(address, 0x10);
58 pm8001_ha->main_cfg_tbl.ctrl_cap_flag = pm8001_mr32(address, 0x14);
59 pm8001_ha->main_cfg_tbl.gst_offset = pm8001_mr32(address, 0x18);
60 pm8001_ha->main_cfg_tbl.inbound_queue_offset =
61 pm8001_mr32(address, MAIN_IBQ_OFFSET);
62 pm8001_ha->main_cfg_tbl.outbound_queue_offset =
63 pm8001_mr32(address, MAIN_OBQ_OFFSET);
64 pm8001_ha->main_cfg_tbl.hda_mode_flag =
65 pm8001_mr32(address, MAIN_HDA_FLAGS_OFFSET);
66
 67 /* read the analog setup table offset from the configuration table */
68 pm8001_ha->main_cfg_tbl.anolog_setup_table_offset =
69 pm8001_mr32(address, MAIN_ANALOG_SETUP_OFFSET);
70
71 /* read Error Dump Offset and Length */
72 pm8001_ha->main_cfg_tbl.fatal_err_dump_offset0 =
73 pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_OFFSET);
74 pm8001_ha->main_cfg_tbl.fatal_err_dump_length0 =
75 pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_LENGTH);
76 pm8001_ha->main_cfg_tbl.fatal_err_dump_offset1 =
77 pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_OFFSET);
78 pm8001_ha->main_cfg_tbl.fatal_err_dump_length1 =
79 pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_LENGTH);
80}
81
82/**
83 * read_general_status_table - read the general status table and save it.
84 * @pm8001_ha: our hba card information
85 */
86static void __devinit
87read_general_status_table(struct pm8001_hba_info *pm8001_ha)
88{
89 void __iomem *address = pm8001_ha->general_stat_tbl_addr;
90 pm8001_ha->gs_tbl.gst_len_mpistate = pm8001_mr32(address, 0x00);
91 pm8001_ha->gs_tbl.iq_freeze_state0 = pm8001_mr32(address, 0x04);
92 pm8001_ha->gs_tbl.iq_freeze_state1 = pm8001_mr32(address, 0x08);
93 pm8001_ha->gs_tbl.msgu_tcnt = pm8001_mr32(address, 0x0C);
94 pm8001_ha->gs_tbl.iop_tcnt = pm8001_mr32(address, 0x10);
95 pm8001_ha->gs_tbl.reserved = pm8001_mr32(address, 0x14);
96 pm8001_ha->gs_tbl.phy_state[0] = pm8001_mr32(address, 0x18);
97 pm8001_ha->gs_tbl.phy_state[1] = pm8001_mr32(address, 0x1C);
98 pm8001_ha->gs_tbl.phy_state[2] = pm8001_mr32(address, 0x20);
99 pm8001_ha->gs_tbl.phy_state[3] = pm8001_mr32(address, 0x24);
100 pm8001_ha->gs_tbl.phy_state[4] = pm8001_mr32(address, 0x28);
101 pm8001_ha->gs_tbl.phy_state[5] = pm8001_mr32(address, 0x2C);
102 pm8001_ha->gs_tbl.phy_state[6] = pm8001_mr32(address, 0x30);
103 pm8001_ha->gs_tbl.phy_state[7] = pm8001_mr32(address, 0x34);
104 pm8001_ha->gs_tbl.reserved1 = pm8001_mr32(address, 0x38);
105 pm8001_ha->gs_tbl.reserved2 = pm8001_mr32(address, 0x3C);
106 pm8001_ha->gs_tbl.reserved3 = pm8001_mr32(address, 0x40);
107 pm8001_ha->gs_tbl.recover_err_info[0] = pm8001_mr32(address, 0x44);
108 pm8001_ha->gs_tbl.recover_err_info[1] = pm8001_mr32(address, 0x48);
109 pm8001_ha->gs_tbl.recover_err_info[2] = pm8001_mr32(address, 0x4C);
110 pm8001_ha->gs_tbl.recover_err_info[3] = pm8001_mr32(address, 0x50);
111 pm8001_ha->gs_tbl.recover_err_info[4] = pm8001_mr32(address, 0x54);
112 pm8001_ha->gs_tbl.recover_err_info[5] = pm8001_mr32(address, 0x58);
113 pm8001_ha->gs_tbl.recover_err_info[6] = pm8001_mr32(address, 0x5C);
114 pm8001_ha->gs_tbl.recover_err_info[7] = pm8001_mr32(address, 0x60);
115}
116
117/**
118 * read_inbnd_queue_table - read the inbound queue table and save it.
119 * @pm8001_ha: our hba card information
120 */
121static void __devinit
122read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
123{
124 int inbQ_num = 1;
125 int i;
126 void __iomem *address = pm8001_ha->inbnd_q_tbl_addr;
127 for (i = 0; i < inbQ_num; i++) {
128 u32 offset = i * 0x20;
129 pm8001_ha->inbnd_q_tbl[i].pi_pci_bar =
130 get_pci_bar_index(pm8001_mr32(address, (offset + 0x14)));
131 pm8001_ha->inbnd_q_tbl[i].pi_offset =
132 pm8001_mr32(address, (offset + 0x18));
133 }
134}
135
136/**
137 * read_outbnd_queue_table - read the outbound queue table and save it.
138 * @pm8001_ha: our hba card information
139 */
140static void __devinit
141read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
142{
143 int outbQ_num = 1;
144 int i;
145 void __iomem *address = pm8001_ha->outbnd_q_tbl_addr;
146 for (i = 0; i < outbQ_num; i++) {
147 u32 offset = i * 0x24;
148 pm8001_ha->outbnd_q_tbl[i].ci_pci_bar =
149 get_pci_bar_index(pm8001_mr32(address, (offset + 0x14)));
150 pm8001_ha->outbnd_q_tbl[i].ci_offset =
151 pm8001_mr32(address, (offset + 0x18));
152 }
153}
154
155/**
156 * init_default_table_values - init the default table.
157 * @pm8001_ha: our hba card information
158 */
159static void __devinit
160init_default_table_values(struct pm8001_hba_info *pm8001_ha)
161{
162 int qn = 1;
163 int i;
164 u32 offsetib, offsetob;
165 void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr;
166 void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr;
167
168 pm8001_ha->main_cfg_tbl.inbound_q_nppd_hppd = 0;
169 pm8001_ha->main_cfg_tbl.outbound_hw_event_pid0_3 = 0;
170 pm8001_ha->main_cfg_tbl.outbound_hw_event_pid4_7 = 0;
171 pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid0_3 = 0;
172 pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid4_7 = 0;
173 pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid0_3 = 0;
174 pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid4_7 = 0;
175 pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid0_3 = 0;
176 pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid4_7 = 0;
177 pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid0_3 = 0;
178 pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid4_7 = 0;
179
180 pm8001_ha->main_cfg_tbl.upper_event_log_addr =
181 pm8001_ha->memoryMap.region[AAP1].phys_addr_hi;
182 pm8001_ha->main_cfg_tbl.lower_event_log_addr =
183 pm8001_ha->memoryMap.region[AAP1].phys_addr_lo;
184 pm8001_ha->main_cfg_tbl.event_log_size = PM8001_EVENT_LOG_SIZE;
185 pm8001_ha->main_cfg_tbl.event_log_option = 0x01;
186 pm8001_ha->main_cfg_tbl.upper_iop_event_log_addr =
187 pm8001_ha->memoryMap.region[IOP].phys_addr_hi;
188 pm8001_ha->main_cfg_tbl.lower_iop_event_log_addr =
189 pm8001_ha->memoryMap.region[IOP].phys_addr_lo;
190 pm8001_ha->main_cfg_tbl.iop_event_log_size = PM8001_EVENT_LOG_SIZE;
191 pm8001_ha->main_cfg_tbl.iop_event_log_option = 0x01;
192 pm8001_ha->main_cfg_tbl.fatal_err_interrupt = 0x01;
193 for (i = 0; i < qn; i++) {
194 pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt =
195 0x00000100 | (0x00000040 << 16) | (0x00<<30);
196 pm8001_ha->inbnd_q_tbl[i].upper_base_addr =
197 pm8001_ha->memoryMap.region[IB].phys_addr_hi;
198 pm8001_ha->inbnd_q_tbl[i].lower_base_addr =
199 pm8001_ha->memoryMap.region[IB].phys_addr_lo;
200 pm8001_ha->inbnd_q_tbl[i].base_virt =
201 (u8 *)pm8001_ha->memoryMap.region[IB].virt_ptr;
202 pm8001_ha->inbnd_q_tbl[i].total_length =
203 pm8001_ha->memoryMap.region[IB].total_len;
204 pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr =
205 pm8001_ha->memoryMap.region[CI].phys_addr_hi;
206 pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr =
207 pm8001_ha->memoryMap.region[CI].phys_addr_lo;
208 pm8001_ha->inbnd_q_tbl[i].ci_virt =
209 pm8001_ha->memoryMap.region[CI].virt_ptr;
210 offsetib = i * 0x20;
211 pm8001_ha->inbnd_q_tbl[i].pi_pci_bar =
212 get_pci_bar_index(pm8001_mr32(addressib,
213 (offsetib + 0x14)));
214 pm8001_ha->inbnd_q_tbl[i].pi_offset =
215 pm8001_mr32(addressib, (offsetib + 0x18));
216 pm8001_ha->inbnd_q_tbl[i].producer_idx = 0;
217 pm8001_ha->inbnd_q_tbl[i].consumer_index = 0;
218 }
219 for (i = 0; i < qn; i++) {
220 pm8001_ha->outbnd_q_tbl[i].element_size_cnt =
221 256 | (64 << 16) | (1<<30);
222 pm8001_ha->outbnd_q_tbl[i].upper_base_addr =
223 pm8001_ha->memoryMap.region[OB].phys_addr_hi;
224 pm8001_ha->outbnd_q_tbl[i].lower_base_addr =
225 pm8001_ha->memoryMap.region[OB].phys_addr_lo;
226 pm8001_ha->outbnd_q_tbl[i].base_virt =
227 (u8 *)pm8001_ha->memoryMap.region[OB].virt_ptr;
228 pm8001_ha->outbnd_q_tbl[i].total_length =
229 pm8001_ha->memoryMap.region[OB].total_len;
230 pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr =
231 pm8001_ha->memoryMap.region[PI].phys_addr_hi;
232 pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr =
233 pm8001_ha->memoryMap.region[PI].phys_addr_lo;
234 pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay =
235 0 | (10 << 16) | (0 << 24);
236 pm8001_ha->outbnd_q_tbl[i].pi_virt =
237 pm8001_ha->memoryMap.region[PI].virt_ptr;
238 offsetob = i * 0x24;
239 pm8001_ha->outbnd_q_tbl[i].ci_pci_bar =
240 get_pci_bar_index(pm8001_mr32(addressob,
241 offsetob + 0x14));
242 pm8001_ha->outbnd_q_tbl[i].ci_offset =
243 pm8001_mr32(addressob, (offsetob + 0x18));
244 pm8001_ha->outbnd_q_tbl[i].consumer_idx = 0;
245 pm8001_ha->outbnd_q_tbl[i].producer_index = 0;
246 }
247}
248
249/**
250 * update_main_config_table - write the main configuration table to the HBA.
251 * @pm8001_ha: our hba card information
252 */
253static void __devinit
254update_main_config_table(struct pm8001_hba_info *pm8001_ha)
255{
256 void __iomem *address = pm8001_ha->main_cfg_tbl_addr;
257 pm8001_mw32(address, 0x24,
258 pm8001_ha->main_cfg_tbl.inbound_q_nppd_hppd);
259 pm8001_mw32(address, 0x28,
260 pm8001_ha->main_cfg_tbl.outbound_hw_event_pid0_3);
261 pm8001_mw32(address, 0x2C,
262 pm8001_ha->main_cfg_tbl.outbound_hw_event_pid4_7);
263 pm8001_mw32(address, 0x30,
264 pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid0_3);
265 pm8001_mw32(address, 0x34,
266 pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid4_7);
267 pm8001_mw32(address, 0x38,
268 pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid0_3);
269 pm8001_mw32(address, 0x3C,
270 pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid4_7);
271 pm8001_mw32(address, 0x40,
272 pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid0_3);
273 pm8001_mw32(address, 0x44,
274 pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid4_7);
275 pm8001_mw32(address, 0x48,
276 pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid0_3);
277 pm8001_mw32(address, 0x4C,
278 pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid4_7);
279 pm8001_mw32(address, 0x50,
280 pm8001_ha->main_cfg_tbl.upper_event_log_addr);
281 pm8001_mw32(address, 0x54,
282 pm8001_ha->main_cfg_tbl.lower_event_log_addr);
283 pm8001_mw32(address, 0x58, pm8001_ha->main_cfg_tbl.event_log_size);
284 pm8001_mw32(address, 0x5C, pm8001_ha->main_cfg_tbl.event_log_option);
285 pm8001_mw32(address, 0x60,
286 pm8001_ha->main_cfg_tbl.upper_iop_event_log_addr);
287 pm8001_mw32(address, 0x64,
288 pm8001_ha->main_cfg_tbl.lower_iop_event_log_addr);
289 pm8001_mw32(address, 0x68, pm8001_ha->main_cfg_tbl.iop_event_log_size);
290 pm8001_mw32(address, 0x6C,
291 pm8001_ha->main_cfg_tbl.iop_event_log_option);
292 pm8001_mw32(address, 0x70,
293 pm8001_ha->main_cfg_tbl.fatal_err_interrupt);
294}
295
296/**
297 * update_inbnd_queue_table - update the inbound queue table to the HBA.
298 * @pm8001_ha: our hba card information
299 */
300static void __devinit
301update_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha, int number)
302{
303 void __iomem *address = pm8001_ha->inbnd_q_tbl_addr;
304 u16 offset = number * 0x20;
305 pm8001_mw32(address, offset + 0x00,
306 pm8001_ha->inbnd_q_tbl[number].element_pri_size_cnt);
307 pm8001_mw32(address, offset + 0x04,
308 pm8001_ha->inbnd_q_tbl[number].upper_base_addr);
309 pm8001_mw32(address, offset + 0x08,
310 pm8001_ha->inbnd_q_tbl[number].lower_base_addr);
311 pm8001_mw32(address, offset + 0x0C,
312 pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr);
313 pm8001_mw32(address, offset + 0x10,
314 pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr);
315}
316
317/**
318 * update_outbnd_queue_table - update the outbound queue table to the HBA.
319 * @pm8001_ha: our hba card information
320 */
321static void __devinit
322update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha, int number)
323{
324 void __iomem *address = pm8001_ha->outbnd_q_tbl_addr;
325 u16 offset = number * 0x24;
326 pm8001_mw32(address, offset + 0x00,
327 pm8001_ha->outbnd_q_tbl[number].element_size_cnt);
328 pm8001_mw32(address, offset + 0x04,
329 pm8001_ha->outbnd_q_tbl[number].upper_base_addr);
330 pm8001_mw32(address, offset + 0x08,
331 pm8001_ha->outbnd_q_tbl[number].lower_base_addr);
332 pm8001_mw32(address, offset + 0x0C,
333 pm8001_ha->outbnd_q_tbl[number].pi_upper_base_addr);
334 pm8001_mw32(address, offset + 0x10,
335 pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr);
336 pm8001_mw32(address, offset + 0x1C,
337 pm8001_ha->outbnd_q_tbl[number].interrup_vec_cnt_delay);
338}
339
340/**
341 * bar4_shift - shift the BAR4 (logical BAR2) window base address
342 * @pm8001_ha: our hba card information
343 * @shiftValue: shifting value in memory bar.
344 */
345static int bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue)
346{
347 u32 regVal;
348 u32 max_wait_count;
349
350 /* program the inbound AXI translation Lower Address */
351 pm8001_cw32(pm8001_ha, 1, SPC_IBW_AXI_TRANSLATION_LOW, shiftValue);
352
353 /* confirm the setting is written */
354 max_wait_count = 1 * 1000 * 1000; /* 1 sec */
355 do {
356 udelay(1);
357 regVal = pm8001_cr32(pm8001_ha, 1, SPC_IBW_AXI_TRANSLATION_LOW);
358 } while ((regVal != shiftValue) && (--max_wait_count));
359
360 if (!max_wait_count) {
361 PM8001_INIT_DBG(pm8001_ha,
362 pm8001_printk("TIMEOUT:SPC_IBW_AXI_TRANSLATION_LOW"
363 " = 0x%x\n", regVal));
364 return -1;
365 }
366 return 0;
367}
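
/*
 * Illustrative sketch (not compiled into the driver): the access idiom
 * used throughout this file. Callers shift the BAR4 window to a
 * device-internal base address, access registers through logical BAR2,
 * then restore the window to 0x0 so later accesses are not silently
 * redirected. The 0x00030000 base and 0x100 offset below are
 * placeholders for illustration, not guaranteed SPC addresses.
 */
#if 0
static int example_shifted_access(struct pm8001_hba_info *pm8001_ha)
{
	u32 val;

	if (-1 == bar4_shift(pm8001_ha, 0x00030000))	/* hypothetical base */
		return -1;
	val = pm8001_cr32(pm8001_ha, 2, 0x100);		/* hypothetical offset */
	pm8001_cw32(pm8001_ha, 2, 0x100, val | 0x1);
	bar4_shift(pm8001_ha, 0x0);	/* always restore the window */
	return 0;
}
#endif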
368
369/**
370 * mpi_set_phys_g3_with_ssc - configure the G3-with-SSC setting on all phys
371 * @pm8001_ha: our hba card information
372 * @SSCbit: set SSCbit to 0 to disable all phys ssc; 1 to enable all phys ssc.
373 */
374static void __devinit
375mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, u32 SSCbit)
376{
377 u32 value, offset, i;
378
379#define SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR 0x00030000
380#define SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR 0x00040000
381#define SAS2_SETTINGS_LOCAL_PHY_0_3_OFFSET 0x1074
382#define SAS2_SETTINGS_LOCAL_PHY_4_7_OFFSET 0x1074
383#define PHY_G3_WITHOUT_SSC_BIT_SHIFT 12
384#define PHY_G3_WITH_SSC_BIT_SHIFT 13
385#define SNW3_PHY_CAPABILITIES_PARITY 31
386
387 /*
388 * Using shifted destination address 0x3_0000:0x1074 + 0x4000*N (N=0:3)
389 * Using shifted destination address 0x4_0000:0x1074 + 0x4000*(N-4) (N=4:7)
390 */
391 if (-1 == bar4_shift(pm8001_ha, SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR))
392 return;
393
394 for (i = 0; i < 4; i++) {
395 offset = SAS2_SETTINGS_LOCAL_PHY_0_3_OFFSET + 0x4000 * i;
396 pm8001_cw32(pm8001_ha, 2, offset, 0x80001501);
397 }
398 /* shift membase 3 for SAS2_SETTINGS_LOCAL_PHY 4 - 7 */
399 if (-1 == bar4_shift(pm8001_ha, SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR))
400 return;
401 for (i = 4; i < 8; i++) {
402 offset = SAS2_SETTINGS_LOCAL_PHY_4_7_OFFSET + 0x4000 * (i-4);
403 pm8001_cw32(pm8001_ha, 2, offset, 0x80001501);
404 }
405	/*************************************************************
406	Change the SSC upspreading value to 0x0 so that upspreading is disabled.
407	Device MABC SMOD0 Controls
408	Address (via MEMBASE-III):
409	Using shifted destination address 0x0_0000, with offset 0xD8
410
411	31:28 R/W Reserved          Do not change
412	27:24 R/W SAS_SMOD_SPRDUP   0000
413	23:20 R/W SAS_SMOD_SPRDDN   0000
414	19:0  R/W Reserved          Do not change
415	Upon power-up this register reads as 0x8990c016. Clearing only the
416	SAS_SMOD_SPRDUP bits would yield 0x8090c016 and keep down-spreading
417	enabled; the value written below, 0x8000C016, clears SAS_SMOD_SPRDDN
418	as well, so both up- and down-spreading are disabled on the SPC.
419	*************************************************************/
420 value = pm8001_cr32(pm8001_ha, 2, 0xd8);
421 pm8001_cw32(pm8001_ha, 2, 0xd8, 0x8000C016);
422
423 /*set the shifted destination address to 0x0 to avoid error operation */
424 bar4_shift(pm8001_ha, 0x0);
425 return;
426}
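
/*
 * Sketch only, not compiled: a read-modify-write matching the stated
 * intent of the comment above (clear just SAS_SMOD_SPRDUP, bits 27:24,
 * turning 0x8990c016 into 0x8090c016). The function above instead
 * writes the hard-coded value 0x8000C016, which clears SAS_SMOD_SPRDDN
 * (bits 23:20) as well; the 0xD8 offset and 0x0_0000 window base are
 * taken from the comment, not independently verified.
 */
#if 0
static void example_clear_sprdup_only(struct pm8001_hba_info *pm8001_ha)
{
	u32 value;

	if (-1 == bar4_shift(pm8001_ha, 0x0))
		return;
	value = pm8001_cr32(pm8001_ha, 2, 0xd8);
	value &= ~(0xf << 24);	/* SAS_SMOD_SPRDUP (bits 27:24) = 0000 */
	pm8001_cw32(pm8001_ha, 2, 0xd8, value);
}
#endif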
427
428/**
429 * mpi_set_open_retry_interval_reg - set the OPEN_REJECT (RETRY) interval
430 * @pm8001_ha: our hba card information
431 * @interval: interval time between each OPEN_REJECT (RETRY), in 1 us units.
432 */
433static void __devinit
434mpi_set_open_retry_interval_reg(struct pm8001_hba_info *pm8001_ha,
435 u32 interval)
436{
437 u32 offset;
438 u32 value;
439 u32 i;
440
441#define OPEN_RETRY_INTERVAL_PHY_0_3_SHIFT_ADDR 0x00030000
442#define OPEN_RETRY_INTERVAL_PHY_4_7_SHIFT_ADDR 0x00040000
443#define OPEN_RETRY_INTERVAL_PHY_0_3_OFFSET 0x30B4
444#define OPEN_RETRY_INTERVAL_PHY_4_7_OFFSET 0x30B4
445#define OPEN_RETRY_INTERVAL_REG_MASK 0x0000FFFF
446
447 value = interval & OPEN_RETRY_INTERVAL_REG_MASK;
448 /* shift bar and set the OPEN_REJECT(RETRY) interval time of PHY 0 -3.*/
449 if (-1 == bar4_shift(pm8001_ha,
450 OPEN_RETRY_INTERVAL_PHY_0_3_SHIFT_ADDR))
451 return;
452 for (i = 0; i < 4; i++) {
453 offset = OPEN_RETRY_INTERVAL_PHY_0_3_OFFSET + 0x4000 * i;
454 pm8001_cw32(pm8001_ha, 2, offset, value);
455 }
456
457 if (-1 == bar4_shift(pm8001_ha,
458 OPEN_RETRY_INTERVAL_PHY_4_7_SHIFT_ADDR))
459 return;
460 for (i = 4; i < 8; i++) {
461 offset = OPEN_RETRY_INTERVAL_PHY_4_7_OFFSET + 0x4000 * (i-4);
462 pm8001_cw32(pm8001_ha, 2, offset, value);
463 }
464 /*set the shifted destination address to 0x0 to avoid error operation */
465 bar4_shift(pm8001_ha, 0x0);
466 return;
467}
468
469/**
470 * mpi_init_check - check firmware initialization status.
471 * @pm8001_ha: our hba card information
472 */
473static int mpi_init_check(struct pm8001_hba_info *pm8001_ha)
474{
475 u32 max_wait_count;
476 u32 value;
477 u32 gst_len_mpistate;
478 /* Write bit0=1 to Inbound DoorBell Register to tell the SPC FW the
479 table is updated */
480 pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPC_MSGU_CFG_TABLE_UPDATE);
481 /* wait until Inbound DoorBell Clear Register toggled */
482 max_wait_count = 1 * 1000 * 1000;/* 1 sec */
483 do {
484 udelay(1);
485 value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
486 value &= SPC_MSGU_CFG_TABLE_UPDATE;
487 } while ((value != 0) && (--max_wait_count));
488
489 if (!max_wait_count)
490 return -1;
491 /* check the MPI-State for initialization */
492 gst_len_mpistate =
493 pm8001_mr32(pm8001_ha->general_stat_tbl_addr,
494 GST_GSTLEN_MPIS_OFFSET);
495 if (GST_MPI_STATE_INIT != (gst_len_mpistate & GST_MPI_STATE_MASK))
496 return -1;
497 /* check MPI Initialization error */
498 gst_len_mpistate = gst_len_mpistate >> 16;
499 if (0x0000 != gst_len_mpistate)
500 return -1;
501 return 0;
502}
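
/*
 * Sketch only, not compiled: the doorbell handshake used above,
 * factored into a generic poll. The host sets a bit in MSGU_IBDB_SET
 * and the firmware clears it once the request has been consumed; the
 * ~1 second timeout mirrors the loop in mpi_init_check().
 */
#if 0
static int example_doorbell_handshake(struct pm8001_hba_info *pm8001_ha,
	u32 doorbell_bit)
{
	u32 max_wait_count = 1 * 1000 * 1000;	/* ~1 sec in 1 us steps */

	pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, doorbell_bit);
	do {
		udelay(1);
		if (!(pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET) & doorbell_bit))
			return 0;	/* firmware acknowledged */
	} while (--max_wait_count);
	return -1;	/* timed out */
}
#endif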
503
504/**
505 * check_fw_ready - check whether the FW is ready; if not, return an error.
506 * @pm8001_ha: our hba card information
507 */
508static int check_fw_ready(struct pm8001_hba_info *pm8001_ha)
509{
510 u32 value, value1;
511 u32 max_wait_count;
512 /* check error state */
513 value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
514 value1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2);
515 /* check AAP error */
516 if (SCRATCH_PAD1_ERR == (value & SCRATCH_PAD_STATE_MASK)) {
517 /* error state */
518 value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0);
519 return -1;
520 }
521
522 /* check IOP error */
523 if (SCRATCH_PAD2_ERR == (value1 & SCRATCH_PAD_STATE_MASK)) {
524 /* error state */
525 value1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3);
526 return -1;
527 }
528
529	/* bits 4-31 of scratch pad1 should be zero if it is not
530	   in the error state */
531 if (value & SCRATCH_PAD1_STATE_MASK) {
532 /* error case */
533 pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0);
534 return -1;
535 }
536
537	/* bits 2 and 4-31 of scratch pad2 should be zero if it is not
538	   in the error state */
539 if (value1 & SCRATCH_PAD2_STATE_MASK) {
540 /* error case */
541 return -1;
542 }
543
544 max_wait_count = 1 * 1000 * 1000;/* 1 sec timeout */
545
546 /* wait until scratch pad 1 and 2 registers in ready state */
547 do {
548 udelay(1);
549 value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1)
550 & SCRATCH_PAD1_RDY;
551 value1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2)
552 & SCRATCH_PAD2_RDY;
553 if ((--max_wait_count) == 0)
554 return -1;
555 } while ((value != SCRATCH_PAD1_RDY) || (value1 != SCRATCH_PAD2_RDY));
556 return 0;
557}
558
559static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
560{
561 void __iomem *base_addr;
562 u32 value;
563 u32 offset;
564 u32 pcibar;
565 u32 pcilogic;
566
567 value = pm8001_cr32(pm8001_ha, 0, 0x44);
568 offset = value & 0x03FFFFFF;
569 PM8001_INIT_DBG(pm8001_ha,
570		pm8001_printk("Scratchpad 0 Offset: %x\n", offset));
571 pcilogic = (value & 0xFC000000) >> 26;
572 pcibar = get_pci_bar_index(pcilogic);
573 PM8001_INIT_DBG(pm8001_ha,
574		pm8001_printk("Scratchpad 0 PCI BAR: %d\n", pcibar));
575 pm8001_ha->main_cfg_tbl_addr = base_addr =
576 pm8001_ha->io_mem[pcibar].memvirtaddr + offset;
577 pm8001_ha->general_stat_tbl_addr =
578 base_addr + pm8001_cr32(pm8001_ha, pcibar, offset + 0x18);
579 pm8001_ha->inbnd_q_tbl_addr =
580 base_addr + pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C);
581 pm8001_ha->outbnd_q_tbl_addr =
582 base_addr + pm8001_cr32(pm8001_ha, pcibar, offset + 0x20);
583}
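
/*
 * Sketch only, not compiled: the layout of MSGU scratchpad 0 as
 * decoded above - bits 25:0 carry the byte offset of the main
 * configuration table and bits 31:26 the logical PCI BAR that holds
 * it, which get_pci_bar_index() then maps to an io_mem index.
 */
#if 0
static void example_decode_scratchpad0(u32 value, u32 *offset, u32 *pcilogic)
{
	*offset = value & 0x03FFFFFF;		/* table offset, bits 25:0 */
	*pcilogic = (value & 0xFC000000) >> 26;	/* logical BAR, bits 31:26 */
}
#endif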
584
585/**
586 * pm8001_chip_init - the main init function that initialize whole PM8001 chip.
587 * @pm8001_ha: our hba card information
588 */
589static int __devinit pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
590{
591 /* check the firmware status */
592 if (-1 == check_fw_ready(pm8001_ha)) {
593 PM8001_FAIL_DBG(pm8001_ha,
594 pm8001_printk("Firmware is not ready!\n"));
595 return -EBUSY;
596 }
597
598 /* Initialize pci space address eg: mpi offset */
599 init_pci_device_addresses(pm8001_ha);
600 init_default_table_values(pm8001_ha);
601 read_main_config_table(pm8001_ha);
602 read_general_status_table(pm8001_ha);
603 read_inbnd_queue_table(pm8001_ha);
604 read_outbnd_queue_table(pm8001_ha);
605 /* update main config table ,inbound table and outbound table */
606 update_main_config_table(pm8001_ha);
607 update_inbnd_queue_table(pm8001_ha, 0);
608 update_outbnd_queue_table(pm8001_ha, 0);
609 mpi_set_phys_g3_with_ssc(pm8001_ha, 0);
610 mpi_set_open_retry_interval_reg(pm8001_ha, 7);
611 /* notify firmware update finished and check initialization status */
612 if (0 == mpi_init_check(pm8001_ha)) {
613 PM8001_INIT_DBG(pm8001_ha,
614 pm8001_printk("MPI initialize successful!\n"));
615 } else
616 return -EBUSY;
617	/* This register is a 16-bit timer with a resolution of 1 us, used for
618	interrupt delay/coalescing in the PCIe Application Layer. Zero is not
619	a valid value. A value of 1 delivers interrupts immediately, with no
620	coalescing; any value greater than 1 introduces a coalescing delay of
621	that many microseconds. */
622 pm8001_cw32(pm8001_ha, 1, 0x0033c0, 0x1);
623 pm8001_cw32(pm8001_ha, 1, 0x0033c4, 0x0);
624 return 0;
625}
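
/*
 * Sketch only, not compiled: programming a non-trivial interrupt
 * coalescing delay through the same registers written above. The role
 * of the second register (0x33c4) is inferred from this init path,
 * which simply zeroes it; treat that as an assumption.
 */
#if 0
static void example_set_coalescing(struct pm8001_hba_info *pm8001_ha,
	u16 delay_us)
{
	if (delay_us == 0)
		delay_us = 1;	/* zero is not a valid timer value */
	pm8001_cw32(pm8001_ha, 1, 0x0033c0, delay_us);
	pm8001_cw32(pm8001_ha, 1, 0x0033c4, 0x0);
}
#endif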
626
627static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
628{
629 u32 max_wait_count;
630 u32 value;
631 u32 gst_len_mpistate;
632 init_pci_device_addresses(pm8001_ha);
633	/* Write bit1=1 to the Inbound DoorBell Register to tell the SPC FW
634	that the table is stopped */
635 pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPC_MSGU_CFG_TABLE_RESET);
636
637 /* wait until Inbound DoorBell Clear Register toggled */
638 max_wait_count = 1 * 1000 * 1000;/* 1 sec */
639 do {
640 udelay(1);
641 value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
642 value &= SPC_MSGU_CFG_TABLE_RESET;
643 } while ((value != 0) && (--max_wait_count));
644
645 if (!max_wait_count) {
646 PM8001_FAIL_DBG(pm8001_ha,
647			pm8001_printk("TIMEOUT: IBDB value = 0x%x\n", value));
648 return -1;
649 }
650
651 /* check the MPI-State for termination in progress */
652	/* wait until the MPI state transitions to UNINIT */
653 max_wait_count = 1 * 1000 * 1000; /* 1 sec */
654 do {
655 udelay(1);
656 gst_len_mpistate =
657 pm8001_mr32(pm8001_ha->general_stat_tbl_addr,
658 GST_GSTLEN_MPIS_OFFSET);
659 if (GST_MPI_STATE_UNINIT ==
660 (gst_len_mpistate & GST_MPI_STATE_MASK))
661 break;
662 } while (--max_wait_count);
663 if (!max_wait_count) {
664 PM8001_FAIL_DBG(pm8001_ha,
665			pm8001_printk("TIMEOUT: MPI State = 0x%x\n",
666 gst_len_mpistate & GST_MPI_STATE_MASK));
667 return -1;
668 }
669 return 0;
670}
671
672/**
673 * soft_reset_ready_check - check whether the FW is ready for soft reset.
674 * @pm8001_ha: our hba card information
675 */
676static u32 soft_reset_ready_check(struct pm8001_hba_info *pm8001_ha)
677{
678 u32 regVal, regVal1, regVal2;
679 if (mpi_uninit_check(pm8001_ha) != 0) {
680 PM8001_FAIL_DBG(pm8001_ha,
681 pm8001_printk("MPI state is not ready\n"));
682 return -1;
683 }
684 /* read the scratch pad 2 register bit 2 */
685 regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2)
686 & SCRATCH_PAD2_FWRDY_RST;
687 if (regVal == SCRATCH_PAD2_FWRDY_RST) {
688 PM8001_INIT_DBG(pm8001_ha,
689			pm8001_printk("Firmware is ready for reset.\n"));
690 } else {
691 /* Trigger NMI twice via RB6 */
692 if (-1 == bar4_shift(pm8001_ha, RB6_ACCESS_REG)) {
693 PM8001_FAIL_DBG(pm8001_ha,
694 pm8001_printk("Shift Bar4 to 0x%x failed\n",
695 RB6_ACCESS_REG));
696 return -1;
697 }
698 pm8001_cw32(pm8001_ha, 2, SPC_RB6_OFFSET,
699 RB6_MAGIC_NUMBER_RST);
700 pm8001_cw32(pm8001_ha, 2, SPC_RB6_OFFSET, RB6_MAGIC_NUMBER_RST);
701 /* wait for 100 ms */
702 mdelay(100);
703 regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2) &
704 SCRATCH_PAD2_FWRDY_RST;
705 if (regVal != SCRATCH_PAD2_FWRDY_RST) {
706 regVal1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
707 regVal2 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2);
708 PM8001_FAIL_DBG(pm8001_ha,
709 pm8001_printk("TIMEOUT:MSGU_SCRATCH_PAD1"
710 "=0x%x, MSGU_SCRATCH_PAD2=0x%x\n",
711 regVal1, regVal2));
712 PM8001_FAIL_DBG(pm8001_ha,
713 pm8001_printk("SCRATCH_PAD0 value = 0x%x\n",
714 pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0)));
715 PM8001_FAIL_DBG(pm8001_ha,
716 pm8001_printk("SCRATCH_PAD3 value = 0x%x\n",
717 pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3)));
718 return -1;
719 }
720 }
721 return 0;
722}
723
724/**
725 * pm8001_chip_soft_rst - soft reset the PM8001 chip, so that all FW
726 * register state is restored to its original (power-up) status.
727 * @pm8001_ha: our hba card information
728 * @signature: signature in host scratch pad0 register.
729 */
730static int
731pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
732{
733 u32 regVal, toggleVal;
734 u32 max_wait_count;
735 u32 regVal1, regVal2, regVal3;
736
737 /* step1: Check FW is ready for soft reset */
738 if (soft_reset_ready_check(pm8001_ha) != 0) {
739 PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("FW is not ready\n"));
740 return -1;
741 }
742
743 /* step 2: clear NMI status register on AAP1 and IOP, write the same
744 value to clear */
745 /* map 0x60000 to BAR4(0x20), BAR2(win) */
746 if (-1 == bar4_shift(pm8001_ha, MBIC_AAP1_ADDR_BASE)) {
747 PM8001_FAIL_DBG(pm8001_ha,
748 pm8001_printk("Shift Bar4 to 0x%x failed\n",
749 MBIC_AAP1_ADDR_BASE));
750 return -1;
751 }
752 regVal = pm8001_cr32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_IOP);
753 PM8001_INIT_DBG(pm8001_ha,
754 pm8001_printk("MBIC - NMI Enable VPE0 (IOP)= 0x%x\n", regVal));
755 pm8001_cw32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_IOP, 0x0);
756 /* map 0x70000 to BAR4(0x20), BAR2(win) */
757 if (-1 == bar4_shift(pm8001_ha, MBIC_IOP_ADDR_BASE)) {
758 PM8001_FAIL_DBG(pm8001_ha,
759 pm8001_printk("Shift Bar4 to 0x%x failed\n",
760 MBIC_IOP_ADDR_BASE));
761 return -1;
762 }
763 regVal = pm8001_cr32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_AAP1);
764 PM8001_INIT_DBG(pm8001_ha,
765 pm8001_printk("MBIC - NMI Enable VPE0 (AAP1)= 0x%x\n", regVal));
766 pm8001_cw32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_AAP1, 0x0);
767
768 regVal = pm8001_cr32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT_ENABLE);
769 PM8001_INIT_DBG(pm8001_ha,
770 pm8001_printk("PCIE -Event Interrupt Enable = 0x%x\n", regVal));
771 pm8001_cw32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT_ENABLE, 0x0);
772
773 regVal = pm8001_cr32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT);
774 PM8001_INIT_DBG(pm8001_ha,
775 pm8001_printk("PCIE - Event Interrupt = 0x%x\n", regVal));
776 pm8001_cw32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT, regVal);
777
778 regVal = pm8001_cr32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT_ENABLE);
779 PM8001_INIT_DBG(pm8001_ha,
780 pm8001_printk("PCIE -Error Interrupt Enable = 0x%x\n", regVal));
781 pm8001_cw32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT_ENABLE, 0x0);
782
783 regVal = pm8001_cr32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT);
784 PM8001_INIT_DBG(pm8001_ha,
785 pm8001_printk("PCIE - Error Interrupt = 0x%x\n", regVal));
786 pm8001_cw32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT, regVal);
787
788 /* read the scratch pad 1 register bit 2 */
789 regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1)
790 & SCRATCH_PAD1_RST;
791 toggleVal = regVal ^ SCRATCH_PAD1_RST;
792
793 /* set signature in host scratch pad0 register to tell SPC that the
794 host performs the soft reset */
795 pm8001_cw32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_0, signature);
796
797	/* read required registers for confirming */
798 /* map 0x0700000 to BAR4(0x20), BAR2(win) */
799 if (-1 == bar4_shift(pm8001_ha, GSM_ADDR_BASE)) {
800 PM8001_FAIL_DBG(pm8001_ha,
801 pm8001_printk("Shift Bar4 to 0x%x failed\n",
802 GSM_ADDR_BASE));
803 return -1;
804 }
805 PM8001_INIT_DBG(pm8001_ha,
806 pm8001_printk("GSM 0x0(0x00007b88)-GSM Configuration and"
807 " Reset = 0x%x\n",
808 pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET)));
809
810 /* step 3: host read GSM Configuration and Reset register */
811 regVal = pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET);
812 /* Put those bits to low */
813 /* GSM XCBI offset = 0x70 0000
814 0x00 Bit 13 COM_SLV_SW_RSTB 1
815 0x00 Bit 12 QSSP_SW_RSTB 1
816 0x00 Bit 11 RAAE_SW_RSTB 1
817 0x00 Bit 9 RB_1_SW_RSTB 1
818 0x00 Bit 8 SM_SW_RSTB 1
819 */
820 regVal &= ~(0x00003b00);
821 /* host write GSM Configuration and Reset register */
822 pm8001_cw32(pm8001_ha, 2, GSM_CONFIG_RESET, regVal);
823 PM8001_INIT_DBG(pm8001_ha,
824 pm8001_printk("GSM 0x0 (0x00007b88 ==> 0x00004088) - GSM "
825		"Configuration and Reset is set to 0x%x\n",
826 pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET)));
827
828 /* step 4: */
829 /* disable GSM - Read Address Parity Check */
830 regVal1 = pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK);
831 PM8001_INIT_DBG(pm8001_ha,
832 pm8001_printk("GSM 0x700038 - Read Address Parity Check "
833 "Enable = 0x%x\n", regVal1));
834 pm8001_cw32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK, 0x0);
835 PM8001_INIT_DBG(pm8001_ha,
836 pm8001_printk("GSM 0x700038 - Read Address Parity Check Enable"
837		" is set to 0x%x\n",
838 pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK)));
839
840 /* disable GSM - Write Address Parity Check */
841 regVal2 = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK);
842 PM8001_INIT_DBG(pm8001_ha,
843 pm8001_printk("GSM 0x700040 - Write Address Parity Check"
844 " Enable = 0x%x\n", regVal2));
845 pm8001_cw32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK, 0x0);
846 PM8001_INIT_DBG(pm8001_ha,
847 pm8001_printk("GSM 0x700040 - Write Address Parity Check "
848		"Enable is set to 0x%x\n",
849 pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK)));
850
851 /* disable GSM - Write Data Parity Check */
852 regVal3 = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK);
853 PM8001_INIT_DBG(pm8001_ha,
854		pm8001_printk("GSM 0x700048 - Write Data Parity Check"
855 " Enable = 0x%x\n", regVal3));
856 pm8001_cw32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK, 0x0);
857 PM8001_INIT_DBG(pm8001_ha,
858		pm8001_printk("GSM 0x700048 - Write Data Parity Check Enable"
859		" is set to 0x%x\n",
860 pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK)));
861
862 /* step 5: delay 10 usec */
863 udelay(10);
864 /* step 5-b: set GPIO-0 output control to tristate anyway */
865 if (-1 == bar4_shift(pm8001_ha, GPIO_ADDR_BASE)) {
866 PM8001_INIT_DBG(pm8001_ha,
867 pm8001_printk("Shift Bar4 to 0x%x failed\n",
868 GPIO_ADDR_BASE));
869 return -1;
870 }
871 regVal = pm8001_cr32(pm8001_ha, 2, GPIO_GPIO_0_0UTPUT_CTL_OFFSET);
872 PM8001_INIT_DBG(pm8001_ha,
873 pm8001_printk("GPIO Output Control Register:"
874 " = 0x%x\n", regVal));
875 /* set GPIO-0 output control to tri-state */
876 regVal &= 0xFFFFFFFC;
877 pm8001_cw32(pm8001_ha, 2, GPIO_GPIO_0_0UTPUT_CTL_OFFSET, regVal);
878
879 /* Step 6: Reset the IOP and AAP1 */
880 /* map 0x00000 to BAR4(0x20), BAR2(win) */
881 if (-1 == bar4_shift(pm8001_ha, SPC_TOP_LEVEL_ADDR_BASE)) {
882 PM8001_FAIL_DBG(pm8001_ha,
883 pm8001_printk("SPC Shift Bar4 to 0x%x failed\n",
884 SPC_TOP_LEVEL_ADDR_BASE));
885 return -1;
886 }
887 regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET);
888 PM8001_INIT_DBG(pm8001_ha,
889 pm8001_printk("Top Register before resetting IOP/AAP1"
890 ":= 0x%x\n", regVal));
891 regVal &= ~(SPC_REG_RESET_PCS_IOP_SS | SPC_REG_RESET_PCS_AAP1_SS);
892 pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal);
893
894 /* step 7: Reset the BDMA/OSSP */
895 regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET);
896 PM8001_INIT_DBG(pm8001_ha,
897 pm8001_printk("Top Register before resetting BDMA/OSSP"
898 ": = 0x%x\n", regVal));
899 regVal &= ~(SPC_REG_RESET_BDMA_CORE | SPC_REG_RESET_OSSP);
900 pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal);
901
902 /* step 8: delay 10 usec */
903 udelay(10);
904
905 /* step 9: bring the BDMA and OSSP out of reset */
906 regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET);
907 PM8001_INIT_DBG(pm8001_ha,
908 pm8001_printk("Top Register before bringing up BDMA/OSSP"
909 ":= 0x%x\n", regVal));
910 regVal |= (SPC_REG_RESET_BDMA_CORE | SPC_REG_RESET_OSSP);
911 pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal);
912
913 /* step 10: delay 10 usec */
914 udelay(10);
915
916 /* step 11: reads and sets the GSM Configuration and Reset Register */
917 /* map 0x0700000 to BAR4(0x20), BAR2(win) */
918 if (-1 == bar4_shift(pm8001_ha, GSM_ADDR_BASE)) {
919 PM8001_FAIL_DBG(pm8001_ha,
920 pm8001_printk("SPC Shift Bar4 to 0x%x failed\n",
921 GSM_ADDR_BASE));
922 return -1;
923 }
924 PM8001_INIT_DBG(pm8001_ha,
925 pm8001_printk("GSM 0x0 (0x00007b88)-GSM Configuration and "
926 "Reset = 0x%x\n", pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET)));
927 regVal = pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET);
928 /* Put those bits to high */
929 /* GSM XCBI offset = 0x70 0000
930 0x00 Bit 13 COM_SLV_SW_RSTB 1
931 0x00 Bit 12 QSSP_SW_RSTB 1
932 0x00 Bit 11 RAAE_SW_RSTB 1
933 0x00 Bit 9 RB_1_SW_RSTB 1
934 0x00 Bit 8 SM_SW_RSTB 1
935 */
936 regVal |= (GSM_CONFIG_RESET_VALUE);
937 pm8001_cw32(pm8001_ha, 2, GSM_CONFIG_RESET, regVal);
938 PM8001_INIT_DBG(pm8001_ha,
939 pm8001_printk("GSM (0x00004088 ==> 0x00007b88) - GSM"
940		" Configuration and Reset is set to 0x%x\n",
941 pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET)));
942
943 /* step 12: Restore GSM - Read Address Parity Check */
944 regVal = pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK);
945 /* just for debugging */
946 PM8001_INIT_DBG(pm8001_ha,
947 pm8001_printk("GSM 0x700038 - Read Address Parity Check Enable"
948 " = 0x%x\n", regVal));
949 pm8001_cw32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK, regVal1);
950 PM8001_INIT_DBG(pm8001_ha,
951 pm8001_printk("GSM 0x700038 - Read Address Parity"
952		" Check Enable is set to 0x%x\n",
953 pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK)));
954 /* Restore GSM - Write Address Parity Check */
955 regVal = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK);
956 pm8001_cw32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK, regVal2);
957 PM8001_INIT_DBG(pm8001_ha,
958 pm8001_printk("GSM 0x700040 - Write Address Parity Check"
959		" Enable is set to 0x%x\n",
960 pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK)));
961 /* Restore GSM - Write Data Parity Check */
962 regVal = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK);
963 pm8001_cw32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK, regVal3);
964 PM8001_INIT_DBG(pm8001_ha,
965 pm8001_printk("GSM 0x700048 - Write Data Parity Check Enable"
966		" is set to 0x%x\n",
967 pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK)));
968
969 /* step 13: bring the IOP and AAP1 out of reset */
970 /* map 0x00000 to BAR4(0x20), BAR2(win) */
971 if (-1 == bar4_shift(pm8001_ha, SPC_TOP_LEVEL_ADDR_BASE)) {
972 PM8001_FAIL_DBG(pm8001_ha,
973 pm8001_printk("Shift Bar4 to 0x%x failed\n",
974 SPC_TOP_LEVEL_ADDR_BASE));
975 return -1;
976 }
977 regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET);
978 regVal |= (SPC_REG_RESET_PCS_IOP_SS | SPC_REG_RESET_PCS_AAP1_SS);
979 pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal);
980
981 /* step 14: delay 10 usec - Normal Mode */
982 udelay(10);
983 /* check Soft Reset Normal mode or Soft Reset HDA mode */
984 if (signature == SPC_SOFT_RESET_SIGNATURE) {
985 /* step 15 (Normal Mode): wait until scratch pad1 register
986 bit 2 toggled */
987 max_wait_count = 2 * 1000 * 1000;/* 2 sec */
988 do {
989 udelay(1);
990 regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1) &
991 SCRATCH_PAD1_RST;
992 } while ((regVal != toggleVal) && (--max_wait_count));
993
994 if (!max_wait_count) {
995 regVal = pm8001_cr32(pm8001_ha, 0,
996 MSGU_SCRATCH_PAD_1);
997 PM8001_FAIL_DBG(pm8001_ha,
998				pm8001_printk("TIMEOUT: ToggleVal 0x%x,"
999				" MSGU_SCRATCH_PAD1 = 0x%x\n",
1000 toggleVal, regVal));
1001 PM8001_FAIL_DBG(pm8001_ha,
1002 pm8001_printk("SCRATCH_PAD0 value = 0x%x\n",
1003 pm8001_cr32(pm8001_ha, 0,
1004 MSGU_SCRATCH_PAD_0)));
1005 PM8001_FAIL_DBG(pm8001_ha,
1006 pm8001_printk("SCRATCH_PAD2 value = 0x%x\n",
1007 pm8001_cr32(pm8001_ha, 0,
1008 MSGU_SCRATCH_PAD_2)));
1009 PM8001_FAIL_DBG(pm8001_ha,
1010 pm8001_printk("SCRATCH_PAD3 value = 0x%x\n",
1011 pm8001_cr32(pm8001_ha, 0,
1012 MSGU_SCRATCH_PAD_3)));
1013 return -1;
1014 }
1015
1016 /* step 16 (Normal) - Clear ODMR and ODCR */
1017 pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL);
1018 pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_CLEAR_ALL);
1019
1020 /* step 17 (Normal Mode): wait for the FW and IOP to get
1021 ready - 1 sec timeout */
1022 /* Wait for the SPC Configuration Table to be ready */
1023 if (check_fw_ready(pm8001_ha) == -1) {
1024 regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
1025 /* return error if MPI Configuration Table not ready */
1026 PM8001_INIT_DBG(pm8001_ha,
1027 pm8001_printk("FW not ready SCRATCH_PAD1"
1028 " = 0x%x\n", regVal));
1029 regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2);
1030 /* return error if MPI Configuration Table not ready */
1031 PM8001_INIT_DBG(pm8001_ha,
1032 pm8001_printk("FW not ready SCRATCH_PAD2"
1033 " = 0x%x\n", regVal));
1034 PM8001_INIT_DBG(pm8001_ha,
1035 pm8001_printk("SCRATCH_PAD0 value = 0x%x\n",
1036 pm8001_cr32(pm8001_ha, 0,
1037 MSGU_SCRATCH_PAD_0)));
1038 PM8001_INIT_DBG(pm8001_ha,
1039 pm8001_printk("SCRATCH_PAD3 value = 0x%x\n",
1040 pm8001_cr32(pm8001_ha, 0,
1041 MSGU_SCRATCH_PAD_3)));
1042 return -1;
1043 }
1044 }
1045
1046 PM8001_INIT_DBG(pm8001_ha,
1047 pm8001_printk("SPC soft reset Complete\n"));
1048 return 0;
1049}
1050
1051static void pm8001_hw_chip_rst(struct pm8001_hba_info *pm8001_ha)
1052{
1053 u32 i;
1054 u32 regVal;
1055 PM8001_INIT_DBG(pm8001_ha,
1056 pm8001_printk("chip reset start\n"));
1057
1058 /* do SPC chip reset. */
1059 regVal = pm8001_cr32(pm8001_ha, 1, SPC_REG_RESET);
1060 regVal &= ~(SPC_REG_RESET_DEVICE);
1061 pm8001_cw32(pm8001_ha, 1, SPC_REG_RESET, regVal);
1062
1063 /* delay 10 usec */
1064 udelay(10);
1065
1066 /* bring chip reset out of reset */
1067 regVal = pm8001_cr32(pm8001_ha, 1, SPC_REG_RESET);
1068 regVal |= SPC_REG_RESET_DEVICE;
1069 pm8001_cw32(pm8001_ha, 1, SPC_REG_RESET, regVal);
1070
1071 /* delay 10 usec */
1072 udelay(10);
1073
1074 /* wait for 20 msec until the firmware gets reloaded */
1075 i = 20;
1076 do {
1077 mdelay(1);
1078 } while ((--i) != 0);
1079
1080 PM8001_INIT_DBG(pm8001_ha,
1081 pm8001_printk("chip reset finished\n"));
1082}
1083
1084/**
1085 * pm8001_chip_iounmap - which maped when initilized.
1086 * @pm8001_ha: our hba card information
1087 */
1088static void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha)
1089{
1090 s8 bar, logical = 0;
1091 for (bar = 0; bar < 6; bar++) {
1092 /*
1093 ** logical BARs for SPC:
1094 ** bar 0 and 1 - logical BAR0
1095 ** bar 2 and 3 - logical BAR1
1096 ** bar4 - logical BAR2
1097 ** bar5 - logical BAR3
1098 ** Skip the appropriate assignments:
1099 */
1100 if ((bar == 1) || (bar == 3))
1101 continue;
1102 if (pm8001_ha->io_mem[logical].memvirtaddr) {
1103 iounmap(pm8001_ha->io_mem[logical].memvirtaddr);
1104 logical++;
1105 }
1106 }
1107}
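
/*
 * Sketch only, not compiled: the physical-to-logical BAR mapping
 * spelled out in the comment above, written as a lookup table for
 * illustration. The driver itself derives the logical index on the
 * fly by skipping physical BARs 1 and 3.
 */
#if 0
static const s8 spc_logical_bar[6] = {
	0, 0,	/* physical BAR0 and BAR1 -> logical BAR0 */
	1, 1,	/* physical BAR2 and BAR3 -> logical BAR1 */
	2,	/* physical BAR4          -> logical BAR2 */
	3,	/* physical BAR5          -> logical BAR3 */
};
#endif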
1108
1109/**
1110 * pm8001_chip_intx_interrupt_enable - enable PM8001 chip INT-X interrupt
1111 * @pm8001_ha: our hba card information
1112 */
1113static void
1114pm8001_chip_intx_interrupt_enable(struct pm8001_hba_info *pm8001_ha)
1115{
1116 pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_CLEAR_ALL);
1117 pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL);
1118}
1119
1120/**
1121 * pm8001_chip_intx_interrupt_disable - disable PM8001 chip INT-X interrupt
1122 * @pm8001_ha: our hba card information
1123 */
1124static void
1125pm8001_chip_intx_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
1126{
1127 pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_MASK_ALL);
1128}
1129
1130/**
1131 * pm8001_chip_msix_interrupt_enable - enable a PM8001 chip MSI-X interrupt
1132 * @pm8001_ha: our hba card information
1133 */
1134static void
1135pm8001_chip_msix_interrupt_enable(struct pm8001_hba_info *pm8001_ha,
1136 u32 int_vec_idx)
1137{
1138 u32 msi_index;
1139 u32 value;
1140 msi_index = int_vec_idx * MSIX_TABLE_ELEMENT_SIZE;
1141 msi_index += MSIX_TABLE_BASE;
1142 pm8001_cw32(pm8001_ha, 0, msi_index, MSIX_INTERRUPT_ENABLE);
1143 value = (1 << int_vec_idx);
1144 pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, value);
1145
1146}
1147
1148/**
1149 * pm8001_chip_msix_interrupt_disable - disable a PM8001 chip MSI-X interrupt
1150 * @pm8001_ha: our hba card information
1151 */
1152static void
1153pm8001_chip_msix_interrupt_disable(struct pm8001_hba_info *pm8001_ha,
1154 u32 int_vec_idx)
1155{
1156 u32 msi_index;
1157 msi_index = int_vec_idx * MSIX_TABLE_ELEMENT_SIZE;
1158 msi_index += MSIX_TABLE_BASE;
1159 pm8001_cw32(pm8001_ha, 0, msi_index, MSIX_INTERRUPT_DISABLE);
1160
1161}
1162/**
1163 * pm8001_chip_interrupt_enable - enable PM8001 chip interrupt
1164 * @pm8001_ha: our hba card information
1165 */
1166static void
1167pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha)
1168{
1169#ifdef PM8001_USE_MSIX
1170 pm8001_chip_msix_interrupt_enable(pm8001_ha, 0);
1171 return;
1172#endif
1173 pm8001_chip_intx_interrupt_enable(pm8001_ha);
1174
1175}
1176
1177/**
1178 * pm8001_chip_interrupt_disable - disable PM8001 chip interrupt
1179 * @pm8001_ha: our hba card information
1180 */
1181static void
1182pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
1183{
1184#ifdef PM8001_USE_MSIX
1185 pm8001_chip_msix_interrupt_disable(pm8001_ha, 0);
1186 return;
1187#endif
1188 pm8001_chip_intx_interrupt_disable(pm8001_ha);
1189
1190}
1191
1192/**
1193 * mpi_msg_free_get - get a free message buffer from the inbound queue.
1194 * @circularQ: the inbound queue through which we transfer to the HBA.
1195 * @messageSize: the message size of this transfer, normally it is 64 bytes
1196 * @messagePtr: the pointer to message.
1197 */
1198static int mpi_msg_free_get(struct inbound_queue_table *circularQ,
1199 u16 messageSize, void **messagePtr)
1200{
1201 u32 offset, consumer_index;
1202 struct mpi_msg_hdr *msgHeader;
1203 u8 bcCount = 1; /* only support single buffer */
1204
1205	/* Check whether the requested message size can be allocated in this queue */
1206 if (messageSize > 64) {
1207 *messagePtr = NULL;
1208 return -1;
1209 }
1210
1211	/* Snapshot the consumer index last written back by the SPC */
1212 consumer_index = pm8001_read_32(circularQ->ci_virt);
1213 circularQ->consumer_index = cpu_to_le32(consumer_index);
1214 if (((circularQ->producer_idx + bcCount) % 256) ==
1215 circularQ->consumer_index) {
1216 *messagePtr = NULL;
1217 return -1;
1218 }
1219 /* get memory IOMB buffer address */
1220 offset = circularQ->producer_idx * 64;
1221 /* increment to next bcCount element */
1222 circularQ->producer_idx = (circularQ->producer_idx + bcCount) % 256;
1223	/* Point at the IOMB at that offset from the region's virtual base,
1224	then return the payload just past the message header */
1225 msgHeader = (struct mpi_msg_hdr *)(circularQ->base_virt + offset);
1226 *messagePtr = ((void *)msgHeader) + sizeof(struct mpi_msg_hdr);
1227 return 0;
1228}
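
/*
 * Sketch only, not compiled: the ring arithmetic used above. The
 * inbound queue is a 256-element ring of 64-byte IOMBs; it is full
 * when advancing the producer index would land on the consumer index,
 * so at most 255 of the 256 slots can ever be in flight at once.
 */
#if 0
static int example_inbound_ring_full(u32 producer_idx, u32 consumer_idx,
	u8 bcCount)
{
	return ((producer_idx + bcCount) % 256) == consumer_idx;
}
#endif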
1229
1230/**
1231 * mpi_build_cmd - build an IOMB command and update the PI, telling the FW
1232 * to fetch this message from the inbound queue.
1233 * @pm8001_ha: our hba card information
1234 * @circularQ: the inbound queue we want to transfer to the HBA.
1235 * @opCode: the operation code for a command that both the LLDD and FW recognize.
1236 * @payload: the command payload of each operation command.
1237 */
1238static int mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
1239 struct inbound_queue_table *circularQ,
1240 u32 opCode, void *payload)
1241{
1242 u32 Header = 0, hpriority = 0, bc = 1, category = 0x02;
1243 u32 responseQueue = 0;
1244 void *pMessage;
1245
1246 if (mpi_msg_free_get(circularQ, 64, &pMessage) < 0) {
1247 PM8001_IO_DBG(pm8001_ha,
1248			pm8001_printk("No free mpi buffer\n"));
1249 return -1;
1250 }
1251 BUG_ON(!payload);
1252 /*Copy to the payload*/
1253 memcpy(pMessage, payload, (64 - sizeof(struct mpi_msg_hdr)));
1254
1255 /*Build the header*/
1256 Header = ((1 << 31) | (hpriority << 30) | ((bc & 0x1f) << 24)
1257 | ((responseQueue & 0x3F) << 16)
1258 | ((category & 0xF) << 12) | (opCode & 0xFFF));
1259
1260 pm8001_write_32((pMessage - 4), 0, cpu_to_le32(Header));
1261 /*Update the PI to the firmware*/
1262 pm8001_cw32(pm8001_ha, circularQ->pi_pci_bar,
1263 circularQ->pi_offset, circularQ->producer_idx);
1264 PM8001_IO_DBG(pm8001_ha,
1265		pm8001_printk("after PI = %d CI = %d\n", circularQ->producer_idx,
1266 circularQ->consumer_index));
1267 return 0;
1268}
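
/*
 * Sketch only, not compiled: decoding the IOMB header built above,
 * field by field. The positions come directly from the shift/mask
 * expression in mpi_build_cmd(); the field names are descriptive
 * labels, not identifiers taken from the firmware specification.
 */
#if 0
static void example_decode_iomb_header(u32 header)
{
	u32 valid    = (header >> 31) & 0x1;	/* buffer holds a message */
	u32 hipri    = (header >> 30) & 0x1;	/* high-priority flag */
	u32 bc       = (header >> 24) & 0x1f;	/* buffer (element) count */
	u32 obid     = (header >> 16) & 0x3f;	/* response queue number */
	u32 category = (header >> 12) & 0xf;	/* command category (0x02 above) */
	u32 opcode   = header & 0xfff;		/* operation code */
}
#endif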
1269
1270static u32 mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
1271 struct outbound_queue_table *circularQ, u8 bc)
1272{
1273 u32 producer_index;
1274 struct mpi_msg_hdr *msgHeader;
1275 struct mpi_msg_hdr *pOutBoundMsgHeader;
1276
1277 msgHeader = (struct mpi_msg_hdr *)(pMsg - sizeof(struct mpi_msg_hdr));
1278 pOutBoundMsgHeader = (struct mpi_msg_hdr *)(circularQ->base_virt +
1279 circularQ->consumer_idx * 64);
1280 if (pOutBoundMsgHeader != msgHeader) {
1281 PM8001_FAIL_DBG(pm8001_ha,
1282 pm8001_printk("consumer_idx = %d msgHeader = %p\n",
1283 circularQ->consumer_idx, msgHeader));
1284
1285 /* Update the producer index from SPC */
1286 producer_index = pm8001_read_32(circularQ->pi_virt);
1287 circularQ->producer_index = cpu_to_le32(producer_index);
1288 PM8001_FAIL_DBG(pm8001_ha,
1289 pm8001_printk("consumer_idx = %d producer_index = %d"
1290			" msgHeader = %p\n", circularQ->consumer_idx,
1291 circularQ->producer_index, msgHeader));
1292 return 0;
1293 }
1294	/* free the circular queue buffer elements associated with the message */
1295 circularQ->consumer_idx = (circularQ->consumer_idx + bc) % 256;
1296 /* update the CI of outbound queue */
1297 pm8001_cw32(pm8001_ha, circularQ->ci_pci_bar, circularQ->ci_offset,
1298 circularQ->consumer_idx);
1299 /* Update the producer index from SPC*/
1300 producer_index = pm8001_read_32(circularQ->pi_virt);
1301 circularQ->producer_index = cpu_to_le32(producer_index);
1302 PM8001_IO_DBG(pm8001_ha,
1303 pm8001_printk(" CI=%d PI=%d\n", circularQ->consumer_idx,
1304 circularQ->producer_index));
1305 return 0;
1306}
1307
1308/**
1309 * mpi_msg_consume - get an MPI message from the outbound queue.
1310 * @pm8001_ha: our hba card information
1311 * @circularQ: the outbound queue table.
1312 * @messagePtr1: the message contents of this outbound message.
1313 * @pBC: the buffer count of this message.
1314 */
1315static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
1316 struct outbound_queue_table *circularQ,
1317 void **messagePtr1, u8 *pBC)
1318{
1319 struct mpi_msg_hdr *msgHeader;
1320 __le32 msgHeader_tmp;
1321 u32 header_tmp;
1322 do {
1323 /* If there are not-yet-delivered messages ... */
1324 if (circularQ->producer_index != circularQ->consumer_idx) {
1325 /*Get the pointer to the circular queue buffer element*/
1326 msgHeader = (struct mpi_msg_hdr *)
1327 (circularQ->base_virt +
1328 circularQ->consumer_idx * 64);
1329 /* read header */
1330 header_tmp = pm8001_read_32(msgHeader);
1331 msgHeader_tmp = cpu_to_le32(header_tmp);
1332 if (0 != (msgHeader_tmp & 0x80000000)) {
1333 if (OPC_OUB_SKIP_ENTRY !=
1334 (msgHeader_tmp & 0xfff)) {
1335 *messagePtr1 =
1336 ((u8 *)msgHeader) +
1337 sizeof(struct mpi_msg_hdr);
1338 *pBC = (u8)((msgHeader_tmp >> 24) &
1339 0x1f);
1340 PM8001_IO_DBG(pm8001_ha,
1341 pm8001_printk(": CI=%d PI=%d "
1342 "msgHeader=%x\n",
1343 circularQ->consumer_idx,
1344 circularQ->producer_index,
1345 msgHeader_tmp));
1346 return MPI_IO_STATUS_SUCCESS;
1347 } else {
1348 circularQ->consumer_idx =
1349 (circularQ->consumer_idx +
1350 ((msgHeader_tmp >> 24) & 0x1f))
1351 % 256;
1352 msgHeader_tmp = 0;
1353 pm8001_write_32(msgHeader, 0, 0);
1354 /* update the CI of outbound queue */
1355 pm8001_cw32(pm8001_ha,
1356 circularQ->ci_pci_bar,
1357 circularQ->ci_offset,
1358 circularQ->consumer_idx);
1359 }
1360 } else {
1361 circularQ->consumer_idx =
1362 (circularQ->consumer_idx +
1363 ((msgHeader_tmp >> 24) & 0x1f)) % 256;
1364 msgHeader_tmp = 0;
1365 pm8001_write_32(msgHeader, 0, 0);
1366 /* update the CI of outbound queue */
1367 pm8001_cw32(pm8001_ha, circularQ->ci_pci_bar,
1368 circularQ->ci_offset,
1369 circularQ->consumer_idx);
1370 return MPI_IO_STATUS_FAIL;
1371 }
1372 } else {
1373 u32 producer_index;
1374 void *pi_virt = circularQ->pi_virt;
1375 /* Update the producer index from SPC */
1376 producer_index = pm8001_read_32(pi_virt);
1377 circularQ->producer_index = cpu_to_le32(producer_index);
1378 }
1379 } while (circularQ->producer_index != circularQ->consumer_idx);
1380	/* while we don't have any more not-yet-delivered messages */
1381 /* report empty */
1382 return MPI_IO_STATUS_BUSY;
1383}
1384
1385static void pm8001_work_queue(struct work_struct *work)
1386{
1387 struct delayed_work *dw = container_of(work, struct delayed_work, work);
1388 struct pm8001_wq *wq = container_of(dw, struct pm8001_wq, work_q);
1389 struct pm8001_device *pm8001_dev;
1390 struct domain_device *dev;
1391
1392 switch (wq->handler) {
1393 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
1394 pm8001_dev = wq->data;
1395 dev = pm8001_dev->sas_device;
1396 pm8001_I_T_nexus_reset(dev);
1397 break;
1398 case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
1399 pm8001_dev = wq->data;
1400 dev = pm8001_dev->sas_device;
1401 pm8001_I_T_nexus_reset(dev);
1402 break;
1403 case IO_DS_IN_ERROR:
1404 pm8001_dev = wq->data;
1405 dev = pm8001_dev->sas_device;
1406 pm8001_I_T_nexus_reset(dev);
1407 break;
1408 case IO_DS_NON_OPERATIONAL:
1409 pm8001_dev = wq->data;
1410 dev = pm8001_dev->sas_device;
1411 pm8001_I_T_nexus_reset(dev);
1412 break;
1413 }
1414 list_del(&wq->entry);
1415 kfree(wq);
1416}
1417
1418static int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data,
1419 int handler)
1420{
1421 struct pm8001_wq *wq;
1422 int ret = 0;
1423
1424 wq = kmalloc(sizeof(struct pm8001_wq), GFP_ATOMIC);
1425 if (wq) {
1426 wq->pm8001_ha = pm8001_ha;
1427 wq->data = data;
1428 wq->handler = handler;
1429 INIT_DELAYED_WORK(&wq->work_q, pm8001_work_queue);
1430 list_add_tail(&wq->entry, &pm8001_ha->wq_list);
1431 schedule_delayed_work(&wq->work_q, 0);
1432 } else
1433 ret = -ENOMEM;
1434
1435 return ret;
1436}
1437
1438/**
1439 * mpi_ssp_completion - process the FW response to an SSP request.
1440 * @pm8001_ha: our hba card information
1441 * @piomb: the message contents of this outbound message.
1442 *
1443 * When the FW has completed an SSP request (for example an I/O request) and
1444 * has filled the scatter/gather buffers with the data, it raises this event
1445 * to signal that the job is finished; the corresponding buffer should then
1446 * be checked. We notify the caller, which may be waiting for the result, so
1447 * that the upper layer can be told the task has finished.
1448 */
1449static void
1450mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
1451{
1452 struct sas_task *t;
1453 struct pm8001_ccb_info *ccb;
1454 unsigned long flags;
1455 u32 status;
1456 u32 param;
1457 u32 tag;
1458 struct ssp_completion_resp *psspPayload;
1459 struct task_status_struct *ts;
1460 struct ssp_response_iu *iu;
1461 struct pm8001_device *pm8001_dev;
1462 psspPayload = (struct ssp_completion_resp *)(piomb + 4);
1463 status = le32_to_cpu(psspPayload->status);
1464 tag = le32_to_cpu(psspPayload->tag);
1465 ccb = &pm8001_ha->ccb_info[tag];
1466 pm8001_dev = ccb->device;
1467 param = le32_to_cpu(psspPayload->param);
1468
1469 t = ccb->task;
1470
1471 if (status && status != IO_UNDERFLOW)
1472 PM8001_FAIL_DBG(pm8001_ha,
1473 pm8001_printk("sas IO status 0x%x\n", status));
1474 if (unlikely(!t || !t->lldd_task || !t->dev))
1475 return;
1476 ts = &t->task_status;
1477 switch (status) {
1478 case IO_SUCCESS:
1479 PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS"
1480		", param = %d\n", param));
1481 if (param == 0) {
1482 ts->resp = SAS_TASK_COMPLETE;
1483 ts->stat = SAM_GOOD;
1484 } else {
1485 ts->resp = SAS_TASK_COMPLETE;
1486 ts->stat = SAS_PROTO_RESPONSE;
1487 ts->residual = param;
1488 iu = &psspPayload->ssp_resp_iu;
1489 sas_ssp_task_response(pm8001_ha->dev, t, iu);
1490 }
1491 if (pm8001_dev)
1492 pm8001_dev->running_req--;
1493 break;
1494 case IO_ABORTED:
1495 PM8001_IO_DBG(pm8001_ha,
1496			pm8001_printk("IO_ABORTED IOMB Tag\n"));
1497 ts->resp = SAS_TASK_COMPLETE;
1498 ts->stat = SAS_ABORTED_TASK;
1499 break;
1500 case IO_UNDERFLOW:
1501 /* SSP Completion with error */
1502 PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW"
1503		", param = %d\n", param));
1504 ts->resp = SAS_TASK_COMPLETE;
1505 ts->stat = SAS_DATA_UNDERRUN;
1506 ts->residual = param;
1507 if (pm8001_dev)
1508 pm8001_dev->running_req--;
1509 break;
1510 case IO_NO_DEVICE:
1511 PM8001_IO_DBG(pm8001_ha,
1512 pm8001_printk("IO_NO_DEVICE\n"));
1513 ts->resp = SAS_TASK_UNDELIVERED;
1514 ts->stat = SAS_PHY_DOWN;
1515 break;
1516 case IO_XFER_ERROR_BREAK:
1517 PM8001_IO_DBG(pm8001_ha,
1518 pm8001_printk("IO_XFER_ERROR_BREAK\n"));
1519 ts->resp = SAS_TASK_COMPLETE;
1520 ts->stat = SAS_OPEN_REJECT;
1521 break;
1522 case IO_XFER_ERROR_PHY_NOT_READY:
1523 PM8001_IO_DBG(pm8001_ha,
1524 pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
1525 ts->resp = SAS_TASK_COMPLETE;
1526 ts->stat = SAS_OPEN_REJECT;
1527 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
1528 break;
1529 case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
1530 PM8001_IO_DBG(pm8001_ha,
1531 pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
1532 ts->resp = SAS_TASK_COMPLETE;
1533 ts->stat = SAS_OPEN_REJECT;
1534 ts->open_rej_reason = SAS_OREJ_EPROTO;
1535 break;
1536 case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
1537 PM8001_IO_DBG(pm8001_ha,
1538 pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
1539 ts->resp = SAS_TASK_COMPLETE;
1540 ts->stat = SAS_OPEN_REJECT;
1541 ts->open_rej_reason = SAS_OREJ_UNKNOWN;
1542 break;
1543 case IO_OPEN_CNX_ERROR_BREAK:
1544 PM8001_IO_DBG(pm8001_ha,
1545 pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
1546 ts->resp = SAS_TASK_COMPLETE;
1547 ts->stat = SAS_OPEN_REJECT;
1548 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
1549 break;
1550 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
1551 PM8001_IO_DBG(pm8001_ha,
1552 pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
1553 ts->resp = SAS_TASK_COMPLETE;
1554 ts->stat = SAS_OPEN_REJECT;
1555 ts->open_rej_reason = SAS_OREJ_UNKNOWN;
1556 if (!t->uldd_task)
1557 pm8001_handle_event(pm8001_ha,
1558 pm8001_dev,
1559 IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
1560 break;
1561 case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
1562 PM8001_IO_DBG(pm8001_ha,
1563 pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
1564 ts->resp = SAS_TASK_COMPLETE;
1565 ts->stat = SAS_OPEN_REJECT;
1566 ts->open_rej_reason = SAS_OREJ_BAD_DEST;
1567 break;
1568 case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
1569 PM8001_IO_DBG(pm8001_ha,
1570 pm8001_printk("IO_OPEN_CNX_ERROR_CONNECTION_RATE_"
1571 "NOT_SUPPORTED\n"));
1572 ts->resp = SAS_TASK_COMPLETE;
1573 ts->stat = SAS_OPEN_REJECT;
1574 ts->open_rej_reason = SAS_OREJ_CONN_RATE;
1575 break;
1576 case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
1577 PM8001_IO_DBG(pm8001_ha,
1578 pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
1579 ts->resp = SAS_TASK_UNDELIVERED;
1580 ts->stat = SAS_OPEN_REJECT;
1581 ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
1582 break;
1583 case IO_XFER_ERROR_NAK_RECEIVED:
1584 PM8001_IO_DBG(pm8001_ha,
1585 pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
1586 ts->resp = SAS_TASK_COMPLETE;
1587 ts->stat = SAS_OPEN_REJECT;
1588 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
1589 break;
1590 case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
1591 PM8001_IO_DBG(pm8001_ha,
1592 pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
1593 ts->resp = SAS_TASK_COMPLETE;
1594 ts->stat = SAS_NAK_R_ERR;
1595 break;
1596 case IO_XFER_ERROR_DMA:
1597 PM8001_IO_DBG(pm8001_ha,
1598 pm8001_printk("IO_XFER_ERROR_DMA\n"));
1599 ts->resp = SAS_TASK_COMPLETE;
1600 ts->stat = SAS_OPEN_REJECT;
1601 break;
1602 case IO_XFER_OPEN_RETRY_TIMEOUT:
1603 PM8001_IO_DBG(pm8001_ha,
1604 pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
1605 ts->resp = SAS_TASK_COMPLETE;
1606 ts->stat = SAS_OPEN_REJECT;
1607 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
1608 break;
1609 case IO_XFER_ERROR_OFFSET_MISMATCH:
1610 PM8001_IO_DBG(pm8001_ha,
1611 pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
1612 ts->resp = SAS_TASK_COMPLETE;
1613 ts->stat = SAS_OPEN_REJECT;
1614 break;
1615 case IO_PORT_IN_RESET:
1616 PM8001_IO_DBG(pm8001_ha,
1617 pm8001_printk("IO_PORT_IN_RESET\n"));
1618 ts->resp = SAS_TASK_COMPLETE;
1619 ts->stat = SAS_OPEN_REJECT;
1620 break;
1621 case IO_DS_NON_OPERATIONAL:
1622 PM8001_IO_DBG(pm8001_ha,
1623 pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
1624 ts->resp = SAS_TASK_COMPLETE;
1625 ts->stat = SAS_OPEN_REJECT;
1626 if (!t->uldd_task)
1627 pm8001_handle_event(pm8001_ha,
1628 pm8001_dev,
1629 IO_DS_NON_OPERATIONAL);
1630 break;
1631 case IO_DS_IN_RECOVERY:
1632 PM8001_IO_DBG(pm8001_ha,
1633 pm8001_printk("IO_DS_IN_RECOVERY\n"));
1634 ts->resp = SAS_TASK_COMPLETE;
1635 ts->stat = SAS_OPEN_REJECT;
1636 break;
1637 case IO_TM_TAG_NOT_FOUND:
1638 PM8001_IO_DBG(pm8001_ha,
1639 pm8001_printk("IO_TM_TAG_NOT_FOUND\n"));
1640 ts->resp = SAS_TASK_COMPLETE;
1641 ts->stat = SAS_OPEN_REJECT;
1642 break;
1643 case IO_SSP_EXT_IU_ZERO_LEN_ERROR:
1644 PM8001_IO_DBG(pm8001_ha,
1645 pm8001_printk("IO_SSP_EXT_IU_ZERO_LEN_ERROR\n"));
1646 ts->resp = SAS_TASK_COMPLETE;
1647 ts->stat = SAS_OPEN_REJECT;
1648 break;
1649 case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
1650 PM8001_IO_DBG(pm8001_ha,
1651 pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
1652 ts->resp = SAS_TASK_COMPLETE;
1653 ts->stat = SAS_OPEN_REJECT;
1654		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		break;
1655 default:
1656 PM8001_IO_DBG(pm8001_ha,
1657 pm8001_printk("Unknown status 0x%x\n", status));
1658 /* not allowed case. Therefore, return failed status */
1659 ts->resp = SAS_TASK_COMPLETE;
1660 ts->stat = SAS_OPEN_REJECT;
1661 break;
1662 }
1663 PM8001_IO_DBG(pm8001_ha,
1664		pm8001_printk("scsi_status = %x\n",
1665 psspPayload->ssp_resp_iu.status));
1666 spin_lock_irqsave(&t->task_state_lock, flags);
1667 t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
1668 t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
1669 t->task_state_flags |= SAS_TASK_STATE_DONE;
1670 if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
1671 spin_unlock_irqrestore(&t->task_state_lock, flags);
1672 PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("task 0x%p done with"
1673 " io_status 0x%x resp 0x%x "
1674 "stat 0x%x but aborted by upper layer!\n",
1675 t, status, ts->resp, ts->stat));
1676 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
1677 } else {
1678 spin_unlock_irqrestore(&t->task_state_lock, flags);
1679 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
1680 mb();/* in order to force CPU ordering */
1681 t->task_done(t);
1682 }
1683}
1684
1685/* See the comments for mpi_ssp_completion */
1686static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
1687{
1688 struct sas_task *t;
1689 unsigned long flags;
1690 struct task_status_struct *ts;
1691 struct pm8001_ccb_info *ccb;
1692 struct pm8001_device *pm8001_dev;
1693 struct ssp_event_resp *psspPayload =
1694 (struct ssp_event_resp *)(piomb + 4);
1695 u32 event = le32_to_cpu(psspPayload->event);
1696 u32 tag = le32_to_cpu(psspPayload->tag);
1697 u32 port_id = le32_to_cpu(psspPayload->port_id);
1698 u32 dev_id = le32_to_cpu(psspPayload->device_id);
1699
1700 ccb = &pm8001_ha->ccb_info[tag];
1701 t = ccb->task;
1702 pm8001_dev = ccb->device;
1703 if (event)
1704 PM8001_FAIL_DBG(pm8001_ha,
1705			pm8001_printk("sas IO event 0x%x\n", event));
1706 if (unlikely(!t || !t->lldd_task || !t->dev))
1707 return;
1708 ts = &t->task_status;
1709 PM8001_IO_DBG(pm8001_ha,
1710 pm8001_printk("port_id = %x,device_id = %x\n",
1711 port_id, dev_id));
1712 switch (event) {
1713 case IO_OVERFLOW:
1714		PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_OVERFLOW\n"));
1715 ts->resp = SAS_TASK_COMPLETE;
1716 ts->stat = SAS_DATA_OVERRUN;
1717 ts->residual = 0;
1718 if (pm8001_dev)
1719 pm8001_dev->running_req--;
1720 break;
1721 case IO_XFER_ERROR_BREAK:
1722 PM8001_IO_DBG(pm8001_ha,
1723 pm8001_printk("IO_XFER_ERROR_BREAK\n"));
1724 ts->resp = SAS_TASK_COMPLETE;
1725 ts->stat = SAS_INTERRUPTED;
1726 break;
1727 case IO_XFER_ERROR_PHY_NOT_READY:
1728 PM8001_IO_DBG(pm8001_ha,
1729 pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
1730 ts->resp = SAS_TASK_COMPLETE;
1731 ts->stat = SAS_OPEN_REJECT;
1732 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
1733 break;
1734 case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
1735 PM8001_IO_DBG(pm8001_ha,
1736 pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT"
1737 "_SUPPORTED\n"));
1738 ts->resp = SAS_TASK_COMPLETE;
1739 ts->stat = SAS_OPEN_REJECT;
1740 ts->open_rej_reason = SAS_OREJ_EPROTO;
1741 break;
1742 case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
1743 PM8001_IO_DBG(pm8001_ha,
1744 pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
1745 ts->resp = SAS_TASK_COMPLETE;
1746 ts->stat = SAS_OPEN_REJECT;
1747 ts->open_rej_reason = SAS_OREJ_UNKNOWN;
1748 break;
1749 case IO_OPEN_CNX_ERROR_BREAK:
1750 PM8001_IO_DBG(pm8001_ha,
1751 pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
1752 ts->resp = SAS_TASK_COMPLETE;
1753 ts->stat = SAS_OPEN_REJECT;
1754 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
1755 break;
1756 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
1757 PM8001_IO_DBG(pm8001_ha,
1758 pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
1759 ts->resp = SAS_TASK_COMPLETE;
1760 ts->stat = SAS_OPEN_REJECT;
1761 ts->open_rej_reason = SAS_OREJ_UNKNOWN;
1762 if (!t->uldd_task)
1763 pm8001_handle_event(pm8001_ha,
1764 pm8001_dev,
1765 IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
1766 break;
1767 case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
1768 PM8001_IO_DBG(pm8001_ha,
1769 pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
1770 ts->resp = SAS_TASK_COMPLETE;
1771 ts->stat = SAS_OPEN_REJECT;
1772 ts->open_rej_reason = SAS_OREJ_BAD_DEST;
1773 break;
1774 case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
1775 PM8001_IO_DBG(pm8001_ha,
1776 pm8001_printk("IO_OPEN_CNX_ERROR_CONNECTION_RATE_"
1777 "NOT_SUPPORTED\n"));
1778 ts->resp = SAS_TASK_COMPLETE;
1779 ts->stat = SAS_OPEN_REJECT;
1780 ts->open_rej_reason = SAS_OREJ_CONN_RATE;
1781 break;
1782 case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
1783 PM8001_IO_DBG(pm8001_ha,
1784 pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
1785 ts->resp = SAS_TASK_COMPLETE;
1786 ts->stat = SAS_OPEN_REJECT;
1787 ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
1788 break;
1789 case IO_XFER_ERROR_NAK_RECEIVED:
1790 PM8001_IO_DBG(pm8001_ha,
1791 pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
1792 ts->resp = SAS_TASK_COMPLETE;
1793 ts->stat = SAS_OPEN_REJECT;
1794 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
1795 break;
1796 case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
1797 PM8001_IO_DBG(pm8001_ha,
1798 pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
1799 ts->resp = SAS_TASK_COMPLETE;
1800 ts->stat = SAS_NAK_R_ERR;
1801 break;
1802 case IO_XFER_OPEN_RETRY_TIMEOUT:
1803 PM8001_IO_DBG(pm8001_ha,
1804 pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
1805 ts->resp = SAS_TASK_COMPLETE;
1806 ts->stat = SAS_OPEN_REJECT;
1807 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
1808 break;
1809 case IO_XFER_ERROR_UNEXPECTED_PHASE:
1810 PM8001_IO_DBG(pm8001_ha,
1811 pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n"));
1812 ts->resp = SAS_TASK_COMPLETE;
1813 ts->stat = SAS_DATA_OVERRUN;
1814 break;
1815 case IO_XFER_ERROR_XFER_RDY_OVERRUN:
1816 PM8001_IO_DBG(pm8001_ha,
1817 pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n"));
1818 ts->resp = SAS_TASK_COMPLETE;
1819 ts->stat = SAS_DATA_OVERRUN;
1820 break;
1821 case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED:
1822 PM8001_IO_DBG(pm8001_ha,
1823 pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n"));
1824 ts->resp = SAS_TASK_COMPLETE;
1825 ts->stat = SAS_DATA_OVERRUN;
1826 break;
1827 case IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT:
1828 PM8001_IO_DBG(pm8001_ha,
1829 pm8001_printk("IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT\n"));
1830 ts->resp = SAS_TASK_COMPLETE;
1831 ts->stat = SAS_DATA_OVERRUN;
1832 break;
1833 case IO_XFER_ERROR_OFFSET_MISMATCH:
1834 PM8001_IO_DBG(pm8001_ha,
1835 pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
1836 ts->resp = SAS_TASK_COMPLETE;
1837 ts->stat = SAS_DATA_OVERRUN;
1838 break;
1839 case IO_XFER_ERROR_XFER_ZERO_DATA_LEN:
1840 PM8001_IO_DBG(pm8001_ha,
1841 pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n"));
1842 ts->resp = SAS_TASK_COMPLETE;
1843 ts->stat = SAS_DATA_OVERRUN;
1844 break;
1845 case IO_XFER_CMD_FRAME_ISSUED:
1846 PM8001_IO_DBG(pm8001_ha,
1847 pm8001_printk(" IO_XFER_CMD_FRAME_ISSUED\n"));
1848 return;
1849 default:
1850 PM8001_IO_DBG(pm8001_ha,
1851 pm8001_printk("Unknown status 0x%x\n", event));
1852 /* not allowed case. Therefore, return failed status */
1853 ts->resp = SAS_TASK_COMPLETE;
1854 ts->stat = SAS_DATA_OVERRUN;
1855 break;
1856 }
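 /* Common completion path: clear the pending flags, then either free the
  * ccb quietly (the task was already aborted above us) or complete the
  * task back to libsas. */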
1857 spin_lock_irqsave(&t->task_state_lock, flags);
1858 t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
1859 t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
1860 t->task_state_flags |= SAS_TASK_STATE_DONE;
1861 if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
1862 spin_unlock_irqrestore(&t->task_state_lock, flags);
1863 PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("task 0x%p done with"
1864 " event 0x%x resp 0x%x "
1865 "stat 0x%x but aborted by upper layer!\n",
1866 t, event, ts->resp, ts->stat));
1867 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
1868 } else {
1869 spin_unlock_irqrestore(&t->task_state_lock, flags);
1870 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
1871 mb();/* in order to force CPU ordering */
1872 t->task_done(t);
1873 }
1874}
1875
1876/* See the comments for mpi_ssp_completion */
1877static void
1878mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
1879{
1880 struct sas_task *t;
1881 struct pm8001_ccb_info *ccb;
1882 unsigned long flags = 0;
1883 u32 param;
1884 u32 status;
1885 u32 tag;
1886 struct sata_completion_resp *psataPayload;
1887 struct task_status_struct *ts;
1888 struct ata_task_resp *resp;
1889 u32 *sata_resp;
1890 struct pm8001_device *pm8001_dev;
1891
1892 psataPayload = (struct sata_completion_resp *)(piomb + 4);
1893 status = le32_to_cpu(psataPayload->status);
1894 tag = le32_to_cpu(psataPayload->tag);
1895
1896 ccb = &pm8001_ha->ccb_info[tag];
1897 param = le32_to_cpu(psataPayload->param);
1898 t = ccb->task;
1899 pm8001_dev = ccb->device;
1900 if (status)
1901 PM8001_FAIL_DBG(pm8001_ha,
1902 pm8001_printk("sata IO status 0x%x\n", status));
1903 if (unlikely(!t || !t->lldd_task || !t->dev))
1904 return;
1905 ts = &t->task_status;
1906
1907 switch (status) {
1908 case IO_SUCCESS:
1909 PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
1910 if (param == 0) {
1911 ts->resp = SAS_TASK_COMPLETE;
1912 ts->stat = SAM_GOOD;
1913 } else {
1914 u8 len;
1915 ts->resp = SAS_TASK_COMPLETE;
1916 ts->stat = SAS_PROTO_RESPONSE;
1917 ts->residual = param;
1918 PM8001_IO_DBG(pm8001_ha,
1919 pm8001_printk("SAS_PROTO_RESPONSE len = %d\n",
1920 param));
1921 sata_resp = &psataPayload->sata_resp[0];
1922 resp = (struct ata_task_resp *)ts->buf;
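 /* The size of the returned FIS depends on the protocol: a PIO setup FIS
  * for non-DMA reads, a set device bits FIS for NCQ, otherwise a plain
  * device-to-host register FIS. */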
1923 if (t->ata_task.dma_xfer == 0 &&
1924 t->data_dir == PCI_DMA_FROMDEVICE) {
1925 len = sizeof(struct pio_setup_fis);
1926 PM8001_IO_DBG(pm8001_ha,
1927 pm8001_printk("PIO read len = %d\n", len));
1928 } else if (t->ata_task.use_ncq) {
1929 len = sizeof(struct set_dev_bits_fis);
1930 PM8001_IO_DBG(pm8001_ha,
1931 pm8001_printk("FPDMA len = %d\n", len));
1932 } else {
1933 len = sizeof(struct dev_to_host_fis);
1934 PM8001_IO_DBG(pm8001_ha,
1935 pm8001_printk("other len = %d\n", len));
1936 }
1937 if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) {
1938 resp->frame_len = len;
1939 memcpy(&resp->ending_fis[0], sata_resp, len);
1940 ts->buf_valid_size = sizeof(*resp);
1941 } else
1942 PM8001_IO_DBG(pm8001_ha,
1943 pm8001_printk("response to large \n"));
1944 }
1945 if (pm8001_dev)
1946 pm8001_dev->running_req--;
1947 break;
1948 case IO_ABORTED:
1949 PM8001_IO_DBG(pm8001_ha,
1950 pm8001_printk("IO_ABORTED IOMB Tag \n"));
1951 ts->resp = SAS_TASK_COMPLETE;
1952 ts->stat = SAS_ABORTED_TASK;
1953 if (pm8001_dev)
1954 pm8001_dev->running_req--;
1955 break;
1956 /* following cases are to do cases */
1957 case IO_UNDERFLOW:
1958 /* SATA Completion with error */
1959 PM8001_IO_DBG(pm8001_ha,
1960 pm8001_printk("IO_UNDERFLOW param = %d\n", param));
1961 ts->resp = SAS_TASK_COMPLETE;
1962 ts->stat = SAS_DATA_UNDERRUN;
1963 ts->residual = param;
1964 if (pm8001_dev)
1965 pm8001_dev->running_req--;
1966 break;
1967 case IO_NO_DEVICE:
1968 PM8001_IO_DBG(pm8001_ha,
1969 pm8001_printk("IO_NO_DEVICE\n"));
1970 ts->resp = SAS_TASK_UNDELIVERED;
1971 ts->stat = SAS_PHY_DOWN;
1972 break;
1973 case IO_XFER_ERROR_BREAK:
1974 PM8001_IO_DBG(pm8001_ha,
1975 pm8001_printk("IO_XFER_ERROR_BREAK\n"));
1976 ts->resp = SAS_TASK_COMPLETE;
1977 ts->stat = SAS_INTERRUPTED;
1978 break;
1979 case IO_XFER_ERROR_PHY_NOT_READY:
1980 PM8001_IO_DBG(pm8001_ha,
1981 pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
1982 ts->resp = SAS_TASK_COMPLETE;
1983 ts->stat = SAS_OPEN_REJECT;
1984 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
1985 break;
1986 case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
1987 PM8001_IO_DBG(pm8001_ha,
1988 pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT"
1989 "_SUPPORTED\n"));
1990 ts->resp = SAS_TASK_COMPLETE;
1991 ts->stat = SAS_OPEN_REJECT;
1992 ts->open_rej_reason = SAS_OREJ_EPROTO;
1993 break;
1994 case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
1995 PM8001_IO_DBG(pm8001_ha,
1996 pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
1997 ts->resp = SAS_TASK_COMPLETE;
1998 ts->stat = SAS_OPEN_REJECT;
1999 ts->open_rej_reason = SAS_OREJ_UNKNOWN;
2000 break;
2001 case IO_OPEN_CNX_ERROR_BREAK:
2002 PM8001_IO_DBG(pm8001_ha,
2003 pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
2004 ts->resp = SAS_TASK_COMPLETE;
2005 ts->stat = SAS_OPEN_REJECT;
2006 ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
2007 break;
2008 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
2009 PM8001_IO_DBG(pm8001_ha,
2010 pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
2011 ts->resp = SAS_TASK_COMPLETE;
2012 ts->stat = SAS_DEV_NO_RESPONSE;
2013 if (!t->uldd_task) {
2014 pm8001_handle_event(pm8001_ha,
2015 pm8001_dev,
2016 IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
2017 ts->resp = SAS_TASK_UNDELIVERED;
2018 ts->stat = SAS_QUEUE_FULL;
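 /* The caller holds pm8001_ha->lock; drop it across task_done() so the
  * completion callback can re-enter the driver safely. */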
2019 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2020 mb();/*in order to force CPU ordering*/
2021 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
2022 t->task_done(t);
2023 spin_lock_irqsave(&pm8001_ha->lock, flags);
2024 return;
2025 }
2026 break;
2027 case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
2028 PM8001_IO_DBG(pm8001_ha,
2029 pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
2030 ts->resp = SAS_TASK_UNDELIVERED;
2031 ts->stat = SAS_OPEN_REJECT;
2032 ts->open_rej_reason = SAS_OREJ_BAD_DEST;
2033 if (!t->uldd_task) {
2034 pm8001_handle_event(pm8001_ha,
2035 pm8001_dev,
2036 IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
2037 ts->resp = SAS_TASK_UNDELIVERED;
2038 ts->stat = SAS_QUEUE_FULL;
2039 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2040 mb();/*ditto*/
2041 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
2042 t->task_done(t);
2043 spin_lock_irqsave(&pm8001_ha->lock, flags);
2044 return;
2045 }
2046 break;
2047 case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
2048 PM8001_IO_DBG(pm8001_ha,
2049 pm8001_printk("IO_OPEN_CNX_ERROR_CONNECTION_RATE_"
2050 "NOT_SUPPORTED\n"));
2051 ts->resp = SAS_TASK_COMPLETE;
2052 ts->stat = SAS_OPEN_REJECT;
2053 ts->open_rej_reason = SAS_OREJ_CONN_RATE;
2054 break;
2055 case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
2056 PM8001_IO_DBG(pm8001_ha,
2057 pm8001_printk("IO_OPEN_CNX_ERROR_STP_RESOURCES"
2058 "_BUSY\n"));
2059 ts->resp = SAS_TASK_COMPLETE;
2060 ts->stat = SAS_DEV_NO_RESPONSE;
2061 if (!t->uldd_task) {
2062 pm8001_handle_event(pm8001_ha,
2063 pm8001_dev,
2064 IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY);
2065 ts->resp = SAS_TASK_UNDELIVERED;
2066 ts->stat = SAS_QUEUE_FULL;
2067 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2068 mb();/* ditto*/
2069 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
2070 t->task_done(t);
2071 spin_lock_irqsave(&pm8001_ha->lock, flags);
2072 return;
2073 }
2074 break;
2075 case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
2076 PM8001_IO_DBG(pm8001_ha,
2077 pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
2078 ts->resp = SAS_TASK_COMPLETE;
2079 ts->stat = SAS_OPEN_REJECT;
2080 ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
2081 break;
2082 case IO_XFER_ERROR_NAK_RECEIVED:
2083 PM8001_IO_DBG(pm8001_ha,
2084 pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
2085 ts->resp = SAS_TASK_COMPLETE;
2086 ts->stat = SAS_NAK_R_ERR;
2087 break;
2088 case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
2089 PM8001_IO_DBG(pm8001_ha,
2090 pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
2091 ts->resp = SAS_TASK_COMPLETE;
2092 ts->stat = SAS_NAK_R_ERR;
2093 break;
2094 case IO_XFER_ERROR_DMA:
2095 PM8001_IO_DBG(pm8001_ha,
2096 pm8001_printk("IO_XFER_ERROR_DMA\n"));
2097 ts->resp = SAS_TASK_COMPLETE;
2098 ts->stat = SAS_ABORTED_TASK;
2099 break;
2100 case IO_XFER_ERROR_SATA_LINK_TIMEOUT:
2101 PM8001_IO_DBG(pm8001_ha,
2102 pm8001_printk("IO_XFER_ERROR_SATA_LINK_TIMEOUT\n"));
2103 ts->resp = SAS_TASK_UNDELIVERED;
2104 ts->stat = SAS_DEV_NO_RESPONSE;
2105 break;
2106 case IO_XFER_ERROR_REJECTED_NCQ_MODE:
2107 PM8001_IO_DBG(pm8001_ha,
2108 pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n"));
2109 ts->resp = SAS_TASK_COMPLETE;
2110 ts->stat = SAS_DATA_UNDERRUN;
2111 break;
2112 case IO_XFER_OPEN_RETRY_TIMEOUT:
2113 PM8001_IO_DBG(pm8001_ha,
2114 pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
2115 ts->resp = SAS_TASK_COMPLETE;
2116 ts->stat = SAS_OPEN_TO;
2117 break;
2118 case IO_PORT_IN_RESET:
2119 PM8001_IO_DBG(pm8001_ha,
2120 pm8001_printk("IO_PORT_IN_RESET\n"));
2121 ts->resp = SAS_TASK_COMPLETE;
2122 ts->stat = SAS_DEV_NO_RESPONSE;
2123 break;
2124 case IO_DS_NON_OPERATIONAL:
2125 PM8001_IO_DBG(pm8001_ha,
2126 pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
2127 ts->resp = SAS_TASK_COMPLETE;
2128 ts->stat = SAS_DEV_NO_RESPONSE;
2129 if (!t->uldd_task) {
2130 pm8001_handle_event(pm8001_ha, pm8001_dev,
2131 IO_DS_NON_OPERATIONAL);
2132 ts->resp = SAS_TASK_UNDELIVERED;
2133 ts->stat = SAS_QUEUE_FULL;
2134 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2135 mb();/*ditto*/
2136 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
2137 t->task_done(t);
2138 spin_lock_irqsave(&pm8001_ha->lock, flags);
2139 return;
2140 }
2141 break;
2142 case IO_DS_IN_RECOVERY:
2143 PM8001_IO_DBG(pm8001_ha,
2144 pm8001_printk(" IO_DS_IN_RECOVERY\n"));
2145 ts->resp = SAS_TASK_COMPLETE;
2146 ts->stat = SAS_DEV_NO_RESPONSE;
2147 break;
2148 case IO_DS_IN_ERROR:
2149 PM8001_IO_DBG(pm8001_ha,
2150 pm8001_printk("IO_DS_IN_ERROR\n"));
2151 ts->resp = SAS_TASK_COMPLETE;
2152 ts->stat = SAS_DEV_NO_RESPONSE;
2153 if (!t->uldd_task) {
2154 pm8001_handle_event(pm8001_ha, pm8001_dev,
2155 IO_DS_IN_ERROR);
2156 ts->resp = SAS_TASK_UNDELIVERED;
2157 ts->stat = SAS_QUEUE_FULL;
2158 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2159 mb();/*ditto*/
2160 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
2161 t->task_done(t);
2162 spin_lock_irqsave(&pm8001_ha->lock, flags);
2163 return;
2164 }
2165 break;
2166 case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
2167 PM8001_IO_DBG(pm8001_ha,
2168 pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
2169 ts->resp = SAS_TASK_COMPLETE;
2170 ts->stat = SAS_OPEN_REJECT;
2171 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
 break;
2172 default:
2173 PM8001_IO_DBG(pm8001_ha,
2174 pm8001_printk("Unknown status 0x%x\n", status));
2175 /* not allowed case. Therefore, return failed status */
2176 ts->resp = SAS_TASK_COMPLETE;
2177 ts->stat = SAS_DEV_NO_RESPONSE;
2178 break;
2179 }
2180 spin_lock_irqsave(&t->task_state_lock, flags);
2181 t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
2182 t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
2183 t->task_state_flags |= SAS_TASK_STATE_DONE;
2184 if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
2185 spin_unlock_irqrestore(&t->task_state_lock, flags);
2186 PM8001_FAIL_DBG(pm8001_ha,
2187 pm8001_printk("task 0x%p done with io_status 0x%x"
2188 " resp 0x%x stat 0x%x but aborted by upper layer!\n",
2189 t, status, ts->resp, ts->stat));
2190 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2191 } else if (t->uldd_task) {
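 /* As in the error cases above, pm8001_ha->lock is held by the caller
  * and must be released around task_done(). */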
2192 spin_unlock_irqrestore(&t->task_state_lock, flags);
2193 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2194 mb();/* ditto */
2195 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
2196 t->task_done(t);
2197 spin_lock_irqsave(&pm8001_ha->lock, flags);
2198 } else if (!t->uldd_task) {
2199 spin_unlock_irqrestore(&t->task_state_lock, flags);
2200 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2201 mb();/*ditto*/
2202 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
2203 t->task_done(t);
2204 spin_lock_irqsave(&pm8001_ha->lock, flags);
2205 }
2206}
2207
2208/* See the comments for mpi_ssp_completion */
2209static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
2210{
2211 struct sas_task *t;
2212 unsigned long flags = 0;
2213 struct task_status_struct *ts;
2214 struct pm8001_ccb_info *ccb;
2215 struct pm8001_device *pm8001_dev;
2216 struct sata_event_resp *psataPayload =
2217 (struct sata_event_resp *)(piomb + 4);
2218 u32 event = le32_to_cpu(psataPayload->event);
2219 u32 tag = le32_to_cpu(psataPayload->tag);
2220 u32 port_id = le32_to_cpu(psataPayload->port_id);
2221 u32 dev_id = le32_to_cpu(psataPayload->device_id);
2222
2223 ccb = &pm8001_ha->ccb_info[tag];
2224 t = ccb->task;
2225 pm8001_dev = ccb->device;
2226 if (event)
2227 PM8001_FAIL_DBG(pm8001_ha,
2228 pm8001_printk("sata IO status 0x%x\n", event));
2229 if (unlikely(!t || !t->lldd_task || !t->dev))
2230 return;
2231 ts = &t->task_status;
2232 PM8001_IO_DBG(pm8001_ha,
2233 pm8001_printk("port_id = %x,device_id = %x\n",
2234 port_id, dev_id));
2235 switch (event) {
2236 case IO_OVERFLOW:
2237 PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n"));
2238 ts->resp = SAS_TASK_COMPLETE;
2239 ts->stat = SAS_DATA_OVERRUN;
2240 ts->residual = 0;
2241 if (pm8001_dev)
2242 pm8001_dev->running_req--;
2243 break;
2244 case IO_XFER_ERROR_BREAK:
2245 PM8001_IO_DBG(pm8001_ha,
2246 pm8001_printk("IO_XFER_ERROR_BREAK\n"));
2247 ts->resp = SAS_TASK_COMPLETE;
2248 ts->stat = SAS_INTERRUPTED;
2249 break;
2250 case IO_XFER_ERROR_PHY_NOT_READY:
2251 PM8001_IO_DBG(pm8001_ha,
2252 pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
2253 ts->resp = SAS_TASK_COMPLETE;
2254 ts->stat = SAS_OPEN_REJECT;
2255 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
2256 break;
2257 case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
2258 PM8001_IO_DBG(pm8001_ha,
2259 pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT"
2260 "_SUPPORTED\n"));
2261 ts->resp = SAS_TASK_COMPLETE;
2262 ts->stat = SAS_OPEN_REJECT;
2263 ts->open_rej_reason = SAS_OREJ_EPROTO;
2264 break;
2265 case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
2266 PM8001_IO_DBG(pm8001_ha,
2267 pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
2268 ts->resp = SAS_TASK_COMPLETE;
2269 ts->stat = SAS_OPEN_REJECT;
2270 ts->open_rej_reason = SAS_OREJ_UNKNOWN;
2271 break;
2272 case IO_OPEN_CNX_ERROR_BREAK:
2273 PM8001_IO_DBG(pm8001_ha,
2274 pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
2275 ts->resp = SAS_TASK_COMPLETE;
2276 ts->stat = SAS_OPEN_REJECT;
2277 ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
2278 break;
2279 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
2280 PM8001_IO_DBG(pm8001_ha,
2281 pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
2282 ts->resp = SAS_TASK_UNDELIVERED;
2283 ts->stat = SAS_DEV_NO_RESPONSE;
2284 if (!t->uldd_task) {
2285 pm8001_handle_event(pm8001_ha,
2286 pm8001_dev,
2287 IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
2288 ts->resp = SAS_TASK_COMPLETE;
2289 ts->stat = SAS_QUEUE_FULL;
2290 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2291 mb();/*ditto*/
2292 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
2293 t->task_done(t);
2294 spin_lock_irqsave(&pm8001_ha->lock, flags);
2295 return;
2296 }
2297 break;
2298 case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
2299 PM8001_IO_DBG(pm8001_ha,
2300 pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
2301 ts->resp = SAS_TASK_UNDELIVERED;
2302 ts->stat = SAS_OPEN_REJECT;
2303 ts->open_rej_reason = SAS_OREJ_BAD_DEST;
2304 break;
2305 case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
2306 PM8001_IO_DBG(pm8001_ha,
2307 pm8001_printk("IO_OPEN_CNX_ERROR_CONNECTION_RATE_"
2308 "NOT_SUPPORTED\n"));
2309 ts->resp = SAS_TASK_COMPLETE;
2310 ts->stat = SAS_OPEN_REJECT;
2311 ts->open_rej_reason = SAS_OREJ_CONN_RATE;
2312 break;
2313 case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
2314 PM8001_IO_DBG(pm8001_ha,
2315 pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
2316 ts->resp = SAS_TASK_COMPLETE;
2317 ts->stat = SAS_OPEN_REJECT;
2318 ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
2319 break;
2320 case IO_XFER_ERROR_NAK_RECEIVED:
2321 PM8001_IO_DBG(pm8001_ha,
2322 pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
2323 ts->resp = SAS_TASK_COMPLETE;
2324 ts->stat = SAS_NAK_R_ERR;
2325 break;
2326 case IO_XFER_ERROR_PEER_ABORTED:
2327 PM8001_IO_DBG(pm8001_ha,
2328 pm8001_printk("IO_XFER_ERROR_PEER_ABORTED\n"));
2329 ts->resp = SAS_TASK_COMPLETE;
2330 ts->stat = SAS_NAK_R_ERR;
2331 break;
2332 case IO_XFER_ERROR_REJECTED_NCQ_MODE:
2333 PM8001_IO_DBG(pm8001_ha,
2334 pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n"));
2335 ts->resp = SAS_TASK_COMPLETE;
2336 ts->stat = SAS_DATA_UNDERRUN;
2337 break;
2338 case IO_XFER_OPEN_RETRY_TIMEOUT:
2339 PM8001_IO_DBG(pm8001_ha,
2340 pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
2341 ts->resp = SAS_TASK_COMPLETE;
2342 ts->stat = SAS_OPEN_TO;
2343 break;
2344 case IO_XFER_ERROR_UNEXPECTED_PHASE:
2345 PM8001_IO_DBG(pm8001_ha,
2346 pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n"));
2347 ts->resp = SAS_TASK_COMPLETE;
2348 ts->stat = SAS_OPEN_TO;
2349 break;
2350 case IO_XFER_ERROR_XFER_RDY_OVERRUN:
2351 PM8001_IO_DBG(pm8001_ha,
2352 pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n"));
2353 ts->resp = SAS_TASK_COMPLETE;
2354 ts->stat = SAS_OPEN_TO;
2355 break;
2356 case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED:
2357 PM8001_IO_DBG(pm8001_ha,
2358 pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n"));
2359 ts->resp = SAS_TASK_COMPLETE;
2360 ts->stat = SAS_OPEN_TO;
2361 break;
2362 case IO_XFER_ERROR_OFFSET_MISMATCH:
2363 PM8001_IO_DBG(pm8001_ha,
2364 pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
2365 ts->resp = SAS_TASK_COMPLETE;
2366 ts->stat = SAS_OPEN_TO;
2367 break;
2368 case IO_XFER_ERROR_XFER_ZERO_DATA_LEN:
2369 PM8001_IO_DBG(pm8001_ha,
2370 pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n"));
2371 ts->resp = SAS_TASK_COMPLETE;
2372 ts->stat = SAS_OPEN_TO;
2373 break;
2374 case IO_XFER_CMD_FRAME_ISSUED:
2375 PM8001_IO_DBG(pm8001_ha,
2376 pm8001_printk("IO_XFER_CMD_FRAME_ISSUED\n"));
2377 break;
2378 case IO_XFER_PIO_SETUP_ERROR:
2379 PM8001_IO_DBG(pm8001_ha,
2380 pm8001_printk("IO_XFER_PIO_SETUP_ERROR\n"));
2381 ts->resp = SAS_TASK_COMPLETE;
2382 ts->stat = SAS_OPEN_TO;
2383 break;
2384 default:
2385 PM8001_IO_DBG(pm8001_ha,
2386 pm8001_printk("Unknown status 0x%x\n", event));
2387 /* not allowed case. Therefore, return failed status */
2388 ts->resp = SAS_TASK_COMPLETE;
2389 ts->stat = SAS_OPEN_TO;
2390 break;
2391 }
2392 spin_lock_irqsave(&t->task_state_lock, flags);
2393 t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
2394 t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
2395 t->task_state_flags |= SAS_TASK_STATE_DONE;
2396 if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
2397 spin_unlock_irqrestore(&t->task_state_lock, flags);
2398 PM8001_FAIL_DBG(pm8001_ha,
2399 pm8001_printk("task 0x%p done with io_status 0x%x"
2400 " resp 0x%x stat 0x%x but aborted by upper layer!\n",
2401 t, event, ts->resp, ts->stat));
2402 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2403 } else if (t->uldd_task) {
2404 spin_unlock_irqrestore(&t->task_state_lock, flags);
2405 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2406 mb();/* ditto */
2407 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
2408 t->task_done(t);
2409 spin_lock_irqsave(&pm8001_ha->lock, flags);
2410 } else if (!t->uldd_task) {
2411 spin_unlock_irqrestore(&t->task_state_lock, flags);
2412 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2413 mb();/*ditto*/
2414 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
2415 t->task_done(t);
2416 spin_lock_irqsave(&pm8001_ha->lock, flags);
2417 }
2418}
2419
2420/* See the comments for mpi_ssp_completion */
2421static void
2422mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2423{
2424 u32 param;
2425 struct sas_task *t;
2426 struct pm8001_ccb_info *ccb;
2427 unsigned long flags;
2428 u32 status;
2429 u32 tag;
2430 struct smp_completion_resp *psmpPayload;
2431 struct task_status_struct *ts;
2432 struct pm8001_device *pm8001_dev;
2433
2434 psmpPayload = (struct smp_completion_resp *)(piomb + 4);
2435 status = le32_to_cpu(psmpPayload->status);
2436 tag = le32_to_cpu(psmpPayload->tag);
2437
2438 ccb = &pm8001_ha->ccb_info[tag];
2439 param = le32_to_cpu(psmpPayload->param);
2440 t = ccb->task;
2441 pm8001_dev = ccb->device;
2442 if (status)
2443 PM8001_FAIL_DBG(pm8001_ha,
2444 pm8001_printk("smp IO status 0x%x\n", status));
2445 if (unlikely(!t || !t->lldd_task || !t->dev))
2446 return;
2447 ts = &t->task_status;
2448
2449 switch (status) {
2450 case IO_SUCCESS:
2451 PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
2452 ts->resp = SAS_TASK_COMPLETE;
2453 ts->stat = SAM_GOOD;
2454 if (pm8001_dev)
2455 pm8001_dev->running_req--;
2456 break;
2457 case IO_ABORTED:
2458 PM8001_IO_DBG(pm8001_ha,
2459 pm8001_printk("IO_ABORTED IOMB\n"));
2460 ts->resp = SAS_TASK_COMPLETE;
2461 ts->stat = SAS_ABORTED_TASK;
2462 if (pm8001_dev)
2463 pm8001_dev->running_req--;
2464 break;
2465 case IO_OVERFLOW:
2466 PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n"));
2467 ts->resp = SAS_TASK_COMPLETE;
2468 ts->stat = SAS_DATA_OVERRUN;
2469 ts->residual = 0;
2470 if (pm8001_dev)
2471 pm8001_dev->running_req--;
2472 break;
2473 case IO_NO_DEVICE:
2474 PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_NO_DEVICE\n"));
2475 ts->resp = SAS_TASK_COMPLETE;
2476 ts->stat = SAS_PHY_DOWN;
2477 break;
2478 case IO_ERROR_HW_TIMEOUT:
2479 PM8001_IO_DBG(pm8001_ha,
2480 pm8001_printk("IO_ERROR_HW_TIMEOUT\n"));
2481 ts->resp = SAS_TASK_COMPLETE;
2482 ts->stat = SAM_BUSY;
2483 break;
2484 case IO_XFER_ERROR_BREAK:
2485 PM8001_IO_DBG(pm8001_ha,
2486 pm8001_printk("IO_XFER_ERROR_BREAK\n"));
2487 ts->resp = SAS_TASK_COMPLETE;
2488 ts->stat = SAM_BUSY;
2489 break;
2490 case IO_XFER_ERROR_PHY_NOT_READY:
2491 PM8001_IO_DBG(pm8001_ha,
2492 pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
2493 ts->resp = SAS_TASK_COMPLETE;
2494 ts->stat = SAM_BUSY;
2495 break;
2496 case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
2497 PM8001_IO_DBG(pm8001_ha,
2498 pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
2499 ts->resp = SAS_TASK_COMPLETE;
2500 ts->stat = SAS_OPEN_REJECT;
2501 ts->open_rej_reason = SAS_OREJ_UNKNOWN;
2502 break;
2503 case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
2504 PM8001_IO_DBG(pm8001_ha,
2505 pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
2506 ts->resp = SAS_TASK_COMPLETE;
2507 ts->stat = SAS_OPEN_REJECT;
2508 ts->open_rej_reason = SAS_OREJ_UNKNOWN;
2509 break;
2510 case IO_OPEN_CNX_ERROR_BREAK:
2511 PM8001_IO_DBG(pm8001_ha,
2512 pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
2513 ts->resp = SAS_TASK_COMPLETE;
2514 ts->stat = SAS_OPEN_REJECT;
2515 ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
2516 break;
2517 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
2518 PM8001_IO_DBG(pm8001_ha,
2519 pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
2520 ts->resp = SAS_TASK_COMPLETE;
2521 ts->stat = SAS_OPEN_REJECT;
2522 ts->open_rej_reason = SAS_OREJ_UNKNOWN;
2523 pm8001_handle_event(pm8001_ha,
2524 pm8001_dev,
2525 IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
2526 break;
2527 case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
2528 PM8001_IO_DBG(pm8001_ha,
2529 pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
2530 ts->resp = SAS_TASK_COMPLETE;
2531 ts->stat = SAS_OPEN_REJECT;
2532 ts->open_rej_reason = SAS_OREJ_BAD_DEST;
2533 break;
2534 case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
2535 PM8001_IO_DBG(pm8001_ha,
2536 pm8001_printk("IO_OPEN_CNX_ERROR_CONNECTION_RATE_"
2537 "NOT_SUPPORTED\n"));
2538 ts->resp = SAS_TASK_COMPLETE;
2539 ts->stat = SAS_OPEN_REJECT;
2540 ts->open_rej_reason = SAS_OREJ_CONN_RATE;
2541 break;
2542 case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
2543 PM8001_IO_DBG(pm8001_ha,
2544 pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
2545 ts->resp = SAS_TASK_COMPLETE;
2546 ts->stat = SAS_OPEN_REJECT;
2547 ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
2548 break;
2549 case IO_XFER_ERROR_RX_FRAME:
2550 PM8001_IO_DBG(pm8001_ha,
2551 pm8001_printk("IO_XFER_ERROR_RX_FRAME\n"));
2552 ts->resp = SAS_TASK_COMPLETE;
2553 ts->stat = SAS_DEV_NO_RESPONSE;
2554 break;
2555 case IO_XFER_OPEN_RETRY_TIMEOUT:
2556 PM8001_IO_DBG(pm8001_ha,
2557 pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
2558 ts->resp = SAS_TASK_COMPLETE;
2559 ts->stat = SAS_OPEN_REJECT;
2560 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
2561 break;
2562 case IO_ERROR_INTERNAL_SMP_RESOURCE:
2563 PM8001_IO_DBG(pm8001_ha,
2564 pm8001_printk("IO_ERROR_INTERNAL_SMP_RESOURCE\n"));
2565 ts->resp = SAS_TASK_COMPLETE;
2566 ts->stat = SAS_QUEUE_FULL;
2567 break;
2568 case IO_PORT_IN_RESET:
2569 PM8001_IO_DBG(pm8001_ha,
2570 pm8001_printk("IO_PORT_IN_RESET\n"));
2571 ts->resp = SAS_TASK_COMPLETE;
2572 ts->stat = SAS_OPEN_REJECT;
2573 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
2574 break;
2575 case IO_DS_NON_OPERATIONAL:
2576 PM8001_IO_DBG(pm8001_ha,
2577 pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
2578 ts->resp = SAS_TASK_COMPLETE;
2579 ts->stat = SAS_DEV_NO_RESPONSE;
2580 break;
2581 case IO_DS_IN_RECOVERY:
2582 PM8001_IO_DBG(pm8001_ha,
2583 pm8001_printk("IO_DS_IN_RECOVERY\n"));
2584 ts->resp = SAS_TASK_COMPLETE;
2585 ts->stat = SAS_OPEN_REJECT;
2586 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
2587 break;
2588 case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
2589 PM8001_IO_DBG(pm8001_ha,
2590 pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
2591 ts->resp = SAS_TASK_COMPLETE;
2592 ts->stat = SAS_OPEN_REJECT;
2593 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
2594 break;
2595 default:
2596 PM8001_IO_DBG(pm8001_ha,
2597 pm8001_printk("Unknown status 0x%x\n", status));
2598 ts->resp = SAS_TASK_COMPLETE;
2599 ts->stat = SAS_DEV_NO_RESPONSE;
2600 /* not allowed case. Therefore, return failed status */
2601 break;
2602 }
2603 spin_lock_irqsave(&t->task_state_lock, flags);
2604 t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
2605 t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
2606 t->task_state_flags |= SAS_TASK_STATE_DONE;
2607 if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
2608 spin_unlock_irqrestore(&t->task_state_lock, flags);
2609 PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("task 0x%p done with"
2610 " io_status 0x%x resp 0x%x "
2611 "stat 0x%x but aborted by upper layer!\n",
2612 t, status, ts->resp, ts->stat));
2613 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2614 } else {
2615 spin_unlock_irqrestore(&t->task_state_lock, flags);
2616 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2617 mb();/* in order to force CPU ordering */
2618 t->task_done(t);
2619 }
2620}
2621
2622static void
2623mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
2624{
2625 struct set_dev_state_resp *pPayload =
2626 (struct set_dev_state_resp *)(piomb + 4);
2627 u32 tag = le32_to_cpu(pPayload->tag);
2628 struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag];
2629 struct pm8001_device *pm8001_dev = ccb->device;
2630 u32 status = le32_to_cpu(pPayload->status);
2631 u32 device_id = le32_to_cpu(pPayload->device_id);
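 /* pds_nds carries both the previous and the new device state in one
  * dword; mask out each field separately. */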
2632 u8 pds = le32_to_cpu(pPayload->pds_nds) & PDS_BITS;
2633 u8 nds = le32_to_cpu(pPayload->pds_nds) & NDS_BITS;
2634 PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Set device id = 0x%x state "
2635 "from 0x%x to 0x%x status = 0x%x!\n",
2636 device_id, pds, nds, status));
2637 complete(pm8001_dev->setds_completion);
2638 ccb->task = NULL;
2639 ccb->ccb_tag = 0xFFFFFFFF;
2640 pm8001_ccb_free(pm8001_ha, tag);
2641}
2642
2643static void
2644mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
2645{
2646 struct get_nvm_data_resp *pPayload =
2647 (struct get_nvm_data_resp *)(piomb + 4);
2648 u32 tag = le32_to_cpu(pPayload->tag);
2649 struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag];
2650 u32 dlen_status = le32_to_cpu(pPayload->dlen_status);
2651 complete(pm8001_ha->nvmd_completion);
2652 PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Set nvm data complete!\n"));
2653 if ((dlen_status & NVMD_STAT) != 0) {
2654 PM8001_FAIL_DBG(pm8001_ha,
2655 pm8001_printk("Set nvm data error!\n"));
2656 /* free the ccb on the error path too, so the tag is not leaked */
 ccb->task = NULL;
 ccb->ccb_tag = 0xFFFFFFFF;
 pm8001_ccb_free(pm8001_ha, tag);
 return;
2657 }
2658 ccb->task = NULL;
2659 ccb->ccb_tag = 0xFFFFFFFF;
2660 pm8001_ccb_free(pm8001_ha, tag);
2661}
2662
2663static void
2664mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
2665{
2666 struct fw_control_ex *fw_control_context;
2667 struct get_nvm_data_resp *pPayload =
2668 (struct get_nvm_data_resp *)(piomb + 4);
2669 u32 tag = le32_to_cpu(pPayload->tag);
2670 struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag];
2671 u32 dlen_status = le32_to_cpu(pPayload->dlen_status);
2672 u32 ir_tds_bn_dps_das_nvm =
2673 le32_to_cpu(pPayload->ir_tda_bn_dps_das_nvm);
2674 void *virt_addr = pm8001_ha->memoryMap.region[NVMD].virt_ptr;
2675 fw_control_context = ccb->fw_control_context;
2676
2677 PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Get nvm data complete!\n"));
2678 if ((dlen_status & NVMD_STAT) != 0) {
2679 PM8001_FAIL_DBG(pm8001_ha,
2680 pm8001_printk("Get nvm data error!\n"));
2681 complete(pm8001_ha->nvmd_completion);
2682 /* free the ccb on the error path too, so the tag is not leaked */
 ccb->task = NULL;
 ccb->ccb_tag = 0xFFFFFFFF;
 pm8001_ccb_free(pm8001_ha, tag);
 return;
2683 }
2684
2685 if (ir_tds_bn_dps_das_nvm & IPMode) {
2686 /* indirect mode - IR bit set */
2687 PM8001_MSG_DBG(pm8001_ha,
2688 pm8001_printk("Get NVMD success, IR=1\n"));
2689 if ((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == TWI_DEVICE) {
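 /* 0x80a80200 appears to identify the TWI VPD image whose SAS address
  * is stored at byte offset 4. */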
2690 if (ir_tds_bn_dps_das_nvm == 0x80a80200) {
2691 memcpy(pm8001_ha->sas_addr,
2692 ((u8 *)virt_addr + 4),
2693 SAS_ADDR_SIZE);
2694 PM8001_MSG_DBG(pm8001_ha,
2695 pm8001_printk("Get SAS address"
2696 " from VPD successfully!\n"));
2697 }
2698 } else if (((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == C_SEEPROM)
2699 || ((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == VPD_FLASH) ||
2700 ((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == EXPAN_ROM)) {
2701 ;
2702 } else if (((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == AAP1_RDUMP)
2703 || ((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == IOP_RDUMP)) {
2704 ;
2705 } else {
2706 /* Should not happen */
2707 PM8001_MSG_DBG(pm8001_ha,
2708 pm8001_printk("(IR=1)Wrong Device type 0x%x\n",
2709 ir_tds_bn_dps_das_nvm));
2710 }
2711 } else { /* direct mode */
2712 PM8001_MSG_DBG(pm8001_ha,
2713 pm8001_printk("Get NVMD success, IR=0, dataLen=%d\n",
2714 (dlen_status & NVMD_LEN) >> 24));
2715 }
2716 memcpy(fw_control_context->usrAddr,
2717 pm8001_ha->memoryMap.region[NVMD].virt_ptr,
2718 fw_control_context->len);
2719 complete(pm8001_ha->nvmd_completion);
2720 ccb->task = NULL;
2721 ccb->ccb_tag = 0xFFFFFFFF;
2722 pm8001_ccb_free(pm8001_ha, tag);
2723}
2724
2725static int mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb)
2726{
2727 struct local_phy_ctl_resp *pPayload =
2728 (struct local_phy_ctl_resp *)(piomb + 4);
2729 u32 status = le32_to_cpu(pPayload->status);
2730 u32 phy_id = le32_to_cpu(pPayload->phyop_phyid) & ID_BITS;
2731 u32 phy_op = le32_to_cpu(pPayload->phyop_phyid) & OP_BITS;
2732 if (status != 0) {
2733 PM8001_MSG_DBG(pm8001_ha,
2734 pm8001_printk("%x phy execute %x phy op failed! \n",
2735 phy_id, phy_op));
2736 } else
2737 PM8001_MSG_DBG(pm8001_ha,
2738 pm8001_printk("%x phy execute %x phy op success! \n",
2739 phy_id, phy_op));
2740 return 0;
2741}
2742
2743/**
2744 * pm8001_bytes_dmaed - one of the interface functions for communicating with libsas
2745 * @pm8001_ha: our hba card information
2746 * @i: which phy that received the event.
2747 *
2748 * When the HBA driver receives the identify-done event, or the initial
2749 * FIS received event (for SATA), it invokes this function to tell the sas
2750 * layer that the sas topology has formed and the whole sas domain should
2751 * be discovered, while a broadcast (change) primitive only tells the sas
2752 * layer to discover the changed domain rather than the whole domain.
2753 */
2754static void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i)
2755{
2756 struct pm8001_phy *phy = &pm8001_ha->phy[i];
2757 struct asd_sas_phy *sas_phy = &phy->sas_phy;
2758 struct sas_ha_struct *sas_ha;
2759 if (!phy->phy_attached)
2760 return;
2761
2762 sas_ha = pm8001_ha->sas;
2763 if (sas_phy->phy) {
2764 struct sas_phy *sphy = sas_phy->phy;
2765 sphy->negotiated_linkrate = sas_phy->linkrate;
2766 sphy->minimum_linkrate = phy->minimum_linkrate;
2767 sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
2768 sphy->maximum_linkrate = phy->maximum_linkrate;
2769 sphy->maximum_linkrate_hw = phy->maximum_linkrate;
2770 }
2771
2772 if (phy->phy_type & PORT_TYPE_SAS) {
2773 struct sas_identify_frame *id;
2774 id = (struct sas_identify_frame *)phy->frame_rcvd;
2775 id->dev_type = phy->identify.device_type;
2776 id->initiator_bits = SAS_PROTOCOL_ALL;
2777 id->target_bits = phy->identify.target_port_protocols;
2778 } else if (phy->phy_type & PORT_TYPE_SATA) {
2779 /*Nothing*/
2780 }
2781 PM8001_MSG_DBG(pm8001_ha, pm8001_printk("phy %d bytes dmaed.\n", i));
2782
2783 sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
2784 sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
2785}
2786
2787/* Get the link rate speed */
2788static void get_lrate_mode(struct pm8001_phy *phy, u8 link_rate)
2789{
2790 struct sas_phy *sas_phy = phy->sas_phy.phy;
2791
2792 switch (link_rate) {
2793 case PHY_SPEED_60:
2794 phy->sas_phy.linkrate = SAS_LINK_RATE_6_0_GBPS;
2795 phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_6_0_GBPS;
2796 break;
2797 case PHY_SPEED_30:
2798 phy->sas_phy.linkrate = SAS_LINK_RATE_3_0_GBPS;
2799 phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_3_0_GBPS;
2800 break;
2801 case PHY_SPEED_15:
2802 phy->sas_phy.linkrate = SAS_LINK_RATE_1_5_GBPS;
2803 phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_1_5_GBPS;
2804 break;
2805 }
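 /* SPC phys support 1.5 to 6.0 Gbps, so report those as the hardware
  * limits regardless of the negotiated rate. */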
2806 sas_phy->negotiated_linkrate = phy->sas_phy.linkrate;
2807 sas_phy->maximum_linkrate_hw = SAS_LINK_RATE_6_0_GBPS;
2808 sas_phy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
2809 sas_phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS;
2810 sas_phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
2811}
2812
2813/**
2814 * pm8001_get_attached_sas_addr - extract/generate attached SAS address
2815 * @phy: pointer to pm8001_phy
2816 * @sas_addr: pointer to buffer where the SAS address is to be written
2817 *
2818 * This function extracts the SAS address from an IDENTIFY frame
2819 * received. If OOB is SATA, then a SAS address is generated from the
2820 * HA tables.
2821 *
2822 * LOCKING: the frame_rcvd_lock needs to be held since this parses the frame
2823 * buffer.
2824 */
2825static void pm8001_get_attached_sas_addr(struct pm8001_phy *phy,
2826 u8 *sas_addr)
2827{
2828 if (phy->sas_phy.frame_rcvd[0] == 0x34
2829 && phy->sas_phy.oob_mode == SATA_OOB_MODE) {
2830 struct pm8001_hba_info *pm8001_ha = phy->sas_phy.ha->lldd_ha;
2831 /* FIS device-to-host */
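 /* SATA devices carry no SAS address, so synthesize a unique attached
  * address from the HA address plus the phy id. */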
2832 u64 addr = be64_to_cpu(*(__be64 *)pm8001_ha->sas_addr);
2833 addr += phy->sas_phy.id;
2834 *(__be64 *)sas_addr = cpu_to_be64(addr);
2835 } else {
2836 struct sas_identify_frame *idframe =
2837 (void *) phy->sas_phy.frame_rcvd;
2838 memcpy(sas_addr, idframe->sas_addr, SAS_ADDR_SIZE);
2839 }
2840}
2841
2842/**
2843 * pm8001_hw_event_ack_req - for PM8001, some events need to be acknowledged to the FW.
2844 * @pm8001_ha: our hba card information
2845 * @Qnum: the outbound queue message number.
2846 * @SEA: source of event to ack
2847 * @port_id: port id.
2848 * @phyId: phy id.
2849 * @param0: parameter 0.
2850 * @param1: parameter 1.
2851 */
2852static void pm8001_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha,
2853 u32 Qnum, u32 SEA, u32 port_id, u32 phyId, u32 param0, u32 param1)
2854{
2855 struct hw_event_ack_req payload;
2856 u32 opc = OPC_INB_SAS_HW_EVENT_ACK;
2857
2858 struct inbound_queue_table *circularQ;
2859
2860 memset((u8 *)&payload, 0, sizeof(payload));
2861 circularQ = &pm8001_ha->inbnd_q_tbl[Qnum];
2862 payload.tag = cpu_to_le32(1);
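 /* The ack IOMB packs the event source (SEA) into bits 23:8, the phy id
  * into bits 7:4 and the port id into bits 3:0. */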
2863 payload.sea_phyid_portid = cpu_to_le32(((SEA & 0xFFFF) << 8) |
2864 ((phyId & 0x0F) << 4) | (port_id & 0x0F));
2865 payload.param0 = cpu_to_le32(param0);
2866 payload.param1 = cpu_to_le32(param1);
2867 mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
2868}
2869
2870static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
2871 u32 phyId, u32 phy_op);
2872
2873/**
2874 * hw_event_sas_phy_up - FW notifies us of a SAS phy up event.
2875 * @pm8001_ha: our hba card information
2876 * @piomb: IO message buffer
2877 */
2878static void
2879hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
2880{
2881 struct hw_event_resp *pPayload =
2882 (struct hw_event_resp *)(piomb + 4);
2883 u32 lr_evt_status_phyid_portid =
2884 le32_to_cpu(pPayload->lr_evt_status_phyid_portid);
2885 u8 link_rate =
2886 (u8)((lr_evt_status_phyid_portid & 0xF0000000) >> 28);
2887 u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F);
2888 u8 phy_id =
2889 (u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
2890 u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
2891 u8 portstate = (u8)(npip_portstate & 0x0000000F);
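 /* lr_evt_status_phyid_portid packs the link rate (bits 31:28), the
  * event status, the phy id (bits 7:4) and the port id (bits 3:0) into
  * one dword; the port state is the low nibble of npip_portstate. */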
2892 struct pm8001_port *port = &pm8001_ha->port[port_id];
2893 struct sas_ha_struct *sas_ha = pm8001_ha->sas;
2894 struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
2895 unsigned long flags;
2896 u8 deviceType = pPayload->sas_identify.dev_type;
2897 port->port_state = portstate;
2898 PM8001_MSG_DBG(pm8001_ha,
2899 pm8001_printk("HW_EVENT_SAS_PHY_UP port id = %d, phy id = %d\n",
2900 port_id, phy_id));
2901
2902 switch (deviceType) {
2903 case SAS_PHY_UNUSED:
2904 PM8001_MSG_DBG(pm8001_ha,
2905 pm8001_printk("device type no device.\n"));
2906 break;
2907 case SAS_END_DEVICE:
2908 PM8001_MSG_DBG(pm8001_ha, pm8001_printk("end device.\n"));
2909 pm8001_chip_phy_ctl_req(pm8001_ha, phy_id,
2910 PHY_NOTIFY_ENABLE_SPINUP);
2911 port->port_attached = 1;
2912 get_lrate_mode(phy, link_rate);
2913 break;
2914 case SAS_EDGE_EXPANDER_DEVICE:
2915 PM8001_MSG_DBG(pm8001_ha,
2916 pm8001_printk("expander device.\n"));
2917 port->port_attached = 1;
2918 get_lrate_mode(phy, link_rate);
2919 break;
2920 case SAS_FANOUT_EXPANDER_DEVICE:
2921 PM8001_MSG_DBG(pm8001_ha,
2922 pm8001_printk("fanout expander device.\n"));
2923 port->port_attached = 1;
2924 get_lrate_mode(phy, link_rate);
2925 break;
2926 default:
2927 PM8001_MSG_DBG(pm8001_ha,
2928 pm8001_printk("unknown device type(%x)\n", deviceType));
2929 break;
2930 }
2931 phy->phy_type |= PORT_TYPE_SAS;
2932 phy->identify.device_type = deviceType;
2933 phy->phy_attached = 1;
2934 if (phy->identify.device_type == SAS_END_DEV)
2935 phy->identify.target_port_protocols = SAS_PROTOCOL_SSP;
2936 else if (phy->identify.device_type != NO_DEVICE)
2937 phy->identify.target_port_protocols = SAS_PROTOCOL_SMP;
2938 phy->sas_phy.oob_mode = SAS_OOB_MODE;
2939 sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
2940 spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
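 /* Copy the identify frame without its trailing CRC dword, hence the
  * "- 4" on both the copy and the recorded size. */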
2941 memcpy(phy->frame_rcvd, &pPayload->sas_identify,
2942 sizeof(struct sas_identify_frame)-4);
2943 phy->frame_rcvd_size = sizeof(struct sas_identify_frame) - 4;
2944 pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
2945 spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
2946 if (pm8001_ha->flags == PM8001F_RUN_TIME)
2947 mdelay(200);/*delay a moment to wait disk to spinup*/
2948 pm8001_bytes_dmaed(pm8001_ha, phy_id);
2949}
2950
2951/**
2952 * hw_event_sata_phy_up - FW notifies us of a SATA phy up event.
2953 * @pm8001_ha: our hba card information
2954 * @piomb: IO message buffer
2955 */
2956static void
2957hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
2958{
2959 struct hw_event_resp *pPayload =
2960 (struct hw_event_resp *)(piomb + 4);
2961 u32 lr_evt_status_phyid_portid =
2962 le32_to_cpu(pPayload->lr_evt_status_phyid_portid);
2963 u8 link_rate =
2964 (u8)((lr_evt_status_phyid_portid & 0xF0000000) >> 28);
2965 u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F);
2966 u8 phy_id =
2967 (u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
2968 u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
2969 u8 portstate = (u8)(npip_portstate & 0x0000000F);
2970 struct pm8001_port *port = &pm8001_ha->port[port_id];
2971 struct sas_ha_struct *sas_ha = pm8001_ha->sas;
2972 struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
2973 unsigned long flags;
2974 PM8001_MSG_DBG(pm8001_ha,
2975 pm8001_printk("HW_EVENT_SATA_PHY_UP port id = %d,"
2976 " phy id = %d\n", port_id, phy_id));
2977 port->port_state = portstate;
2978 port->port_attached = 1;
2979 get_lrate_mode(phy, link_rate);
2980 phy->phy_type |= PORT_TYPE_SATA;
2981 phy->phy_attached = 1;
2982 phy->sas_phy.oob_mode = SATA_OOB_MODE;
2983 sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
2984 spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
2985 memcpy(phy->frame_rcvd, ((u8 *)&pPayload->sata_fis - 4),
2986 sizeof(struct dev_to_host_fis));
2987 phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
2988 phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
2989 phy->identify.device_type = SATA_DEV;
2990 pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
2991 spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
2992 pm8001_bytes_dmaed(pm8001_ha, phy_id);
2993}
2994
2995/**
2996 * hw_event_phy_down - notify libsas that the phy is down.
2997 * @pm8001_ha: our hba card information
2998 * @piomb: IO message buffer
2999 */
3000static void
3001hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
3002{
3003 struct hw_event_resp *pPayload =
3004 (struct hw_event_resp *)(piomb + 4);
3005 u32 lr_evt_status_phyid_portid =
3006 le32_to_cpu(pPayload->lr_evt_status_phyid_portid);
3007 u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F);
3008 u8 phy_id =
3009 (u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
3010 u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
3011 u8 portstate = (u8)(npip_portstate & 0x0000000F);
3012 struct pm8001_port *port = &pm8001_ha->port[port_id];
3013 struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
3014 port->port_state = portstate;
3015 phy->phy_type = 0;
3016 phy->identify.device_type = 0;
3017 phy->phy_attached = 0;
3018 memset(&phy->dev_sas_addr, 0, SAS_ADDR_SIZE);
3019 switch (portstate) {
3020 case PORT_VALID:
3021 break;
3022 case PORT_INVALID:
3023 PM8001_MSG_DBG(pm8001_ha,
3024 pm8001_printk(" PortInvalid portID %d \n", port_id));
3025 PM8001_MSG_DBG(pm8001_ha,
3026 pm8001_printk(" Last phy Down and port invalid\n"));
3027 port->port_attached = 0;
3028 pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
3029 port_id, phy_id, 0, 0);
3030 break;
3031 case PORT_IN_RESET:
3032 PM8001_MSG_DBG(pm8001_ha,
3033 pm8001_printk(" Port In Reset portID %d \n", port_id));
3034 break;
3035 case PORT_NOT_ESTABLISHED:
3036 PM8001_MSG_DBG(pm8001_ha,
3037 pm8001_printk(" phy Down and PORT_NOT_ESTABLISHED\n"));
3038 port->port_attached = 0;
3039 break;
3040 case PORT_LOSTCOMM:
3041 PM8001_MSG_DBG(pm8001_ha,
3042 pm8001_printk(" phy Down and PORT_LOSTCOMM\n"));
3043 PM8001_MSG_DBG(pm8001_ha,
3044 pm8001_printk(" Last phy Down and port invalid\n"));
3045 port->port_attached = 0;
3046 pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
3047 port_id, phy_id, 0, 0);
3048 break;
3049 default:
3050 port->port_attached = 0;
3051 PM8001_MSG_DBG(pm8001_ha,
3052 pm8001_printk(" phy Down and(default) = %x\n",
3053 portstate));
3054 break;
3055
3056 }
3057}
3058
3059/**
3060 * mpi_reg_resp - process the register device ID response.
3061 * @pm8001_ha: our hba card information
3062 * @piomb: IO message buffer
3063 *
3064 * When the sas layer finds a device it notifies the LLDD, and the driver
3065 * registers the domain device with the FW. This event returns the device
3066 * ID that the FW has assigned; from now on, inter-communication with the
3067 * FW uses this device ID rather than the SAS address.
3068 */
3069static int mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
3070{
3071 u32 status;
3072 u32 device_id;
3073 u32 htag;
3074 struct pm8001_ccb_info *ccb;
3075 struct pm8001_device *pm8001_dev;
3076 struct dev_reg_resp *registerRespPayload =
3077 (struct dev_reg_resp *)(piomb + 4);
3078
3079 htag = le32_to_cpu(registerRespPayload->tag);
3080 ccb = &pm8001_ha->ccb_info[registerRespPayload->tag];
3081 pm8001_dev = ccb->device;
3082 status = le32_to_cpu(registerRespPayload->status);
3083 device_id = le32_to_cpu(registerRespPayload->device_id);
3084 PM8001_MSG_DBG(pm8001_ha,
3085 pm8001_printk(" register device is status = %d\n", status));
3086 switch (status) {
3087 case DEVREG_SUCCESS:
3088 PM8001_MSG_DBG(pm8001_ha, pm8001_printk("DEVREG_SUCCESS\n"));
3089 pm8001_dev->device_id = device_id;
3090 break;
3091 case DEVREG_FAILURE_OUT_OF_RESOURCE:
3092 PM8001_MSG_DBG(pm8001_ha,
3093 pm8001_printk("DEVREG_FAILURE_OUT_OF_RESOURCE\n"));
3094 break;
3095 case DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED:
3096 PM8001_MSG_DBG(pm8001_ha,
3097 pm8001_printk("DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED\n"));
3098 break;
3099 case DEVREG_FAILURE_INVALID_PHY_ID:
3100 PM8001_MSG_DBG(pm8001_ha,
3101 pm8001_printk("DEVREG_FAILURE_INVALID_PHY_ID\n"));
3102 break;
3103 case DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED:
3104 PM8001_MSG_DBG(pm8001_ha,
3105 pm8001_printk("DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED\n"));
3106 break;
3107 case DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE:
3108 PM8001_MSG_DBG(pm8001_ha,
3109 pm8001_printk("DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE\n"));
3110 break;
3111 case DEVREG_FAILURE_PORT_NOT_VALID_STATE:
3112 PM8001_MSG_DBG(pm8001_ha,
3113 pm8001_printk("DEVREG_FAILURE_PORT_NOT_VALID_STATE\n"));
3114 break;
3115 case DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID:
3116 PM8001_MSG_DBG(pm8001_ha,
3117 pm8001_printk("DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID\n"));
3118 break;
3119 default:
3120 PM8001_MSG_DBG(pm8001_ha,
3121 pm8001_printk("DEVREG_FAILURE_DEVICE_TYPE_NOT_UNSORPORTED\n"));
3122 break;
3123 }
3124 complete(pm8001_dev->dcompletion);
3125 ccb->task = NULL;
3126 ccb->ccb_tag = 0xFFFFFFFF;
3127 pm8001_ccb_free(pm8001_ha, htag);
3128 return 0;
3129}
3130
3131static int mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
3132{
3133 u32 status;
3134 u32 device_id;
3135 struct dev_reg_resp *registerRespPayload =
3136 (struct dev_reg_resp *)(piomb + 4);
3137
3138 status = le32_to_cpu(registerRespPayload->status);
3139 device_id = le32_to_cpu(registerRespPayload->device_id);
3140 if (status != 0)
3141 PM8001_MSG_DBG(pm8001_ha,
3142 pm8001_printk(" deregister device failed ,status = %x"
3143 ", device_id = %x\n", status, device_id));
3144 return 0;
3145}
3146
3147static int
3148mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
3149{
3150 u32 status;
3151 struct fw_control_ex fw_control_context;
3152 struct fw_flash_Update_resp *ppayload =
3153 (struct fw_flash_Update_resp *)(piomb + 4);
3154 u32 tag = le32_to_cpu(ppayload->tag);
3155 struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag];
3156 status = le32_to_cpu(ppayload->status);
3157 memcpy(&fw_control_context,
3158 ccb->fw_control_context,
3159 sizeof(fw_control_context));
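 /* Snapshot the context first: the DMA buffer is freed further down
  * using the copied length and addresses. */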
3160 switch (status) {
3161 case FLASH_UPDATE_COMPLETE_PENDING_REBOOT:
3162 PM8001_MSG_DBG(pm8001_ha,
3163 pm8001_printk(": FLASH_UPDATE_COMPLETE_PENDING_REBOOT\n"));
3164 break;
3165 case FLASH_UPDATE_IN_PROGRESS:
3166 PM8001_MSG_DBG(pm8001_ha,
3167 pm8001_printk(": FLASH_UPDATE_IN_PROGRESS\n"));
3168 break;
3169 case FLASH_UPDATE_HDR_ERR:
3170 PM8001_MSG_DBG(pm8001_ha,
3171 pm8001_printk(": FLASH_UPDATE_HDR_ERR\n"));
3172 break;
3173 case FLASH_UPDATE_OFFSET_ERR:
3174 PM8001_MSG_DBG(pm8001_ha,
3175 pm8001_printk(": FLASH_UPDATE_OFFSET_ERR\n"));
3176 break;
3177 case FLASH_UPDATE_CRC_ERR:
3178 PM8001_MSG_DBG(pm8001_ha,
3179 pm8001_printk(": FLASH_UPDATE_CRC_ERR\n"));
3180 break;
3181 case FLASH_UPDATE_LENGTH_ERR:
3182 PM8001_MSG_DBG(pm8001_ha,
3183 pm8001_printk(": FLASH_UPDATE_LENGTH_ERR\n"));
3184 break;
3185 case FLASH_UPDATE_HW_ERR:
3186 PM8001_MSG_DBG(pm8001_ha,
3187 pm8001_printk(": FLASH_UPDATE_HW_ERR\n"));
3188 break;
3189 case FLASH_UPDATE_DNLD_NOT_SUPPORTED:
3190 PM8001_MSG_DBG(pm8001_ha,
3191 pm8001_printk(": FLASH_UPDATE_DNLD_NOT_SUPPORTED\n"));
3192 break;
3193 case FLASH_UPDATE_DISABLED:
3194 PM8001_MSG_DBG(pm8001_ha,
3195 pm8001_printk(": FLASH_UPDATE_DISABLED\n"));
3196 break;
3197 default:
3198 PM8001_MSG_DBG(pm8001_ha,
3199 pm8001_printk("No matched status = %d\n", status));
3200 break;
3201 }
3202 ccb->fw_control_context->fw_control->retcode = status;
3203 pci_free_consistent(pm8001_ha->pdev,
3204 fw_control_context.len,
3205 fw_control_context.virtAddr,
3206 fw_control_context.phys_addr);
3207 complete(pm8001_ha->nvmd_completion);
3208 ccb->task = NULL;
3209 ccb->ccb_tag = 0xFFFFFFFF;
3210 pm8001_ccb_free(pm8001_ha, tag);
3211 return 0;
3212}
3213
3214static int
3215mpi_general_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
3216{
3217 u32 status;
3218 int i;
3219 struct general_event_resp *pPayload =
3220 (struct general_event_resp *)(piomb + 4);
3221 status = le32_to_cpu(pPayload->status);
3222 PM8001_MSG_DBG(pm8001_ha,
3223 pm8001_printk(" status = 0x%x\n", status));
3224 for (i = 0; i < GENERAL_EVENT_PAYLOAD; i++)
3225 PM8001_MSG_DBG(pm8001_ha,
3226 pm8001_printk("inb_IOMB_payload[0x%x] 0x%x, \n", i,
3227 pPayload->inb_IOMB_payload[i]));
3228 return 0;
3229}
3230
3231static int
3232mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
3233{
3234 struct sas_task *t;
3235 struct pm8001_ccb_info *ccb;
3236 unsigned long flags;
3237 u32 status;
3238 u32 tag, scp;
3239 struct task_status_struct *ts;
3240
3241 struct task_abort_resp *pPayload =
3242 (struct task_abort_resp *)(piomb + 4);
3243 ccb = &pm8001_ha->ccb_info[pPayload->tag];
3244 t = ccb->task;
3245
3246
3247 status = le32_to_cpu(pPayload->status);
3248 tag = le32_to_cpu(pPayload->tag);
3249 scp = le32_to_cpu(pPayload->scp);
3250 PM8001_IO_DBG(pm8001_ha,
3251 pm8001_printk(" status = 0x%x\n", status));
3252 if (t == NULL)
3253 return -1;
3254 ts = &t->task_status;
3255 if (status != 0)
3256 PM8001_FAIL_DBG(pm8001_ha,
3257 pm8001_printk("task abort failed status 0x%x ,"
3258 "tag = 0x%x, scp= 0x%x\n", status, tag, scp));
3259 switch (status) {
3260 case IO_SUCCESS:
3261 PM8001_EH_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
3262 ts->resp = SAS_TASK_COMPLETE;
3263 ts->stat = SAM_GOOD;
3264 break;
3265 case IO_NOT_VALID:
3266 PM8001_EH_DBG(pm8001_ha, pm8001_printk("IO_NOT_VALID\n"));
3267 ts->resp = TMF_RESP_FUNC_FAILED;
3268 break;
3269 }
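 /* The abort request itself is finished; mark the task done and hand it
  * back to libsas. */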
3270 spin_lock_irqsave(&t->task_state_lock, flags);
3271 t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
3272 t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
3273 t->task_state_flags |= SAS_TASK_STATE_DONE;
3274 spin_unlock_irqrestore(&t->task_state_lock, flags);
3275 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
3276 mb();
3277 t->task_done(t);
3278 return 0;
3279}
3280
3281/**
3282 * mpi_hw_event - a hw event has arrived.
3283 * @pm8001_ha: our hba card information
3284 * @piomb: IO message buffer
3285 */
3286static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
3287{
3288 unsigned long flags;
3289 struct hw_event_resp *pPayload =
3290 (struct hw_event_resp *)(piomb + 4);
3291 u32 lr_evt_status_phyid_portid =
3292 le32_to_cpu(pPayload->lr_evt_status_phyid_portid);
3293 u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F);
3294 u8 phy_id =
3295 (u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
3296 u16 eventType =
3297 (u16)((lr_evt_status_phyid_portid & 0x00FFFF00) >> 8);
3298 u8 status =
3299 (u8)((lr_evt_status_phyid_portid & 0x0F000000) >> 24);
3300 struct sas_ha_struct *sas_ha = pm8001_ha->sas;
3301 struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
3302 struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
3303 PM8001_MSG_DBG(pm8001_ha,
3304 pm8001_printk("outbound queue HW event & event type : "));
3305 switch (eventType) {
3306 case HW_EVENT_PHY_START_STATUS:
3307 PM8001_MSG_DBG(pm8001_ha,
3308 pm8001_printk("HW_EVENT_PHY_START_STATUS"
3309 " status = %x\n", status));
3310 if (status == 0) {
3311 phy->phy_state = 1;
3312 if (pm8001_ha->flags == PM8001F_RUN_TIME)
3313 complete(phy->enable_completion);
3314 }
3315 break;
3316 case HW_EVENT_SAS_PHY_UP:
3317 PM8001_MSG_DBG(pm8001_ha,
3318 pm8001_printk("HW_EVENT_PHY_START_STATUS \n"));
3319 hw_event_sas_phy_up(pm8001_ha, piomb);
3320 break;
3321 case HW_EVENT_SATA_PHY_UP:
3322 PM8001_MSG_DBG(pm8001_ha,
3323 pm8001_printk("HW_EVENT_SATA_PHY_UP \n"));
3324 hw_event_sata_phy_up(pm8001_ha, piomb);
3325 break;
3326 case HW_EVENT_PHY_STOP_STATUS:
3327 PM8001_MSG_DBG(pm8001_ha,
3328 pm8001_printk("HW_EVENT_PHY_STOP_STATUS "
3329 "status = %x\n", status));
3330 if (status == 0)
3331 phy->phy_state = 0;
3332 break;
3333 case HW_EVENT_SATA_SPINUP_HOLD:
3334 PM8001_MSG_DBG(pm8001_ha,
3335 pm8001_printk("HW_EVENT_SATA_SPINUP_HOLD \n"));
3336 sas_ha->notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD);
3337 break;
3338 case HW_EVENT_PHY_DOWN:
3339 PM8001_MSG_DBG(pm8001_ha,
3340 pm8001_printk("HW_EVENT_PHY_DOWN \n"));
3341 sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL);
3342 phy->phy_attached = 0;
3343 phy->phy_state = 0;
3344 hw_event_phy_down(pm8001_ha, piomb);
3345 break;
3346 case HW_EVENT_PORT_INVALID:
3347 PM8001_MSG_DBG(pm8001_ha,
3348 pm8001_printk("HW_EVENT_PORT_INVALID\n"));
3349 sas_phy_disconnected(sas_phy);
3350 phy->phy_attached = 0;
3351 sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
3352 break;
3353 /* a broadcast change primitive was received; tell libsas about this
3354 event so it revalidates the sas domain */
3355 case HW_EVENT_BROADCAST_CHANGE:
3356 PM8001_MSG_DBG(pm8001_ha,
3357 pm8001_printk("HW_EVENT_BROADCAST_CHANGE\n"));
3358 pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_BROADCAST_CHANGE,
3359 port_id, phy_id, 1, 0);
3360 spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
3361 sas_phy->sas_prim = HW_EVENT_BROADCAST_CHANGE;
3362 spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
3363 sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
3364 break;
3365 case HW_EVENT_PHY_ERROR:
3366 PM8001_MSG_DBG(pm8001_ha,
3367 pm8001_printk("HW_EVENT_PHY_ERROR\n"));
3368 sas_phy_disconnected(&phy->sas_phy);
3369 phy->phy_attached = 0;
3370 sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR);
3371 break;
3372 case HW_EVENT_BROADCAST_EXP:
3373 PM8001_MSG_DBG(pm8001_ha,
3374 pm8001_printk("HW_EVENT_BROADCAST_EXP\n"));
3375 spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
3376 sas_phy->sas_prim = HW_EVENT_BROADCAST_EXP;
3377 spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
3378 sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
3379 break;
3380 case HW_EVENT_LINK_ERR_INVALID_DWORD:
3381 PM8001_MSG_DBG(pm8001_ha,
3382 pm8001_printk("HW_EVENT_LINK_ERR_INVALID_DWORD\n"));
3383 pm8001_hw_event_ack_req(pm8001_ha, 0,
3384 HW_EVENT_LINK_ERR_INVALID_DWORD, port_id, phy_id, 0, 0);
3385 sas_phy_disconnected(sas_phy);
3386 phy->phy_attached = 0;
3387 sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
3388 break;
3389 case HW_EVENT_LINK_ERR_DISPARITY_ERROR:
3390 PM8001_MSG_DBG(pm8001_ha,
3391 pm8001_printk("HW_EVENT_LINK_ERR_DISPARITY_ERROR\n"));
3392 pm8001_hw_event_ack_req(pm8001_ha, 0,
3393 HW_EVENT_LINK_ERR_DISPARITY_ERROR,
3394 port_id, phy_id, 0, 0);
3395 sas_phy_disconnected(sas_phy);
3396 phy->phy_attached = 0;
3397 sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
3398 break;
3399 case HW_EVENT_LINK_ERR_CODE_VIOLATION:
3400 PM8001_MSG_DBG(pm8001_ha,
3401 pm8001_printk("HW_EVENT_LINK_ERR_CODE_VIOLATION\n"));
3402 pm8001_hw_event_ack_req(pm8001_ha, 0,
3403 HW_EVENT_LINK_ERR_CODE_VIOLATION,
3404 port_id, phy_id, 0, 0);
3405 sas_phy_disconnected(sas_phy);
3406 phy->phy_attached = 0;
3407 sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
3408 break;
3409 case HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH:
3410 PM8001_MSG_DBG(pm8001_ha,
3411 pm8001_printk("HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH\n"));
3412 pm8001_hw_event_ack_req(pm8001_ha, 0,
3413 HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH,
3414 port_id, phy_id, 0, 0);
3415 sas_phy_disconnected(sas_phy);
3416 phy->phy_attached = 0;
3417 sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
3418 break;
3419 case HW_EVENT_MALFUNCTION:
3420 PM8001_MSG_DBG(pm8001_ha,
3421 pm8001_printk("HW_EVENT_MALFUNCTION\n"));
3422 break;
3423 case HW_EVENT_BROADCAST_SES:
3424 PM8001_MSG_DBG(pm8001_ha,
3425 pm8001_printk("HW_EVENT_BROADCAST_SES\n"));
3426 spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
3427 sas_phy->sas_prim = HW_EVENT_BROADCAST_SES;
3428 spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
3429 sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
3430 break;
3431 case HW_EVENT_INBOUND_CRC_ERROR:
3432 PM8001_MSG_DBG(pm8001_ha,
3433 pm8001_printk("HW_EVENT_INBOUND_CRC_ERROR\n"));
3434 pm8001_hw_event_ack_req(pm8001_ha, 0,
3435 HW_EVENT_INBOUND_CRC_ERROR,
3436 port_id, phy_id, 0, 0);
3437 break;
3438 case HW_EVENT_HARD_RESET_RECEIVED:
3439 PM8001_MSG_DBG(pm8001_ha,
3440 pm8001_printk("HW_EVENT_HARD_RESET_RECEIVED\n"));
3441 sas_ha->notify_port_event(sas_phy, PORTE_HARD_RESET);
3442 break;
3443 case HW_EVENT_ID_FRAME_TIMEOUT:
3444 PM8001_MSG_DBG(pm8001_ha,
3445 pm8001_printk("HW_EVENT_ID_FRAME_TIMEOUT\n"));
3446 sas_phy_disconnected(sas_phy);
3447 phy->phy_attached = 0;
3448 sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
3449 break;
3450 case HW_EVENT_LINK_ERR_PHY_RESET_FAILED:
3451 PM8001_MSG_DBG(pm8001_ha,
3452 pm8001_printk("HW_EVENT_LINK_ERR_PHY_RESET_FAILED \n"));
3453 pm8001_hw_event_ack_req(pm8001_ha, 0,
3454 HW_EVENT_LINK_ERR_PHY_RESET_FAILED,
3455 port_id, phy_id, 0, 0);
3456 sas_phy_disconnected(sas_phy);
3457 phy->phy_attached = 0;
3458 sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
3459 break;
3460 case HW_EVENT_PORT_RESET_TIMER_TMO:
3461 PM8001_MSG_DBG(pm8001_ha,
3462 pm8001_printk("HW_EVENT_PORT_RESET_TIMER_TMO \n"));
3463 sas_phy_disconnected(sas_phy);
3464 phy->phy_attached = 0;
3465 sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
3466 break;
3467 case HW_EVENT_PORT_RECOVERY_TIMER_TMO:
3468 PM8001_MSG_DBG(pm8001_ha,
3469 pm8001_printk("HW_EVENT_PORT_RECOVERY_TIMER_TMO \n"));
3470 sas_phy_disconnected(sas_phy);
3471 phy->phy_attached = 0;
3472 sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
3473 break;
3474 case HW_EVENT_PORT_RECOVER:
3475 PM8001_MSG_DBG(pm8001_ha,
3476 pm8001_printk("HW_EVENT_PORT_RECOVER \n"));
3477 break;
3478 case HW_EVENT_PORT_RESET_COMPLETE:
3479 PM8001_MSG_DBG(pm8001_ha,
3480 pm8001_printk("HW_EVENT_PORT_RESET_COMPLETE \n"));
3481 break;
3482 case EVENT_BROADCAST_ASYNCH_EVENT:
3483 PM8001_MSG_DBG(pm8001_ha,
3484 pm8001_printk("EVENT_BROADCAST_ASYNCH_EVENT\n"));
3485 break;
3486 default:
3487 PM8001_MSG_DBG(pm8001_ha,
3488 pm8001_printk("Unknown event type = %x\n", eventType));
3489 break;
3490 }
3491 return 0;
3492}
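/*
 * Editorial sketch, not part of the original commit: the field layout of
 * lr_evt_status_phyid_portid that mpi_hw_event() decodes above can be
 * captured in small helpers.  The positions come straight from the masks
 * used in the function; the helper names are illustrative only.
 */
static inline u8 spc_hw_event_port_id(u32 w)  { return w & 0x0F; }
static inline u8 spc_hw_event_phy_id(u32 w)   { return (w >> 4) & 0x0F; }
static inline u16 spc_hw_event_type(u32 w)    { return (w >> 8) & 0xFFFF; }
static inline u8 spc_hw_event_status(u32 w)   { return (w >> 24) & 0x0F; }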
3493
3494/**
3495 * process_one_iomb - process one outbound Queue memory block
3496 * @pm8001_ha: our hba card information
3497 * @piomb: IO message buffer
3498 */
3499static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
3500{
3501 u32 pHeader = (u32)*(u32 *)piomb;
3502 u8 opc = (u8)((le32_to_cpu(pHeader)) & 0xFFF);
3503
3504 PM8001_MSG_DBG(pm8001_ha, pm8001_printk("process_one_iomb:"));
3505
3506 switch (opc) {
3507 case OPC_OUB_ECHO:
3508 PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_ECHO \n"));
3509 break;
3510 case OPC_OUB_HW_EVENT:
3511 PM8001_MSG_DBG(pm8001_ha,
3512 pm8001_printk("OPC_OUB_HW_EVENT \n"));
3513 mpi_hw_event(pm8001_ha, piomb);
3514 break;
3515 case OPC_OUB_SSP_COMP:
3516 PM8001_MSG_DBG(pm8001_ha,
3517 pm8001_printk("OPC_OUB_SSP_COMP \n"));
3518 mpi_ssp_completion(pm8001_ha, piomb);
3519 break;
3520 case OPC_OUB_SMP_COMP:
3521 PM8001_MSG_DBG(pm8001_ha,
3522 pm8001_printk("OPC_OUB_SMP_COMP \n"));
3523 mpi_smp_completion(pm8001_ha, piomb);
3524 break;
3525 case OPC_OUB_LOCAL_PHY_CNTRL:
3526 PM8001_MSG_DBG(pm8001_ha,
3527 pm8001_printk("OPC_OUB_LOCAL_PHY_CNTRL\n"));
3528 mpi_local_phy_ctl(pm8001_ha, piomb);
3529 break;
3530 case OPC_OUB_DEV_REGIST:
3531 PM8001_MSG_DBG(pm8001_ha,
3532 pm8001_printk("OPC_OUB_DEV_REGIST \n"));
3533 mpi_reg_resp(pm8001_ha, piomb);
3534 break;
3535 case OPC_OUB_DEREG_DEV:
3536 PM8001_MSG_DBG(pm8001_ha,
3537			pm8001_printk("unregister the device\n"));
3538 mpi_dereg_resp(pm8001_ha, piomb);
3539 break;
3540 case OPC_OUB_GET_DEV_HANDLE:
3541 PM8001_MSG_DBG(pm8001_ha,
3542 pm8001_printk("OPC_OUB_GET_DEV_HANDLE \n"));
3543 break;
3544 case OPC_OUB_SATA_COMP:
3545 PM8001_MSG_DBG(pm8001_ha,
3546 pm8001_printk("OPC_OUB_SATA_COMP \n"));
3547 mpi_sata_completion(pm8001_ha, piomb);
3548 break;
3549 case OPC_OUB_SATA_EVENT:
3550 PM8001_MSG_DBG(pm8001_ha,
3551 pm8001_printk("OPC_OUB_SATA_EVENT \n"));
3552 mpi_sata_event(pm8001_ha, piomb);
3553 break;
3554 case OPC_OUB_SSP_EVENT:
3555 PM8001_MSG_DBG(pm8001_ha,
3556 pm8001_printk("OPC_OUB_SSP_EVENT\n"));
3557 mpi_ssp_event(pm8001_ha, piomb);
3558 break;
3559 case OPC_OUB_DEV_HANDLE_ARRIV:
3560 PM8001_MSG_DBG(pm8001_ha,
3561 pm8001_printk("OPC_OUB_DEV_HANDLE_ARRIV\n"));
3562 /*This is for target*/
3563 break;
3564 case OPC_OUB_SSP_RECV_EVENT:
3565 PM8001_MSG_DBG(pm8001_ha,
3566 pm8001_printk("OPC_OUB_SSP_RECV_EVENT\n"));
3567 /*This is for target*/
3568 break;
3569 case OPC_OUB_DEV_INFO:
3570 PM8001_MSG_DBG(pm8001_ha,
3571 pm8001_printk("OPC_OUB_DEV_INFO\n"));
3572 break;
3573 case OPC_OUB_FW_FLASH_UPDATE:
3574 PM8001_MSG_DBG(pm8001_ha,
3575 pm8001_printk("OPC_OUB_FW_FLASH_UPDATE\n"));
3576 mpi_fw_flash_update_resp(pm8001_ha, piomb);
3577 break;
3578 case OPC_OUB_GPIO_RESPONSE:
3579 PM8001_MSG_DBG(pm8001_ha,
3580 pm8001_printk("OPC_OUB_GPIO_RESPONSE\n"));
3581 break;
3582 case OPC_OUB_GPIO_EVENT:
3583 PM8001_MSG_DBG(pm8001_ha,
3584 pm8001_printk("OPC_OUB_GPIO_EVENT\n"));
3585 break;
3586 case OPC_OUB_GENERAL_EVENT:
3587 PM8001_MSG_DBG(pm8001_ha,
3588 pm8001_printk("OPC_OUB_GENERAL_EVENT\n"));
3589 mpi_general_event(pm8001_ha, piomb);
3590 break;
3591 case OPC_OUB_SSP_ABORT_RSP:
3592 PM8001_MSG_DBG(pm8001_ha,
3593 pm8001_printk("OPC_OUB_SSP_ABORT_RSP\n"));
3594 mpi_task_abort_resp(pm8001_ha, piomb);
3595 break;
3596 case OPC_OUB_SATA_ABORT_RSP:
3597 PM8001_MSG_DBG(pm8001_ha,
3598 pm8001_printk("OPC_OUB_SATA_ABORT_RSP\n"));
3599 mpi_task_abort_resp(pm8001_ha, piomb);
3600 break;
3601 case OPC_OUB_SAS_DIAG_MODE_START_END:
3602 PM8001_MSG_DBG(pm8001_ha,
3603 pm8001_printk("OPC_OUB_SAS_DIAG_MODE_START_END\n"));
3604 break;
3605 case OPC_OUB_SAS_DIAG_EXECUTE:
3606 PM8001_MSG_DBG(pm8001_ha,
3607 pm8001_printk("OPC_OUB_SAS_DIAG_EXECUTE\n"));
3608 break;
3609 case OPC_OUB_GET_TIME_STAMP:
3610 PM8001_MSG_DBG(pm8001_ha,
3611 pm8001_printk("OPC_OUB_GET_TIME_STAMP\n"));
3612 break;
3613 case OPC_OUB_SAS_HW_EVENT_ACK:
3614 PM8001_MSG_DBG(pm8001_ha,
3615 pm8001_printk("OPC_OUB_SAS_HW_EVENT_ACK\n"));
3616 break;
3617 case OPC_OUB_PORT_CONTROL:
3618 PM8001_MSG_DBG(pm8001_ha,
3619 pm8001_printk("OPC_OUB_PORT_CONTROL\n"));
3620 break;
3621 case OPC_OUB_SMP_ABORT_RSP:
3622 PM8001_MSG_DBG(pm8001_ha,
3623 pm8001_printk("OPC_OUB_SMP_ABORT_RSP\n"));
3624 mpi_task_abort_resp(pm8001_ha, piomb);
3625 break;
3626 case OPC_OUB_GET_NVMD_DATA:
3627 PM8001_MSG_DBG(pm8001_ha,
3628 pm8001_printk("OPC_OUB_GET_NVMD_DATA\n"));
3629 mpi_get_nvmd_resp(pm8001_ha, piomb);
3630 break;
3631 case OPC_OUB_SET_NVMD_DATA:
3632 PM8001_MSG_DBG(pm8001_ha,
3633 pm8001_printk("OPC_OUB_SET_NVMD_DATA\n"));
3634 mpi_set_nvmd_resp(pm8001_ha, piomb);
3635 break;
3636 case OPC_OUB_DEVICE_HANDLE_REMOVAL:
3637 PM8001_MSG_DBG(pm8001_ha,
3638 pm8001_printk("OPC_OUB_DEVICE_HANDLE_REMOVAL\n"));
3639 break;
3640 case OPC_OUB_SET_DEVICE_STATE:
3641 PM8001_MSG_DBG(pm8001_ha,
3642 pm8001_printk("OPC_OUB_SET_DEVICE_STATE\n"));
3643 mpi_set_dev_state_resp(pm8001_ha, piomb);
3644 break;
3645 case OPC_OUB_GET_DEVICE_STATE:
3646 PM8001_MSG_DBG(pm8001_ha,
3647 pm8001_printk("OPC_OUB_GET_DEVICE_STATE\n"));
3648 break;
3649 case OPC_OUB_SET_DEV_INFO:
3650 PM8001_MSG_DBG(pm8001_ha,
3651 pm8001_printk("OPC_OUB_SET_DEV_INFO\n"));
3652 break;
3653 case OPC_OUB_SAS_RE_INITIALIZE:
3654 PM8001_MSG_DBG(pm8001_ha,
3655 pm8001_printk("OPC_OUB_SAS_RE_INITIALIZE\n"));
3656 break;
3657 default:
3658 PM8001_MSG_DBG(pm8001_ha,
3659 pm8001_printk("Unknown outbound Queue IOMB OPC = %x\n",
3660 opc));
3661 break;
3662 }
3663}
3664
3665static int process_oq(struct pm8001_hba_info *pm8001_ha)
3666{
3667 struct outbound_queue_table *circularQ;
3668 void *pMsg1 = NULL;
3669 u8 bc = 0;
3670 u32 ret = MPI_IO_STATUS_FAIL;
3671
3672 circularQ = &pm8001_ha->outbnd_q_tbl[0];
3673 do {
3674 ret = mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc);
3675 if (MPI_IO_STATUS_SUCCESS == ret) {
3676 /* process the outbound message */
3677 process_one_iomb(pm8001_ha, (void *)(pMsg1 - 4));
3678 /* free the message from the outbound circular buffer */
3679 mpi_msg_free_set(pm8001_ha, pMsg1, circularQ, bc);
3680 }
3681 if (MPI_IO_STATUS_BUSY == ret) {
3682 u32 producer_idx;
3683 /* Update the producer index from SPC */
3684 producer_idx = pm8001_read_32(circularQ->pi_virt);
3685 circularQ->producer_index = cpu_to_le32(producer_idx);
3686 if (circularQ->producer_index ==
3687 circularQ->consumer_idx)
3688 /* OQ is empty */
3689 break;
3690 }
3691 } while (1);
3692 return ret;
3693}
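/*
 * Editorial note: process_oq() drains the first outbound queue.  Each
 * IOMB that mpi_msg_consume() hands back is dispatched through
 * process_one_iomb() and then returned to the ring with
 * mpi_msg_free_set().  On MPI_IO_STATUS_BUSY the producer index is
 * re-read from the SPC; the loop exits only when it equals the consumer
 * index, i.e. when the queue is genuinely empty.
 */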
3694
3695/* PCI_DMA_... to our direction translation. */
3696static const u8 data_dir_flags[] = {
3697 [PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT,/* UNSPECIFIED */
3698 [PCI_DMA_TODEVICE] = DATA_DIR_OUT,/* OUTBOUND */
3699 [PCI_DMA_FROMDEVICE] = DATA_DIR_IN,/* INBOUND */
3700 [PCI_DMA_NONE] = DATA_DIR_NONE,/* NO TRANSFER */
3701};
3702static void
3703pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd)
3704{
3705 int i;
3706 struct scatterlist *sg;
3707 struct pm8001_prd *buf_prd = prd;
3708
3709 for_each_sg(scatter, sg, nr, i) {
3710 buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
3711 buf_prd->im_len.len = cpu_to_le32(sg_dma_len(sg));
3712 buf_prd->im_len.e = 0;
3713 buf_prd++;
3714 }
3715}
3716
3717static void build_smp_cmd(u32 deviceID, u32 hTag, struct smp_req *psmp_cmd)
3718{
3719 psmp_cmd->tag = cpu_to_le32(hTag);
3720 psmp_cmd->device_id = cpu_to_le32(deviceID);
3721 psmp_cmd->len_ip_ir = cpu_to_le32(1|(1 << 1));
3722}
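/*
 * Editorial note: per the smp_req layout in pm8001_hwi.h (added by this
 * patch), bit 0 of len_ip_ir requests an indirect response and bit 1 an
 * indirect payload, so 1 | (1 << 1) selects the "long" SMP format whose
 * DMA addresses and sizes pm8001_chip_smp_req() fills in.
 */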
3723
3724/**
3725 * pm8001_chip_smp_req - send a SMP task to FW
3726 * @pm8001_ha: our hba card information.
3727 * @ccb: the ccb information this request used.
3728 */
3729static int pm8001_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
3730 struct pm8001_ccb_info *ccb)
3731{
3732 int elem, rc;
3733 struct sas_task *task = ccb->task;
3734 struct domain_device *dev = task->dev;
3735 struct pm8001_device *pm8001_dev = dev->lldd_dev;
3736 struct scatterlist *sg_req, *sg_resp;
3737 u32 req_len, resp_len;
3738 struct smp_req smp_cmd;
3739 u32 opc;
3740 struct inbound_queue_table *circularQ;
3741
3742 memset(&smp_cmd, 0, sizeof(smp_cmd));
3743 /*
3744 * DMA-map SMP request, response buffers
3745 */
3746 sg_req = &task->smp_task.smp_req;
3747 elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, PCI_DMA_TODEVICE);
3748 if (!elem)
3749 return -ENOMEM;
3750 req_len = sg_dma_len(sg_req);
3751
3752 sg_resp = &task->smp_task.smp_resp;
3753 elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
3754 if (!elem) {
3755 rc = -ENOMEM;
3756 goto err_out;
3757 }
3758 resp_len = sg_dma_len(sg_resp);
3759 /* must be in dwords */
3760 if ((req_len & 0x3) || (resp_len & 0x3)) {
3761 rc = -EINVAL;
3762 goto err_out_2;
3763 }
3764
3765 opc = OPC_INB_SMP_REQUEST;
3766 circularQ = &pm8001_ha->inbnd_q_tbl[0];
3767 smp_cmd.tag = cpu_to_le32(ccb->ccb_tag);
3768 smp_cmd.long_smp_req.long_req_addr =
3769 cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req));
3770 smp_cmd.long_smp_req.long_req_size =
3771 cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4);
3772 smp_cmd.long_smp_req.long_resp_addr =
3773 cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_resp));
3774 smp_cmd.long_smp_req.long_resp_size =
3775 cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4);
3776 build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, &smp_cmd);
3777 mpi_build_cmd(pm8001_ha, circularQ, opc, (u32 *)&smp_cmd);
3778 return 0;
3779
3780err_out_2:
3781 dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_resp, 1,
3782 PCI_DMA_FROMDEVICE);
3783err_out:
3784 dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_req, 1,
3785 PCI_DMA_TODEVICE);
3786 return rc;
3787}
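/*
 * Editorial note: the 4 bytes subtracted from the mapped request and
 * response lengths above are, as in other libsas LLDDs, most likely the
 * trailing CRC dword of the SMP frame, which the controller generates
 * and verifies by itself.
 */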
3788
3789/**
3790 * pm8001_chip_ssp_io_req - send a SSP task to FW
3791 * @pm8001_ha: our hba card information.
3792 * @ccb: the ccb information this request used.
3793 */
3794static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
3795 struct pm8001_ccb_info *ccb)
3796{
3797 struct sas_task *task = ccb->task;
3798 struct domain_device *dev = task->dev;
3799 struct pm8001_device *pm8001_dev = dev->lldd_dev;
3800 struct ssp_ini_io_start_req ssp_cmd;
3801 u32 tag = ccb->ccb_tag;
3802 int ret;
3803 __le64 phys_addr;
3804 struct inbound_queue_table *circularQ;
3805 u32 opc = OPC_INB_SSPINIIOSTART;
3806 memset(&ssp_cmd, 0, sizeof(ssp_cmd));
3807 memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8);
3808 ssp_cmd.dir_m_tlr =
3809		cpu_to_le32(data_dir_flags[task->data_dir] << 8 | 0x0);
3810	/* 0 for SAS 1.1 compatible TLR */
3811 ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len);
3812 ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id);
3813 ssp_cmd.tag = cpu_to_le32(tag);
3814 if (task->ssp_task.enable_first_burst)
3815 ssp_cmd.ssp_iu.efb_prio_attr |= 0x80;
3816 ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_prio << 3);
3817 ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7);
3818 memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cdb, 16);
3819 circularQ = &pm8001_ha->inbnd_q_tbl[0];
3820
3821 /* fill in PRD (scatter/gather) table, if any */
3822 if (task->num_scatter > 1) {
3823 pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd);
3824 phys_addr = cpu_to_le64(ccb->ccb_dma_handle +
3825 offsetof(struct pm8001_ccb_info, buf_prd[0]));
3826 ssp_cmd.addr_low = lower_32_bits(phys_addr);
3827 ssp_cmd.addr_high = upper_32_bits(phys_addr);
3828 ssp_cmd.esgl = cpu_to_le32(1<<31);
3829 } else if (task->num_scatter == 1) {
3830 __le64 dma_addr = cpu_to_le64(sg_dma_address(task->scatter));
3831 ssp_cmd.addr_low = lower_32_bits(dma_addr);
3832 ssp_cmd.addr_high = upper_32_bits(dma_addr);
3833 ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
3834 ssp_cmd.esgl = 0;
3835 } else if (task->num_scatter == 0) {
3836 ssp_cmd.addr_low = 0;
3837 ssp_cmd.addr_high = 0;
3838 ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
3839 ssp_cmd.esgl = 0;
3840 }
3841 ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd);
3842 return ret;
3843}
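/*
 * Editorial note: the three-way scatter handling above is the pattern
 * this chip expects: more than one SG element points the command at the
 * PRD table built by pm8001_chip_make_sg() and sets bit 31 (esgl) to
 * flag an extended SGL, a single element is inlined as a direct
 * address/length pair, and zero elements leave the address fields 0.
 * pm8001_chip_sata_req() below follows the same scheme.
 */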
3844
3845static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
3846 struct pm8001_ccb_info *ccb)
3847{
3848 struct sas_task *task = ccb->task;
3849 struct domain_device *dev = task->dev;
3850 struct pm8001_device *pm8001_ha_dev = dev->lldd_dev;
3851 u32 tag = ccb->ccb_tag;
3852 int ret;
3853 struct sata_start_req sata_cmd;
3854 u32 hdr_tag, ncg_tag = 0;
3855 __le64 phys_addr;
3856 u32 ATAP = 0x0;
3857 u32 dir;
3858 struct inbound_queue_table *circularQ;
3859 u32 opc = OPC_INB_SATA_HOST_OPSTART;
3860 memset(&sata_cmd, 0, sizeof(sata_cmd));
3861 circularQ = &pm8001_ha->inbnd_q_tbl[0];
3862 if (task->data_dir == PCI_DMA_NONE) {
3863 ATAP = 0x04; /* no data*/
3864 PM8001_IO_DBG(pm8001_ha, pm8001_printk("no data \n"));
3865 } else if (likely(!task->ata_task.device_control_reg_update)) {
3866 if (task->ata_task.dma_xfer) {
3867 ATAP = 0x06; /* DMA */
3868 PM8001_IO_DBG(pm8001_ha, pm8001_printk("DMA \n"));
3869 } else {
3870 ATAP = 0x05; /* PIO*/
3871 PM8001_IO_DBG(pm8001_ha, pm8001_printk("PIO \n"));
3872 }
3873 if (task->ata_task.use_ncq &&
3874 dev->sata_dev.command_set != ATAPI_COMMAND_SET) {
3875 ATAP = 0x07; /* FPDMA */
3876 PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA \n"));
3877 }
3878 }
3879 if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag))
3880 ncg_tag = hdr_tag;
3881 dir = data_dir_flags[task->data_dir] << 8;
3882 sata_cmd.tag = cpu_to_le32(tag);
3883 sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
3884 sata_cmd.data_len = cpu_to_le32(task->total_xfer_len);
3885 sata_cmd.ncqtag_atap_dir_m =
3886 cpu_to_le32(((ncg_tag & 0xff)<<16)|((ATAP & 0x3f) << 10) | dir);
3887 sata_cmd.sata_fis = task->ata_task.fis;
3888 if (likely(!task->ata_task.device_control_reg_update))
3889 sata_cmd.sata_fis.flags |= 0x80;/* C=1: update ATA cmd reg */
3890 sata_cmd.sata_fis.flags &= 0xF0;/* PM_PORT field shall be 0 */
3891 /* fill in PRD (scatter/gather) table, if any */
3892 if (task->num_scatter > 1) {
3893 pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd);
3894 phys_addr = cpu_to_le64(ccb->ccb_dma_handle +
3895 offsetof(struct pm8001_ccb_info, buf_prd[0]));
3896 sata_cmd.addr_low = lower_32_bits(phys_addr);
3897 sata_cmd.addr_high = upper_32_bits(phys_addr);
3898 sata_cmd.esgl = cpu_to_le32(1 << 31);
3899 } else if (task->num_scatter == 1) {
3900 __le64 dma_addr = cpu_to_le64(sg_dma_address(task->scatter));
3901 sata_cmd.addr_low = lower_32_bits(dma_addr);
3902 sata_cmd.addr_high = upper_32_bits(dma_addr);
3903 sata_cmd.len = cpu_to_le32(task->total_xfer_len);
3904 sata_cmd.esgl = 0;
3905 } else if (task->num_scatter == 0) {
3906 sata_cmd.addr_low = 0;
3907 sata_cmd.addr_high = 0;
3908 sata_cmd.len = cpu_to_le32(task->total_xfer_len);
3909 sata_cmd.esgl = 0;
3910 }
3911 ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd);
3912 return ret;
3913}
3914
3915/**
3916 * pm8001_chip_phy_start_req - start a phy via the PHY_START command
3917 * @pm8001_ha: our hba card information.
3918 * @phy_id: the phy id which we want to start up.
3920 */
3921static int
3922pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
3923{
3924 struct phy_start_req payload;
3925 struct inbound_queue_table *circularQ;
3926 int ret;
3927 u32 tag = 0x01;
3928 u32 opcode = OPC_INB_PHYSTART;
3929 circularQ = &pm8001_ha->inbnd_q_tbl[0];
3930 memset(&payload, 0, sizeof(payload));
3931 payload.tag = cpu_to_le32(tag);
3932 /*
3933 ** [0:7] PHY Identifier
3934 ** [8:11] link rate 1.5G, 3G, 6G
3935 ** [12:13] link mode 01b SAS mode; 10b SATA mode; 11b both
3936 ** [14] 0b disable spin up hold; 1b enable spin up hold
3937 */
3938 payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
3939 LINKMODE_AUTO | LINKRATE_15 |
3940 LINKRATE_30 | LINKRATE_60 | phy_id);
3941 payload.sas_identify.dev_type = SAS_END_DEV;
3942 payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
3943 memcpy(payload.sas_identify.sas_addr,
3944 pm8001_ha->sas_addr, SAS_ADDR_SIZE);
3945 payload.sas_identify.phy_id = phy_id;
3946 ret = mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload);
3947 return ret;
3948}
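/*
 * Editorial example: with the masks from pm8001_hwi.h, the word built
 * above for, say, phy 3 works out to
 *   SPINHOLD_DISABLE | LINKMODE_AUTO | LINKRATE_15 | LINKRATE_30 |
 *   LINKRATE_60 | 3
 * = (0x0 << 14) | (0x3 << 12) | ((0x1 | 0x2 | 0x4) << 8) | 3 = 0x3703,
 * i.e. spin-up hold off, auto SAS/SATA link mode, and all of 1.5, 3 and
 * 6 Gbit/s advertised.
 */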
3949
3950/**
3951 * pm8001_chip_phy_stop_req - stop a phy via the PHY_STOP command
3952 * @pm8001_ha: our hba card information.
3953 * @phy_id: the phy id which we want to stop.
3955 */
3956static int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
3957 u8 phy_id)
3958{
3959 struct phy_stop_req payload;
3960 struct inbound_queue_table *circularQ;
3961 int ret;
3962 u32 tag = 0x01;
3963 u32 opcode = OPC_INB_PHYSTOP;
3964 circularQ = &pm8001_ha->inbnd_q_tbl[0];
3965 memset(&payload, 0, sizeof(payload));
3966 payload.tag = cpu_to_le32(tag);
3967 payload.phy_id = cpu_to_le32(phy_id);
3968 ret = mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload);
3969 return ret;
3970}
3971
3972/**
3973 * see comments on mpi_reg_resp.
3974 */
3975static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
3976 struct pm8001_device *pm8001_dev, u32 flag)
3977{
3978 struct reg_dev_req payload;
3979 u32 opc;
3980 u32 stp_sspsmp_sata = 0x4;
3981 struct inbound_queue_table *circularQ;
3982 u32 linkrate, phy_id;
3983 int rc, tag = 0xdeadbeef;
3984 struct pm8001_ccb_info *ccb;
3985 u8 retryFlag = 0x1;
3986 u16 firstBurstSize = 0;
3987 u16 ITNT = 2000;
3988 struct domain_device *dev = pm8001_dev->sas_device;
3989 struct domain_device *parent_dev = dev->parent;
3990 circularQ = &pm8001_ha->inbnd_q_tbl[0];
3991
3992 memset(&payload, 0, sizeof(payload));
3993 rc = pm8001_tag_alloc(pm8001_ha, &tag);
3994 if (rc)
3995 return rc;
3996 ccb = &pm8001_ha->ccb_info[tag];
3997 ccb->device = pm8001_dev;
3998 ccb->ccb_tag = tag;
3999 payload.tag = cpu_to_le32(tag);
4000 if (flag == 1)
4001 stp_sspsmp_sata = 0x02; /*direct attached sata */
4002 else {
4003 if (pm8001_dev->dev_type == SATA_DEV)
4004 stp_sspsmp_sata = 0x00; /* stp*/
4005 else if (pm8001_dev->dev_type == SAS_END_DEV ||
4006 pm8001_dev->dev_type == EDGE_DEV ||
4007 pm8001_dev->dev_type == FANOUT_DEV)
4008 stp_sspsmp_sata = 0x01; /*ssp or smp*/
4009 }
4010 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
4011 phy_id = parent_dev->ex_dev.ex_phy->phy_id;
4012 else
4013 phy_id = pm8001_dev->attached_phy;
4014 opc = OPC_INB_REG_DEV;
4015 linkrate = (pm8001_dev->sas_device->linkrate < dev->port->linkrate) ?
4016 pm8001_dev->sas_device->linkrate : dev->port->linkrate;
4017 payload.phyid_portid =
4018 cpu_to_le32(((pm8001_dev->sas_device->port->id) & 0x0F) |
4019 ((phy_id & 0x0F) << 4));
4020 payload.dtype_dlr_retry = cpu_to_le32((retryFlag & 0x01) |
4021 ((linkrate & 0x0F) * 0x1000000) |
4022 ((stp_sspsmp_sata & 0x03) * 0x10000000));
4023 payload.firstburstsize_ITNexustimeout =
4024 cpu_to_le32(ITNT | (firstBurstSize * 0x10000));
4025 memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr,
4026 SAS_ADDR_SIZE);
4027 rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
4028 return rc;
4029}
4030
4031/**
4032 * see comments on mpi_reg_resp.
4033 */
4034static int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha,
4035 u32 device_id)
4036{
4037 struct dereg_dev_req payload;
4038 u32 opc = OPC_INB_DEREG_DEV_HANDLE;
4039 int ret;
4040 struct inbound_queue_table *circularQ;
4041
4042 circularQ = &pm8001_ha->inbnd_q_tbl[0];
4043 memset(&payload, 0, sizeof(payload));
4044 payload.tag = 1;
4045 payload.device_id = cpu_to_le32(device_id);
4046 PM8001_MSG_DBG(pm8001_ha,
4047 pm8001_printk("unregister device device_id = %d\n", device_id));
4048 ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
4049 return ret;
4050}
4051
4052/**
4053 * pm8001_chip_phy_ctl_req - support the local phy operation
4054 * @pm8001_ha: our hba card information.
4055 * @phyId: the phy id which we want to operate on
4056 * @phy_op: the phy operation to carry out
4058 */
4059static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
4060 u32 phyId, u32 phy_op)
4061{
4062 struct local_phy_ctl_req payload;
4063 struct inbound_queue_table *circularQ;
4064 int ret;
4065 u32 opc = OPC_INB_LOCAL_PHY_CONTROL;
4066 memset(&payload, 0, sizeof(payload));
4067 circularQ = &pm8001_ha->inbnd_q_tbl[0];
4068 payload.tag = 1;
4069 payload.phyop_phyid =
4070 cpu_to_le32(((phy_op & 0xff) << 8) | (phyId & 0x0F));
4071 ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
4072 return ret;
4073}
4074
4075static u32 pm8001_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha)
4076{
4077#ifdef PM8001_USE_MSIX
4078	return 1;
4079#else
4080	u32 value;
4081
4082	value = pm8001_cr32(pm8001_ha, 0, MSGU_ODR);
4083	if (value)
4084		return 1;
4085	return 0;
4086#endif
4087}
4087
4088/**
4089 * pm8001_chip_isr - PM8001 isr handler.
4090 * @pm8001_ha: our hba card information.
4093 */
4094static irqreturn_t
4095pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha)
4096{
4097 unsigned long flags;
4098 spin_lock_irqsave(&pm8001_ha->lock, flags);
4099 pm8001_chip_interrupt_disable(pm8001_ha);
4100 process_oq(pm8001_ha);
4101 pm8001_chip_interrupt_enable(pm8001_ha);
4102 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
4103 return IRQ_HANDLED;
4104}
4105
4106static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc,
4107 u32 dev_id, u8 flag, u32 task_tag, u32 cmd_tag)
4108{
4109 struct task_abort_req task_abort;
4110 struct inbound_queue_table *circularQ;
4111 int ret;
4112 circularQ = &pm8001_ha->inbnd_q_tbl[0];
4113 memset(&task_abort, 0, sizeof(task_abort));
4114 if (ABORT_SINGLE == (flag & ABORT_MASK)) {
4115 task_abort.abort_all = 0;
4116 task_abort.device_id = cpu_to_le32(dev_id);
4117 task_abort.tag_to_abort = cpu_to_le32(task_tag);
4118 task_abort.tag = cpu_to_le32(cmd_tag);
4119 } else if (ABORT_ALL == (flag & ABORT_MASK)) {
4120 task_abort.abort_all = cpu_to_le32(1);
4121 task_abort.device_id = cpu_to_le32(dev_id);
4122 task_abort.tag = cpu_to_le32(cmd_tag);
4123 }
4124 ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort);
4125 return ret;
4126}
4127
4128/**
4129 * pm8001_chip_abort_task - SAS abort task when an error or exception happens.
4130 * @task: the task we want to abort.
4131 * @flag: the abort flag.
4132 */
4133static int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
4134 struct pm8001_device *pm8001_dev, u8 flag, u32 task_tag, u32 cmd_tag)
4135{
4136 u32 opc, device_id;
4137 int rc = TMF_RESP_FUNC_FAILED;
4138 PM8001_EH_DBG(pm8001_ha, pm8001_printk("cmd_tag = %x, abort task tag"
4139 " = %x", cmd_tag, task_tag));
4140 if (pm8001_dev->dev_type == SAS_END_DEV)
4141 opc = OPC_INB_SSP_ABORT;
4142 else if (pm8001_dev->dev_type == SATA_DEV)
4143 opc = OPC_INB_SATA_ABORT;
4144 else
4145 opc = OPC_INB_SMP_ABORT;/* SMP */
4146 device_id = pm8001_dev->device_id;
4147 rc = send_task_abort(pm8001_ha, opc, device_id, flag,
4148 task_tag, cmd_tag);
4149 if (rc != TMF_RESP_FUNC_COMPLETE)
4150 PM8001_EH_DBG(pm8001_ha, pm8001_printk("rc= %d\n", rc));
4151 return rc;
4152}
4153
4154/**
4155 * pm8001_chip_ssp_tm_req - build the task management command.
4156 * @pm8001_ha: our hba card information.
4157 * @ccb: the ccb information.
4158 * @tmf: task management function.
4159 */
4160static int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
4161 struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf)
4162{
4163 struct sas_task *task = ccb->task;
4164 struct domain_device *dev = task->dev;
4165 struct pm8001_device *pm8001_dev = dev->lldd_dev;
4166 u32 opc = OPC_INB_SSPINITMSTART;
4167 struct inbound_queue_table *circularQ;
4168 struct ssp_ini_tm_start_req sspTMCmd;
4169 int ret;
4170
4171 memset(&sspTMCmd, 0, sizeof(sspTMCmd));
4172 sspTMCmd.device_id = cpu_to_le32(pm8001_dev->device_id);
4173 sspTMCmd.relate_tag = cpu_to_le32(tmf->tag_of_task_to_be_managed);
4174 sspTMCmd.tmf = cpu_to_le32(tmf->tmf);
4175 memcpy(sspTMCmd.lun, task->ssp_task.LUN, 8);
4176 sspTMCmd.tag = cpu_to_le32(ccb->ccb_tag);
4177 circularQ = &pm8001_ha->inbnd_q_tbl[0];
4178 ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd);
4179 return ret;
4180}
4181
4182static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
4183 void *payload)
4184{
4185 u32 opc = OPC_INB_GET_NVMD_DATA;
4186 u32 nvmd_type;
4187 int rc;
4188 u32 tag;
4189 struct pm8001_ccb_info *ccb;
4190 struct inbound_queue_table *circularQ;
4191 struct get_nvm_data_req nvmd_req;
4192 struct fw_control_ex *fw_control_context;
4193 struct pm8001_ioctl_payload *ioctl_payload = payload;
4194
4195 nvmd_type = ioctl_payload->minor_function;
4196	fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL);
	if (!fw_control_context)
		return -ENOMEM;
4197 fw_control_context->usrAddr = (u8 *)&ioctl_payload->func_specific[0];
4198 fw_control_context->len = ioctl_payload->length;
4199 circularQ = &pm8001_ha->inbnd_q_tbl[0];
4200 memset(&nvmd_req, 0, sizeof(nvmd_req));
4201 rc = pm8001_tag_alloc(pm8001_ha, &tag);
4202 if (rc)
4203 return rc;
4204 ccb = &pm8001_ha->ccb_info[tag];
4205 ccb->ccb_tag = tag;
4206 ccb->fw_control_context = fw_control_context;
4207 nvmd_req.tag = cpu_to_le32(tag);
4208
4209 switch (nvmd_type) {
4210 case TWI_DEVICE: {
4211 u32 twi_addr, twi_page_size;
4212 twi_addr = 0xa8;
4213 twi_page_size = 2;
4214
4215 nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | twi_addr << 16 |
4216 twi_page_size << 8 | TWI_DEVICE);
4217 nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
4218 nvmd_req.resp_addr_hi =
4219 cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
4220 nvmd_req.resp_addr_lo =
4221 cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
4222 break;
4223 }
4224 case C_SEEPROM: {
4225 nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | C_SEEPROM);
4226 nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
4227 nvmd_req.resp_addr_hi =
4228 cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
4229 nvmd_req.resp_addr_lo =
4230 cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
4231 break;
4232 }
4233 case VPD_FLASH: {
4234 nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | VPD_FLASH);
4235 nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
4236 nvmd_req.resp_addr_hi =
4237 cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
4238 nvmd_req.resp_addr_lo =
4239 cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
4240 break;
4241 }
4242 case EXPAN_ROM: {
4243 nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | EXPAN_ROM);
4244 nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
4245 nvmd_req.resp_addr_hi =
4246 cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
4247 nvmd_req.resp_addr_lo =
4248 cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
4249 break;
4250 }
4251 default:
4252 break;
4253 }
4254 rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req);
4255 return rc;
4256}
4257
4258static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
4259 void *payload)
4260{
4261 u32 opc = OPC_INB_SET_NVMD_DATA;
4262 u32 nvmd_type;
4263 int rc;
4264 u32 tag;
4265 struct pm8001_ccb_info *ccb;
4266 struct inbound_queue_table *circularQ;
4267 struct set_nvm_data_req nvmd_req;
4268 struct fw_control_ex *fw_control_context;
4269 struct pm8001_ioctl_payload *ioctl_payload = payload;
4270
4271 nvmd_type = ioctl_payload->minor_function;
4272	fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL);
	if (!fw_control_context)
		return -ENOMEM;
4273 circularQ = &pm8001_ha->inbnd_q_tbl[0];
4274 memcpy(pm8001_ha->memoryMap.region[NVMD].virt_ptr,
4275 ioctl_payload->func_specific,
4276 ioctl_payload->length);
4277 memset(&nvmd_req, 0, sizeof(nvmd_req));
4278 rc = pm8001_tag_alloc(pm8001_ha, &tag);
4279 if (rc)
4280 return rc;
4281 ccb = &pm8001_ha->ccb_info[tag];
4282 ccb->fw_control_context = fw_control_context;
4283 ccb->ccb_tag = tag;
4284 nvmd_req.tag = cpu_to_le32(tag);
4285 switch (nvmd_type) {
4286 case TWI_DEVICE: {
4287 u32 twi_addr, twi_page_size;
4288 twi_addr = 0xa8;
4289 twi_page_size = 2;
4290 nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98);
4291 nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | twi_addr << 16 |
4292 twi_page_size << 8 | TWI_DEVICE);
4293 nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
4294 nvmd_req.resp_addr_hi =
4295 cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
4296 nvmd_req.resp_addr_lo =
4297 cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
4298 break;
4299 }
4300 case C_SEEPROM:
4301 nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | C_SEEPROM);
4302 nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
4303 nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98);
4304 nvmd_req.resp_addr_hi =
4305 cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
4306 nvmd_req.resp_addr_lo =
4307 cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
4308 break;
4309 case VPD_FLASH:
4310 nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | VPD_FLASH);
4311 nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
4312 nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98);
4313 nvmd_req.resp_addr_hi =
4314 cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
4315 nvmd_req.resp_addr_lo =
4316 cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
4317 break;
4318 case EXPAN_ROM:
4319 nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | EXPAN_ROM);
4320 nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
4321 nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98);
4322 nvmd_req.resp_addr_hi =
4323 cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
4324 nvmd_req.resp_addr_lo =
4325 cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
4326 break;
4327 default:
4328 break;
4329 }
4330 rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req);
4331 return rc;
4332}
4333
4334/**
4335 * pm8001_chip_fw_flash_update_build - support the firmware update operation
4336 * @pm8001_ha: our hba card information.
4337 * @fw_flash_updata_info: firmware flash update param
4338 */
4339static int
4340pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha,
4341 void *fw_flash_updata_info, u32 tag)
4342{
4343 struct fw_flash_Update_req payload;
4344 struct fw_flash_updata_info *info;
4345 struct inbound_queue_table *circularQ;
4346 int ret;
4347 u32 opc = OPC_INB_FW_FLASH_UPDATE;
4348
4349 memset(&payload, 0, sizeof(struct fw_flash_Update_req));
4350 circularQ = &pm8001_ha->inbnd_q_tbl[0];
4351 info = fw_flash_updata_info;
4352 payload.tag = cpu_to_le32(tag);
4353 payload.cur_image_len = cpu_to_le32(info->cur_image_len);
4354 payload.cur_image_offset = cpu_to_le32(info->cur_image_offset);
4355 payload.total_image_len = cpu_to_le32(info->total_image_len);
4356	payload.len = info->sgl.im_len.len;
4357 payload.sgl_addr_lo = lower_32_bits(info->sgl.addr);
4358 payload.sgl_addr_hi = upper_32_bits(info->sgl.addr);
4359 ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
4360 return ret;
4361}
4362
4363static int
4364pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
4365 void *payload)
4366{
4367 struct fw_flash_updata_info flash_update_info;
4368 struct fw_control_info *fw_control;
4369 struct fw_control_ex *fw_control_context;
4370 int rc;
4371 u32 tag;
4372 struct pm8001_ccb_info *ccb;
4373 void *buffer = NULL;
4374 dma_addr_t phys_addr;
4375 u32 phys_addr_hi;
4376 u32 phys_addr_lo;
4377 struct pm8001_ioctl_payload *ioctl_payload = payload;
4378
4379	fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL);
	if (!fw_control_context)
		return -ENOMEM;
4380 fw_control = (struct fw_control_info *)&ioctl_payload->func_specific[0];
4381 if (fw_control->len != 0) {
4382 if (pm8001_mem_alloc(pm8001_ha->pdev,
4383 (void **)&buffer,
4384 &phys_addr,
4385 &phys_addr_hi,
4386 &phys_addr_lo,
4387 fw_control->len, 0) != 0) {
4388 PM8001_FAIL_DBG(pm8001_ha,
4389 pm8001_printk("Mem alloc failure\n"));
4390 return -ENOMEM;
4391 }
4392 }
4393 memset(buffer, 0, fw_control->len);
4394 memcpy(buffer, fw_control->buffer, fw_control->len);
4395 flash_update_info.sgl.addr = cpu_to_le64(phys_addr);
4396 flash_update_info.sgl.im_len.len = cpu_to_le32(fw_control->len);
4397 flash_update_info.sgl.im_len.e = 0;
4398 flash_update_info.cur_image_offset = fw_control->offset;
4399 flash_update_info.cur_image_len = fw_control->len;
4400 flash_update_info.total_image_len = fw_control->size;
4401 fw_control_context->fw_control = fw_control;
4402 fw_control_context->virtAddr = buffer;
4403 fw_control_context->len = fw_control->len;
4404 rc = pm8001_tag_alloc(pm8001_ha, &tag);
4405 if (rc)
4406 return rc;
4407 ccb = &pm8001_ha->ccb_info[tag];
4408 ccb->fw_control_context = fw_control_context;
4409 ccb->ccb_tag = tag;
4410 rc = pm8001_chip_fw_flash_update_build(pm8001_ha, &flash_update_info,
4411 tag);
4412 return rc;
4413}
4414
4415static int
4416pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha,
4417 struct pm8001_device *pm8001_dev, u32 state)
4418{
4419 struct set_dev_state_req payload;
4420 struct inbound_queue_table *circularQ;
4421 struct pm8001_ccb_info *ccb;
4422 int rc;
4423 u32 tag;
4424 u32 opc = OPC_INB_SET_DEVICE_STATE;
4425 memset(&payload, 0, sizeof(payload));
4426 rc = pm8001_tag_alloc(pm8001_ha, &tag);
4427 if (rc)
4428 return -1;
4429 ccb = &pm8001_ha->ccb_info[tag];
4430 ccb->ccb_tag = tag;
4431 ccb->device = pm8001_dev;
4432 circularQ = &pm8001_ha->inbnd_q_tbl[0];
4433 payload.tag = cpu_to_le32(tag);
4434 payload.device_id = cpu_to_le32(pm8001_dev->device_id);
4435 payload.nds = cpu_to_le32(state);
4436 rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
4437 return rc;
4439}
4440
4441static int
4442pm8001_chip_sas_re_initialization(struct pm8001_hba_info *pm8001_ha)
4443{
4444 struct sas_re_initialization_req payload;
4445 struct inbound_queue_table *circularQ;
4446 struct pm8001_ccb_info *ccb;
4447 int rc;
4448 u32 tag;
4449 u32 opc = OPC_INB_SAS_RE_INITIALIZE;
4450 memset(&payload, 0, sizeof(payload));
4451 rc = pm8001_tag_alloc(pm8001_ha, &tag);
4452 if (rc)
4453 return -1;
4454 ccb = &pm8001_ha->ccb_info[tag];
4455 ccb->ccb_tag = tag;
4456 circularQ = &pm8001_ha->inbnd_q_tbl[0];
4457 payload.tag = cpu_to_le32(tag);
4458 payload.SSAHOLT = cpu_to_le32(0xd << 25);
4459 payload.sata_hol_tmo = cpu_to_le32(80);
4460 payload.open_reject_cmdretries_data_retries = cpu_to_le32(0xff00ff);
4461 rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
4462 return rc;
4464}
4465
4466const struct pm8001_dispatch pm8001_8001_dispatch = {
4467 .name = "pmc8001",
4468 .chip_init = pm8001_chip_init,
4469 .chip_soft_rst = pm8001_chip_soft_rst,
4470 .chip_rst = pm8001_hw_chip_rst,
4471 .chip_iounmap = pm8001_chip_iounmap,
4472 .isr = pm8001_chip_isr,
4473 .is_our_interupt = pm8001_chip_is_our_interupt,
4474 .isr_process_oq = process_oq,
4475 .interrupt_enable = pm8001_chip_interrupt_enable,
4476 .interrupt_disable = pm8001_chip_interrupt_disable,
4477 .make_prd = pm8001_chip_make_sg,
4478 .smp_req = pm8001_chip_smp_req,
4479 .ssp_io_req = pm8001_chip_ssp_io_req,
4480 .sata_req = pm8001_chip_sata_req,
4481 .phy_start_req = pm8001_chip_phy_start_req,
4482 .phy_stop_req = pm8001_chip_phy_stop_req,
4483 .reg_dev_req = pm8001_chip_reg_dev_req,
4484 .dereg_dev_req = pm8001_chip_dereg_dev_req,
4485 .phy_ctl_req = pm8001_chip_phy_ctl_req,
4486 .task_abort = pm8001_chip_abort_task,
4487 .ssp_tm_req = pm8001_chip_ssp_tm_req,
4488 .get_nvmd_req = pm8001_chip_get_nvmd_req,
4489 .set_nvmd_req = pm8001_chip_set_nvmd_req,
4490 .fw_flash_update_req = pm8001_chip_fw_flash_update_req,
4491 .set_dev_state_req = pm8001_chip_set_dev_state_req,
4492 .sas_re_init_req = pm8001_chip_sas_re_initialization,
4493};
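/*
 * Editorial note: this dispatch table is the SPC 8001 chip backend; the
 * common pm8001 layer reaches the hardware only through these hooks,
 * which is why the request-building functions above can stay static to
 * this file.
 */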
4494
diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h
new file mode 100644
index 000000000000..833a5201eda4
--- /dev/null
+++ b/drivers/scsi/pm8001/pm8001_hwi.h
@@ -0,0 +1,1029 @@
1/*
2 * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
3 *
4 * Copyright (c) 2008-2009 USI Co., Ltd.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification.
13 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14 * substantially similar to the "NO WARRANTY" disclaimer below
15 * ("Disclaimer") and any redistribution must be conditioned upon
16 * including a substantially similar Disclaimer requirement for further
17 * binary redistribution.
18 * 3. Neither the names of the above-listed copyright holders nor the names
19 * of any contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * Alternatively, this software may be distributed under the terms of the
23 * GNU General Public License ("GPL") version 2 as published by the Free
24 * Software Foundation.
25 *
26 * NO WARRANTY
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
35 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
36 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGES.
38 *
39 */
40#ifndef _PMC8001_REG_H_
41#define _PMC8001_REG_H_
42
43#include <linux/types.h>
44#include <scsi/libsas.h>
45
46
47/* for Request Opcode of IOMB */
48#define OPC_INB_ECHO 1 /* 0x000 */
49#define OPC_INB_PHYSTART 4 /* 0x004 */
50#define OPC_INB_PHYSTOP 5 /* 0x005 */
51#define OPC_INB_SSPINIIOSTART 6 /* 0x006 */
52#define OPC_INB_SSPINITMSTART 7 /* 0x007 */
53#define OPC_INB_SSPINIEXTIOSTART 8 /* 0x008 */
54#define OPC_INB_DEV_HANDLE_ACCEPT 9 /* 0x009 */
55#define OPC_INB_SSPTGTIOSTART 10 /* 0x00A */
56#define OPC_INB_SSPTGTRSPSTART 11 /* 0x00B */
57#define OPC_INB_SSPINIEDCIOSTART 12 /* 0x00C */
58#define OPC_INB_SSPINIEXTEDCIOSTART 13 /* 0x00D */
59#define OPC_INB_SSPTGTEDCIOSTART 14 /* 0x00E */
60#define OPC_INB_SSP_ABORT 15 /* 0x00F */
61#define OPC_INB_DEREG_DEV_HANDLE 16 /* 0x010 */
62#define OPC_INB_GET_DEV_HANDLE 17 /* 0x011 */
63#define OPC_INB_SMP_REQUEST 18 /* 0x012 */
64/* SMP_RESPONSE is removed */
65#define OPC_INB_SMP_RESPONSE 19 /* 0x013 */
66#define OPC_INB_SMP_ABORT 20 /* 0x014 */
67#define OPC_INB_REG_DEV 22 /* 0x016 */
68#define OPC_INB_SATA_HOST_OPSTART 23 /* 0x017 */
69#define OPC_INB_SATA_ABORT 24 /* 0x018 */
70#define OPC_INB_LOCAL_PHY_CONTROL 25 /* 0x019 */
71#define OPC_INB_GET_DEV_INFO 26 /* 0x01A */
72#define OPC_INB_FW_FLASH_UPDATE 32 /* 0x020 */
73#define OPC_INB_GPIO 34 /* 0x022 */
74#define OPC_INB_SAS_DIAG_MODE_START_END 35 /* 0x023 */
75#define OPC_INB_SAS_DIAG_EXECUTE 36 /* 0x024 */
76#define OPC_INB_SAS_HW_EVENT_ACK 37 /* 0x025 */
77#define OPC_INB_GET_TIME_STAMP 38 /* 0x026 */
78#define OPC_INB_PORT_CONTROL 39 /* 0x027 */
79#define OPC_INB_GET_NVMD_DATA 40 /* 0x028 */
80#define OPC_INB_SET_NVMD_DATA 41 /* 0x029 */
81#define OPC_INB_SET_DEVICE_STATE 42 /* 0x02A */
82#define OPC_INB_GET_DEVICE_STATE 43 /* 0x02B */
83#define OPC_INB_SET_DEV_INFO 44 /* 0x02C */
84#define OPC_INB_SAS_RE_INITIALIZE 45 /* 0x02D */
85
86/* for Response Opcode of IOMB */
87#define OPC_OUB_ECHO 1 /* 0x001 */
88#define OPC_OUB_HW_EVENT 4 /* 0x004 */
89#define OPC_OUB_SSP_COMP 5 /* 0x005 */
90#define OPC_OUB_SMP_COMP 6 /* 0x006 */
91#define OPC_OUB_LOCAL_PHY_CNTRL 7 /* 0x007 */
92#define OPC_OUB_DEV_REGIST 10 /* 0x00A */
93#define OPC_OUB_DEREG_DEV 11 /* 0x00B */
94#define OPC_OUB_GET_DEV_HANDLE 12 /* 0x00C */
95#define OPC_OUB_SATA_COMP 13 /* 0x00D */
96#define OPC_OUB_SATA_EVENT 14 /* 0x00E */
97#define OPC_OUB_SSP_EVENT 15 /* 0x00F */
98#define OPC_OUB_DEV_HANDLE_ARRIV 16 /* 0x010 */
99/* SMP_RECEIVED Notification is removed */
100#define OPC_OUB_SMP_RECV_EVENT 17 /* 0x011 */
101#define OPC_OUB_SSP_RECV_EVENT 18 /* 0x012 */
102#define OPC_OUB_DEV_INFO 19 /* 0x013 */
103#define OPC_OUB_FW_FLASH_UPDATE 20 /* 0x014 */
104#define OPC_OUB_GPIO_RESPONSE 22 /* 0x016 */
105#define OPC_OUB_GPIO_EVENT 23 /* 0x017 */
106#define OPC_OUB_GENERAL_EVENT 24 /* 0x018 */
107#define OPC_OUB_SSP_ABORT_RSP 26 /* 0x01A */
108#define OPC_OUB_SATA_ABORT_RSP 27 /* 0x01B */
109#define OPC_OUB_SAS_DIAG_MODE_START_END 28 /* 0x01C */
110#define OPC_OUB_SAS_DIAG_EXECUTE 29 /* 0x01D */
111#define OPC_OUB_GET_TIME_STAMP 30 /* 0x01E */
112#define OPC_OUB_SAS_HW_EVENT_ACK 31 /* 0x01F */
113#define OPC_OUB_PORT_CONTROL 32 /* 0x020 */
114#define OPC_OUB_SKIP_ENTRY 33 /* 0x021 */
115#define OPC_OUB_SMP_ABORT_RSP 34 /* 0x022 */
116#define OPC_OUB_GET_NVMD_DATA 35 /* 0x023 */
117#define OPC_OUB_SET_NVMD_DATA 36 /* 0x024 */
118#define OPC_OUB_DEVICE_HANDLE_REMOVAL 37 /* 0x025 */
119#define OPC_OUB_SET_DEVICE_STATE 38 /* 0x026 */
120#define OPC_OUB_GET_DEVICE_STATE 39 /* 0x027 */
121#define OPC_OUB_SET_DEV_INFO 40 /* 0x028 */
122#define OPC_OUB_SAS_RE_INITIALIZE 41 /* 0x029 */
123
124/* for phy start*/
125#define SPINHOLD_DISABLE (0x00 << 14)
126#define SPINHOLD_ENABLE (0x01 << 14)
127#define LINKMODE_SAS (0x01 << 12)
128#define LINKMODE_DSATA (0x02 << 12)
129#define LINKMODE_AUTO (0x03 << 12)
130#define LINKRATE_15 (0x01 << 8)
131#define LINKRATE_30 (0x02 << 8)
132#define LINKRATE_60 (0x04 << 8)
133
134struct mpi_msg_hdr{
135 __le32 header; /* Bits [11:0] - Message operation code */
136 /* Bits [15:12] - Message Category */
137			/* Bits [21:16] - Outbound queue ID for the
138			   operation completion message */
139			/* Bits [23:22] - Reserved */
140			/* Bits [28:24] - Buffer Count, indicates how
141			   many buffers are allocated for the message */
142 /* Bits [30:29] - Reserved */
143 /* Bits [31] - Message Valid bit */
144} __attribute__((packed, aligned(4)));
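/*
 * Editorial sketch, not part of the original commit: decoding the header
 * word per the field map above.  process_one_iomb() in pm8001_hwi.c
 * extracts the opcode the same way (cf. OPCODE_BITS below); the other
 * helpers are illustrative.
 */
static inline u32 mpi_msg_opcode(__le32 header)
{
	return le32_to_cpu(header) & 0xFFF;		/* bits [11:0] */
}
static inline u32 mpi_msg_category(__le32 header)
{
	return (le32_to_cpu(header) >> 12) & 0xF;	/* bits [15:12] */
}
static inline u32 mpi_msg_buffer_count(__le32 header)
{
	return (le32_to_cpu(header) >> 24) & 0x1F;	/* bits [28:24] */
}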
145
146
147/*
148 * brief the data structure of PHY Start Command
149 * use to describe enable the phy (64 bytes)
150 */
151struct phy_start_req {
152 __le32 tag;
153 __le32 ase_sh_lm_slr_phyid;
154 struct sas_identify_frame sas_identify;
155 u32 reserved[5];
156} __attribute__((packed, aligned(4)));
157
158
159/*
160 * brief the data structure of PHY STOP Command
161 * use to disable the phy (64 bytes)
162 */
163struct phy_stop_req {
164 __le32 tag;
165 __le32 phy_id;
166 u32 reserved[13];
167} __attribute__((packed, aligned(4)));
168
169
170/* set device bits fis - device to host */
171struct set_dev_bits_fis {
172 u8 fis_type; /* 0xA1*/
173 u8 n_i_pmport;
174 /* b7 : n Bit. Notification bit. If set device needs attention. */
175 /* b6 : i Bit. Interrupt Bit */
176 /* b5-b4: reserved2 */
177 /* b3-b0: PM Port */
178 u8 status;
179 u8 error;
180 u32 _r_a;
181} __attribute__ ((packed));
182/* PIO setup FIS - device to host */
183struct pio_setup_fis {
184 u8 fis_type; /* 0x5f */
185 u8 i_d_pmPort;
186 /* b7 : reserved */
187 /* b6 : i bit. Interrupt bit */
188 /* b5 : d bit. data transfer direction. set to 1 for device to host
189 xfer */
190 /* b4 : reserved */
191 /* b3-b0: PM Port */
192 u8 status;
193 u8 error;
194 u8 lbal;
195 u8 lbam;
196 u8 lbah;
197 u8 device;
198 u8 lbal_exp;
199 u8 lbam_exp;
200 u8 lbah_exp;
201 u8 _r_a;
202 u8 sector_count;
203 u8 sector_count_exp;
204 u8 _r_b;
205 u8 e_status;
206 u8 _r_c[2];
207 u8 transfer_count;
208} __attribute__ ((packed));
209
210/*
211 * brief the data structure of SATA Completion Response
212 * use to describe the sata task response (64 bytes)
213 */
214struct sata_completion_resp {
215 __le32 tag;
216 __le32 status;
217 __le32 param;
218 u32 sata_resp[12];
219} __attribute__((packed, aligned(4)));
220
221
222/*
223 * brief the data structure of SAS HW Event Notification
224 * use to alert the host about the hardware event(64 bytes)
225 */
226struct hw_event_resp {
227 __le32 lr_evt_status_phyid_portid;
228 __le32 evt_param;
229 __le32 npip_portstate;
230 struct sas_identify_frame sas_identify;
231 struct dev_to_host_fis sata_fis;
232} __attribute__((packed, aligned(4)));
233
234
235/*
236 * brief the data structure of REGISTER DEVICE Command
237 * use to describe MPI REGISTER DEVICE Command (64 bytes)
238 */
239
240struct reg_dev_req {
241 __le32 tag;
242 __le32 phyid_portid;
243 __le32 dtype_dlr_retry;
244 __le32 firstburstsize_ITNexustimeout;
245 u8 sas_addr[SAS_ADDR_SIZE];
246 __le32 upper_device_id;
247 u32 reserved[8];
248} __attribute__((packed, aligned(4)));
249
250
251/*
252 * brief the data structure of DEREGISTER DEVICE Command
253 * use to request spc to remove all internal resources associated
254 * with the device id (64 bytes)
255 */
256
257struct dereg_dev_req {
258 __le32 tag;
259 __le32 device_id;
260 u32 reserved[13];
261} __attribute__((packed, aligned(4)));
262
263
264/*
265 * brief the data structure of DEVICE_REGISTRATION Response
266 * use to notify the completion of the device registration (64 bytes)
267 */
268
269struct dev_reg_resp {
270 __le32 tag;
271 __le32 status;
272 __le32 device_id;
273 u32 reserved[12];
274} __attribute__((packed, aligned(4)));
275
276
277/*
278 * brief the data structure of Local PHY Control Command
279 * use to issue PHY CONTROL to local phy (64 bytes)
280 */
281struct local_phy_ctl_req {
282 __le32 tag;
283 __le32 phyop_phyid;
284 u32 reserved1[13];
285} __attribute__((packed, aligned(4)));
286
287
288/**
289 * brief the data structure of Local Phy Control Response
290 * use to describe MPI Local Phy Control Response (64 bytes)
291 */
292struct local_phy_ctl_resp {
293 __le32 tag;
294 __le32 phyop_phyid;
295 __le32 status;
296 u32 reserved[12];
297} __attribute__((packed, aligned(4)));
298
299
300#define OP_BITS 0x0000FF00
301#define ID_BITS 0x0000000F
302
303/*
304 * brief the data structure of PORT Control Command
305 * use to control port properties (64 bytes)
306 */
307
308struct port_ctl_req {
309 __le32 tag;
310 __le32 portop_portid;
311 __le32 param0;
312 __le32 param1;
313 u32 reserved1[11];
314} __attribute__((packed, aligned(4)));
315
316
317/*
318 * brief the data structure of HW Event Ack Command
319 * use to acknowledge receive HW event (64 bytes)
320 */
321
322struct hw_event_ack_req {
323 __le32 tag;
324 __le32 sea_phyid_portid;
325 __le32 param0;
326 __le32 param1;
327 u32 reserved1[11];
328} __attribute__((packed, aligned(4)));
329
330
331/*
332 * brief the data structure of SSP Completion Response
333 * use to indicate a SSP Completion (n bytes)
334 */
335struct ssp_completion_resp {
336 __le32 tag;
337 __le32 status;
338 __le32 param;
339 __le32 ssptag_rescv_rescpad;
340 struct ssp_response_iu ssp_resp_iu;
341 __le32 residual_count;
342} __attribute__((packed, aligned(4)));
343
344
345#define SSP_RESCV_BIT 0x00010000
346
347/*
348 * brief the data structure of SATA Event Response
349 * use to indicate a SATA event (64 bytes)
350 */
351
352struct sata_event_resp {
353 __le32 tag;
354 __le32 event;
355 __le32 port_id;
356 __le32 device_id;
357 u32 reserved[11];
358} __attribute__((packed, aligned(4)));
359
360/*
361 * brief the data structure of SSP Event Response
362 * use to indicate an SSP event (64 bytes)
363 */
364
365struct ssp_event_resp {
366 __le32 tag;
367 __le32 event;
368 __le32 port_id;
369 __le32 device_id;
370 u32 reserved[11];
371} __attribute__((packed, aligned(4)));
372
373/**
374 * brief the data structure of General Event Notification Response
375 * use to describe MPI General Event Notification Response (64 bytes)
376 */
377struct general_event_resp {
378 __le32 status;
379 __le32 inb_IOMB_payload[14];
380} __attribute__((packed, aligned(4)));
381
382
383#define GENERAL_EVENT_PAYLOAD 14
384#define OPCODE_BITS 0x00000fff
385
386/*
387 * brief the data structure of SMP Request Command
388 * use to describe MPI SMP REQUEST Command (64 bytes)
389 */
390struct smp_req {
391 __le32 tag;
392 __le32 device_id;
393 __le32 len_ip_ir;
394 /* Bits [0] - Indirect response */
395 /* Bits [1] - Indirect Payload */
396 /* Bits [15:2] - Reserved */
397 /* Bits [23:16] - direct payload Len */
398 /* Bits [31:24] - Reserved */
399 u8 smp_req16[16];
400 union {
401 u8 smp_req[32];
402 struct {
403 __le64 long_req_addr;/* sg dma address, LE */
404 __le32 long_req_size;/* LE */
405 u32 _r_a;
406 __le64 long_resp_addr;/* sg dma address, LE */
407 __le32 long_resp_size;/* LE */
408 u32 _r_b;
409 } long_smp_req;/* sequencer extension */
410 };
411} __attribute__((packed, aligned(4)));
412/*
413 * brief the data structure of SMP Completion Response
414 * use to describe MPI SMP Completion Response (64 bytes)
415 */
416struct smp_completion_resp {
417 __le32 tag;
418 __le32 status;
419 __le32 param;
420 __le32 _r_a[12];
421} __attribute__((packed, aligned(4)));
422
423/*
424 * brief the data structure of SSP SMP SATA Abort Command
425 * use to describe MPI SSP SMP & SATA Abort Command (64 bytes)
426 */
427struct task_abort_req {
428 __le32 tag;
429 __le32 device_id;
430 __le32 tag_to_abort;
431 __le32 abort_all;
432 u32 reserved[11];
433} __attribute__((packed, aligned(4)));
434
435/* These flags used for SSP SMP & SATA Abort */
436#define ABORT_MASK 0x3
437#define ABORT_SINGLE 0x0
438#define ABORT_ALL 0x1
439
440/**
441 * brief the data structure of SSP SATA SMP Abort Response
442 * use to describe SSP SMP & SATA Abort Response ( 64 bytes)
443 */
444struct task_abort_resp {
445 __le32 tag;
446 __le32 status;
447 __le32 scp;
448 u32 reserved[12];
449} __attribute__((packed, aligned(4)));
450
451
452/**
453 * brief the data structure of SAS Diagnostic Start/End Command
454 * use to describe MPI SAS Diagnostic Start/End Command (64 bytes)
455 */
456struct sas_diag_start_end_req {
457 __le32 tag;
458 __le32 operation_phyid;
459 u32 reserved[13];
460} __attribute__((packed, aligned(4)));
461
462
463/**
464 * brief the data structure of SAS Diagnostic Execute Command
465 * use to describe MPI SAS Diagnostic Execute Command (64 bytes)
466 */
467struct sas_diag_execute_req{
468 __le32 tag;
469 __le32 cmdtype_cmddesc_phyid;
470 __le32 pat1_pat2;
471 __le32 threshold;
472 __le32 codepat_errmsk;
473 __le32 pmon;
474 __le32 pERF1CTL;
475 u32 reserved[8];
476} __attribute__((packed, aligned(4)));
477
478
479#define SAS_DIAG_PARAM_BYTES 24
480
481/*
482 * brief the data structure of Set Device State Command
483 * use to describe MPI Set Device State Command (64 bytes)
484 */
485struct set_dev_state_req {
486 __le32 tag;
487 __le32 device_id;
488 __le32 nds;
489 u32 reserved[12];
490} __attribute__((packed, aligned(4)));
491
492/*
493 * brief the data structure of sas_re_initialization
494 */
495struct sas_re_initialization_req {
496
497 __le32 tag;
498 __le32 SSAHOLT;/* bit29-set max port;
499 ** bit28-set open reject cmd retries.
500 ** bit27-set open reject data retries.
501 ** bit26-set open reject option, remap:1 or not:0.
502 ** bit25-set sata head of line time out.
503 */
504 __le32 reserved_maxPorts;
505 __le32 open_reject_cmdretries_data_retries;/* cmd retries: 31-bit16;
506 * data retries: bit15-bit0.
507 */
508 __le32 sata_hol_tmo;
509 u32 reserved1[10];
510} __attribute__((packed, aligned(4)));
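/*
 * Editorial note: pm8001_chip_sas_re_initialization() in pm8001_hwi.c
 * programs SSAHOLT = 0xd << 25.  0xd is 0b1101, so bits 28 (set open
 * reject cmd retries), 27 (set open reject data retries) and 25 (set
 * SATA head-of-line timeout) are set while bits 26 (remap option) and
 * 29 (max port) stay clear, matching the
 * open_reject_cmdretries_data_retries and sata_hol_tmo values the
 * function also writes.
 */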
511
512/*
513 * brief the data structure of SATA Start Command
514 * use to describe MPI SATA IO Start Command (64 bytes)
515 */
516
517struct sata_start_req {
518 __le32 tag;
519 __le32 device_id;
520 __le32 data_len;
521 __le32 ncqtag_atap_dir_m;
522 struct host_to_dev_fis sata_fis;
523 u32 reserved1;
524 u32 reserved2;
525 u32 addr_low;
526 u32 addr_high;
527 __le32 len;
528 __le32 esgl;
529} __attribute__((packed, aligned(4)));
530
531/**
532 * brief the data structure of SSP INI TM Start Command
533 * use to describe MPI SSP INI TM Start Command (64 bytes)
534 */
535struct ssp_ini_tm_start_req {
536 __le32 tag;
537 __le32 device_id;
538 __le32 relate_tag;
539 __le32 tmf;
540 u8 lun[8];
541 __le32 ds_ads_m;
542 u32 reserved[8];
543} __attribute__((packed, aligned(4)));
544
545
546struct ssp_info_unit {
547 u8 lun[8];/* SCSI Logical Unit Number */
548 u8 reserved1;/* reserved */
549 u8 efb_prio_attr;
550 /* B7 : enabledFirstBurst */
551 /* B6-3 : taskPriority */
552 /* B2-0 : taskAttribute */
553 u8 reserved2; /* reserved */
554 u8 additional_cdb_len;
555 /* B7-2 : additional_cdb_len */
556 /* B1-0 : reserved */
557 u8 cdb[16];/* The SCSI CDB up to 16 bytes length */
558} __attribute__((packed, aligned(4)));
559
560
561/**
562 * brief the data structure of SSP INI IO Start Command
563 * use to describe MPI SSP INI IO Start Command (64 bytes)
564 */
565struct ssp_ini_io_start_req {
566 __le32 tag;
567 __le32 device_id;
568 __le32 data_len;
569 __le32 dir_m_tlr;
570 struct ssp_info_unit ssp_iu;
571 __le32 addr_low;
572 __le32 addr_high;
573 __le32 len;
574 __le32 esgl;
575} __attribute__((packed, aligned(4)));
576
577
578/**
579 * brief the data structure of Firmware download
580 * used to describe the MPI FW DOWNLOAD Command (64 bytes)
581 */
582struct fw_flash_Update_req {
583 __le32 tag;
584 __le32 cur_image_offset;
585 __le32 cur_image_len;
586 __le32 total_image_len;
587 u32 reserved0[7];
588 __le32 sgl_addr_lo;
589 __le32 sgl_addr_hi;
590 __le32 len;
591 __le32 ext_reserved;
592} __attribute__((packed, aligned(4)));
593
594
595#define FWFLASH_IOMB_RESERVED_LEN 0x07
596/**
597 * brief the data structure of FW_FLASH_UPDATE Response
598 * used to describe the MPI FW_FLASH_UPDATE Response (64 bytes)
599 *
600 */
601struct fw_flash_Update_resp {
602 dma_addr_t tag;
603 __le32 status;
604 u32 reserved[13];
605} __attribute__((packed, aligned(4)));
606
607
608/**
609 * brief the data structure of Get NVM Data Command
610 * used to get data from the NVM in the HBA (64 bytes)
611 */
612struct get_nvm_data_req {
613 __le32 tag;
614 __le32 len_ir_vpdd;
615 __le32 vpd_offset;
616 u32 reserved[8];
617 __le32 resp_addr_lo;
618 __le32 resp_addr_hi;
619 __le32 resp_len;
620 u32 reserved1;
621} __attribute__((packed, aligned(4)));
622
623
624struct set_nvm_data_req {
625 __le32 tag;
626 __le32 len_ir_vpdd;
627 __le32 vpd_offset;
628 u32 reserved[8];
629 __le32 resp_addr_lo;
630 __le32 resp_addr_hi;
631 __le32 resp_len;
632 u32 reserved1;
633} __attribute__((packed, aligned(4)));
634
635
636#define TWI_DEVICE 0x0
637#define C_SEEPROM 0x1
638#define VPD_FLASH 0x4
639#define AAP1_RDUMP 0x5
640#define IOP_RDUMP 0x6
641#define EXPAN_ROM 0x7
642
643#define IPMode 0x80000000
644#define NVMD_TYPE 0x0000000F
645#define NVMD_STAT 0x0000FFFF
646#define NVMD_LEN 0xFF000000
647/**
648 * brief the data structure of Get NVMD Data Response
649 * used to describe the MPI Get NVMD Data Response (64 bytes)
650 */
651struct get_nvm_data_resp {
652 __le32 tag;
653 __le32 ir_tda_bn_dps_das_nvm;
654 __le32 dlen_status;
655 __le32 nvm_data[12];
656} __attribute__((packed, aligned(4)));
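
/*
 * Sketch (an assumption about field packing, consistent with the masks
 * above): the completion status sits in the low 16 bits of dlen_status
 * (NVMD_STAT) and the returned data length in the top byte (NVMD_LEN).
 * The helper name is hypothetical.
 */
static inline void sketch_decode_nvmd_resp(const struct get_nvm_data_resp *resp,
					   u32 *status, u32 *dlen)
{
	u32 dlen_status = le32_to_cpu(resp->dlen_status);

	*status = dlen_status & NVMD_STAT;
	*dlen = (dlen_status & NVMD_LEN) >> 24;
}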
657
658
659/**
660 * brief the data structure of SAS Diagnostic Start/End Response
661 * used to describe the MPI SAS Diagnostic Start/End Response (64 bytes)
662 *
663 */
664struct sas_diag_start_end_resp {
665 __le32 tag;
666 __le32 status;
667 u32 reserved[13];
668} __attribute__((packed, aligned(4)));
669
670
671/**
672 * brief the data structure of SAS Diagnostic Execute Response
673 * used to describe the MPI SAS Diagnostic Execute Response (64 bytes)
674 *
675 */
676struct sas_diag_execute_resp {
677 __le32 tag;
678 __le32 cmdtype_cmddesc_phyid;
679 __le32 Status;
680 __le32 ReportData;
681 u32 reserved[11];
682} __attribute__((packed, aligned(4)));
683
684
685/**
686 * brief the data structure of Set Device State Response
687 * used to describe the MPI Set Device State Response (64 bytes)
688 *
689 */
690struct set_dev_state_resp {
691 __le32 tag;
692 __le32 status;
693 __le32 device_id;
694 __le32 pds_nds;
695 u32 reserved[11];
696} __attribute__((packed, aligned(4)));
697
698
699#define NDS_BITS 0x0F
700#define PDS_BITS 0xF0
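
/*
 * Sketch: extracting the previous (PDS, bits 7-4) and new (NDS, bits 3-0)
 * device state from a Set Device State Response. Hypothetical helper, shown
 * only to illustrate how the two masks above are meant to be applied.
 */
static inline void sketch_decode_dev_state(const struct set_dev_state_resp *resp,
					   u32 *pds, u32 *nds)
{
	u32 pds_nds = le32_to_cpu(resp->pds_nds);

	*pds = (pds_nds & PDS_BITS) >> 4;
	*nds = pds_nds & NDS_BITS;
}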
701
702/*
703 * HW Events type
704 */
705
706#define HW_EVENT_RESET_START 0x01
707#define HW_EVENT_CHIP_RESET_COMPLETE 0x02
708#define HW_EVENT_PHY_STOP_STATUS 0x03
709#define HW_EVENT_SAS_PHY_UP 0x04
710#define HW_EVENT_SATA_PHY_UP 0x05
711#define HW_EVENT_SATA_SPINUP_HOLD 0x06
712#define HW_EVENT_PHY_DOWN 0x07
713#define HW_EVENT_PORT_INVALID 0x08
714#define HW_EVENT_BROADCAST_CHANGE 0x09
715#define HW_EVENT_PHY_ERROR 0x0A
716#define HW_EVENT_BROADCAST_SES 0x0B
717#define HW_EVENT_INBOUND_CRC_ERROR 0x0C
718#define HW_EVENT_HARD_RESET_RECEIVED 0x0D
719#define HW_EVENT_MALFUNCTION 0x0E
720#define HW_EVENT_ID_FRAME_TIMEOUT 0x0F
721#define HW_EVENT_BROADCAST_EXP 0x10
722#define HW_EVENT_PHY_START_STATUS 0x11
723#define HW_EVENT_LINK_ERR_INVALID_DWORD 0x12
724#define HW_EVENT_LINK_ERR_DISPARITY_ERROR 0x13
725#define HW_EVENT_LINK_ERR_CODE_VIOLATION 0x14
726#define HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH 0x15
727#define HW_EVENT_LINK_ERR_PHY_RESET_FAILED 0x16
728#define HW_EVENT_PORT_RECOVERY_TIMER_TMO 0x17
729#define HW_EVENT_PORT_RECOVER 0x18
730#define HW_EVENT_PORT_RESET_TIMER_TMO 0x19
731#define HW_EVENT_PORT_RESET_COMPLETE 0x20
732#define EVENT_BROADCAST_ASYNCH_EVENT 0x21
733
734/* port state */
735#define PORT_NOT_ESTABLISHED 0x00
736#define PORT_VALID 0x01
737#define PORT_LOSTCOMM 0x02
738#define PORT_IN_RESET 0x04
739#define PORT_INVALID 0x08
740
741/*
742 * SSP/SMP/SATA IO Completion Status values
743 */
744
745#define IO_SUCCESS 0x00
746#define IO_ABORTED 0x01
747#define IO_OVERFLOW 0x02
748#define IO_UNDERFLOW 0x03
749#define IO_FAILED 0x04
750#define IO_ABORT_RESET 0x05
751#define IO_NOT_VALID 0x06
752#define IO_NO_DEVICE 0x07
753#define IO_ILLEGAL_PARAMETER 0x08
754#define IO_LINK_FAILURE 0x09
755#define IO_PROG_ERROR 0x0A
756#define IO_EDC_IN_ERROR 0x0B
757#define IO_EDC_OUT_ERROR 0x0C
758#define IO_ERROR_HW_TIMEOUT 0x0D
759#define IO_XFER_ERROR_BREAK 0x0E
760#define IO_XFER_ERROR_PHY_NOT_READY 0x0F
761#define IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED 0x10
762#define IO_OPEN_CNX_ERROR_ZONE_VIOLATION 0x11
763#define IO_OPEN_CNX_ERROR_BREAK 0x12
764#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS 0x13
765#define IO_OPEN_CNX_ERROR_BAD_DESTINATION 0x14
766#define IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED 0x15
767#define IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY 0x16
768#define IO_OPEN_CNX_ERROR_WRONG_DESTINATION 0x17
769#define IO_OPEN_CNX_ERROR_UNKNOWN_ERROR 0x18
770#define IO_XFER_ERROR_NAK_RECEIVED 0x19
771#define IO_XFER_ERROR_ACK_NAK_TIMEOUT 0x1A
772#define IO_XFER_ERROR_PEER_ABORTED 0x1B
773#define IO_XFER_ERROR_RX_FRAME 0x1C
774#define IO_XFER_ERROR_DMA 0x1D
775#define IO_XFER_ERROR_CREDIT_TIMEOUT 0x1E
776#define IO_XFER_ERROR_SATA_LINK_TIMEOUT 0x1F
777#define IO_XFER_ERROR_SATA 0x20
778#define IO_XFER_ERROR_ABORTED_DUE_TO_SRST 0x22
779#define IO_XFER_ERROR_REJECTED_NCQ_MODE 0x21
780#define IO_XFER_ERROR_ABORTED_NCQ_MODE 0x23
781#define IO_XFER_OPEN_RETRY_TIMEOUT 0x24
782#define IO_XFER_SMP_RESP_CONNECTION_ERROR 0x25
783#define IO_XFER_ERROR_UNEXPECTED_PHASE 0x26
784#define IO_XFER_ERROR_XFER_RDY_OVERRUN 0x27
785#define IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED 0x28
786
787#define IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT 0x30
788#define IO_XFER_ERROR_CMD_ISSUE_BREAK_BEFORE_ACK_NAK 0x31
789#define IO_XFER_ERROR_CMD_ISSUE_PHY_DOWN_BEFORE_ACK_NAK 0x32
790
791#define IO_XFER_ERROR_OFFSET_MISMATCH 0x34
792#define IO_XFER_ERROR_XFER_ZERO_DATA_LEN 0x35
793#define IO_XFER_CMD_FRAME_ISSUED 0x36
794#define IO_ERROR_INTERNAL_SMP_RESOURCE 0x37
795#define IO_PORT_IN_RESET 0x38
796#define IO_DS_NON_OPERATIONAL 0x39
797#define IO_DS_IN_RECOVERY 0x3A
798#define IO_TM_TAG_NOT_FOUND 0x3B
799#define IO_XFER_PIO_SETUP_ERROR 0x3C
800#define IO_SSP_EXT_IU_ZERO_LEN_ERROR 0x3D
801#define IO_DS_IN_ERROR 0x3E
802#define IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY 0x3F
803#define IO_ABORT_IN_PROGRESS 0x40
804#define IO_ABORT_DELAYED 0x41
805#define IO_INVALID_LENGTH 0x42
806
807/* WARNING: This error code must always be the last number.
808 * If you add an error code, update this value as well;
809 * it is used as an index.
810 */
811#define IO_ERROR_UNKNOWN_GENERIC 0x43
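
/*
 * Because IO_ERROR_UNKNOWN_GENERIC is guaranteed to be the highest status
 * value, it can bound a lookup table indexed by completion status. A minimal
 * sketch; the table contents are placeholders, not the driver's strings.
 */
static inline const char *sketch_io_status_name(u32 status)
{
	static const char * const names[IO_ERROR_UNKNOWN_GENERIC + 1] = {
		[IO_SUCCESS]	= "IO_SUCCESS",
		[IO_ABORTED]	= "IO_ABORTED",
		/* ... one entry per status code above ... */
		[IO_ERROR_UNKNOWN_GENERIC] = "IO_ERROR_UNKNOWN_GENERIC",
	};

	if (status > IO_ERROR_UNKNOWN_GENERIC || !names[status])
		return "unknown";
	return names[status];
}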
812
813/* MSGU CONFIGURATION TABLE*/
814
815#define SPC_MSGU_CFG_TABLE_UPDATE 0x01/* Inbound doorbell bit0 */
816#define SPC_MSGU_CFG_TABLE_RESET 0x02/* Inbound doorbell bit1 */
817#define SPC_MSGU_CFG_TABLE_FREEZE 0x04/* Inbound doorbell bit2 */
818#define SPC_MSGU_CFG_TABLE_UNFREEZE 0x08/* Inbound doorbell bit3 */
819#define MSGU_IBDB_SET 0x04
820#define MSGU_HOST_INT_STATUS 0x08
821#define MSGU_HOST_INT_MASK 0x0C
822#define MSGU_IOPIB_INT_STATUS 0x18
823#define MSGU_IOPIB_INT_MASK 0x1C
824#define MSGU_IBDB_CLEAR 0x20/* RevB - Host not use */
825#define MSGU_MSGU_CONTROL 0x24
826#define MSGU_ODR 0x3C/* RevB */
827#define MSGU_ODCR 0x40/* RevB */
828#define MSGU_SCRATCH_PAD_0 0x44
829#define MSGU_SCRATCH_PAD_1 0x48
830#define MSGU_SCRATCH_PAD_2 0x4C
831#define MSGU_SCRATCH_PAD_3 0x50
832#define MSGU_HOST_SCRATCH_PAD_0 0x54
833#define MSGU_HOST_SCRATCH_PAD_1 0x58
834#define MSGU_HOST_SCRATCH_PAD_2 0x5C
835#define MSGU_HOST_SCRATCH_PAD_3 0x60
836#define MSGU_HOST_SCRATCH_PAD_4 0x64
837#define MSGU_HOST_SCRATCH_PAD_5 0x68
838#define MSGU_HOST_SCRATCH_PAD_6 0x6C
839#define MSGU_HOST_SCRATCH_PAD_7 0x70
840#define MSGU_ODMR 0x74/* RevB */
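
/*
 * Sketch: ringing the inbound doorbell so the firmware re-reads the
 * configuration table. Assumes msgu_base is the ioremap()ed MSGU register
 * window; the name is hypothetical, not this driver's actual accessor.
 */
static inline void sketch_ring_cfg_update(void __iomem *msgu_base)
{
	writel(SPC_MSGU_CFG_TABLE_UPDATE, msgu_base + MSGU_IBDB_SET);
}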
841
842/* bit definition for ODMR register */
843#define ODMR_MASK_ALL 0xFFFFFFFF/* mask all
844 interrupt vectors */
845#define ODMR_CLEAR_ALL 0/* clear all
846 interrupt vectors */
847/* bit definition for ODCR register */
848#define ODCR_CLEAR_ALL 0xFFFFFFFF /* clear all
849 interrupt vectors */
850/* MSI-X interrupts */
851#define MSIX_TABLE_OFFSET 0x2000
852#define MSIX_TABLE_ELEMENT_SIZE 0x10
853#define MSIX_INTERRUPT_CONTROL_OFFSET 0xC
854#define MSIX_TABLE_BASE (MSIX_TABLE_OFFSET + MSIX_INTERRUPT_CONTROL_OFFSET)
855#define MSIX_INTERRUPT_DISABLE 0x1
856#define MSIX_INTERRUPT_ENABLE 0x0
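
/*
 * Sketch: each MSI-X vector owns a 16-byte table entry, so its interrupt
 * control register sits at MSIX_TABLE_BASE + vector * MSIX_TABLE_ELEMENT_SIZE.
 * bar0 is an assumed ioremap()ed register window (hypothetical name).
 */
static inline void sketch_msix_mask(void __iomem *bar0, u32 vector, bool disable)
{
	u32 off = MSIX_TABLE_BASE + vector * MSIX_TABLE_ELEMENT_SIZE;

	writel(disable ? MSIX_INTERRUPT_DISABLE : MSIX_INTERRUPT_ENABLE,
	       bar0 + off);
}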
857
858
859/* state definition for Scratch Pad1 register */
860#define SCRATCH_PAD1_POR 0x00 /* power on reset state */
861#define SCRATCH_PAD1_SFR 0x01 /* soft reset state */
862#define SCRATCH_PAD1_ERR 0x02 /* error state */
863#define SCRATCH_PAD1_RDY 0x03 /* ready state */
864#define SCRATCH_PAD1_RST 0x04 /* soft reset toggle flag */
865#define SCRATCH_PAD1_AAP1RDY_RST 0x08 /* AAP1 ready for soft reset */
866#define SCRATCH_PAD1_STATE_MASK 0xFFFFFFF0 /* ScratchPad1
867 Mask, bit1-0 State, bit2 Soft Reset, bit3 FW RDY for Soft Reset */
868#define SCRATCH_PAD1_RESERVED 0x000003F8 /* Scratch Pad1
869 Reserved bits 3 to 9 */
870
871/* state definition for Scratch Pad2 register */
872#define SCRATCH_PAD2_POR 0x00 /* power on state */
873#define SCRATCH_PAD2_SFR 0x01 /* soft reset state */
874#define SCRATCH_PAD2_ERR 0x02 /* error state */
875#define SCRATCH_PAD2_RDY 0x03 /* ready state */
876#define SCRATCH_PAD2_FWRDY_RST 0x04 /* FW ready for soft reset flag*/
877#define SCRATCH_PAD2_IOPRDY_RST 0x08 /* IOP ready for soft reset */
878#define SCRATCH_PAD2_STATE_MASK 0xFFFFFFF4 /* ScratchPad 2
879 Mask, bit1-0 State */
880#define SCRATCH_PAD2_RESERVED 0x000003FC /* Scratch Pad2
881 Reserved bits 2 to 9 */
882
883#define SCRATCH_PAD_ERROR_MASK 0xFFFFFC00 /* Error mask bits */
884#define SCRATCH_PAD_STATE_MASK 0x00000003 /* State Mask bits */
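
/*
 * Sketch: polling Scratch Pad 1 for firmware readiness with the masks above.
 * scratch_pad1 is an assumed ioremap()ed address of MSGU_SCRATCH_PAD_1.
 */
static inline bool sketch_fw_ready(void __iomem *scratch_pad1)
{
	return (readl(scratch_pad1) & SCRATCH_PAD_STATE_MASK) ==
		SCRATCH_PAD1_RDY;
}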
885
886/* main configuration offset - byte offset */
887#define MAIN_SIGNATURE_OFFSET 0x00/* DWORD 0x00 */
888#define MAIN_INTERFACE_REVISION 0x04/* DWORD 0x01 */
889#define MAIN_FW_REVISION 0x08/* DWORD 0x02 */
890#define MAIN_MAX_OUTSTANDING_IO_OFFSET 0x0C/* DWORD 0x03 */
891#define MAIN_MAX_SGL_OFFSET 0x10/* DWORD 0x04 */
892#define MAIN_CNTRL_CAP_OFFSET 0x14/* DWORD 0x05 */
893#define MAIN_GST_OFFSET 0x18/* DWORD 0x06 */
894#define MAIN_IBQ_OFFSET 0x1C/* DWORD 0x07 */
895#define MAIN_OBQ_OFFSET 0x20/* DWORD 0x08 */
896#define MAIN_IQNPPD_HPPD_OFFSET 0x24/* DWORD 0x09 */
897#define MAIN_OB_HW_EVENT_PID03_OFFSET 0x28/* DWORD 0x0A */
898#define MAIN_OB_HW_EVENT_PID47_OFFSET 0x2C/* DWORD 0x0B */
899#define MAIN_OB_NCQ_EVENT_PID03_OFFSET 0x30/* DWORD 0x0C */
900#define MAIN_OB_NCQ_EVENT_PID47_OFFSET 0x34/* DWORD 0x0D */
901#define MAIN_TITNX_EVENT_PID03_OFFSET 0x38/* DWORD 0x0E */
902#define MAIN_TITNX_EVENT_PID47_OFFSET 0x3C/* DWORD 0x0F */
903#define MAIN_OB_SSP_EVENT_PID03_OFFSET 0x40/* DWORD 0x10 */
904#define MAIN_OB_SSP_EVENT_PID47_OFFSET 0x44/* DWORD 0x11 */
905#define MAIN_OB_SMP_EVENT_PID03_OFFSET 0x48/* DWORD 0x12 */
906#define MAIN_OB_SMP_EVENT_PID47_OFFSET 0x4C/* DWORD 0x13 */
907#define MAIN_EVENT_LOG_ADDR_HI 0x50/* DWORD 0x14 */
908#define MAIN_EVENT_LOG_ADDR_LO 0x54/* DWORD 0x15 */
909#define MAIN_EVENT_LOG_BUFF_SIZE 0x58/* DWORD 0x16 */
910#define MAIN_EVENT_LOG_OPTION 0x5C/* DWORD 0x17 */
911#define MAIN_IOP_EVENT_LOG_ADDR_HI 0x60/* DWORD 0x18 */
912#define MAIN_IOP_EVENT_LOG_ADDR_LO 0x64/* DWORD 0x19 */
913#define MAIN_IOP_EVENT_LOG_BUFF_SIZE 0x68/* DWORD 0x1A */
914#define MAIN_IOP_EVENT_LOG_OPTION 0x6C/* DWORD 0x1B */
915#define MAIN_FATAL_ERROR_INTERRUPT 0x70/* DWORD 0x1C */
916#define MAIN_FATAL_ERROR_RDUMP0_OFFSET 0x74/* DWORD 0x1D */
917#define MAIN_FATAL_ERROR_RDUMP0_LENGTH 0x78/* DWORD 0x1E */
918#define MAIN_FATAL_ERROR_RDUMP1_OFFSET 0x7C/* DWORD 0x1F */
919#define MAIN_FATAL_ERROR_RDUMP1_LENGTH 0x80/* DWORD 0x20 */
920#define MAIN_HDA_FLAGS_OFFSET 0x84/* DWORD 0x21 */
921#define MAIN_ANALOG_SETUP_OFFSET 0x88/* DWORD 0x22 */
922
923/* General Status Table offset - byte offset */
924#define GST_GSTLEN_MPIS_OFFSET 0x00
925#define GST_IQ_FREEZE_STATE0_OFFSET 0x04
926#define GST_IQ_FREEZE_STATE1_OFFSET 0x08
927#define GST_MSGUTCNT_OFFSET 0x0C
928#define GST_IOPTCNT_OFFSET 0x10
929#define GST_PHYSTATE_OFFSET 0x18
930#define GST_PHYSTATE0_OFFSET 0x18
931#define GST_PHYSTATE1_OFFSET 0x1C
932#define GST_PHYSTATE2_OFFSET 0x20
933#define GST_PHYSTATE3_OFFSET 0x24
934#define GST_PHYSTATE4_OFFSET 0x28
935#define GST_PHYSTATE5_OFFSET 0x2C
936#define GST_PHYSTATE6_OFFSET 0x30
937#define GST_PHYSTATE7_OFFSET 0x34
938#define GST_RERRINFO_OFFSET 0x44
939
940/* General Status Table - MPI state */
941#define GST_MPI_STATE_UNINIT 0x00
942#define GST_MPI_STATE_INIT 0x01
943#define GST_MPI_STATE_TERMINATION 0x02
944#define GST_MPI_STATE_ERROR 0x03
945#define GST_MPI_STATE_MASK 0x07
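
/*
 * Sketch: the MPI state lives in the low bits of the GST length/state dword,
 * so successful initialization can be checked as below. gst_base is an
 * assumed ioremap()ed pointer to the General Status Table.
 */
static inline bool sketch_mpi_initialized(void __iomem *gst_base)
{
	u32 gst_len_mpistate = readl(gst_base + GST_GSTLEN_MPIS_OFFSET);

	return (gst_len_mpistate & GST_MPI_STATE_MASK) == GST_MPI_STATE_INIT;
}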
946
947#define MBIC_NMI_ENABLE_VPE0_IOP 0x000418
948#define MBIC_NMI_ENABLE_VPE0_AAP1 0x000418
949/* PCIE registers - BAR2(0x18), BAR1(win) 0x010000 */
950#define PCIE_EVENT_INTERRUPT_ENABLE 0x003040
951#define PCIE_EVENT_INTERRUPT 0x003044
952#define PCIE_ERROR_INTERRUPT_ENABLE 0x003048
953#define PCIE_ERROR_INTERRUPT 0x00304C
954/* signature definition for host scratch pad0 register */
955#define SPC_SOFT_RESET_SIGNATURE 0x252acbcd
956/* Signature for Soft Reset */
957
958/* SPC Reset register - BAR4(0x20), BAR2(win) (need dynamic mapping) */
959#define SPC_REG_RESET 0x000000/* reset register */
960
961/* bit definition for SPC_RESET register */
962#define SPC_REG_RESET_OSSP 0x00000001
963#define SPC_REG_RESET_RAAE 0x00000002
964#define SPC_REG_RESET_PCS_SPBC 0x00000004
965#define SPC_REG_RESET_PCS_IOP_SS 0x00000008
966#define SPC_REG_RESET_PCS_AAP1_SS 0x00000010
967#define SPC_REG_RESET_PCS_AAP2_SS 0x00000020
968#define SPC_REG_RESET_PCS_LM 0x00000040
969#define SPC_REG_RESET_PCS 0x00000080
970#define SPC_REG_RESET_GSM 0x00000100
971#define SPC_REG_RESET_DDR2 0x00010000
972#define SPC_REG_RESET_BDMA_CORE 0x00020000
973#define SPC_REG_RESET_BDMA_SXCBI 0x00040000
974#define SPC_REG_RESET_PCIE_AL_SXCBI 0x00080000
975#define SPC_REG_RESET_PCIE_PWR 0x00100000
976#define SPC_REG_RESET_PCIE_SFT 0x00200000
977#define SPC_REG_RESET_PCS_SXCBI 0x00400000
978#define SPC_REG_RESET_LMS_SXCBI 0x00800000
979#define SPC_REG_RESET_PMIC_SXCBI 0x01000000
980#define SPC_REG_RESET_PMIC_CORE 0x02000000
981#define SPC_REG_RESET_PCIE_PC_SXCBI 0x04000000
982#define SPC_REG_RESET_DEVICE 0x80000000
983
984/* registers for BAR Shifting - BAR2(0x18), BAR1(win) */
985#define SPC_IBW_AXI_TRANSLATION_LOW 0x003258
986
987#define MBIC_AAP1_ADDR_BASE 0x060000
988#define MBIC_IOP_ADDR_BASE 0x070000
989#define GSM_ADDR_BASE 0x0700000
990/* Dynamic map through Bar4 - 0x00700000 */
991#define GSM_CONFIG_RESET 0x00000000
992#define RAM_ECC_DB_ERR 0x00000018
993#define GSM_READ_ADDR_PARITY_INDIC 0x00000058
994#define GSM_WRITE_ADDR_PARITY_INDIC 0x00000060
995#define GSM_WRITE_DATA_PARITY_INDIC 0x00000068
996#define GSM_READ_ADDR_PARITY_CHECK 0x00000038
997#define GSM_WRITE_ADDR_PARITY_CHECK 0x00000040
998#define GSM_WRITE_DATA_PARITY_CHECK 0x00000048
999
1000#define RB6_ACCESS_REG 0x6A0000
1001#define HDAC_EXEC_CMD 0x0002
1002#define HDA_C_PA 0xcb
1003#define HDA_SEQ_ID_BITS 0x00ff0000
1004#define HDA_GSM_OFFSET_BITS 0x00FFFFFF
1005#define MBIC_AAP1_ADDR_BASE 0x060000
1006#define MBIC_IOP_ADDR_BASE 0x070000
1007#define GSM_ADDR_BASE 0x0700000
1008#define SPC_TOP_LEVEL_ADDR_BASE 0x000000
1009#define GSM_CONFIG_RESET_VALUE 0x00003b00
1010#define GPIO_ADDR_BASE 0x00090000
1011#define GPIO_GPIO_0_0UTPUT_CTL_OFFSET 0x0000010c
1012
1013/* RB6 offset */
1014#define SPC_RB6_OFFSET 0x80C0
1015/* Magic number of soft reset for RB6 */
1016#define RB6_MAGIC_NUMBER_RST 0x1234
1017
1018/* Device Register status */
1019#define DEVREG_SUCCESS 0x00
1020#define DEVREG_FAILURE_OUT_OF_RESOURCE 0x01
1021#define DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED 0x02
1022#define DEVREG_FAILURE_INVALID_PHY_ID 0x03
1023#define DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED 0x04
1024#define DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE 0x05
1025#define DEVREG_FAILURE_PORT_NOT_VALID_STATE 0x06
1026#define DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID 0x07
1027
1028#endif
1029
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
new file mode 100644
index 000000000000..f8c86b28f03f
--- /dev/null
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -0,0 +1,901 @@
1/*
2 * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
3 *
4 * Copyright (c) 2008-2009 USI Co., Ltd.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification.
13 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14 * substantially similar to the "NO WARRANTY" disclaimer below
15 * ("Disclaimer") and any redistribution must be conditioned upon
16 * including a substantially similar Disclaimer requirement for further
17 * binary redistribution.
18 * 3. Neither the names of the above-listed copyright holders nor the names
19 * of any contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * Alternatively, this software may be distributed under the terms of the
23 * GNU General Public License ("GPL") version 2 as published by the Free
24 * Software Foundation.
25 *
26 * NO WARRANTY
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
35 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
36 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGES.
38 *
39 */
40
41#include <linux/slab.h>
42#include "pm8001_sas.h"
43#include "pm8001_chips.h"
44
45static struct scsi_transport_template *pm8001_stt;
46
47static const struct pm8001_chip_info pm8001_chips[] = {
48 [chip_8001] = { 8, &pm8001_8001_dispatch },
49};
50static int pm8001_id;
51
52LIST_HEAD(hba_list);
53
54/**
55 * The main structure which the LLDD must register with the SCSI core.
56 */
57static struct scsi_host_template pm8001_sht = {
58 .module = THIS_MODULE,
59 .name = DRV_NAME,
60 .queuecommand = sas_queuecommand,
61 .target_alloc = sas_target_alloc,
62 .slave_configure = pm8001_slave_configure,
63 .slave_destroy = sas_slave_destroy,
64 .scan_finished = pm8001_scan_finished,
65 .scan_start = pm8001_scan_start,
66 .change_queue_depth = sas_change_queue_depth,
67 .change_queue_type = sas_change_queue_type,
68 .bios_param = sas_bios_param,
69 .can_queue = 1,
70 .cmd_per_lun = 1,
71 .this_id = -1,
72 .sg_tablesize = SG_ALL,
73 .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
74 .use_clustering = ENABLE_CLUSTERING,
75 .eh_device_reset_handler = sas_eh_device_reset_handler,
76 .eh_bus_reset_handler = sas_eh_bus_reset_handler,
77 .slave_alloc = pm8001_slave_alloc,
78 .target_destroy = sas_target_destroy,
79 .ioctl = sas_ioctl,
80 .shost_attrs = pm8001_host_attrs,
81};
82
83/**
84 * The SAS layer calls these functions to execute specific tasks.
85 */
86static struct sas_domain_function_template pm8001_transport_ops = {
87 .lldd_dev_found = pm8001_dev_found,
88 .lldd_dev_gone = pm8001_dev_gone,
89
90 .lldd_execute_task = pm8001_queue_command,
91 .lldd_control_phy = pm8001_phy_control,
92
93 .lldd_abort_task = pm8001_abort_task,
94 .lldd_abort_task_set = pm8001_abort_task_set,
95 .lldd_clear_aca = pm8001_clear_aca,
96 .lldd_clear_task_set = pm8001_clear_task_set,
97 .lldd_I_T_nexus_reset = pm8001_I_T_nexus_reset,
98 .lldd_lu_reset = pm8001_lu_reset,
99 .lldd_query_task = pm8001_query_task,
100};
101
102/**
103 * pm8001_phy_init - initialize our adapter phys
104 * @pm8001_ha: our hba structure.
105 * @phy_id: phy id.
106 */
107static void __devinit pm8001_phy_init(struct pm8001_hba_info *pm8001_ha,
108 int phy_id)
109{
110 struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
111 struct asd_sas_phy *sas_phy = &phy->sas_phy;
112 phy->phy_state = 0;
113 phy->pm8001_ha = pm8001_ha;
114 sas_phy->enabled = (phy_id < pm8001_ha->chip->n_phy) ? 1 : 0;
115 sas_phy->class = SAS;
116 sas_phy->iproto = SAS_PROTOCOL_ALL;
117 sas_phy->tproto = 0;
118 sas_phy->type = PHY_TYPE_PHYSICAL;
119 sas_phy->role = PHY_ROLE_INITIATOR;
120 sas_phy->oob_mode = OOB_NOT_CONNECTED;
121 sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
122 sas_phy->id = phy_id;
123 sas_phy->sas_addr = &pm8001_ha->sas_addr[0];
124 sas_phy->frame_rcvd = &phy->frame_rcvd[0];
125 sas_phy->ha = (struct sas_ha_struct *)pm8001_ha->shost->hostdata;
126 sas_phy->lldd_phy = phy;
127}
128
129/**
130 * pm8001_free - free hba
131 * @pm8001_ha: our hba structure.
132 *
133 */
134static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
135{
136 int i;
137 struct pm8001_wq *wq;
138
139 if (!pm8001_ha)
140 return;
141
142 for (i = 0; i < USI_MAX_MEMCNT; i++) {
143 if (pm8001_ha->memoryMap.region[i].virt_ptr != NULL) {
144 pci_free_consistent(pm8001_ha->pdev,
145 pm8001_ha->memoryMap.region[i].element_size,
146 pm8001_ha->memoryMap.region[i].virt_ptr,
147 pm8001_ha->memoryMap.region[i].phys_addr);
148 }
149 }
150 PM8001_CHIP_DISP->chip_iounmap(pm8001_ha);
151 if (pm8001_ha->shost)
152 scsi_host_put(pm8001_ha->shost);
153 list_for_each_entry(wq, &pm8001_ha->wq_list, entry)
154 cancel_delayed_work(&wq->work_q);
155 kfree(pm8001_ha->tags);
156 kfree(pm8001_ha);
157}
158
159#ifdef PM8001_USE_TASKLET
160static void pm8001_tasklet(unsigned long opaque)
161{
162 struct pm8001_hba_info *pm8001_ha;
163 pm8001_ha = (struct pm8001_hba_info *)opaque;
164 if (unlikely(!pm8001_ha))
165 BUG_ON(1);
166 PM8001_CHIP_DISP->isr(pm8001_ha);
167}
168#endif
169
170
171/**
172 * pm8001_interrupt - when the HBA raises an interrupt, invoke this
173 * dispatcher to handle each case.
174 * @irq: irq number.
175 * @opaque: the passed general host adapter struct
176 */
177static irqreturn_t pm8001_interrupt(int irq, void *opaque)
178{
179 struct pm8001_hba_info *pm8001_ha;
180 irqreturn_t ret = IRQ_HANDLED;
181 struct sas_ha_struct *sha = opaque;
182 pm8001_ha = sha->lldd_ha;
183 if (unlikely(!pm8001_ha))
184 return IRQ_NONE;
185 if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha))
186 return IRQ_NONE;
187#ifdef PM8001_USE_TASKLET
188 tasklet_schedule(&pm8001_ha->tasklet);
189#else
190 ret = PM8001_CHIP_DISP->isr(pm8001_ha);
191#endif
192 return ret;
193}
194
195/**
196 * pm8001_alloc - allocate and initialize our hba structure and its DMA memory regions.
197 * @pm8001_ha: our hba structure.
198 *
199 */
200static int __devinit pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
201{
202 int i;
203 spin_lock_init(&pm8001_ha->lock);
204 for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
205 pm8001_phy_init(pm8001_ha, i);
206 pm8001_ha->port[i].wide_port_phymap = 0;
207 pm8001_ha->port[i].port_attached = 0;
208 pm8001_ha->port[i].port_state = 0;
209 INIT_LIST_HEAD(&pm8001_ha->port[i].list);
210 }
211
212 pm8001_ha->tags = kzalloc(PM8001_MAX_CCB, GFP_KERNEL);
213 if (!pm8001_ha->tags)
214 goto err_out;
215 /* MPI Memory region 1 for AAP Event Log for fw */
216 pm8001_ha->memoryMap.region[AAP1].num_elements = 1;
217 pm8001_ha->memoryMap.region[AAP1].element_size = PM8001_EVENT_LOG_SIZE;
218 pm8001_ha->memoryMap.region[AAP1].total_len = PM8001_EVENT_LOG_SIZE;
219 pm8001_ha->memoryMap.region[AAP1].alignment = 32;
220
221 /* MPI Memory region 2 for IOP Event Log for fw */
222 pm8001_ha->memoryMap.region[IOP].num_elements = 1;
223 pm8001_ha->memoryMap.region[IOP].element_size = PM8001_EVENT_LOG_SIZE;
224 pm8001_ha->memoryMap.region[IOP].total_len = PM8001_EVENT_LOG_SIZE;
225 pm8001_ha->memoryMap.region[IOP].alignment = 32;
226
227 /* MPI Memory region 3 for consumer Index of inbound queues */
228 pm8001_ha->memoryMap.region[CI].num_elements = 1;
229 pm8001_ha->memoryMap.region[CI].element_size = 4;
230 pm8001_ha->memoryMap.region[CI].total_len = 4;
231 pm8001_ha->memoryMap.region[CI].alignment = 4;
232
233 /* MPI Memory region 4 for producer Index of outbound queues */
234 pm8001_ha->memoryMap.region[PI].num_elements = 1;
235 pm8001_ha->memoryMap.region[PI].element_size = 4;
236 pm8001_ha->memoryMap.region[PI].total_len = 4;
237 pm8001_ha->memoryMap.region[PI].alignment = 4;
238
239 /* MPI Memory region 5 inbound queues */
240 pm8001_ha->memoryMap.region[IB].num_elements = 256;
241 pm8001_ha->memoryMap.region[IB].element_size = 64;
242 pm8001_ha->memoryMap.region[IB].total_len = 256 * 64;
243 pm8001_ha->memoryMap.region[IB].alignment = 64;
244
245 /* MPI Memory region 6 outbound queues */
246 pm8001_ha->memoryMap.region[OB].num_elements = 256;
247 pm8001_ha->memoryMap.region[OB].element_size = 64;
248 pm8001_ha->memoryMap.region[OB].total_len = 256 * 64;
249 pm8001_ha->memoryMap.region[OB].alignment = 64;
250
251 /* Memory region write DMA*/
252 pm8001_ha->memoryMap.region[NVMD].num_elements = 1;
253 pm8001_ha->memoryMap.region[NVMD].element_size = 4096;
254 pm8001_ha->memoryMap.region[NVMD].total_len = 4096;
255 /* Memory region for devices*/
256 pm8001_ha->memoryMap.region[DEV_MEM].num_elements = 1;
257 pm8001_ha->memoryMap.region[DEV_MEM].element_size = PM8001_MAX_DEVICES *
258 sizeof(struct pm8001_device);
259 pm8001_ha->memoryMap.region[DEV_MEM].total_len = PM8001_MAX_DEVICES *
260 sizeof(struct pm8001_device);
261
262 /* Memory region for ccb_info*/
263 pm8001_ha->memoryMap.region[CCB_MEM].num_elements = 1;
264 pm8001_ha->memoryMap.region[CCB_MEM].element_size = PM8001_MAX_CCB *
265 sizeof(struct pm8001_ccb_info);
266 pm8001_ha->memoryMap.region[CCB_MEM].total_len = PM8001_MAX_CCB *
267 sizeof(struct pm8001_ccb_info);
268
269 for (i = 0; i < USI_MAX_MEMCNT; i++) {
270 if (pm8001_mem_alloc(pm8001_ha->pdev,
271 &pm8001_ha->memoryMap.region[i].virt_ptr,
272 &pm8001_ha->memoryMap.region[i].phys_addr,
273 &pm8001_ha->memoryMap.region[i].phys_addr_hi,
274 &pm8001_ha->memoryMap.region[i].phys_addr_lo,
275 pm8001_ha->memoryMap.region[i].total_len,
276 pm8001_ha->memoryMap.region[i].alignment) != 0) {
277 PM8001_FAIL_DBG(pm8001_ha,
278 pm8001_printk("Mem%d alloc failed\n",
279 i));
280 goto err_out;
281 }
282 }
283
284 pm8001_ha->devices = pm8001_ha->memoryMap.region[DEV_MEM].virt_ptr;
285 for (i = 0; i < PM8001_MAX_DEVICES; i++) {
286 pm8001_ha->devices[i].dev_type = NO_DEVICE;
287 pm8001_ha->devices[i].id = i;
288 pm8001_ha->devices[i].device_id = PM8001_MAX_DEVICES;
289 pm8001_ha->devices[i].running_req = 0;
290 }
291 pm8001_ha->ccb_info = pm8001_ha->memoryMap.region[CCB_MEM].virt_ptr;
292 for (i = 0; i < PM8001_MAX_CCB; i++) {
293 pm8001_ha->ccb_info[i].ccb_dma_handle =
294 pm8001_ha->memoryMap.region[CCB_MEM].phys_addr +
295 i * sizeof(struct pm8001_ccb_info);
296 pm8001_ha->ccb_info[i].task = NULL;
297 pm8001_ha->ccb_info[i].ccb_tag = 0xffffffff;
298 pm8001_ha->ccb_info[i].device = NULL;
299 ++pm8001_ha->tags_num;
300 }
301 pm8001_ha->flags = PM8001F_INIT_TIME;
302 /* Initialize tags */
303 pm8001_tag_init(pm8001_ha);
304 return 0;
305err_out:
306 return 1;
307}
308
309/**
310 * pm8001_ioremap - remap the pci high physical addresses to kernel virtual
311 * addresses so that we can access them.
312 * @pm8001_ha: our hba structure.
313 */
314static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha)
315{
316 u32 bar;
317 u32 logicalBar = 0;
318 struct pci_dev *pdev;
319
320 pdev = pm8001_ha->pdev;
321 /* map pci mem (PMC pci base 0-3)*/
322 for (bar = 0; bar < 6; bar++) {
323 /*
324 ** logical BARs for SPC:
325 ** bar 0 and 1 - logical BAR0
326 ** bar 2 and 3 - logical BAR1
327 ** bar4 - logical BAR2
328 ** bar5 - logical BAR3
329 ** Skip the appropriate assignments:
330 */
331 if ((bar == 1) || (bar == 3))
332 continue;
333 if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
334 pm8001_ha->io_mem[logicalBar].membase =
335 pci_resource_start(pdev, bar);
336 pm8001_ha->io_mem[logicalBar].membase &=
337 (u32)PCI_BASE_ADDRESS_MEM_MASK;
338 pm8001_ha->io_mem[logicalBar].memsize =
339 pci_resource_len(pdev, bar);
340 pm8001_ha->io_mem[logicalBar].memvirtaddr =
341 ioremap(pm8001_ha->io_mem[logicalBar].membase,
342 pm8001_ha->io_mem[logicalBar].memsize);
343 PM8001_INIT_DBG(pm8001_ha,
344 pm8001_printk("PCI: bar %d, logicalBar %d "
345 "virt_addr=%lx,len=%d\n", bar, logicalBar,
346 (unsigned long)
347 pm8001_ha->io_mem[logicalBar].memvirtaddr,
348 pm8001_ha->io_mem[logicalBar].memsize));
349 } else {
350 pm8001_ha->io_mem[logicalBar].membase = 0;
351 pm8001_ha->io_mem[logicalBar].memsize = 0;
352 pm8001_ha->io_mem[logicalBar].memvirtaddr = 0;
353 }
354 logicalBar++;
355 }
356 return 0;
357}
358
359/**
360 * pm8001_pci_alloc - initialize our ha card structure
361 * @pdev: pci device.
362 * @chip_id: chip id.
363 * @shost: scsi host struct which has been initialized before.
364 */
365static struct pm8001_hba_info *__devinit
366pm8001_pci_alloc(struct pci_dev *pdev, u32 chip_id, struct Scsi_Host *shost)
367{
368 struct pm8001_hba_info *pm8001_ha;
369 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
370
371
372 pm8001_ha = sha->lldd_ha;
373 if (!pm8001_ha)
374 return NULL;
375
376 pm8001_ha->pdev = pdev;
377 pm8001_ha->dev = &pdev->dev;
378 pm8001_ha->chip_id = chip_id;
379 pm8001_ha->chip = &pm8001_chips[pm8001_ha->chip_id];
380 pm8001_ha->irq = pdev->irq;
381 pm8001_ha->sas = sha;
382 pm8001_ha->shost = shost;
383 pm8001_ha->id = pm8001_id++;
384 INIT_LIST_HEAD(&pm8001_ha->wq_list);
385 pm8001_ha->logging_level = 0x01;
386 sprintf(pm8001_ha->name, "%s%d", DRV_NAME, pm8001_ha->id);
387#ifdef PM8001_USE_TASKLET
388 tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet,
389 (unsigned long)pm8001_ha);
390#endif
391 pm8001_ioremap(pm8001_ha);
392 if (!pm8001_alloc(pm8001_ha))
393 return pm8001_ha;
394 pm8001_free(pm8001_ha);
395 return NULL;
396}
397
398/**
399 * pci_go_44 - pm8001-specific: the chip's DMA addressing is 44-bit rather than 64-bit
400 * @pdev: pci device.
401 */
402static int pci_go_44(struct pci_dev *pdev)
403{
404 int rc;
405
406 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(44))) {
407 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(44));
408 if (rc) {
409 rc = pci_set_consistent_dma_mask(pdev,
410 DMA_BIT_MASK(32));
411 if (rc) {
412 dev_printk(KERN_ERR, &pdev->dev,
413 "44-bit DMA enable failed\n");
414 return rc;
415 }
416 }
417 } else {
418 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
419 if (rc) {
420 dev_printk(KERN_ERR, &pdev->dev,
421 "32-bit DMA enable failed\n");
422 return rc;
423 }
424 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
425 if (rc) {
426 dev_printk(KERN_ERR, &pdev->dev,
427 "32-bit consistent DMA enable failed\n");
428 return rc;
429 }
430 }
431 return rc;
432}
433
434/**
435 * pm8001_prep_sas_ha_init - allocate memory for the general hba struct and initialize it.
436 * @shost: scsi host which has been allocated outside.
437 * @chip_info: our chip info struct.
438 */
439static int __devinit pm8001_prep_sas_ha_init(struct Scsi_Host *shost,
440 const struct pm8001_chip_info *chip_info)
441{
442 int phy_nr, port_nr;
443 struct asd_sas_phy **arr_phy;
444 struct asd_sas_port **arr_port;
445 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
446
447 phy_nr = chip_info->n_phy;
448 port_nr = phy_nr;
449 memset(sha, 0x00, sizeof(*sha));
450 arr_phy = kcalloc(phy_nr, sizeof(void *), GFP_KERNEL);
451 if (!arr_phy)
452 goto exit;
453 arr_port = kcalloc(port_nr, sizeof(void *), GFP_KERNEL);
454 if (!arr_port)
455 goto exit_free2;
456
457 sha->sas_phy = arr_phy;
458 sha->sas_port = arr_port;
459 sha->lldd_ha = kzalloc(sizeof(struct pm8001_hba_info), GFP_KERNEL);
460 if (!sha->lldd_ha)
461 goto exit_free1;
462
463 shost->transportt = pm8001_stt;
464 shost->max_id = PM8001_MAX_DEVICES;
465 shost->max_lun = 8;
466 shost->max_channel = 0;
467 shost->unique_id = pm8001_id;
468 shost->max_cmd_len = 16;
469 shost->can_queue = PM8001_CAN_QUEUE;
470 shost->cmd_per_lun = 32;
471 return 0;
472exit_free1:
473 kfree(arr_port);
474exit_free2:
475 kfree(arr_phy);
476exit:
477 return -1;
478}
479
480/**
481 * pm8001_post_sas_ha_init - initialize general hba struct defined in libsas
482 * @shost: scsi host which has been allocated outside
483 * @chip_info: our chip info struct.
484 */
485static void __devinit pm8001_post_sas_ha_init(struct Scsi_Host *shost,
486 const struct pm8001_chip_info *chip_info)
487{
488 int i = 0;
489 struct pm8001_hba_info *pm8001_ha;
490 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
491
492 pm8001_ha = sha->lldd_ha;
493 for (i = 0; i < chip_info->n_phy; i++) {
494 sha->sas_phy[i] = &pm8001_ha->phy[i].sas_phy;
495 sha->sas_port[i] = &pm8001_ha->port[i].sas_port;
496 }
497 sha->sas_ha_name = DRV_NAME;
498 sha->dev = pm8001_ha->dev;
499
500 sha->lldd_module = THIS_MODULE;
501 sha->sas_addr = &pm8001_ha->sas_addr[0];
502 sha->num_phys = chip_info->n_phy;
503 sha->lldd_max_execute_num = 1;
504 sha->lldd_queue_size = PM8001_CAN_QUEUE;
505 sha->core.shost = shost;
506}
507
508/**
509 * pm8001_init_sas_add - initialize sas address
510 * @pm8001_ha: our hba struct.
511 *
512 * Currently we just assign a fixed SAS address to the HBA; in production
513 * it should be read from the EEPROM.
514 */
515static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
516{
517 u8 i;
518#ifdef PM8001_READ_VPD
519 DECLARE_COMPLETION_ONSTACK(completion);
520 struct pm8001_ioctl_payload payload;
521 pm8001_ha->nvmd_completion = &completion;
522 payload.minor_function = 0;
523 payload.length = 128;
524 payload.func_specific = kzalloc(128, GFP_KERNEL);
525 PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
526 wait_for_completion(&completion);
527 for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
528 memcpy(&pm8001_ha->phy[i].dev_sas_addr, pm8001_ha->sas_addr,
529 SAS_ADDR_SIZE);
530 PM8001_INIT_DBG(pm8001_ha,
531 pm8001_printk("phy %d sas_addr = %016llx \n", i,
532 pm8001_ha->phy[i].dev_sas_addr));
533 }
534#else
535 for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
536 pm8001_ha->phy[i].dev_sas_addr = 0x50010c600047f9d0ULL;
537 pm8001_ha->phy[i].dev_sas_addr =
538 cpu_to_be64((u64)
539 (*(u64 *)&pm8001_ha->phy[i].dev_sas_addr));
540 }
541 memcpy(pm8001_ha->sas_addr, &pm8001_ha->phy[0].dev_sas_addr,
542 SAS_ADDR_SIZE);
543#endif
544}
545
546#ifdef PM8001_USE_MSIX
547/**
548 * pm8001_setup_msix - enable MSI-X interrupt
549 * @pm8001_ha: our hba struct.
550 * @irq_handler: irq_handler
551 */
552static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha,
553 irq_handler_t irq_handler)
554{
555 u32 i = 0, j = 0;
556 u32 number_of_intr = 1;
557 int flag = 0;
558 u32 max_entry;
559 int rc;
560 max_entry = sizeof(pm8001_ha->msix_entries) /
561 sizeof(pm8001_ha->msix_entries[0]);
562 flag |= IRQF_DISABLED;
563 for (i = 0; i < max_entry ; i++)
564 pm8001_ha->msix_entries[i].entry = i;
565 rc = pci_enable_msix(pm8001_ha->pdev, pm8001_ha->msix_entries,
566 number_of_intr);
567 pm8001_ha->number_of_intr = number_of_intr;
568 if (!rc) {
569 for (i = 0; i < number_of_intr; i++) {
570 if (request_irq(pm8001_ha->msix_entries[i].vector,
571 irq_handler, flag, DRV_NAME,
572 SHOST_TO_SAS_HA(pm8001_ha->shost))) {
573 for (j = 0; j < i; j++)
574 free_irq(
575 pm8001_ha->msix_entries[j].vector,
576 SHOST_TO_SAS_HA(pm8001_ha->shost));
577 pci_disable_msix(pm8001_ha->pdev);
578 break;
579 }
580 }
581 }
582 return rc;
583}
584#endif
585
586/**
587 * pm8001_request_irq - register interrupt
588 * @pm8001_ha: our hba struct.
589 */
590static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha)
591{
592 struct pci_dev *pdev;
593 irq_handler_t irq_handler = pm8001_interrupt;
594 int rc;
595
596 pdev = pm8001_ha->pdev;
597
598#ifdef PM8001_USE_MSIX
599 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
600 return pm8001_setup_msix(pm8001_ha, irq_handler);
601#endif
602
603 /* initialize the INT-X interrupt */
607 rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME,
608 SHOST_TO_SAS_HA(pm8001_ha->shost));
609 return rc;
610}
611
612/**
613 * pm8001_pci_probe - probe supported device
614 * @pdev: pci device which kernel has been prepared for.
615 * @ent: pci device id
616 *
617 * This function is the main initialization function: it is invoked when a
618 * supported PCI device is probed. All structure and hardware initialization
619 * is done here, and the interrupt is registered.
620 */
621static int __devinit pm8001_pci_probe(struct pci_dev *pdev,
622 const struct pci_device_id *ent)
623{
624 unsigned int rc;
625 u32 pci_reg;
626 struct pm8001_hba_info *pm8001_ha;
627 struct Scsi_Host *shost = NULL;
628 const struct pm8001_chip_info *chip;
629
630 dev_printk(KERN_INFO, &pdev->dev,
631 "pm8001: driver version %s\n", DRV_VERSION);
632 rc = pci_enable_device(pdev);
633 if (rc)
634 goto err_out_enable;
635 pci_set_master(pdev);
636 /*
637 * Enable pci slot busmaster by setting pci command register.
638 * This is required by FW for Cyclone card.
639 */
640
641 pci_read_config_dword(pdev, PCI_COMMAND, &pci_reg);
642 pci_reg |= 0x157;
643 pci_write_config_dword(pdev, PCI_COMMAND, pci_reg);
644 rc = pci_request_regions(pdev, DRV_NAME);
645 if (rc)
646 goto err_out_disable;
647 rc = pci_go_44(pdev);
648 if (rc)
649 goto err_out_regions;
650
651 shost = scsi_host_alloc(&pm8001_sht, sizeof(void *));
652 if (!shost) {
653 rc = -ENOMEM;
654 goto err_out_regions;
655 }
656 chip = &pm8001_chips[ent->driver_data];
657 SHOST_TO_SAS_HA(shost) =
658 kzalloc(sizeof(struct sas_ha_struct), GFP_KERNEL);
659 if (!SHOST_TO_SAS_HA(shost)) {
660 rc = -ENOMEM;
661 goto err_out_free_host;
662 }
663
664 rc = pm8001_prep_sas_ha_init(shost, chip);
665 if (rc) {
666 rc = -ENOMEM;
667 goto err_out_free;
668 }
669 pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost));
670 pm8001_ha = pm8001_pci_alloc(pdev, chip_8001, shost);
671 if (!pm8001_ha) {
672 rc = -ENOMEM;
673 goto err_out_free;
674 }
675 list_add_tail(&pm8001_ha->list, &hba_list);
676 PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);
677 rc = PM8001_CHIP_DISP->chip_init(pm8001_ha);
678 if (rc)
679 goto err_out_ha_free;
680
681 rc = scsi_add_host(shost, &pdev->dev);
682 if (rc)
683 goto err_out_ha_free;
684 rc = pm8001_request_irq(pm8001_ha);
685 if (rc)
686 goto err_out_shost;
687
688 PM8001_CHIP_DISP->interrupt_enable(pm8001_ha);
689 pm8001_init_sas_add(pm8001_ha);
690 pm8001_post_sas_ha_init(shost, chip);
691 rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
692 if (rc)
693 goto err_out_shost;
694 scsi_scan_host(pm8001_ha->shost);
695 return 0;
696
697err_out_shost:
698 scsi_remove_host(pm8001_ha->shost);
699err_out_ha_free:
700 pm8001_free(pm8001_ha);
701err_out_free:
702 kfree(SHOST_TO_SAS_HA(shost));
703err_out_free_host:
704 kfree(shost);
705err_out_regions:
706 pci_release_regions(pdev);
707err_out_disable:
708 pci_disable_device(pdev);
709err_out_enable:
710 return rc;
711}
712
713static void __devexit pm8001_pci_remove(struct pci_dev *pdev)
714{
715 struct sas_ha_struct *sha = pci_get_drvdata(pdev);
716 struct pm8001_hba_info *pm8001_ha;
717 int i;
718 pm8001_ha = sha->lldd_ha;
719 pci_set_drvdata(pdev, NULL);
720 sas_unregister_ha(sha);
721 sas_remove_host(pm8001_ha->shost);
722 list_del(&pm8001_ha->list);
723 scsi_remove_host(pm8001_ha->shost);
724 PM8001_CHIP_DISP->interrupt_disable(pm8001_ha);
725 PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);
726
727#ifdef PM8001_USE_MSIX
728 for (i = 0; i < pm8001_ha->number_of_intr; i++)
729 synchronize_irq(pm8001_ha->msix_entries[i].vector);
730 for (i = 0; i < pm8001_ha->number_of_intr; i++)
731 free_irq(pm8001_ha->msix_entries[i].vector, sha);
732 pci_disable_msix(pdev);
733#else
734 free_irq(pm8001_ha->irq, sha);
735#endif
736#ifdef PM8001_USE_TASKLET
737 tasklet_kill(&pm8001_ha->tasklet);
738#endif
739 pm8001_free(pm8001_ha);
740 kfree(sha->sas_phy);
741 kfree(sha->sas_port);
742 kfree(sha);
743 pci_release_regions(pdev);
744 pci_disable_device(pdev);
745}
746
747/**
748 * pm8001_pci_suspend - power management suspend main entry point
749 * @pdev: PCI device struct
750 * @state: PM state change to (usually PCI_D3)
751 *
752 * Returns 0 success, anything else error.
753 */
754static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state)
755{
756 struct sas_ha_struct *sha = pci_get_drvdata(pdev);
757 struct pm8001_hba_info *pm8001_ha;
758 int i, pos;
759 u32 device_state;
760 pm8001_ha = sha->lldd_ha;
761 flush_scheduled_work();
762 scsi_block_requests(pm8001_ha->shost);
763 pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
764 if (pos == 0) {
765 printk(KERN_ERR " PCI PM not supported\n");
766 return -ENODEV;
767 }
768 PM8001_CHIP_DISP->interrupt_disable(pm8001_ha);
769 PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);
770#ifdef PM8001_USE_MSIX
771 for (i = 0; i < pm8001_ha->number_of_intr; i++)
772 synchronize_irq(pm8001_ha->msix_entries[i].vector);
773 for (i = 0; i < pm8001_ha->number_of_intr; i++)
774 free_irq(pm8001_ha->msix_entries[i].vector, sha);
775 pci_disable_msix(pdev);
776#else
777 free_irq(pm8001_ha->irq, sha);
778#endif
779#ifdef PM8001_USE_TASKLET
780 tasklet_kill(&pm8001_ha->tasklet);
781#endif
782 device_state = pci_choose_state(pdev, state);
783 pm8001_printk("pdev=0x%p, slot=%s, entering "
784 "operating state [D%d]\n", pdev,
785 pm8001_ha->name, device_state);
786 pci_save_state(pdev);
787 pci_disable_device(pdev);
788 pci_set_power_state(pdev, device_state);
789 return 0;
790}
791
792/**
793 * pm8001_pci_resume - power management resume main entry point
794 * @pdev: PCI device struct
795 *
796 * Returns 0 success, anything else error.
797 */
798static int pm8001_pci_resume(struct pci_dev *pdev)
799{
800 struct sas_ha_struct *sha = pci_get_drvdata(pdev);
801 struct pm8001_hba_info *pm8001_ha;
802 int rc;
803 u32 device_state;
804 pm8001_ha = sha->lldd_ha;
805 device_state = pdev->current_state;
806
807 pm8001_printk("pdev=0x%p, slot=%s, resuming from previous "
808 "operating state [D%d]\n", pdev, pm8001_ha->name, device_state);
809
810 pci_set_power_state(pdev, PCI_D0);
811 pci_enable_wake(pdev, PCI_D0, 0);
812 pci_restore_state(pdev);
813 rc = pci_enable_device(pdev);
814 if (rc) {
815 pm8001_printk("slot=%s Enable device failed during resume\n",
816 pm8001_ha->name);
817 goto err_out_enable;
818 }
819
820 pci_set_master(pdev);
821 rc = pci_go_44(pdev);
822 if (rc)
823 goto err_out_disable;
824
825 PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);
826 rc = PM8001_CHIP_DISP->chip_init(pm8001_ha);
827 if (rc)
828 goto err_out_disable;
829 PM8001_CHIP_DISP->interrupt_disable(pm8001_ha);
830 rc = pm8001_request_irq(pm8001_ha);
831 if (rc)
832 goto err_out_disable;
833#ifdef PM8001_USE_TASKLET
834 tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet,
835 (unsigned long)pm8001_ha);
836#endif
837 PM8001_CHIP_DISP->interrupt_enable(pm8001_ha);
838 scsi_unblock_requests(pm8001_ha->shost);
839 return 0;
840
841err_out_disable:
842 scsi_remove_host(pm8001_ha->shost);
843 pci_disable_device(pdev);
844err_out_enable:
845 return rc;
846}
847
848static struct pci_device_id __devinitdata pm8001_pci_table[] = {
849 {
850 PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001
851 },
852 {
853 PCI_DEVICE(0x117c, 0x0042),
854 .driver_data = chip_8001
855 },
856 {} /* terminate list */
857};
858
859static struct pci_driver pm8001_pci_driver = {
860 .name = DRV_NAME,
861 .id_table = pm8001_pci_table,
862 .probe = pm8001_pci_probe,
863 .remove = __devexit_p(pm8001_pci_remove),
864 .suspend = pm8001_pci_suspend,
865 .resume = pm8001_pci_resume,
866};
867
868/**
869 * pm8001_init - initialize scsi transport template
870 */
871static int __init pm8001_init(void)
872{
873 int rc;
874 pm8001_id = 0;
875 pm8001_stt = sas_domain_attach_transport(&pm8001_transport_ops);
876 if (!pm8001_stt)
877 return -ENOMEM;
878 rc = pci_register_driver(&pm8001_pci_driver);
879 if (rc)
880 goto err_out;
881 return 0;
882err_out:
883 sas_release_transport(pm8001_stt);
884 return rc;
885}
886
887static void __exit pm8001_exit(void)
888{
889 pci_unregister_driver(&pm8001_pci_driver);
890 sas_release_transport(pm8001_stt);
891}
892
893module_init(pm8001_init);
894module_exit(pm8001_exit);
895
896MODULE_AUTHOR("Jack Wang <jack_wang@usish.com>");
897MODULE_DESCRIPTION("PMC-Sierra PM8001 SAS/SATA controller driver");
898MODULE_VERSION(DRV_VERSION);
899MODULE_LICENSE("GPL");
900MODULE_DEVICE_TABLE(pci, pm8001_pci_table);
901
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
new file mode 100644
index 000000000000..bff4f5139b9c
--- /dev/null
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -0,0 +1,1153 @@
1/*
2 * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
3 *
4 * Copyright (c) 2008-2009 USI Co., Ltd.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification.
13 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14 * substantially similar to the "NO WARRANTY" disclaimer below
15 * ("Disclaimer") and any redistribution must be conditioned upon
16 * including a substantially similar Disclaimer requirement for further
17 * binary redistribution.
18 * 3. Neither the names of the above-listed copyright holders nor the names
19 * of any contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * Alternatively, this software may be distributed under the terms of the
23 * GNU General Public License ("GPL") version 2 as published by the Free
24 * Software Foundation.
25 *
26 * NO WARRANTY
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
35 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
36 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGES.
38 *
39 */
40
41#include <linux/slab.h>
42#include "pm8001_sas.h"
43
44/**
45 * pm8001_find_tag - find the tag that belongs to a given sas task
46 * @task: the task sent to the LLDD
47 * @tag: the found tag associated with the task
48 */
49static int pm8001_find_tag(struct sas_task *task, u32 *tag)
50{
51 if (task->lldd_task) {
52 struct pm8001_ccb_info *ccb;
53 ccb = task->lldd_task;
54 *tag = ccb->ccb_tag;
55 return 1;
56 }
57 return 0;
58}
59
60/**
61 * pm8001_tag_clear - clear a tag in the tags bitmap
62 * @pm8001_ha: our hba struct
63 * @tag: the tag to clear
64 */
65static void pm8001_tag_clear(struct pm8001_hba_info *pm8001_ha, u32 tag)
66{
67 void *bitmap = pm8001_ha->tags;
68 clear_bit(tag, bitmap);
69}
70
71static void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag)
72{
73 pm8001_tag_clear(pm8001_ha, tag);
74}
75
76static void pm8001_tag_set(struct pm8001_hba_info *pm8001_ha, u32 tag)
77{
78 void *bitmap = pm8001_ha->tags;
79 set_bit(tag, bitmap);
80}
81
82/**
83 * pm8001_tag_alloc - allocate an empty tag for a task to use.
84 * @pm8001_ha: our hba struct
85 * @tag_out: the allocated tag.
86 */
87inline int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out)
88{
89 unsigned int index, tag;
90 void *bitmap = pm8001_ha->tags;
91
92 index = find_first_zero_bit(bitmap, pm8001_ha->tags_num);
93 tag = index;
94 if (tag >= pm8001_ha->tags_num)
95 return -SAS_QUEUE_FULL;
96 pm8001_tag_set(pm8001_ha, tag);
97 *tag_out = tag;
98 return 0;
99}
100
101void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha)
102{
103 int i;
104 for (i = 0; i < pm8001_ha->tags_num; ++i)
105 pm8001_tag_clear(pm8001_ha, i);
106}
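
/*
 * Usage sketch for the tag helpers above, not the driver's actual submission
 * path: a caller allocates a tag before building a command and releases it
 * again when the command completes. The function name is hypothetical.
 */
static int sketch_submit_with_tag(struct pm8001_hba_info *pm8001_ha)
{
	u32 tag;
	int rc = pm8001_tag_alloc(pm8001_ha, &tag);

	if (rc)
		return rc;	/* -SAS_QUEUE_FULL: no free tag */
	/* ... build an IOMB carrying this tag and post it to the chip ... */
	pm8001_tag_free(pm8001_ha, tag);	/* normally done on completion */
	return 0;
}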
107
108/**
109 * pm8001_mem_alloc - allocate DMA-coherent memory for pm8001.
110 * @pdev: pci device.
111 * @virt_addr: the allocated virtual address
112 * @pphys_addr_hi: the upper 32 bits of the physical address.
113 * @pphys_addr_lo: the lower 32 bits of the physical address.
114 * @mem_size: memory size.
115 */
116int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
117 dma_addr_t *pphys_addr, u32 *pphys_addr_hi,
118 u32 *pphys_addr_lo, u32 mem_size, u32 align)
119{
120 caddr_t mem_virt_alloc;
121 dma_addr_t mem_dma_handle;
122 u64 phys_align;
123 u64 align_offset = 0;
124 if (align)
125 align_offset = (dma_addr_t)align - 1;
126 mem_virt_alloc =
127 pci_alloc_consistent(pdev, mem_size + align, &mem_dma_handle);
128 if (!mem_virt_alloc) {
129 pm8001_printk("memory allocation error\n");
130 return -1;
131 }
132 memset((void *)mem_virt_alloc, 0, mem_size+align);
133 *pphys_addr = mem_dma_handle;
134 phys_align = (*pphys_addr + align_offset) & ~align_offset;
135 *virt_addr = (void *)mem_virt_alloc + phys_align - *pphys_addr;
136 *pphys_addr_hi = upper_32_bits(phys_align);
137 *pphys_addr_lo = lower_32_bits(phys_align);
138 return 0;
139}
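
/*
 * Worked example of the alignment arithmetic above (illustrative values):
 * with align = 32, align_offset = 31; if the returned DMA handle is 0x1004,
 * phys_align = (0x1004 + 31) & ~31 = 0x1020, and the virtual pointer is
 * advanced by the same 0x1c bytes so it stays in step with the aligned bus
 * address. Over-allocating by 'align' bytes guarantees the rounded-up
 * region still fits inside the allocation.
 */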
140/**
141 * pm8001_find_ha_by_dev - find our hba struct from the domain device
142 * handed to us by the sas layer.
143 * @dev: the domain device from the sas layer.
144 */
145static
146struct pm8001_hba_info *pm8001_find_ha_by_dev(struct domain_device *dev)
147{
148 struct sas_ha_struct *sha = dev->port->ha;
149 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
150 return pm8001_ha;
151}
152
153/**
154 * pm8001_phy_control - this function is registered in the
155 * sas_domain_function_template for libsas to use. Note: it controls only
156 * the HBA's own phys; to control an expander phy, an SMP command must be
157 * used instead.
158 * @sas_phy: which phy in HBA phys.
159 * @func: the operation.
160 * @funcdata: always NULL.
161 */
162int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
163 void *funcdata)
164{
165 int rc = 0, phy_id = sas_phy->id;
166 struct pm8001_hba_info *pm8001_ha = NULL;
167 struct sas_phy_linkrates *rates;
168 DECLARE_COMPLETION_ONSTACK(completion);
169 pm8001_ha = sas_phy->ha->lldd_ha;
170 pm8001_ha->phy[phy_id].enable_completion = &completion;
171 switch (func) {
172 case PHY_FUNC_SET_LINK_RATE:
173 rates = funcdata;
174 if (rates->minimum_linkrate) {
175 pm8001_ha->phy[phy_id].minimum_linkrate =
176 rates->minimum_linkrate;
177 }
178 if (rates->maximum_linkrate) {
179 pm8001_ha->phy[phy_id].maximum_linkrate =
180 rates->maximum_linkrate;
181 }
182 if (pm8001_ha->phy[phy_id].phy_state == 0) {
183 PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
184 wait_for_completion(&completion);
185 }
186 PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
187 PHY_LINK_RESET);
188 break;
189 case PHY_FUNC_HARD_RESET:
190 if (pm8001_ha->phy[phy_id].phy_state == 0) {
191 PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
192 wait_for_completion(&completion);
193 }
194 PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
195 PHY_HARD_RESET);
196 break;
197 case PHY_FUNC_LINK_RESET:
198 if (pm8001_ha->phy[phy_id].phy_state == 0) {
199 PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
200 wait_for_completion(&completion);
201 }
202 PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
203 PHY_LINK_RESET);
204 break;
205 case PHY_FUNC_RELEASE_SPINUP_HOLD:
206 PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
207 PHY_LINK_RESET);
208 break;
209 case PHY_FUNC_DISABLE:
210 PM8001_CHIP_DISP->phy_stop_req(pm8001_ha, phy_id);
211 break;
212 default:
213 rc = -EOPNOTSUPP;
214 }
215 msleep(300);
216 return rc;
217}
218
219int pm8001_slave_alloc(struct scsi_device *scsi_dev)
220{
221 struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
222 if (dev_is_sata(dev)) {
223 /* We don't need to rescan targets
224 * if the REPORT_LUNS request fails
225 */
226 if (scsi_dev->lun > 0)
227 return -ENXIO;
228 scsi_dev->tagged_supported = 1;
229 }
230 return sas_slave_alloc(scsi_dev);
231}
232
233/**
234 * pm8001_scan_start - enable all HBA phys by sending the phy_start
235 * command to the HBA.
236 * @shost: the scsi host data.
237 */
238void pm8001_scan_start(struct Scsi_Host *shost)
239{
240 int i;
241 struct pm8001_hba_info *pm8001_ha;
242 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
243 pm8001_ha = sha->lldd_ha;
244 PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha);
245 for (i = 0; i < pm8001_ha->chip->n_phy; ++i)
246 PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i);
247}
248
249int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time)
250{
251 /* give the phy enabling interrupt event time to come in (1s
252 * is empirically about all it takes) */
253 if (time < HZ)
254 return 0;
255 /* Wait for discovery to finish */
256 scsi_flush_work(shost);
257 return 1;
258}
259
260/**
261 * pm8001_task_prep_smp - the dispatcher function, prepare data for smp task
262 * @pm8001_ha: our hba card information
263 * @ccb: the ccb attached to the smp task
264 */
265static int pm8001_task_prep_smp(struct pm8001_hba_info *pm8001_ha,
266 struct pm8001_ccb_info *ccb)
267{
268 return PM8001_CHIP_DISP->smp_req(pm8001_ha, ccb);
269}
270
271u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag)
272{
273 struct ata_queued_cmd *qc = task->uldd_task;
274 if (qc) {
275 if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
276 qc->tf.command == ATA_CMD_FPDMA_READ) {
277 *tag = qc->tag;
278 return 1;
279 }
280 }
281 return 0;
282}
283
284/**
285 * pm8001_task_prep_ata - the dispatcher function, prepare data for sata task
286 * @pm8001_ha: our hba card information
287 * @ccb: the ccb attached to the sata task
288 */
289static int pm8001_task_prep_ata(struct pm8001_hba_info *pm8001_ha,
290 struct pm8001_ccb_info *ccb)
291{
292 return PM8001_CHIP_DISP->sata_req(pm8001_ha, ccb);
293}
294
295/**
296 * pm8001_task_prep_ssp_tm - the dispatcher function, prepare task management data
297 * @pm8001_ha: our hba card information
298 * @ccb: the ccb which attached to TM
299 * @tmf: the task management IU
300 */
301static int pm8001_task_prep_ssp_tm(struct pm8001_hba_info *pm8001_ha,
302 struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf)
303{
304 return PM8001_CHIP_DISP->ssp_tm_req(pm8001_ha, ccb, tmf);
305}
306
307/**
308 * pm8001_task_prep_ssp - the dispatcher function, prepare ssp data for ssp task
309 * @pm8001_ha: our hba card information
310 * @ccb: the ccb attached to the ssp task
311 */
312static int pm8001_task_prep_ssp(struct pm8001_hba_info *pm8001_ha,
313 struct pm8001_ccb_info *ccb)
314{
315 return PM8001_CHIP_DISP->ssp_io_req(pm8001_ha, ccb);
316}
317int pm8001_slave_configure(struct scsi_device *sdev)
318{
319 struct domain_device *dev = sdev_to_domain_dev(sdev);
320 int ret = sas_slave_configure(sdev);
321 if (ret)
322 return ret;
323 if (dev_is_sata(dev)) {
324 #ifdef PM8001_DISABLE_NCQ
325 struct ata_port *ap = dev->sata_dev.ap;
326 struct ata_device *adev = ap->link.device;
327 adev->flags |= ATA_DFLAG_NCQ_OFF;
328 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1);
329 #endif
330 }
331 return 0;
332}
333/* Find the local port id that's attached to this device */
334static int sas_find_local_port_id(struct domain_device *dev)
335{
336 struct domain_device *pdev = dev->parent;
337
338 /* Directly attached device */
339 if (!pdev)
340 return dev->port->id;
341 while (pdev) {
342 struct domain_device *pdev_p = pdev->parent;
343 if (!pdev_p)
344 return pdev->port->id;
345 pdev = pdev->parent;
346 }
347 return 0;
348}
349
350/**
351 * pm8001_task_exec - queue a task (ssp, smp or ata) to the hardware.
352 * @task: the task to be executed.
353 * @num: if can_queue is greater than 1, tasks can be queued up; for SMP
354 * tasks we always execute one at a time.
355 * @gfp_flags: gfp_flags.
356 * @is_tmf: whether this is a task management task.
357 * @tmf: the task management IU
358 */
359#define DEV_IS_GONE(pm8001_dev) \
360 ((!pm8001_dev || (pm8001_dev->dev_type == NO_DEVICE)))
361static int pm8001_task_exec(struct sas_task *task, const int num,
362 gfp_t gfp_flags, int is_tmf, struct pm8001_tmf_task *tmf)
363{
364 struct domain_device *dev = task->dev;
365 struct pm8001_hba_info *pm8001_ha;
366 struct pm8001_device *pm8001_dev;
367 struct pm8001_port *port = NULL;
368 struct sas_task *t = task;
369 struct pm8001_ccb_info *ccb;
370 u32 tag = 0xdeadbeef, rc, n_elem = 0;
371 u32 n = num;
372 unsigned long flags = 0, flags_libsas = 0;
373
374 if (!dev->port) {
375 struct task_status_struct *tsm = &t->task_status;
376 tsm->resp = SAS_TASK_UNDELIVERED;
377 tsm->stat = SAS_PHY_DOWN;
378 if (dev->dev_type != SATA_DEV)
379 t->task_done(t);
380 return 0;
381 }
382 pm8001_ha = pm8001_find_ha_by_dev(task->dev);
383 PM8001_IO_DBG(pm8001_ha, pm8001_printk("pm8001_task_exec device\n"));
384 spin_lock_irqsave(&pm8001_ha->lock, flags);
385 do {
386 dev = t->dev;
387 pm8001_dev = dev->lldd_dev;
388 if (DEV_IS_GONE(pm8001_dev)) {
389 if (pm8001_dev) {
390 PM8001_IO_DBG(pm8001_ha,
391 pm8001_printk("device %d not ready.\n",
392 pm8001_dev->device_id));
393 } else {
394 PM8001_IO_DBG(pm8001_ha,
395 pm8001_printk("device %016llx not "
396 "ready.\n", SAS_ADDR(dev->sas_addr)));
397 }
398 rc = SAS_PHY_DOWN;
399 goto out_done;
400 }
401 port = &pm8001_ha->port[sas_find_local_port_id(dev)];
402 if (!port->port_attached) {
403 if (sas_protocol_ata(t->task_proto)) {
404 struct task_status_struct *ts = &t->task_status;
405 ts->resp = SAS_TASK_UNDELIVERED;
406 ts->stat = SAS_PHY_DOWN;
407
408 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
409 spin_unlock_irqrestore(dev->sata_dev.ap->lock,
410 flags_libsas);
411 t->task_done(t);
412 spin_lock_irqsave(dev->sata_dev.ap->lock,
413 flags_libsas);
414 spin_lock_irqsave(&pm8001_ha->lock, flags);
415 if (n > 1)
416 t = list_entry(t->list.next,
417 struct sas_task, list);
418 continue;
419 } else {
420 struct task_status_struct *ts = &t->task_status;
421 ts->resp = SAS_TASK_UNDELIVERED;
422 ts->stat = SAS_PHY_DOWN;
423 t->task_done(t);
424 if (n > 1)
425 t = list_entry(t->list.next,
426 struct sas_task, list);
427 continue;
428 }
429 }
430 rc = pm8001_tag_alloc(pm8001_ha, &tag);
431 if (rc)
432 goto err_out;
433 ccb = &pm8001_ha->ccb_info[tag];
434
435 if (!sas_protocol_ata(t->task_proto)) {
436 if (t->num_scatter) {
437 n_elem = dma_map_sg(pm8001_ha->dev,
438 t->scatter,
439 t->num_scatter,
440 t->data_dir);
441 if (!n_elem) {
442 rc = -ENOMEM;
443 goto err_out_tag;
444 }
445 }
446 } else {
447 n_elem = t->num_scatter;
448 }
449
450 t->lldd_task = ccb;
451 ccb->n_elem = n_elem;
452 ccb->ccb_tag = tag;
453 ccb->task = t;
454 switch (t->task_proto) {
455 case SAS_PROTOCOL_SMP:
456 rc = pm8001_task_prep_smp(pm8001_ha, ccb);
457 break;
458 case SAS_PROTOCOL_SSP:
459 if (is_tmf)
460 rc = pm8001_task_prep_ssp_tm(pm8001_ha,
461 ccb, tmf);
462 else
463 rc = pm8001_task_prep_ssp(pm8001_ha, ccb);
464 break;
465 case SAS_PROTOCOL_SATA:
466 case SAS_PROTOCOL_STP:
467 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
468 rc = pm8001_task_prep_ata(pm8001_ha, ccb);
469 break;
470 default:
471 dev_printk(KERN_ERR, pm8001_ha->dev,
472 "unknown sas_task proto: 0x%x\n",
473 t->task_proto);
474 rc = -EINVAL;
475 break;
476 }
477
478 if (rc) {
479 PM8001_IO_DBG(pm8001_ha,
480 pm8001_printk("rc is %x\n", rc));
481 goto err_out_tag;
482 }
483 /* TODO: select normal or high priority */
484 spin_lock(&t->task_state_lock);
485 t->task_state_flags |= SAS_TASK_AT_INITIATOR;
486 spin_unlock(&t->task_state_lock);
487 pm8001_dev->running_req++;
488 if (n > 1)
489 t = list_entry(t->list.next, struct sas_task, list);
490 } while (--n);
491 rc = 0;
492 goto out_done;
493
494err_out_tag:
495 pm8001_tag_free(pm8001_ha, tag);
496err_out:
497 dev_printk(KERN_ERR, pm8001_ha->dev, "pm8001 exec failed[%d]!\n", rc);
498 if (!sas_protocol_ata(t->task_proto))
499 if (n_elem)
500 dma_unmap_sg(pm8001_ha->dev, t->scatter, n_elem,
501 t->data_dir);
502out_done:
503 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
504 return rc;
505}
506
507/**
508 * pm8001_queue_command - the entry point registered with the upper layer;
509 * all IO commands sent to the HBA come through this interface.
510 * @task: the task to be executed.
511 * @num: if can_queue is greater than 1, tasks can be queued up; for an SMP
512 * task, we always execute one at a time
513 * @gfp_flags: gfp_flags
514 */
515int pm8001_queue_command(struct sas_task *task, const int num,
516 gfp_t gfp_flags)
517{
518 return pm8001_task_exec(task, num, gfp_flags, 0, NULL);
519}
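/*
 * A minimal sketch of how this entry point is wired into libsas, assuming
 * the usual sas_domain_function_template registration (the real table
 * lives in pm8001_init.c):
 *
 *	static struct sas_domain_function_template pm8001_transport_ops = {
 *		.lldd_dev_found		= pm8001_dev_found,
 *		.lldd_dev_gone		= pm8001_dev_gone,
 *		.lldd_execute_task	= pm8001_queue_command,
 *		.lldd_abort_task	= pm8001_abort_task,
 *		...
 *	};
 */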
520
521void pm8001_ccb_free(struct pm8001_hba_info *pm8001_ha, u32 ccb_idx)
522{
523 pm8001_tag_clear(pm8001_ha, ccb_idx);
524}
525
526/**
527 * pm8001_ccb_task_free - free the sg list for an ssp or smp command and free the ccb.
528 * @pm8001_ha: our hba card information
529 * @ccb: the ccb attached to the ssp task
530 * @task: the task to be freed.
531 * @ccb_idx: ccb index.
532 */
533void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
534 struct sas_task *task, struct pm8001_ccb_info *ccb, u32 ccb_idx)
535{
536 if (!ccb->task)
537 return;
538 if (!sas_protocol_ata(task->task_proto))
539 if (ccb->n_elem)
540 dma_unmap_sg(pm8001_ha->dev, task->scatter,
541 task->num_scatter, task->data_dir);
542
543 switch (task->task_proto) {
544 case SAS_PROTOCOL_SMP:
545 dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_resp, 1,
546 PCI_DMA_FROMDEVICE);
547 dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_req, 1,
548 PCI_DMA_TODEVICE);
549 break;
550
551 case SAS_PROTOCOL_SATA:
552 case SAS_PROTOCOL_STP:
553 case SAS_PROTOCOL_SSP:
554 default:
555 /* do nothing */
556 break;
557 }
558 task->lldd_task = NULL;
559 ccb->task = NULL;
560 ccb->ccb_tag = 0xFFFFFFFF;
561 pm8001_ccb_free(pm8001_ha, ccb_idx);
562}
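/*
 * A hedged sketch of the usual pairing on the completion path (the real
 * callers are the completion handlers in pm8001_hwi.c): the handler looks
 * the ccb up by tag, fills in the task status, releases what
 * pm8001_task_exec() allocated, and only then signals libsas:
 *
 *	struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag];
 *	struct sas_task *t = ccb->task;
 *
 *	t->task_status.resp = SAS_TASK_COMPLETE;
 *	t->task_status.stat = SAM_GOOD;
 *	pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 *	t->task_done(t);
 */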
563
564/**
565 * pm8001_alloc_dev - find an empty pm8001_device
566 * @pm8001_ha: our hba card information
567 */
568struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha)
569{
570 u32 dev;
571 for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
572 if (pm8001_ha->devices[dev].dev_type == NO_DEVICE) {
573 pm8001_ha->devices[dev].id = dev;
574 return &pm8001_ha->devices[dev];
575 }
576 }
577 if (dev == PM8001_MAX_DEVICES) {
578 PM8001_FAIL_DBG(pm8001_ha,
579 pm8001_printk("max support %d devices, ignore ..\n",
580 PM8001_MAX_DEVICES));
581 }
582 return NULL;
583}
584
585static void pm8001_free_dev(struct pm8001_device *pm8001_dev)
586{
587 u32 id = pm8001_dev->id;
588 memset(pm8001_dev, 0, sizeof(*pm8001_dev));
589 pm8001_dev->id = id;
590 pm8001_dev->dev_type = NO_DEVICE;
591 pm8001_dev->device_id = PM8001_MAX_DEVICES;
592 pm8001_dev->sas_device = NULL;
593}
594
595/**
596 * pm8001_dev_found_notify - libsas notifies us that a device has been found.
597 * @dev: the device structure which the sas layer uses.
598 *
599 * When libsas finds a sas domain device, it tells the LLDD that a device
600 * has been found; the LLDD then registers the device with the HBA firmware
601 * by the command "OPC_INB_REG_DEV". After that the HBA will assign a
602 * device ID (according to the device's sas address) and return it to the
603 * LLDD. From then on, we communicate with the HBA FW using the device ID
604 * the HBA assigned rather than the sas address. This step is required for
605 * our HBA but is optional for other HBA drivers.
606 */
607static int pm8001_dev_found_notify(struct domain_device *dev)
608{
609 unsigned long flags = 0;
610 int res = 0;
611 struct pm8001_hba_info *pm8001_ha = NULL;
612 struct domain_device *parent_dev = dev->parent;
613 struct pm8001_device *pm8001_device;
614 DECLARE_COMPLETION_ONSTACK(completion);
615 u32 flag = 0;
616 pm8001_ha = pm8001_find_ha_by_dev(dev);
617 spin_lock_irqsave(&pm8001_ha->lock, flags);
618
619 pm8001_device = pm8001_alloc_dev(pm8001_ha);
620 if (!pm8001_device) {
621 res = -1;
622 goto found_out;
623 }
624 pm8001_device->sas_device = dev;
625 dev->lldd_dev = pm8001_device;
626 pm8001_device->dev_type = dev->dev_type;
627 pm8001_device->dcompletion = &completion;
628 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
629 int phy_id;
630 struct ex_phy *phy;
631 for (phy_id = 0; phy_id < parent_dev->ex_dev.num_phys;
632 phy_id++) {
633 phy = &parent_dev->ex_dev.ex_phy[phy_id];
634 if (SAS_ADDR(phy->attached_sas_addr)
635 == SAS_ADDR(dev->sas_addr)) {
636 pm8001_device->attached_phy = phy_id;
637 break;
638 }
639 }
640 if (phy_id == parent_dev->ex_dev.num_phys) {
641 PM8001_FAIL_DBG(pm8001_ha,
642 pm8001_printk("Error: no attached dev:%016llx"
643 " at ex:%016llx.\n", SAS_ADDR(dev->sas_addr),
644 SAS_ADDR(parent_dev->sas_addr)));
645 res = -1;
646 }
647 } else {
648 if (dev->dev_type == SATA_DEV) {
649 pm8001_device->attached_phy =
650 dev->rphy->identify.phy_identifier;
651 flag = 1; /* direct-attached sata */
652 }
653 } /*register this device to HBA*/
654 PM8001_DISC_DBG(pm8001_ha, pm8001_printk("Found device \n"));
655 PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag);
656 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
657 wait_for_completion(&completion);
658 if (dev->dev_type == SAS_END_DEV)
659 msleep(50);
660 pm8001_ha->flags |= PM8001F_RUN_TIME;
661 return 0;
662found_out:
663 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
664 return res;
665}
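/*
 * The registration handshake above, in outline; the response side is
 * assumed to be the device-registration completion handler in
 * pm8001_hwi.c:
 *
 *	pm8001_device->dcompletion = &completion;
 *	PM8001_CHIP_DISP->reg_dev_req(...);    posts OPC_INB_REG_DEV
 *	wait_for_completion(&completion);      the response handler saves the
 *	                                       FW-assigned device_id and
 *	                                       calls complete()
 */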
666
667int pm8001_dev_found(struct domain_device *dev)
668{
669 return pm8001_dev_found_notify(dev);
670}
671
672/**
673 * pm8001_alloc_task - allocate a task structure for TMF
674 */
675static struct sas_task *pm8001_alloc_task(void)
676{
677 struct sas_task *task = kzalloc(sizeof(*task), GFP_KERNEL);
678 if (task) {
679 INIT_LIST_HEAD(&task->list);
680 spin_lock_init(&task->task_state_lock);
681 task->task_state_flags = SAS_TASK_STATE_PENDING;
682 init_timer(&task->timer);
683 init_completion(&task->completion);
684 }
685 return task;
686}
687
688static void pm8001_free_task(struct sas_task *task)
689{
690 if (task) {
691 BUG_ON(!list_empty(&task->list));
692 kfree(task);
693 }
694}
695
696static void pm8001_task_done(struct sas_task *task)
697{
698 if (!del_timer(&task->timer))
699 return;
700 complete(&task->completion);
701}
702
703static void pm8001_tmf_timedout(unsigned long data)
704{
705 struct sas_task *task = (struct sas_task *)data;
706
707 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
708 complete(&task->completion);
709}
710
711#define PM8001_TASK_TIMEOUT 20
712/**
713 * pm8001_exec_internal_tmf_task - execute a task management command.
714 * @dev: the target device.
715 * @tmf: the task management function to be performed.
716 * @para_len: length of the parameter.
717 * @parameter: ssp task parameter.
718 *
719 * When an error or exception occurs, we may want to react, for example by
720 * aborting the issued task that resulted in the exception; that is done by
721 * calling this function. Note it uses the same task execution interface.
722 */
723static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
724 void *parameter, u32 para_len, struct pm8001_tmf_task *tmf)
725{
726 int res, retry;
727 struct sas_task *task = NULL;
728 struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
729
730 for (retry = 0; retry < 3; retry++) {
731 task = pm8001_alloc_task();
732 if (!task)
733 return -ENOMEM;
734
735 task->dev = dev;
736 task->task_proto = dev->tproto;
737 memcpy(&task->ssp_task, parameter, para_len);
738 task->task_done = pm8001_task_done;
739 task->timer.data = (unsigned long)task;
740 task->timer.function = pm8001_tmf_timedout;
741 task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ;
742 add_timer(&task->timer);
743
744 res = pm8001_task_exec(task, 1, GFP_KERNEL, 1, tmf);
745
746 if (res) {
747 del_timer(&task->timer);
748 PM8001_FAIL_DBG(pm8001_ha,
749 pm8001_printk("Executing internal task "
750 "failed\n"));
751 goto ex_err;
752 }
753 wait_for_completion(&task->completion);
754 res = -TMF_RESP_FUNC_FAILED;
755 /* Even if the TMF timed out, return directly. */
756 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
757 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
758 PM8001_FAIL_DBG(pm8001_ha,
759 pm8001_printk("TMF task[%x]timeout.\n",
760 tmf->tmf));
761 goto ex_err;
762 }
763 }
764
765 if (task->task_status.resp == SAS_TASK_COMPLETE &&
766 task->task_status.stat == SAM_GOOD) {
767 res = TMF_RESP_FUNC_COMPLETE;
768 break;
769 }
770
771 if (task->task_status.resp == SAS_TASK_COMPLETE &&
772 task->task_status.stat == SAS_DATA_UNDERRUN) {
773 /* no error, but return the number of bytes of
774 * underrun */
775 res = task->task_status.residual;
776 break;
777 }
778
779 if (task->task_status.resp == SAS_TASK_COMPLETE &&
780 task->task_status.stat == SAS_DATA_OVERRUN) {
781 PM8001_FAIL_DBG(pm8001_ha,
782 pm8001_printk("Blocked task error.\n"));
783 res = -EMSGSIZE;
784 break;
785 } else {
786 PM8001_EH_DBG(pm8001_ha,
787 pm8001_printk(" Task to dev %016llx response:"
788 "0x%x status 0x%x\n",
789 SAS_ADDR(dev->sas_addr),
790 task->task_status.resp,
791 task->task_status.stat));
792 pm8001_free_task(task);
793 task = NULL;
794 }
795 }
796ex_err:
797 BUG_ON(retry == 3 && task != NULL);
798 if (task != NULL)
799 pm8001_free_task(task);
800 return res;
801}
802
803static int
804pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
805 struct pm8001_device *pm8001_dev, struct domain_device *dev, u32 flag,
806 u32 task_tag)
807{
808 int res, retry;
809 u32 ccb_tag;
810 struct pm8001_ccb_info *ccb;
811 struct sas_task *task = NULL;
812
813 for (retry = 0; retry < 3; retry++) {
814 task = pm8001_alloc_task();
815 if (!task)
816 return -ENOMEM;
817
818 task->dev = dev;
819 task->task_proto = dev->tproto;
820 task->task_done = pm8001_task_done;
821 task->timer.data = (unsigned long)task;
822 task->timer.function = pm8001_tmf_timedout;
823 task->timer.expires = jiffies + PM8001_TASK_TIMEOUT * HZ;
824 add_timer(&task->timer);
825
826 res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
827 if (res)
828 return res;
829 ccb = &pm8001_ha->ccb_info[ccb_tag];
830 ccb->device = pm8001_dev;
831 ccb->ccb_tag = ccb_tag;
832 ccb->task = task;
833
834 res = PM8001_CHIP_DISP->task_abort(pm8001_ha,
835 pm8001_dev, flag, task_tag, ccb_tag);
836
837 if (res) {
838 del_timer(&task->timer);
839 PM8001_FAIL_DBG(pm8001_ha,
840 pm8001_printk("Executing internal task "
841 "failed\n"));
842 goto ex_err;
843 }
844 wait_for_completion(&task->completion);
845 res = TMF_RESP_FUNC_FAILED;
846 /* Even if the TMF timed out, return directly. */
847 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
848 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
849 PM8001_FAIL_DBG(pm8001_ha,
850 pm8001_printk("TMF task timeout.\n"));
851 goto ex_err;
852 }
853 }
854
855 if (task->task_status.resp == SAS_TASK_COMPLETE &&
856 task->task_status.stat == SAM_GOOD) {
857 res = TMF_RESP_FUNC_COMPLETE;
858 break;
859
860 } else {
861 PM8001_EH_DBG(pm8001_ha,
862 pm8001_printk(" Task to dev %016llx response: "
863 "0x%x status 0x%x\n",
864 SAS_ADDR(dev->sas_addr),
865 task->task_status.resp,
866 task->task_status.stat));
867 pm8001_free_task(task);
868 task = NULL;
869 }
870 }
871ex_err:
872 BUG_ON(retry == 3 && task != NULL);
873 if (task != NULL)
874 pm8001_free_task(task);
875 return res;
876}
877
878/**
879 * pm8001_dev_gone_notify - see the comments for "pm8001_dev_found_notify"
880 * @dev: the device structure which sas layer used.
881 */
882static void pm8001_dev_gone_notify(struct domain_device *dev)
883{
884 unsigned long flags = 0;
885 u32 tag;
886 struct pm8001_hba_info *pm8001_ha;
887 struct pm8001_device *pm8001_dev = dev->lldd_dev;
888 u32 device_id = pm8001_dev->device_id;
889 pm8001_ha = pm8001_find_ha_by_dev(dev);
890 spin_lock_irqsave(&pm8001_ha->lock, flags);
891 pm8001_tag_alloc(pm8001_ha, &tag);
892 if (pm8001_dev) {
893 PM8001_DISC_DBG(pm8001_ha,
894 pm8001_printk("found dev[%d:%x] is gone.\n",
895 pm8001_dev->device_id, pm8001_dev->dev_type));
896 if (pm8001_dev->running_req) {
897 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
898 pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
899 dev, 1, 0);
900 spin_lock_irqsave(&pm8001_ha->lock, flags);
901 }
902 PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id);
903 pm8001_free_dev(pm8001_dev);
904 } else {
905 PM8001_DISC_DBG(pm8001_ha,
906 pm8001_printk("Found dev has gone.\n"));
907 }
908 dev->lldd_dev = NULL;
909 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
910}
911
912void pm8001_dev_gone(struct domain_device *dev)
913{
914 pm8001_dev_gone_notify(dev);
915}
916
917static int pm8001_issue_ssp_tmf(struct domain_device *dev,
918 u8 *lun, struct pm8001_tmf_task *tmf)
919{
920 struct sas_ssp_task ssp_task;
921 if (!(dev->tproto & SAS_PROTOCOL_SSP))
922 return TMF_RESP_FUNC_ESUPP;
923
924 strncpy((u8 *)&ssp_task.LUN, lun, 8);
925 return pm8001_exec_internal_tmf_task(dev, &ssp_task, sizeof(ssp_task),
926 tmf);
927}
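/*
 * Call-chain sketch for the TMF helpers above, as used by the error
 * handlers later in this file:
 *
 *	pm8001_lu_reset(dev, lun)
 *	  tmf_task.tmf = TMF_LU_RESET;
 *	  -> pm8001_issue_ssp_tmf(dev, lun, &tmf_task)
 *	     -> pm8001_exec_internal_tmf_task(dev, &ssp_task, ..., tmf)
 *	        -> pm8001_task_exec(task, 1, GFP_KERNEL, 1, tmf)
 *
 * i.e. a TMF travels the same execution path as normal I/O, with is_tmf
 * set so that the SSP prep routine builds a task management IU instead.
 */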
928
929/**
930 * Standard mandates link reset for ATA (type 0) and hard reset for
931 * SSP (type 1), only for RECOVERY
932 */
933int pm8001_I_T_nexus_reset(struct domain_device *dev)
934{
935 int rc = TMF_RESP_FUNC_FAILED;
936 struct pm8001_device *pm8001_dev;
937 struct pm8001_hba_info *pm8001_ha;
938 struct sas_phy *phy;
939 if (!dev || !dev->lldd_dev)
940 return -1;
941
942 pm8001_dev = dev->lldd_dev;
943 pm8001_ha = pm8001_find_ha_by_dev(dev);
944 phy = sas_find_local_phy(dev);
945
946 if (dev_is_sata(dev)) {
947 DECLARE_COMPLETION_ONSTACK(completion_setstate);
948 if (scsi_is_sas_phy_local(phy))
949 return 0;
950 rc = sas_phy_reset(phy, 1);
951 msleep(2000);
952 rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
953 dev, 1, 0);
954 pm8001_dev->setds_completion = &completion_setstate;
955 rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
956 pm8001_dev, 0x01);
957 wait_for_completion(&completion_setstate);
958 } else {
959 rc = sas_phy_reset(phy, 1);
960 msleep(2000);
961 }
962 PM8001_EH_DBG(pm8001_ha, pm8001_printk(" for device[%x]:rc=%d\n",
963 pm8001_dev->device_id, rc));
964 return rc;
965}
966
967/* mandatory SAM-3: this task resets the specified LUN */
968int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
969{
970 int rc = TMF_RESP_FUNC_FAILED;
971 struct pm8001_tmf_task tmf_task;
972 struct pm8001_device *pm8001_dev = dev->lldd_dev;
973 struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
974 if (dev_is_sata(dev)) {
975 struct sas_phy *phy = sas_find_local_phy(dev);
976 rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
977 dev, 1, 0);
978 rc = sas_phy_reset(phy, 1);
979 rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
980 pm8001_dev, 0x01);
981 msleep(2000);
982 } else {
983 tmf_task.tmf = TMF_LU_RESET;
984 rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
985 }
986 /* If this fails, fall through to an I_T nexus reset */
987 PM8001_EH_DBG(pm8001_ha, pm8001_printk("for device[%x]:rc=%d\n",
988 pm8001_dev->device_id, rc));
989 return rc;
990}
991
992/* optional SAM-3 */
993int pm8001_query_task(struct sas_task *task)
994{
995 u32 tag = 0xdeadbeef;
996 int i = 0;
997 struct scsi_lun lun;
998 struct pm8001_tmf_task tmf_task;
999 int rc = TMF_RESP_FUNC_FAILED;
1000 if (unlikely(!task || !task->lldd_task || !task->dev))
1001 return rc;
1002
1003 if (task->task_proto & SAS_PROTOCOL_SSP) {
1004 struct scsi_cmnd *cmnd = task->uldd_task;
1005 struct domain_device *dev = task->dev;
1006 struct pm8001_hba_info *pm8001_ha =
1007 pm8001_find_ha_by_dev(dev);
1008
1009 int_to_scsilun(cmnd->device->lun, &lun);
1010 rc = pm8001_find_tag(task, &tag);
1011 if (rc == 0) {
1012 rc = TMF_RESP_FUNC_FAILED;
1013 return rc;
1014 }
1015 PM8001_EH_DBG(pm8001_ha, pm8001_printk("Query:["));
1016 for (i = 0; i < 16; i++)
1017 printk(KERN_INFO "%02x ", cmnd->cmnd[i]);
1018 printk(KERN_INFO "]\n");
1019 tmf_task.tmf = TMF_QUERY_TASK;
1020 tmf_task.tag_of_task_to_be_managed = tag;
1021
1022 rc = pm8001_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
1023 switch (rc) {
1024 /* The task is still in Lun, release it then */
1025 case TMF_RESP_FUNC_SUCC:
1026 PM8001_EH_DBG(pm8001_ha,
1027 pm8001_printk("The task is still in Lun \n"));
1028 /* The task is not in Lun or failed, reset the phy */
1029 case TMF_RESP_FUNC_FAILED:
1030 case TMF_RESP_FUNC_COMPLETE:
1031 PM8001_EH_DBG(pm8001_ha,
1032 pm8001_printk("The task is not in Lun or failed,"
1033 " reset the phy \n"));
1034 break;
1035 }
1036 }
1037 pm8001_printk(":rc= %d\n", rc);
1038 return rc;
1039}
1040
1041/* mandatory SAM-3: abort the specified task and free its task/ccb info */
1042int pm8001_abort_task(struct sas_task *task)
1043{
1044 unsigned long flags;
1045 u32 tag = 0xdeadbeef;
1046 u32 device_id;
1047 struct domain_device *dev;
1048 struct pm8001_hba_info *pm8001_ha = NULL;
1049 struct pm8001_ccb_info *ccb;
1050 struct scsi_lun lun;
1051 struct pm8001_device *pm8001_dev;
1052 struct pm8001_tmf_task tmf_task;
1053 int rc = TMF_RESP_FUNC_FAILED;
1054 if (unlikely(!task || !task->lldd_task || !task->dev))
1055 return rc;
1056 spin_lock_irqsave(&task->task_state_lock, flags);
1057 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
1058 spin_unlock_irqrestore(&task->task_state_lock, flags);
1059 rc = TMF_RESP_FUNC_COMPLETE;
1060 goto out;
1061 }
1062 spin_unlock_irqrestore(&task->task_state_lock, flags);
1063 if (task->task_proto & SAS_PROTOCOL_SSP) {
1064 struct scsi_cmnd *cmnd = task->uldd_task;
1065 dev = task->dev;
1066 ccb = task->lldd_task;
1067 pm8001_dev = dev->lldd_dev;
1068 pm8001_ha = pm8001_find_ha_by_dev(dev);
1069 int_to_scsilun(cmnd->device->lun, &lun);
1070 rc = pm8001_find_tag(task, &tag);
1071 if (rc == 0) {
1072 printk(KERN_INFO "No such tag in %s\n", __func__);
1073 rc = TMF_RESP_FUNC_FAILED;
1074 return rc;
1075 }
1076 device_id = pm8001_dev->device_id;
1077 PM8001_EH_DBG(pm8001_ha,
1078 pm8001_printk("abort io to deviceid= %d\n", device_id));
1079 tmf_task.tmf = TMF_ABORT_TASK;
1080 tmf_task.tag_of_task_to_be_managed = tag;
1081 rc = pm8001_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
1082 pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
1083 pm8001_dev->sas_device, 0, tag);
1084 } else if (task->task_proto & SAS_PROTOCOL_SATA ||
1085 task->task_proto & SAS_PROTOCOL_STP) {
1086 dev = task->dev;
1087 pm8001_dev = dev->lldd_dev;
1088 pm8001_ha = pm8001_find_ha_by_dev(dev);
1089 rc = pm8001_find_tag(task, &tag);
1090 if (rc == 0) {
1091 printk(KERN_INFO "No such tag in %s\n", __func__);
1092 rc = TMF_RESP_FUNC_FAILED;
1093 return rc;
1094 }
1095 rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
1096 pm8001_dev->sas_device, 0, tag);
1097 } else if (task->task_proto & SAS_PROTOCOL_SMP) {
1098 /* SMP */
1099 dev = task->dev;
1100 pm8001_dev = dev->lldd_dev;
1101 pm8001_ha = pm8001_find_ha_by_dev(dev);
1102 rc = pm8001_find_tag(task, &tag);
1103 if (rc == 0) {
1104 printk(KERN_INFO "No such tag in %s\n", __func__);
1105 rc = TMF_RESP_FUNC_FAILED;
1106 return rc;
1107 }
1108 rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
1109 pm8001_dev->sas_device, 0, tag);
1110
1111 }
1112out:
1113 if (rc != TMF_RESP_FUNC_COMPLETE)
1114 pm8001_printk("rc= %d\n", rc);
1115 return rc;
1116}
1117
1118int pm8001_abort_task_set(struct domain_device *dev, u8 *lun)
1119{
1120 int rc = TMF_RESP_FUNC_FAILED;
1121 struct pm8001_tmf_task tmf_task;
1122
1123 tmf_task.tmf = TMF_ABORT_TASK_SET;
1124 rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
1125 return rc;
1126}
1127
1128int pm8001_clear_aca(struct domain_device *dev, u8 *lun)
1129{
1130 int rc = TMF_RESP_FUNC_FAILED;
1131 struct pm8001_tmf_task tmf_task;
1132
1133 tmf_task.tmf = TMF_CLEAR_ACA;
1134 rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
1135
1136 return rc;
1137}
1138
1139int pm8001_clear_task_set(struct domain_device *dev, u8 *lun)
1140{
1141 int rc = TMF_RESP_FUNC_FAILED;
1142 struct pm8001_tmf_task tmf_task;
1143 struct pm8001_device *pm8001_dev = dev->lldd_dev;
1144 struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
1145
1146 PM8001_EH_DBG(pm8001_ha,
1147 pm8001_printk("I_T_L_Q clear task set[%x]\n",
1148 pm8001_dev->device_id));
1149 tmf_task.tmf = TMF_CLEAR_TASK_SET;
1150 rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
1151 return rc;
1152}
1153
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
new file mode 100644
index 000000000000..8e38ca8cd101
--- /dev/null
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -0,0 +1,497 @@
1/*
2 * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
3 *
4 * Copyright (c) 2008-2009 USI Co., Ltd.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification.
13 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14 * substantially similar to the "NO WARRANTY" disclaimer below
15 * ("Disclaimer") and any redistribution must be conditioned upon
16 * including a substantially similar Disclaimer requirement for further
17 * binary redistribution.
18 * 3. Neither the names of the above-listed copyright holders nor the names
19 * of any contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * Alternatively, this software may be distributed under the terms of the
23 * GNU General Public License ("GPL") version 2 as published by the Free
24 * Software Foundation.
25 *
26 * NO WARRANTY
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
35 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
36 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGES.
38 *
39 */
40
41#ifndef _PM8001_SAS_H_
42#define _PM8001_SAS_H_
43
44#include <linux/kernel.h>
45#include <linux/module.h>
46#include <linux/spinlock.h>
47#include <linux/delay.h>
48#include <linux/types.h>
49#include <linux/ctype.h>
50#include <linux/dma-mapping.h>
51#include <linux/pci.h>
52#include <linux/interrupt.h>
53#include <linux/smp_lock.h>
54#include <scsi/libsas.h>
55#include <scsi/scsi_tcq.h>
56#include <scsi/sas_ata.h>
57#include <asm/atomic.h>
58#include "pm8001_defs.h"
59
60#define DRV_NAME "pm8001"
61#define DRV_VERSION "0.1.36"
62#define PM8001_FAIL_LOGGING 0x01 /* Error message logging */
63#define PM8001_INIT_LOGGING 0x02 /* driver init logging */
64#define PM8001_DISC_LOGGING 0x04 /* discovery layer logging */
65#define PM8001_IO_LOGGING 0x08 /* I/O path logging */
66#define PM8001_EH_LOGGING 0x10 /* libsas EH function logging*/
67#define PM8001_IOCTL_LOGGING 0x20 /* IOCTL message logging */
68#define PM8001_MSG_LOGGING 0x40 /* misc message logging */
69#define pm8001_printk(format, arg...) printk(KERN_INFO "%s %d:" format,\
70 __func__, __LINE__, ## arg)
71#define PM8001_CHECK_LOGGING(HBA, LEVEL, CMD) \
72do { \
73 if (unlikely(HBA->logging_level & LEVEL)) \
74 do { \
75 CMD; \
76 } while (0); \
77} while (0);
78
79#define PM8001_EH_DBG(HBA, CMD) \
80 PM8001_CHECK_LOGGING(HBA, PM8001_EH_LOGGING, CMD)
81
82#define PM8001_INIT_DBG(HBA, CMD) \
83 PM8001_CHECK_LOGGING(HBA, PM8001_INIT_LOGGING, CMD)
84
85#define PM8001_DISC_DBG(HBA, CMD) \
86 PM8001_CHECK_LOGGING(HBA, PM8001_DISC_LOGGING, CMD)
87
88#define PM8001_IO_DBG(HBA, CMD) \
89 PM8001_CHECK_LOGGING(HBA, PM8001_IO_LOGGING, CMD)
90
91#define PM8001_FAIL_DBG(HBA, CMD) \
92 PM8001_CHECK_LOGGING(HBA, PM8001_FAIL_LOGGING, CMD)
93
94#define PM8001_IOCTL_DBG(HBA, CMD) \
95 PM8001_CHECK_LOGGING(HBA, PM8001_IOCTL_LOGGING, CMD)
96
97#define PM8001_MSG_DBG(HBA, CMD) \
98 PM8001_CHECK_LOGGING(HBA, PM8001_MSG_LOGGING, CMD)
99
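/*
 * A usage sketch (mirroring calls in pm8001_sas.c): CMD is evaluated only
 * when the matching bit is set in the adapter's logging_level, so building
 * the message costs nothing while a level is disabled:
 *
 *	PM8001_IO_DBG(pm8001_ha,
 *		pm8001_printk("cmd queued, tag %d\n", tag));
 */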
100
101#define PM8001_USE_TASKLET
102#define PM8001_USE_MSIX
103#define PM8001_READ_VPD
104
105
106#define DEV_IS_EXPANDER(type) ((type == EDGE_DEV) || (type == FANOUT_DEV))
107
108#define PM8001_NAME_LENGTH 32 /* generic length of strings */
109extern struct list_head hba_list;
110extern const struct pm8001_dispatch pm8001_8001_dispatch;
111
112struct pm8001_hba_info;
113struct pm8001_ccb_info;
114struct pm8001_device;
115/* define task management IU */
116struct pm8001_tmf_task {
117 u8 tmf;
118 u32 tag_of_task_to_be_managed;
119};
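/*
 * A minimal sketch of filling this IU, mirroring pm8001_abort_task() in
 * pm8001_sas.c:
 *
 *	struct pm8001_tmf_task tmf_task;
 *
 *	tmf_task.tmf = TMF_ABORT_TASK;
 *	tmf_task.tag_of_task_to_be_managed = tag;
 *	rc = pm8001_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
 */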
120struct pm8001_ioctl_payload {
121 u32 signature;
122 u16 major_function;
123 u16 minor_function;
124 u16 length;
125 u16 status;
126 u16 offset;
127 u16 id;
128 u8 *func_specific;
129};
130
131struct pm8001_dispatch {
132 char *name;
133 int (*chip_init)(struct pm8001_hba_info *pm8001_ha);
134 int (*chip_soft_rst)(struct pm8001_hba_info *pm8001_ha, u32 signature);
135 void (*chip_rst)(struct pm8001_hba_info *pm8001_ha);
136 int (*chip_ioremap)(struct pm8001_hba_info *pm8001_ha);
137 void (*chip_iounmap)(struct pm8001_hba_info *pm8001_ha);
138 irqreturn_t (*isr)(struct pm8001_hba_info *pm8001_ha);
139 u32 (*is_our_interupt)(struct pm8001_hba_info *pm8001_ha);
140 int (*isr_process_oq)(struct pm8001_hba_info *pm8001_ha);
141 void (*interrupt_enable)(struct pm8001_hba_info *pm8001_ha);
142 void (*interrupt_disable)(struct pm8001_hba_info *pm8001_ha);
143 void (*make_prd)(struct scatterlist *scatter, int nr, void *prd);
144 int (*smp_req)(struct pm8001_hba_info *pm8001_ha,
145 struct pm8001_ccb_info *ccb);
146 int (*ssp_io_req)(struct pm8001_hba_info *pm8001_ha,
147 struct pm8001_ccb_info *ccb);
148 int (*sata_req)(struct pm8001_hba_info *pm8001_ha,
149 struct pm8001_ccb_info *ccb);
150 int (*phy_start_req)(struct pm8001_hba_info *pm8001_ha, u8 phy_id);
151 int (*phy_stop_req)(struct pm8001_hba_info *pm8001_ha, u8 phy_id);
152 int (*reg_dev_req)(struct pm8001_hba_info *pm8001_ha,
153 struct pm8001_device *pm8001_dev, u32 flag);
154 int (*dereg_dev_req)(struct pm8001_hba_info *pm8001_ha, u32 device_id);
155 int (*phy_ctl_req)(struct pm8001_hba_info *pm8001_ha,
156 u32 phy_id, u32 phy_op);
157 int (*task_abort)(struct pm8001_hba_info *pm8001_ha,
158 struct pm8001_device *pm8001_dev, u8 flag, u32 task_tag,
159 u32 cmd_tag);
160 int (*ssp_tm_req)(struct pm8001_hba_info *pm8001_ha,
161 struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf);
162 int (*get_nvmd_req)(struct pm8001_hba_info *pm8001_ha, void *payload);
163 int (*set_nvmd_req)(struct pm8001_hba_info *pm8001_ha, void *payload);
164 int (*fw_flash_update_req)(struct pm8001_hba_info *pm8001_ha,
165 void *payload);
166 int (*set_dev_state_req)(struct pm8001_hba_info *pm8001_ha,
167 struct pm8001_device *pm8001_dev, u32 state);
168 int (*sas_diag_start_end_req)(struct pm8001_hba_info *pm8001_ha,
169 u32 state);
170 int (*sas_diag_execute_req)(struct pm8001_hba_info *pm8001_ha,
171 u32 state);
172 int (*sas_re_init_req)(struct pm8001_hba_info *pm8001_ha);
173};
174
175struct pm8001_chip_info {
176 u32 n_phy;
177 const struct pm8001_dispatch *dispatch;
178};
179#define PM8001_CHIP_DISP (pm8001_ha->chip->dispatch)
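/*
 * The dispatch table decouples the protocol glue in pm8001_sas.c from the
 * chip layer: pm8001_ha->chip points at a pm8001_chip_info whose .dispatch
 * is pm8001_8001_dispatch for the SPC 8001, so a call such as
 *
 *	PM8001_CHIP_DISP->ssp_io_req(pm8001_ha, ccb);
 *
 * resolves to that chip's SSP I/O request builder.
 */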
180
181struct pm8001_port {
182 struct asd_sas_port sas_port;
183 u8 port_attached;
184 u8 wide_port_phymap;
185 u8 port_state;
186 struct list_head list;
187};
188
189struct pm8001_phy {
190 struct pm8001_hba_info *pm8001_ha;
191 struct pm8001_port *port;
192 struct asd_sas_phy sas_phy;
193 struct sas_identify identify;
194 struct scsi_device *sdev;
195 u64 dev_sas_addr;
196 u32 phy_type;
197 struct completion *enable_completion;
198 u32 frame_rcvd_size;
199 u8 frame_rcvd[32];
200 u8 phy_attached;
201 u8 phy_state;
202 enum sas_linkrate minimum_linkrate;
203 enum sas_linkrate maximum_linkrate;
204};
205
206struct pm8001_device {
207 enum sas_dev_type dev_type;
208 struct domain_device *sas_device;
209 u32 attached_phy;
210 u32 id;
211 struct completion *dcompletion;
212 struct completion *setds_completion;
213 u32 device_id;
214 u32 running_req;
215};
216
217struct pm8001_prd_imt {
218 __le32 len;
219 __le32 e;
220};
221
222struct pm8001_prd {
223 __le64 addr; /* 64-bit buffer address */
224 struct pm8001_prd_imt im_len; /* 64-bit length */
225} __attribute__ ((packed));
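/*
 * Each PRD entry describes one DMA segment of an I/O. A hedged sketch of
 * how the chip layer might fill ccb->buf_prd from the scatterlist mapped
 * in pm8001_task_exec() (the real code is the ->make_prd() dispatch hook):
 *
 *	struct pm8001_prd *prd = ccb->buf_prd;
 *	struct scatterlist *sg;
 *	int i;
 *
 *	for_each_sg(task->scatter, sg, ccb->n_elem, i) {
 *		prd->addr = cpu_to_le64(sg_dma_address(sg));
 *		prd->im_len.len = cpu_to_le32(sg_dma_len(sg));
 *		prd++;
 *	}
 */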
226/*
227 * CCB(Command Control Block)
228 */
229struct pm8001_ccb_info {
230 struct list_head entry;
231 struct sas_task *task;
232 u32 n_elem;
233 u32 ccb_tag;
234 dma_addr_t ccb_dma_handle;
235 struct pm8001_device *device;
236 struct pm8001_prd buf_prd[PM8001_MAX_DMA_SG];
237 struct fw_control_ex *fw_control_context;
238};
239
240struct mpi_mem {
241 void *virt_ptr;
242 dma_addr_t phys_addr;
243 u32 phys_addr_hi;
244 u32 phys_addr_lo;
245 u32 total_len;
246 u32 num_elements;
247 u32 element_size;
248 u32 alignment;
249};
250
251struct mpi_mem_req {
252 /* The number of elements in the mpiMemory array */
253 u32 count;
254 /* The array of structures that define memory regions */
255 struct mpi_mem region[USI_MAX_MEMCNT];
256};
257
258struct main_cfg_table {
259 u32 signature;
260 u32 interface_rev;
261 u32 firmware_rev;
262 u32 max_out_io;
263 u32 max_sgl;
264 u32 ctrl_cap_flag;
265 u32 gst_offset;
266 u32 inbound_queue_offset;
267 u32 outbound_queue_offset;
268 u32 inbound_q_nppd_hppd;
269 u32 outbound_hw_event_pid0_3;
270 u32 outbound_hw_event_pid4_7;
271 u32 outbound_ncq_event_pid0_3;
272 u32 outbound_ncq_event_pid4_7;
273 u32 outbound_tgt_ITNexus_event_pid0_3;
274 u32 outbound_tgt_ITNexus_event_pid4_7;
275 u32 outbound_tgt_ssp_event_pid0_3;
276 u32 outbound_tgt_ssp_event_pid4_7;
277 u32 outbound_tgt_smp_event_pid0_3;
278 u32 outbound_tgt_smp_event_pid4_7;
279 u32 upper_event_log_addr;
280 u32 lower_event_log_addr;
281 u32 event_log_size;
282 u32 event_log_option;
283 u32 upper_iop_event_log_addr;
284 u32 lower_iop_event_log_addr;
285 u32 iop_event_log_size;
286 u32 iop_event_log_option;
287 u32 fatal_err_interrupt;
288 u32 fatal_err_dump_offset0;
289 u32 fatal_err_dump_length0;
290 u32 fatal_err_dump_offset1;
291 u32 fatal_err_dump_length1;
292 u32 hda_mode_flag;
293 u32 anolog_setup_table_offset;
294};
295struct general_status_table {
296 u32 gst_len_mpistate;
297 u32 iq_freeze_state0;
298 u32 iq_freeze_state1;
299 u32 msgu_tcnt;
300 u32 iop_tcnt;
301 u32 reserved;
302 u32 phy_state[8];
303 u32 reserved1;
304 u32 reserved2;
305 u32 reserved3;
306 u32 recover_err_info[8];
307};
308struct inbound_queue_table {
309 u32 element_pri_size_cnt;
310 u32 upper_base_addr;
311 u32 lower_base_addr;
312 u32 ci_upper_base_addr;
313 u32 ci_lower_base_addr;
314 u32 pi_pci_bar;
315 u32 pi_offset;
316 u32 total_length;
317 void *base_virt;
318 void *ci_virt;
319 u32 reserved;
320 __le32 consumer_index;
321 u32 producer_idx;
322};
323struct outbound_queue_table {
324 u32 element_size_cnt;
325 u32 upper_base_addr;
326 u32 lower_base_addr;
327 void *base_virt;
328 u32 pi_upper_base_addr;
329 u32 pi_lower_base_addr;
330 u32 ci_pci_bar;
331 u32 ci_offset;
332 u32 total_length;
333 void *pi_virt;
334 u32 interrup_vec_cnt_delay;
335 u32 dinterrup_to_pci_offset;
336 __le32 producer_index;
337 u32 consumer_idx;
338};
339struct pm8001_hba_memspace {
340 void __iomem *memvirtaddr;
341 u64 membase;
342 u32 memsize;
343};
344struct pm8001_hba_info {
345 char name[PM8001_NAME_LENGTH];
346 struct list_head list;
347 unsigned long flags;
348 spinlock_t lock;/* host-wide lock */
349 struct pci_dev *pdev;/* our device */
350 struct device *dev;
351 struct pm8001_hba_memspace io_mem[6];
352 struct mpi_mem_req memoryMap;
353 void __iomem *msg_unit_tbl_addr;/*Message Unit Table Addr*/
354 void __iomem *main_cfg_tbl_addr;/*Main Config Table Addr*/
355 void __iomem *general_stat_tbl_addr;/*General Status Table Addr*/
356 void __iomem *inbnd_q_tbl_addr;/*Inbound Queue Config Table Addr*/
357 void __iomem *outbnd_q_tbl_addr;/*Outbound Queue Config Table Addr*/
358 struct main_cfg_table main_cfg_tbl;
359 struct general_status_table gs_tbl;
360 struct inbound_queue_table inbnd_q_tbl[PM8001_MAX_INB_NUM];
361 struct outbound_queue_table outbnd_q_tbl[PM8001_MAX_OUTB_NUM];
362 u8 sas_addr[SAS_ADDR_SIZE];
363 struct sas_ha_struct *sas;/* SCSI/SAS glue */
364 struct Scsi_Host *shost;
365 u32 chip_id;
366 const struct pm8001_chip_info *chip;
367 struct completion *nvmd_completion;
368 int tags_num;
369 unsigned long *tags;
370 struct pm8001_phy phy[PM8001_MAX_PHYS];
371 struct pm8001_port port[PM8001_MAX_PHYS];
372 u32 id;
373 u32 irq;
374 struct pm8001_device *devices;
375 struct pm8001_ccb_info *ccb_info;
376#ifdef PM8001_USE_MSIX
377 struct msix_entry msix_entries[16];/*for msi-x interrupt*/
378 int number_of_intr;/*will be used in remove()*/
379#endif
380#ifdef PM8001_USE_TASKLET
381 struct tasklet_struct tasklet;
382#endif
383 struct list_head wq_list;
384 u32 logging_level;
385 u32 fw_status;
386 const struct firmware *fw_image;
387};
388
389struct pm8001_wq {
390 struct delayed_work work_q;
391 struct pm8001_hba_info *pm8001_ha;
392 void *data;
393 int handler;
394 struct list_head entry;
395};
396
397struct pm8001_fw_image_header {
398 u8 vender_id[8];
399 u8 product_id;
400 u8 hardware_rev;
401 u8 dest_partition;
402 u8 reserved;
403 u8 fw_rev[4];
404 __be32 image_length;
405 __be32 image_crc;
406 __be32 startup_entry;
407} __attribute__((packed, aligned(4)));
408
409
410/**
411 * FW Flash Update status values
412 */
413#define FLASH_UPDATE_COMPLETE_PENDING_REBOOT 0x00
414#define FLASH_UPDATE_IN_PROGRESS 0x01
415#define FLASH_UPDATE_HDR_ERR 0x02
416#define FLASH_UPDATE_OFFSET_ERR 0x03
417#define FLASH_UPDATE_CRC_ERR 0x04
418#define FLASH_UPDATE_LENGTH_ERR 0x05
419#define FLASH_UPDATE_HW_ERR 0x06
420#define FLASH_UPDATE_DNLD_NOT_SUPPORTED 0x10
421#define FLASH_UPDATE_DISABLED 0x11
422
423/**
424 * parameter structure for firmware flash update.
425 */
426struct fw_flash_updata_info {
427 u32 cur_image_offset;
428 u32 cur_image_len;
429 u32 total_image_len;
430 struct pm8001_prd sgl;
431};
432
433struct fw_control_info {
434 u32 retcode;/*ret code (status)*/
435 u32 phase;/*ret code phase*/
436 u32 phaseCmplt;/*percent complete for the current
437 update phase */
438 u32 version;/*Hex encoded firmware version number*/
439 u32 offset;/*Used for downloading firmware */
440 u32 len; /*len of buffer*/
441 u32 size;/* Used in OS VPD and Trace get size
442 operations.*/
443 u32 reserved;/* padding required for 64 bit
444 alignment */
445 u8 buffer[1];/* Start of buffer */
446};
447struct fw_control_ex {
448 struct fw_control_info *fw_control;
449 void *buffer;/* keep buffer pointer to be
450 freed when the response comes */
451 void *virtAddr;/* keep virtual address of the data */
452 void *usrAddr;/* keep virtual address of the
453 user data */
454 dma_addr_t phys_addr;
455 u32 len; /* len of buffer */
456 void *payload; /* pointer to IOCTL Payload */
457 u8 inProgress;/*if 1 - the IOCTL request is in
458 progress */
459 void *param1;
460 void *param2;
461 void *param3;
462};
463
464/******************** function prototype *********************/
465int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out);
466void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha);
467u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag);
468void pm8001_ccb_free(struct pm8001_hba_info *pm8001_ha, u32 ccb_idx);
469void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
470 struct sas_task *task, struct pm8001_ccb_info *ccb, u32 ccb_idx);
471int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
472 void *funcdata);
473int pm8001_slave_alloc(struct scsi_device *scsi_dev);
474int pm8001_slave_configure(struct scsi_device *sdev);
475void pm8001_scan_start(struct Scsi_Host *shost);
476int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time);
477int pm8001_queue_command(struct sas_task *task, const int num,
478 gfp_t gfp_flags);
479int pm8001_abort_task(struct sas_task *task);
480int pm8001_abort_task_set(struct domain_device *dev, u8 *lun);
481int pm8001_clear_aca(struct domain_device *dev, u8 *lun);
482int pm8001_clear_task_set(struct domain_device *dev, u8 *lun);
483int pm8001_dev_found(struct domain_device *dev);
484void pm8001_dev_gone(struct domain_device *dev);
485int pm8001_lu_reset(struct domain_device *dev, u8 *lun);
486int pm8001_I_T_nexus_reset(struct domain_device *dev);
487int pm8001_query_task(struct sas_task *task);
488int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
489 dma_addr_t *pphys_addr, u32 *pphys_addr_hi, u32 *pphys_addr_lo,
490 u32 mem_size, u32 align);
491
492
493/* ctl shared API */
494extern struct device_attribute *pm8001_host_attrs[];
495
496#endif
497
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 0a97bc9074bb..53aefffbaead 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -1,7 +1,8 @@
 /*
  * pmcraid.c -- driver for PMC Sierra MaxRAID controller adapters
  *
- * Written By: PMC Sierra Corporation
+ * Written By: Anil Ravindranath<anil_ravindranath@pmc-sierra.com>
+ *             PMC-Sierra Inc
  *
  * Copyright (C) 2008, 2009 PMC Sierra Inc
  *
@@ -40,6 +41,7 @@
 #include <linux/hdreg.h>
 #include <linux/version.h>
 #include <linux/io.h>
+#include <linux/slab.h>
 #include <asm/irq.h>
 #include <asm/processor.h>
 #include <linux/libata.h>
@@ -79,7 +81,7 @@ DECLARE_BITMAP(pmcraid_minor, PMCRAID_MAX_ADAPTERS);
 /*
  * Module parameters
  */
-MODULE_AUTHOR("PMC Sierra Corporation, anil_ravindranath@pmc-sierra.com");
+MODULE_AUTHOR("Anil Ravindranath<anil_ravindranath@pmc-sierra.com>");
 MODULE_DESCRIPTION("PMC Sierra MaxRAID Controller Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(PMCRAID_DRIVER_VERSION);
@@ -162,10 +164,10 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
 list_for_each_entry(temp, &pinstance->used_res_q, queue) {

- /* do not expose VSETs with order-ids >= 240 */
+ /* do not expose VSETs with order-ids > MAX_VSET_TARGETS */
 if (RES_IS_VSET(temp->cfg_entry)) {
 target = temp->cfg_entry.unique_flags1;
- if (target >= PMCRAID_MAX_VSET_TARGETS)
+ if (target > PMCRAID_MAX_VSET_TARGETS)
 continue;
 bus = PMCRAID_VSET_BUS_ID;
 lun = 0;
@@ -234,7 +236,7 @@ static int pmcraid_slave_configure(struct scsi_device *scsi_dev)
 scsi_dev->allow_restart = 1;
 blk_queue_rq_timeout(scsi_dev->request_queue,
 PMCRAID_VSET_IO_TIMEOUT);
-blk_queue_max_sectors(scsi_dev->request_queue,
+blk_queue_max_hw_sectors(scsi_dev->request_queue,
 PMCRAID_VSET_MAX_SECTORS);
 }

@@ -278,12 +280,17 @@ static void pmcraid_slave_destroy(struct scsi_device *scsi_dev)
 * pmcraid_change_queue_depth - Change the device's queue depth
 * @scsi_dev: scsi device struct
 * @depth: depth to set
+ * @reason: calling context
 *
 * Return value
 * actual depth set
 */
-static int pmcraid_change_queue_depth(struct scsi_device *scsi_dev, int depth)
+static int pmcraid_change_queue_depth(struct scsi_device *scsi_dev, int depth,
+ int reason)
 {
+ if (reason != SCSI_QDEPTH_DEFAULT)
+ return -EOPNOTSUPP;
+
 if (depth > PMCRAID_MAX_CMD_PER_LUN)
 depth = PMCRAID_MAX_CMD_PER_LUN;

@@ -1205,7 +1212,7 @@ static int pmcraid_expose_resource(struct pmcraid_config_table_entry *cfgte)
 int retval = 0;

 if (cfgte->resource_type == RES_TYPE_VSET)
- retval = ((cfgte->unique_flags1 & 0xFF) < 0xFE);
+ retval = ((cfgte->unique_flags1 & 0x80) == 0);
 else if (cfgte->resource_type == RES_TYPE_GSCSI)
 retval = (RES_BUS(cfgte->resource_address) !=
 PMCRAID_VIRTUAL_ENCL_BUS_ID);
@@ -1356,6 +1363,7 @@ static int pmcraid_notify_aen(struct pmcraid_instance *pinstance, u8 type)
 * Return value:
 * none
 */
+
 static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
 {
 struct pmcraid_config_table_entry *cfg_entry;
@@ -1363,9 +1371,10 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
 struct pmcraid_cmd *cmd;
 struct pmcraid_cmd *cfgcmd;
 struct pmcraid_resource_entry *res = NULL;
-u32 new_entry = 1;
 unsigned long lock_flags;
 unsigned long host_lock_flags;
+u32 new_entry = 1;
+u32 hidden_entry = 0;
 int rc;

 ccn_hcam = (struct pmcraid_hcam_ccn *)pinstance->ccn.hcam;
@@ -1401,9 +1410,15 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
 }

 /* If this resource is not going to be added to mid-layer, just notify
- * applications and return
+ * applications and return. If this notification is about hiding a VSET
+ * resource, check if it was exposed already.
 */
-if (!pmcraid_expose_resource(cfg_entry))
+if (pinstance->ccn.hcam->notification_type ==
+ NOTIFICATION_TYPE_ENTRY_CHANGED &&
+ cfg_entry->resource_type == RES_TYPE_VSET &&
+ cfg_entry->unique_flags1 & 0x80) {
+ hidden_entry = 1;
+} else if (!pmcraid_expose_resource(cfg_entry))
 goto out_notify_apps;

 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
@@ -1419,6 +1434,12 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)

 if (new_entry) {

+ if (hidden_entry) {
+ spin_unlock_irqrestore(&pinstance->resource_lock,
+ lock_flags);
+ goto out_notify_apps;
+ }
+
 /* If there are more number of resources than what driver can
 * manage, do not notify the applications about the CCN. Just
 * ignore this notifications and re-register the same HCAM
@@ -1449,8 +1470,9 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
 sizeof(struct pmcraid_config_table_entry));

 if (pinstance->ccn.hcam->notification_type ==
- NOTIFICATION_TYPE_ENTRY_DELETED) {
+ NOTIFICATION_TYPE_ENTRY_DELETED || hidden_entry) {
 if (res->scsi_dev) {
+ res->cfg_entry.unique_flags1 &= 0x7F;
 res->change_detected = RES_CHANGE_DEL;
 res->cfg_entry.resource_handle =
 PMCRAID_INVALID_RES_HANDLE;
@@ -2462,14 +2484,12 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
 sense_copied = 1;
 }

-if (RES_IS_GSCSI(res->cfg_entry)) {
+if (RES_IS_GSCSI(res->cfg_entry))
 pmcraid_cancel_all(cmd, sense_copied);
-} else if (sense_copied) {
+else if (sense_copied)
 pmcraid_erp_done(cmd);
-return 0;
-} else {
+else
 pmcraid_request_sense(cmd);
-}

 return 1;

@@ -3342,7 +3362,7 @@ static int pmcraid_chr_fasync(int fd, struct file *filep, int mode)
 * @direction : data transfer direction
 *
 * Return value
- * 0 on sucess, non-zero error code on failure
+ * 0 on success, non-zero error code on failure
 */
 static int pmcraid_build_passthrough_ioadls(
 struct pmcraid_cmd *cmd,
@@ -3401,7 +3421,7 @@ static int pmcraid_build_passthrough_ioadls(
 * @direction: data transfer direction
 *
 * Return value
- * 0 on sucess, non-zero error code on failure
+ * 0 on success, non-zero error code on failure
 */
 static void pmcraid_release_passthrough_ioadls(
 struct pmcraid_cmd *cmd,
@@ -3429,7 +3449,7 @@ static void pmcraid_release_passthrough_ioadls(
 * @arg: pointer to pmcraid_passthrough_buffer user buffer
 *
 * Return value
- * 0 on sucess, non-zero error code on failure
+ * 0 on success, non-zero error code on failure
 */
 static long pmcraid_ioctl_passthrough(
 struct pmcraid_instance *pinstance,
diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
index 3441b3f90827..b8ad07c3449e 100644
--- a/drivers/scsi/pmcraid.h
+++ b/drivers/scsi/pmcraid.h
@@ -1,6 +1,9 @@
 /*
 * pmcraid.h -- PMC Sierra MaxRAID controller driver header file
 *
+ * Written By: Anil Ravindranath<anil_ravindranath@pmc-sierra.com>
+ *             PMC-Sierra Inc
+ *
 * Copyright (C) 2008, 2009 PMC Sierra Inc.
 *
 * This program is free software; you can redistribute it and/or modify
@@ -106,7 +109,7 @@
 #define PMCRAID_VSET_LUN_ID 0x0
 #define PMCRAID_PHYS_BUS_ID 0x0
 #define PMCRAID_VIRTUAL_ENCL_BUS_ID 0x8
-#define PMCRAID_MAX_VSET_TARGETS 240
+#define PMCRAID_MAX_VSET_TARGETS 0x7F
 #define PMCRAID_MAX_VSET_LUNS_PER_TARGET 8

 #define PMCRAID_IOA_MAX_SECTORS 32767
@@ -771,11 +774,11 @@ static struct pmcraid_ioasc_error pmcraid_ioasc_error_table[] = {
 {0x01180600, IOASC_LOG_LEVEL_MUST,
 "Recovered Error, soft media error, sector reassignment suggested"},
 {0x015D0000, IOASC_LOG_LEVEL_MUST,
- "Recovered Error, failure prediction thresold exceeded"},
+ "Recovered Error, failure prediction threshold exceeded"},
 {0x015D9200, IOASC_LOG_LEVEL_MUST,
- "Recovered Error, soft Cache Card Battery error thresold"},
+ "Recovered Error, soft Cache Card Battery error threshold"},
 {0x015D9200, IOASC_LOG_LEVEL_MUST,
- "Recovered Error, soft Cache Card Battery error thresold"},
+ "Recovered Error, soft Cache Card Battery error threshold"},
 {0x02048000, IOASC_LOG_LEVEL_MUST,
 "Not Ready, IOA Reset Required"},
 {0x02408500, IOASC_LOG_LEVEL_MUST,
@@ -935,7 +938,7 @@ static struct pmcraid_ioasc_error pmcraid_ioasc_error_table[] = {

 /*
 * pmcraid_ioctl_header - definition of header structure that preceeds all the
- * buffers given as ioctl arguements.
+ * buffers given as ioctl arguments.
 *
 * .signature : always ASCII string, "PMCRAID"
 * .reserved : not used
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index 8aa0bd987e29..7bc2d796e403 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -10,6 +10,7 @@

 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/blkdev.h>
 #include <linux/parport.h>
diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c
index db90caf43f42..92ffbb510498 100644
--- a/drivers/scsi/ps3rom.c
+++ b/drivers/scsi/ps3rom.c
@@ -20,6 +20,7 @@

 #include <linux/cdrom.h>
 #include <linux/highmem.h>
+#include <linux/slab.h>

 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 8371d917a9a2..b8166ecfd0e3 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -17,9 +17,11 @@
17* General Public License for more details. 17* General Public License for more details.
18* 18*
19******************************************************************************/ 19******************************************************************************/
20#define QLA1280_VERSION "3.27" 20#define QLA1280_VERSION "3.27.1"
21/***************************************************************************** 21/*****************************************************************************
22 Revision History: 22 Revision History:
23 Rev 3.27.1, February 8, 2010, Michael Reed
24 - Retain firmware image for error recovery.
23 Rev 3.27, February 10, 2009, Michael Reed 25 Rev 3.27, February 10, 2009, Michael Reed
24 - General code cleanup. 26 - General code cleanup.
25 - Improve error recovery. 27 - Improve error recovery.
@@ -346,7 +348,6 @@
346#include <linux/pci.h> 348#include <linux/pci.h>
347#include <linux/proc_fs.h> 349#include <linux/proc_fs.h>
348#include <linux/stat.h> 350#include <linux/stat.h>
349#include <linux/slab.h>
350#include <linux/pci_ids.h> 351#include <linux/pci_ids.h>
351#include <linux/interrupt.h> 352#include <linux/interrupt.h>
352#include <linux/init.h> 353#include <linux/init.h>
@@ -538,9 +539,9 @@ __setup("qla1280=", qla1280_setup);
538/*****************************************/ 539/*****************************************/
539 540
540struct qla_boards { 541struct qla_boards {
541 unsigned char name[9]; /* Board ID String */ 542 char *name; /* Board ID String */
542 int numPorts; /* Number of SCSI ports */ 543 int numPorts; /* Number of SCSI ports */
543 char *fwname; /* firmware name */ 544 int fw_index; /* index into qla1280_fw_tbl for firmware */
544}; 545};
545 546
546/* NOTE: the last argument in each entry is used to index ql1280_board_tbl */ 547/* NOTE: the last argument in each entry is used to index ql1280_board_tbl */
@@ -561,15 +562,30 @@ static struct pci_device_id qla1280_pci_tbl[] = {
561}; 562};
562MODULE_DEVICE_TABLE(pci, qla1280_pci_tbl); 563MODULE_DEVICE_TABLE(pci, qla1280_pci_tbl);
563 564
565DEFINE_MUTEX(qla1280_firmware_mutex);
566
567struct qla_fw {
568 char *fwname;
569 const struct firmware *fw;
570};
571
572#define QL_NUM_FW_IMAGES 3
573
574struct qla_fw qla1280_fw_tbl[QL_NUM_FW_IMAGES] = {
575 {"qlogic/1040.bin", NULL}, /* image 0 */
576 {"qlogic/1280.bin", NULL}, /* image 1 */
577 {"qlogic/12160.bin", NULL}, /* image 2 */
578};
579
580/* NOTE: Order of boards in this table must match order in qla1280_pci_tbl */
564static struct qla_boards ql1280_board_tbl[] = { 581static struct qla_boards ql1280_board_tbl[] = {
565 /* Name , Number of ports, FW details */ 582 {.name = "QLA12160", .numPorts = 2, .fw_index = 2},
566 {"QLA12160", 2, "qlogic/12160.bin"}, 583 {.name = "QLA1040" , .numPorts = 1, .fw_index = 0},
567 {"QLA1040", 1, "qlogic/1040.bin"}, 584 {.name = "QLA1080" , .numPorts = 1, .fw_index = 1},
568 {"QLA1080", 1, "qlogic/1280.bin"}, 585 {.name = "QLA1240" , .numPorts = 2, .fw_index = 1},
569 {"QLA1240", 2, "qlogic/1280.bin"}, 586 {.name = "QLA1280" , .numPorts = 2, .fw_index = 1},
570 {"QLA1280", 2, "qlogic/1280.bin"}, 587 {.name = "QLA10160", .numPorts = 1, .fw_index = 2},
571 {"QLA10160", 1, "qlogic/12160.bin"}, 588 {.name = " ", .numPorts = 0, .fw_index = -1},
572 {" ", 0, " "},
573}; 589};
574 590
575static int qla1280_verbose = 1; 591static int qla1280_verbose = 1;
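
The hunk above replaces per-board firmware filenames with an index into a shared image table, written with C99 designated initializers so several boards can reference one image. A minimal userspace sketch of that table-driven layout follows; the struct and table names are hypothetical stand-ins, not the driver's own symbols.

#include <stdio.h>

struct fw_entry { const char *fwname; };
struct board    { const char *name; int num_ports; int fw_index; };

static const struct fw_entry fw_tbl[] = {
	{ .fwname = "qlogic/1040.bin"  },	/* image 0 */
	{ .fwname = "qlogic/1280.bin"  },	/* image 1 */
	{ .fwname = "qlogic/12160.bin" },	/* image 2 */
};

static const struct board board_tbl[] = {
	{ .name = "QLA12160", .num_ports = 2, .fw_index = 2 },
	{ .name = "QLA1040",  .num_ports = 1, .fw_index = 0 },
	{ .name = "QLA1080",  .num_ports = 1, .fw_index = 1 },
};

int main(void)
{
	/* each board resolves its image through the shared table */
	for (unsigned i = 0; i < sizeof(board_tbl) / sizeof(board_tbl[0]); i++)
		printf("%s -> %s\n", board_tbl[i].name,
		       fw_tbl[board_tbl[i].fw_index].fwname);
	return 0;
}
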
@@ -1512,6 +1528,63 @@ qla1280_initialize_adapter(struct scsi_qla_host *ha)
1512} 1528}
1513 1529
1514/* 1530/*
1531 * qla1280_request_firmware
1532 * Acquire firmware for chip. Retain in memory
1533 * for error recovery.
1534 *
1535 * Input:
1536 * ha = adapter block pointer.
1537 *
1538 * Returns:
1539 * Pointer to firmware image or an error code
1540 * cast to pointer via ERR_PTR().
1541 */
1542static const struct firmware *
1543qla1280_request_firmware(struct scsi_qla_host *ha)
1544{
1545 const struct firmware *fw;
1546 int err;
1547 int index;
1548 char *fwname;
1549
1550 spin_unlock_irq(ha->host->host_lock);
1551 mutex_lock(&qla1280_firmware_mutex);
1552
1553 index = ql1280_board_tbl[ha->devnum].fw_index;
1554 fw = qla1280_fw_tbl[index].fw;
1555 if (fw)
1556 goto out;
1557
1558 fwname = qla1280_fw_tbl[index].fwname;
1559 err = request_firmware(&fw, fwname, &ha->pdev->dev);
1560
1561 if (err) {
1562 printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
1563 fwname, err);
1564 fw = ERR_PTR(err);
1565 goto unlock;
1566 }
1567 if ((fw->size % 2) || (fw->size < 6)) {
1568 printk(KERN_ERR "Invalid firmware length %zu in image \"%s\"\n",
1569 fw->size, fwname);
1570 release_firmware(fw);
1571 fw = ERR_PTR(-EINVAL);
1572 goto unlock;
1573 }
1574
1575 qla1280_fw_tbl[index].fw = fw;
1576
1577 out:
1578 ha->fwver1 = fw->data[0];
1579 ha->fwver2 = fw->data[1];
1580 ha->fwver3 = fw->data[2];
1581 unlock:
1582 mutex_unlock(&qla1280_firmware_mutex);
1583 spin_lock_irq(ha->host->host_lock);
1584 return fw;
1585}
1586
1587/*
1515 * Chip diagnostics 1588 * Chip diagnostics
1516 * Test chip for proper operation. 1589 * Test chip for proper operation.
1517 * 1590 *
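
qla1280_request_firmware() above loads each image once under qla1280_firmware_mutex, retains it for error recovery, and reports failure through an error value encoded in the returned pointer (ERR_PTR/IS_ERR); note that it drops the host_lock spinlock first, because request_firmware() can sleep. A userspace sketch of the cache-once-with-ERR_PTR shape, with pthreads standing in for the kernel mutex and a hypothetical load_blob() standing in for request_firmware():

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* userspace re-creation of the kernel's pointer-encoded error idiom */
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-4095)

struct blob { size_t size; unsigned char *data; };

static pthread_mutex_t fw_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct blob *fw_cache;		/* one slot; the driver keeps three */

static struct blob *load_blob(const char *name)
{
	struct blob *b = malloc(sizeof(*b));	/* pretend we read a file */
	if (!b)
		return ERR_PTR(-ENOMEM);
	b->size = 6;
	b->data = calloc(1, b->size);
	return b;
}

static struct blob *request_blob(const char *name)
{
	struct blob *b;

	pthread_mutex_lock(&fw_mutex);
	b = fw_cache;
	if (!b) {
		b = load_blob(name);
		/* mirror the driver's sanity check on the image */
		if (!IS_ERR(b) && (b->size % 2 || b->size < 6)) {
			free(b->data);
			free(b);
			b = ERR_PTR(-EINVAL);
		}
		if (!IS_ERR(b))
			fw_cache = b;	/* retained for later callers */
	}
	pthread_mutex_unlock(&fw_mutex);
	return b;
}

int main(void)
{
	struct blob *b = request_blob("qlogic/1280.bin");
	if (IS_ERR(b))
		return (int)-PTR_ERR(b);
	printf("image of %zu bytes (cached: %s)\n", b->size,
	       request_blob("qlogic/1280.bin") == b ? "yes" : "no");
	return 0;
}
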
@@ -1634,28 +1707,18 @@ qla1280_chip_diag(struct scsi_qla_host *ha)
1634static int 1707static int
1635qla1280_load_firmware_pio(struct scsi_qla_host *ha) 1708qla1280_load_firmware_pio(struct scsi_qla_host *ha)
1636{ 1709{
1710 /* enter with host_lock acquired */
1711
1637 const struct firmware *fw; 1712 const struct firmware *fw;
1638 const __le16 *fw_data; 1713 const __le16 *fw_data;
1639 uint16_t risc_address, risc_code_size; 1714 uint16_t risc_address, risc_code_size;
1640 uint16_t mb[MAILBOX_REGISTER_COUNT], i; 1715 uint16_t mb[MAILBOX_REGISTER_COUNT], i;
1641 int err; 1716 int err = 0;
1717
1718 fw = qla1280_request_firmware(ha);
1719 if (IS_ERR(fw))
1720 return PTR_ERR(fw);
1642 1721
1643 err = request_firmware(&fw, ql1280_board_tbl[ha->devnum].fwname,
1644 &ha->pdev->dev);
1645 if (err) {
1646 printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
1647 ql1280_board_tbl[ha->devnum].fwname, err);
1648 return err;
1649 }
1650 if ((fw->size % 2) || (fw->size < 6)) {
1651 printk(KERN_ERR "Bogus length %zu in image \"%s\"\n",
1652 fw->size, ql1280_board_tbl[ha->devnum].fwname);
1653 err = -EINVAL;
1654 goto out;
1655 }
1656 ha->fwver1 = fw->data[0];
1657 ha->fwver2 = fw->data[1];
1658 ha->fwver3 = fw->data[2];
1659 fw_data = (const __le16 *)&fw->data[0]; 1722 fw_data = (const __le16 *)&fw->data[0];
1660 ha->fwstart = __le16_to_cpu(fw_data[2]); 1723 ha->fwstart = __le16_to_cpu(fw_data[2]);
1661 1724
@@ -1673,11 +1736,10 @@ qla1280_load_firmware_pio(struct scsi_qla_host *ha)
1673 if (err) { 1736 if (err) {
1674 printk(KERN_ERR "scsi(%li): Failed to load firmware\n", 1737 printk(KERN_ERR "scsi(%li): Failed to load firmware\n",
1675 ha->host_no); 1738 ha->host_no);
1676 goto out; 1739 break;
1677 } 1740 }
1678 } 1741 }
1679out: 1742
1680 release_firmware(fw);
1681 return err; 1743 return err;
1682} 1744}
1683 1745
@@ -1685,6 +1747,7 @@ out:
1685static int 1747static int
1686qla1280_load_firmware_dma(struct scsi_qla_host *ha) 1748qla1280_load_firmware_dma(struct scsi_qla_host *ha)
1687{ 1749{
1750 /* enter with host_lock acquired */
1688 const struct firmware *fw; 1751 const struct firmware *fw;
1689 const __le16 *fw_data; 1752 const __le16 *fw_data;
1690 uint16_t risc_address, risc_code_size; 1753 uint16_t risc_address, risc_code_size;
@@ -1699,22 +1762,10 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
1699 return -ENOMEM; 1762 return -ENOMEM;
1700#endif 1763#endif
1701 1764
1702 err = request_firmware(&fw, ql1280_board_tbl[ha->devnum].fwname, 1765 fw = qla1280_request_firmware(ha);
1703 &ha->pdev->dev); 1766 if (IS_ERR(fw))
1704 if (err) { 1767 return PTR_ERR(fw);
1705 printk(KERN_ERR "Failed to load image \"%s\" err %d\n", 1768
1706 ql1280_board_tbl[ha->devnum].fwname, err);
1707 return err;
1708 }
1709 if ((fw->size % 2) || (fw->size < 6)) {
1710 printk(KERN_ERR "Bogus length %zu in image \"%s\"\n",
1711 fw->size, ql1280_board_tbl[ha->devnum].fwname);
1712 err = -EINVAL;
1713 goto out;
1714 }
1715 ha->fwver1 = fw->data[0];
1716 ha->fwver2 = fw->data[1];
1717 ha->fwver3 = fw->data[2];
1718 fw_data = (const __le16 *)&fw->data[0]; 1769 fw_data = (const __le16 *)&fw->data[0];
1719 ha->fwstart = __le16_to_cpu(fw_data[2]); 1770 ha->fwstart = __le16_to_cpu(fw_data[2]);
1720 1771
@@ -1799,7 +1850,6 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
1799#if DUMP_IT_BACK 1850#if DUMP_IT_BACK
1800 pci_free_consistent(ha->pdev, 8000, tbuf, p_tbuf); 1851 pci_free_consistent(ha->pdev, 8000, tbuf, p_tbuf);
1801#endif 1852#endif
1802 release_firmware(fw);
1803 return err; 1853 return err;
1804} 1854}
1805 1855
@@ -1838,6 +1888,7 @@ qla1280_start_firmware(struct scsi_qla_host *ha)
1838static int 1888static int
1839qla1280_load_firmware(struct scsi_qla_host *ha) 1889qla1280_load_firmware(struct scsi_qla_host *ha)
1840{ 1890{
1891 /* enter with host_lock taken */
1841 int err; 1892 int err;
1842 1893
1843 err = qla1280_chip_diag(ha); 1894 err = qla1280_chip_diag(ha);
@@ -4416,7 +4467,16 @@ qla1280_init(void)
4416static void __exit 4467static void __exit
4417qla1280_exit(void) 4468qla1280_exit(void)
4418{ 4469{
4470 int i;
4471
4419 pci_unregister_driver(&qla1280_pci_driver); 4472 pci_unregister_driver(&qla1280_pci_driver);
4473 /* release any allocated firmware images */
4474 for (i = 0; i < QL_NUM_FW_IMAGES; i++) {
4475 if (qla1280_fw_tbl[i].fw) {
4476 release_firmware(qla1280_fw_tbl[i].fw);
4477 qla1280_fw_tbl[i].fw = NULL;
4478 }
4479 }
4420} 4480}
4421 4481
4422module_init(qla1280_init); 4482module_init(qla1280_init);
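
The exit path above sweeps the firmware table and releases whatever was cached during the driver's lifetime. A short sketch of the same idempotent teardown, with free() standing in for release_firmware():

#include <stdlib.h>

#define NUM_IMAGES 3

static void *fw_cache[NUM_IMAGES];	/* filled while the driver runs */

static void release_all(void)
{
	for (int i = 0; i < NUM_IMAGES; i++) {
		if (fw_cache[i]) {
			free(fw_cache[i]);	/* release_firmware() stand-in */
			fw_cache[i] = NULL;	/* safe if run twice */
		}
	}
}

int main(void)
{
	fw_cache[1] = malloc(16);
	release_all();
	return 0;
}
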
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index fbcb82a2f7f4..1c7ef55966fb 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -8,10 +8,13 @@
8 8
9#include <linux/kthread.h> 9#include <linux/kthread.h>
10#include <linux/vmalloc.h> 10#include <linux/vmalloc.h>
11#include <linux/slab.h>
11#include <linux/delay.h> 12#include <linux/delay.h>
12 13
13static int qla24xx_vport_disable(struct fc_vport *, bool); 14static int qla24xx_vport_disable(struct fc_vport *, bool);
14 15static int qla84xx_reset(scsi_qla_host_t *, struct msg_echo_lb *, struct fc_bsg_job *);
16int qla84xx_reset_chip(scsi_qla_host_t *, uint16_t, uint16_t *);
17static int qla84xx_mgmt_cmd(scsi_qla_host_t *, struct msg_echo_lb *, struct fc_bsg_job *);
15/* SYSFS attributes --------------------------------------------------------- */ 18/* SYSFS attributes --------------------------------------------------------- */
16 19
17static ssize_t 20static ssize_t
@@ -232,6 +235,9 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
232 if (off) 235 if (off)
233 return 0; 236 return 0;
234 237
238 if (unlikely(pci_channel_offline(ha->pdev)))
239 return 0;
240
235 if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1) 241 if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1)
236 return -EINVAL; 242 return -EINVAL;
237 if (start > ha->optrom_size) 243 if (start > ha->optrom_size)
@@ -379,6 +385,9 @@ qla2x00_sysfs_read_vpd(struct kobject *kobj,
379 struct device, kobj))); 385 struct device, kobj)));
380 struct qla_hw_data *ha = vha->hw; 386 struct qla_hw_data *ha = vha->hw;
381 387
388 if (unlikely(pci_channel_offline(ha->pdev)))
389 return 0;
390
382 if (!capable(CAP_SYS_ADMIN)) 391 if (!capable(CAP_SYS_ADMIN))
383 return 0; 392 return 0;
384 393
@@ -398,6 +407,9 @@ qla2x00_sysfs_write_vpd(struct kobject *kobj,
398 struct qla_hw_data *ha = vha->hw; 407 struct qla_hw_data *ha = vha->hw;
399 uint8_t *tmp_data; 408 uint8_t *tmp_data;
400 409
410 if (unlikely(pci_channel_offline(ha->pdev)))
411 return 0;
412
401 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size || 413 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
402 !ha->isp_ops->write_nvram) 414 !ha->isp_ops->write_nvram)
403 return 0; 415 return 0;
@@ -1159,6 +1171,28 @@ qla2x00_total_isp_aborts_show(struct device *dev,
1159} 1171}
1160 1172
1161static ssize_t 1173static ssize_t
1174qla24xx_84xx_fw_version_show(struct device *dev,
1175 struct device_attribute *attr, char *buf)
1176{
1177 int rval = QLA_SUCCESS;
1178 uint16_t status[2] = {0, 0};
1179 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1180 struct qla_hw_data *ha = vha->hw;
1181
1182 if (IS_QLA84XX(ha) && ha->cs84xx) {
1183 if (ha->cs84xx->op_fw_version == 0) {
1184 rval = qla84xx_verify_chip(vha, status);
1185 }
1186
1187 if ((rval == QLA_SUCCESS) && (status[0] == 0))
1188 return snprintf(buf, PAGE_SIZE, "%u\n",
1189 (uint32_t)ha->cs84xx->op_fw_version);
1190 }
1191
1192 return snprintf(buf, PAGE_SIZE, "\n");
1193}
1194
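
qla24xx_84xx_fw_version_show() above queries the chip only when no version is cached yet and always emits a bounded, newline-terminated string. A userspace sketch of that show-callback shape; query_hw() is a hypothetical stand-in for qla84xx_verify_chip(), and BUF_SIZE plays the role of PAGE_SIZE.

#include <stdio.h>

#define BUF_SIZE 4096

static unsigned int cached_ver;		/* 0 = not fetched yet */

static int query_hw(unsigned int *ver)	/* hardware query stand-in */
{
	*ver = 0x0105;
	return 0;
}

static int fw_version_show(char *buf)
{
	if (cached_ver == 0 && query_hw(&cached_ver) != 0)
		return snprintf(buf, BUF_SIZE, "\n");	/* unknown: blank */
	return snprintf(buf, BUF_SIZE, "%u\n", cached_ver);
}

int main(void)
{
	char buf[BUF_SIZE];
	fw_version_show(buf);
	fputs(buf, stdout);
	return 0;
}
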
1195static ssize_t
1162qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr, 1196qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
1163 char *buf) 1197 char *buf)
1164{ 1198{
@@ -1238,10 +1272,15 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
1238 char *buf) 1272 char *buf)
1239{ 1273{
1240 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1274 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1241 int rval; 1275 int rval = QLA_FUNCTION_FAILED;
1242 uint16_t state[5]; 1276 uint16_t state[5];
1243 1277
1244 rval = qla2x00_get_firmware_state(vha, state); 1278 if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1279 test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
1280 DEBUG2_3_11(printk("%s(%ld): isp reset in progress.\n",
1281 __func__, vha->host_no));
1282 else if (!vha->hw->flags.eeh_busy)
1283 rval = qla2x00_get_firmware_state(vha, state);
1245 if (rval != QLA_SUCCESS) 1284 if (rval != QLA_SUCCESS)
1246 memset(state, -1, sizeof(state)); 1285 memset(state, -1, sizeof(state));
1247 1286
@@ -1271,6 +1310,8 @@ static DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
1271 qla2x00_optrom_fcode_version_show, NULL); 1310 qla2x00_optrom_fcode_version_show, NULL);
1272static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show, 1311static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
1273 NULL); 1312 NULL);
1313static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show,
1314 NULL);
1274static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show, 1315static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
1275 NULL); 1316 NULL);
1276static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL); 1317static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
@@ -1300,6 +1341,7 @@ struct device_attribute *qla2x00_host_attrs[] = {
1300 &dev_attr_optrom_efi_version, 1341 &dev_attr_optrom_efi_version,
1301 &dev_attr_optrom_fcode_version, 1342 &dev_attr_optrom_fcode_version,
1302 &dev_attr_optrom_fw_version, 1343 &dev_attr_optrom_fw_version,
1344 &dev_attr_84xx_fw_version,
1303 &dev_attr_total_isp_aborts, 1345 &dev_attr_total_isp_aborts,
1304 &dev_attr_mpi_version, 1346 &dev_attr_mpi_version,
1305 &dev_attr_phy_version, 1347 &dev_attr_phy_version,
@@ -1452,10 +1494,13 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
1452 if (!fcport) 1494 if (!fcport)
1453 return; 1495 return;
1454 1496
1455 if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) 1497 if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
1498 return;
1499
1500 if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
1456 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16); 1501 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
1457 else 1502 return;
1458 qla2x00_abort_fcport_cmds(fcport); 1503 }
1459 1504
1460 /* 1505 /*
1461 * Transport has effectively 'deleted' the rport, clear 1506 * Transport has effectively 'deleted' the rport, clear
@@ -1475,6 +1520,9 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
1475 if (!fcport) 1520 if (!fcport)
1476 return; 1521 return;
1477 1522
1523 if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
1524 return;
1525
1478 if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) { 1526 if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
1479 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16); 1527 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
1480 return; 1528 return;
@@ -1488,8 +1536,6 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
1488 fcport->vha->hw->isp_ops->fabric_logout(fcport->vha, 1536 fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
1489 fcport->loop_id, fcport->d_id.b.domain, 1537 fcport->loop_id, fcport->d_id.b.domain,
1490 fcport->d_id.b.area, fcport->d_id.b.al_pa); 1538 fcport->d_id.b.area, fcport->d_id.b.al_pa);
1491
1492 qla2x00_abort_fcport_cmds(fcport);
1493} 1539}
1494 1540
1495static int 1541static int
@@ -1515,6 +1561,12 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
1515 pfc_host_stat = &ha->fc_host_stat; 1561 pfc_host_stat = &ha->fc_host_stat;
1516 memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics)); 1562 memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));
1517 1563
1564 if (test_bit(UNLOADING, &vha->dpc_flags))
1565 goto done;
1566
1567 if (unlikely(pci_channel_offline(ha->pdev)))
1568 goto done;
1569
1518 stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma); 1570 stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma);
1519 if (stats == NULL) { 1571 if (stats == NULL) {
1520 DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n", 1572 DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n",
@@ -1654,7 +1706,8 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1654 fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN); 1706 fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
1655 } 1707 }
1656 1708
1657 if (scsi_add_host(vha->host, &fc_vport->dev)) { 1709 if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
1710 &ha->pdev->dev)) {
1658 DEBUG15(printk("scsi(%ld): scsi_add_host failure for VP[%d].\n", 1711 DEBUG15(printk("scsi(%ld): scsi_add_host failure for VP[%d].\n",
1659 vha->host_no, vha->vp_idx)); 1712 vha->host_no, vha->vp_idx));
1660 goto vport_create_failed_2; 1713 goto vport_create_failed_2;
@@ -1772,6 +1825,582 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
1772 return 0; 1825 return 0;
1773} 1826}
1774 1827
1828/* BSG support for ELS/CT pass through */
1829inline srb_t *
1830qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
1831{
1832 srb_t *sp;
1833 struct qla_hw_data *ha = vha->hw;
1834 struct srb_bsg_ctx *ctx;
1835
1836 sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
1837 if (!sp)
1838 goto done;
1839 ctx = kzalloc(size, GFP_KERNEL);
1840 if (!ctx) {
1841 mempool_free(sp, ha->srb_mempool);
1842 goto done;
1843 }
1844
1845 memset(sp, 0, sizeof(*sp));
1846 sp->fcport = fcport;
1847 sp->ctx = ctx;
1848done:
1849 return sp;
1850}
1851
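
qla2x00_get_ctx_bsg_sp() above allocates in two steps and rolls the first back if the second fails, so callers see either a complete SRB or NULL. A sketch of that pattern, with malloc()/calloc() standing in for mempool_alloc()/kzalloc():

#include <stdlib.h>
#include <string.h>

struct ctx { int type; };
struct srb { struct ctx *ctx; };

static struct srb *get_ctx_sp(size_t ctx_size)
{
	struct srb *sp = malloc(sizeof(*sp));	/* mempool_alloc() in-kernel */
	struct ctx *ctx;

	if (!sp)
		return NULL;
	ctx = calloc(1, ctx_size);		/* kzalloc() in-kernel */
	if (!ctx) {
		free(sp);			/* roll back step one */
		return NULL;
	}
	memset(sp, 0, sizeof(*sp));
	sp->ctx = ctx;
	return sp;
}

int main(void)
{
	struct srb *sp = get_ctx_sp(sizeof(struct ctx));
	if (sp) { free(sp->ctx); free(sp); }
	return 0;
}
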
1852static int
1853qla2x00_process_els(struct fc_bsg_job *bsg_job)
1854{
1855 struct fc_rport *rport;
1856 fc_port_t *fcport;
1857 struct Scsi_Host *host;
1858 scsi_qla_host_t *vha;
1859 struct qla_hw_data *ha;
1860 srb_t *sp;
1861 const char *type;
1862 int req_sg_cnt, rsp_sg_cnt;
1863 int rval = (DRIVER_ERROR << 16);
1864 uint16_t nextlid = 0;
1865 struct srb_bsg *els;
1866
1867 /* Multiple SG's are not supported for ELS requests */
1868 if (bsg_job->request_payload.sg_cnt > 1 ||
1869 bsg_job->reply_payload.sg_cnt > 1) {
1870 DEBUG2(printk(KERN_INFO
1871 "multiple SG's are not supported for ELS requests"
1872 " [request_sg_cnt: %x reply_sg_cnt: %x]\n",
1873 bsg_job->request_payload.sg_cnt,
1874 bsg_job->reply_payload.sg_cnt));
1875 rval = -EPERM;
1876 goto done;
1877 }
1878
1879 /* ELS request for rport */
1880 if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
1881 rport = bsg_job->rport;
1882 fcport = *(fc_port_t **) rport->dd_data;
1883 host = rport_to_shost(rport);
1884 vha = shost_priv(host);
1885 ha = vha->hw;
1886 type = "FC_BSG_RPT_ELS";
1887
1888 /* make sure the rport is logged in;
1889 * if not, perform a fabric login
1890 */
1891 if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
1892 DEBUG2(qla_printk(KERN_WARNING, ha,
1893 "failed to login port %06X for ELS passthru\n",
1894 fcport->d_id.b24));
1895 rval = -EIO;
1896 goto done;
1897 }
1898 } else {
1899 host = bsg_job->shost;
1900 vha = shost_priv(host);
1901 ha = vha->hw;
1902 type = "FC_BSG_HST_ELS_NOLOGIN";
1903
1904 /* Allocate a dummy fcport structure, since the functions
1905 * preparing the IOCB and mailbox command retrieve port-
1906 * specific information from the fcport structure. For host-
1907 * based ELS commands no fcport structure is allocated.
1908 */
1909 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
1910 if (!fcport) {
1911 rval = -ENOMEM;
1912 goto done;
1913 }
1914
1915 /* Initialize all required fields of fcport */
1916 fcport->vha = vha;
1917 fcport->vp_idx = vha->vp_idx;
1918 fcport->d_id.b.al_pa =
1919 bsg_job->request->rqst_data.h_els.port_id[0];
1920 fcport->d_id.b.area =
1921 bsg_job->request->rqst_data.h_els.port_id[1];
1922 fcport->d_id.b.domain =
1923 bsg_job->request->rqst_data.h_els.port_id[2];
1924 fcport->loop_id =
1925 (fcport->d_id.b.al_pa == 0xFD) ?
1926 NPH_FABRIC_CONTROLLER : NPH_F_PORT;
1927 }
1928
1929 if (!vha->flags.online) {
1930 DEBUG2(qla_printk(KERN_WARNING, ha,
1931 "host not online\n"));
1932 rval = -EIO;
1933 goto done;
1934 }
1935
1936 req_sg_cnt =
1937 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1938 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1939 if (!req_sg_cnt) {
1940 rval = -ENOMEM;
1941 goto done_free_fcport;
1942 }
1943 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1944 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1945 if (!rsp_sg_cnt) {
1946 rval = -ENOMEM;
1947 goto done_free_fcport;
1948 }
1949
1950 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
1951 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
1952 {
1953 DEBUG2(printk(KERN_INFO
1954 "dma mapping resulted in different sg counts \
1955 [request_sg_cnt: %x dma_request_sg_cnt: %x\
1956 reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
1957 bsg_job->request_payload.sg_cnt, req_sg_cnt,
1958 bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
1959 rval = -EAGAIN;
1960 goto done_unmap_sg;
1961 }
1962
1963 /* Alloc SRB structure */
1964 sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
1965 if (!sp) {
1966 rval = -ENOMEM;
1967 goto done_unmap_sg;
1968 }
1969
1970 els = sp->ctx;
1971 els->ctx.type =
1972 (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
1973 SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
1974 els->bsg_job = bsg_job;
1975
1976 DEBUG2(qla_printk(KERN_INFO, ha,
1977 "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
1978 "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
1979 bsg_job->request->rqst_data.h_els.command_code,
1980 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
1981 fcport->d_id.b.al_pa));
1982
1983 rval = qla2x00_start_sp(sp);
1984 if (rval != QLA_SUCCESS) {
1985 kfree(sp->ctx);
1986 mempool_free(sp, ha->srb_mempool);
1987 rval = -EIO;
1988 goto done_unmap_sg;
1989 }
1990 return rval;
1991
1992done_unmap_sg:
1993 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1994 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1995 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1996 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1997 goto done_free_fcport;
1998
1999done_free_fcport:
2000 if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN)
2001 kfree(fcport);
2002done:
2003 return rval;
2004}
2005
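
qla2x00_process_els() above acquires resources in order (request mapping, reply mapping, SRB) and unwinds through goto labels in reverse on failure; in the driver the mappings stay live until command completion, but this compressed sketch of the goto ladder frees everything at the end for simplicity. The resources are plain heap blocks standing in for DMA mappings and the SRB.

#include <errno.h>
#include <stdlib.h>

static int process(void)
{
	void *req_map, *rsp_map, *sp;
	int rval;

	req_map = malloc(16);			/* dma_map_sg() stand-in */
	if (!req_map)
		return -ENOMEM;
	rsp_map = malloc(16);
	if (!rsp_map) {
		rval = -ENOMEM;
		goto unmap_req;
	}
	sp = malloc(16);			/* SRB allocation */
	if (!sp) {
		rval = -ENOMEM;
		goto unmap_rsp;
	}
	free(sp);				/* "command completed" */
	rval = 0;
unmap_rsp:
	free(rsp_map);				/* reverse order of acquire */
unmap_req:
	free(req_map);
	return rval;
}

int main(void) { return process() ? 1 : 0; }
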
2006static int
2007qla2x00_process_ct(struct fc_bsg_job *bsg_job)
2008{
2009 srb_t *sp;
2010 struct Scsi_Host *host = bsg_job->shost;
2011 scsi_qla_host_t *vha = shost_priv(host);
2012 struct qla_hw_data *ha = vha->hw;
2013 int rval = (DRIVER_ERROR << 16);
2014 int req_sg_cnt, rsp_sg_cnt;
2015 uint16_t loop_id;
2016 struct fc_port *fcport;
2017 char *type = "FC_BSG_HST_CT";
2018 struct srb_bsg *ct;
2019
2020 /* pass through is supported only for ISP 4Gb or higher */
2021 if (!IS_FWI2_CAPABLE(ha)) {
2022 DEBUG2(qla_printk(KERN_INFO, ha,
2023 "scsi(%ld):Firmware is not capable to support FC "
2024 "CT pass thru\n", vha->host_no));
2025 rval = -EPERM;
2026 goto done;
2027 }
2028
2029 req_sg_cnt =
2030 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
2031 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2032 if (!req_sg_cnt) {
2033 rval = -ENOMEM;
2034 goto done;
2035 }
2036
2037 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
2038 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2039 if (!rsp_sg_cnt) {
2040 rval = -ENOMEM;
2041 goto done;
2042 }
2043
2044 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
2045 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
2046 {
2047 DEBUG2(qla_printk(KERN_WARNING, ha,
2048 "dma mapping resulted in different sg counts \
2049 [request_sg_cnt: %x dma_request_sg_cnt: %x\
2050 reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
2051 bsg_job->request_payload.sg_cnt, req_sg_cnt,
2052 bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
2053 rval = -EAGAIN;
2054 goto done_unmap_sg;
2055 }
2056
2057 if (!vha->flags.online) {
2058 DEBUG2(qla_printk(KERN_WARNING, ha,
2059 "host not online\n"));
2060 rval = -EIO;
2061 goto done_unmap_sg;
2062 }
2063
2064 loop_id =
2065 (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
2066 >> 24;
2067 switch (loop_id) {
2068 case 0xFC:
2069 loop_id = cpu_to_le16(NPH_SNS);
2070 break;
2071 case 0xFA:
2072 loop_id = vha->mgmt_svr_loop_id;
2073 break;
2074 default:
2075 DEBUG2(qla_printk(KERN_INFO, ha,
2076 "Unknown loop id: %x\n", loop_id));
2077 rval = -EINVAL;
2078 goto done_unmap_sg;
2079 }
2080
2081 /* Allocate a dummy fcport structure, since the functions preparing
2082 * the IOCB and mailbox command retrieve port-specific information
2083 * from the fcport structure. For host-based CT commands no fcport
2084 * structure is allocated.
2085 */
2086 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2087 if (!fcport)
2088 {
2089 rval = -ENOMEM;
2090 goto done_unmap_sg;
2091 }
2092
2093 /* Initialize all required fields of fcport */
2094 fcport->vha = vha;
2095 fcport->vp_idx = vha->vp_idx;
2096 fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
2097 fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
2098 fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
2099 fcport->loop_id = loop_id;
2100
2101 /* Alloc SRB structure */
2102 sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
2103 if (!sp) {
2104 rval = -ENOMEM;
2105 goto done_free_fcport;
2106 }
2107
2108 ct = sp->ctx;
2109 ct->ctx.type = SRB_CT_CMD;
2110 ct->bsg_job = bsg_job;
2111
2112 DEBUG2(qla_printk(KERN_INFO, ha,
2113 "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
2114 "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
2115 (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
2116 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
2117 fcport->d_id.b.al_pa));
2118
2119 rval = qla2x00_start_sp(sp);
2120 if (rval != QLA_SUCCESS) {
2121 kfree(sp->ctx);
2122 mempool_free(sp, ha->srb_mempool);
2123 rval = -EIO;
2124 goto done_free_fcport;
2125 }
2126 return rval;
2127
2128done_free_fcport:
2129 kfree(fcport);
2130done_unmap_sg:
2131 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
2132 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2133 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
2134 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2135done:
2136 return rval;
2137}
2138
2139static int
2140qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
2141{
2142 struct Scsi_Host *host = bsg_job->shost;
2143 scsi_qla_host_t *vha = shost_priv(host);
2144 struct qla_hw_data *ha = vha->hw;
2145 int rval;
2146 uint8_t command_sent;
2147 uint32_t vendor_cmd;
2148 char *type;
2149 struct msg_echo_lb elreq;
2150 uint16_t response[MAILBOX_REGISTER_COUNT];
2151 uint8_t *fw_sts_ptr;
2152 uint8_t *req_data;
2153 dma_addr_t req_data_dma;
2154 uint32_t req_data_len;
2155 uint8_t *rsp_data;
2156 dma_addr_t rsp_data_dma;
2157 uint32_t rsp_data_len;
2158
2159 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
2160 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
2161 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
2162 rval = -EBUSY;
2163 goto done;
2164 }
2165
2166 if (!vha->flags.online) {
2167 DEBUG2(qla_printk(KERN_WARNING, ha,
2168 "host not online\n"));
2169 rval = -EIO;
2170 goto done;
2171 }
2172
2173 elreq.req_sg_cnt =
2174 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
2175 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2176 if (!elreq.req_sg_cnt) {
2177 rval = -ENOMEM;
2178 goto done;
2179 }
2180 elreq.rsp_sg_cnt =
2181 dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
2182 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2183 if (!elreq.rsp_sg_cnt) {
2184 rval = -ENOMEM;
2185 goto done;
2186 }
2187
2188 if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
2189 (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
2190 {
2191 DEBUG2(printk(KERN_INFO
2192 "dma mapping resulted in different sg counts \
2193 [request_sg_cnt: %x dma_request_sg_cnt: %x\
2194 reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
2195 bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
2196 bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt));
2197 rval = -EAGAIN;
2198 goto done_unmap_sg;
2199 }
2200 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
2201 req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
2202 &req_data_dma, GFP_KERNEL);
2203
2204 rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
2205 &rsp_data_dma, GFP_KERNEL);
2206
2207 /* Copy the request buffer in req_data now */
2208 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2209 bsg_job->request_payload.sg_cnt, req_data,
2210 req_data_len);
2211
2212 elreq.send_dma = req_data_dma;
2213 elreq.rcv_dma = rsp_data_dma;
2214 elreq.transfer_size = req_data_len;
2215
2216 /* Vendor cmd : loopback or ECHO diagnostic
2217 * Options:
2218 * Loopback : Either internal or external loopback
2219 * ECHO: ECHO ELS or Vendor specific FC4 link data
2220 */
2221 vendor_cmd = bsg_job->request->rqst_data.h_vendor.vendor_cmd[0];
2222 elreq.options =
2223 *(((uint32_t *)bsg_job->request->rqst_data.h_vendor.vendor_cmd)
2224 + 1);
2225
2226 switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
2227 case QL_VND_LOOPBACK:
2228 if (ha->current_topology != ISP_CFG_F) {
2229 type = "FC_BSG_HST_VENDOR_LOOPBACK";
2230
2231 DEBUG2(qla_printk(KERN_INFO, ha,
2232 "scsi(%ld) bsg rqst type: %s vendor rqst type: %x options: %x.\n",
2233 vha->host_no, type, vendor_cmd, elreq.options));
2234
2235 command_sent = INT_DEF_LB_LOOPBACK_CMD;
2236 rval = qla2x00_loopback_test(vha, &elreq, response);
2237 if (IS_QLA81XX(ha)) {
2238 if (response[0] == MBS_COMMAND_ERROR && response[1] == MBS_LB_RESET) {
2239 DEBUG2(printk(KERN_ERR "%s(%ld): ABORTing "
2240 "ISP\n", __func__, vha->host_no));
2241 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2242 qla2xxx_wake_dpc(vha);
2243 }
2244 }
2245 } else {
2246 type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
2247 DEBUG2(qla_printk(KERN_INFO, ha,
2248 "scsi(%ld) bsg rqst type: %s vendor rqst type: %x options: %x.\n",
2249 vha->host_no, type, vendor_cmd, elreq.options));
2250
2251 command_sent = INT_DEF_LB_ECHO_CMD;
2252 rval = qla2x00_echo_test(vha, &elreq, response);
2253 }
2254 break;
2255 case QLA84_RESET:
2256 if (!IS_QLA84XX(vha->hw)) {
2257 rval = -EINVAL;
2258 DEBUG16(printk(
2259 "%s(%ld): 8xxx exiting.\n",
2260 __func__, vha->host_no));
2261 return rval;
2262 }
2263 rval = qla84xx_reset(vha, &elreq, bsg_job);
2264 break;
2265 case QLA84_MGMT_CMD:
2266 if (!IS_QLA84XX(vha->hw)) {
2267 rval = -EINVAL;
2268 DEBUG16(printk(
2269 "%s(%ld): 8xxx exiting.\n",
2270 __func__, vha->host_no));
2271 return rval;
2272 }
2273 rval = qla84xx_mgmt_cmd(vha, &elreq, bsg_job);
2274 break;
2275 default:
2276 rval = -ENOSYS;
2277 }
2278
2279 if (rval != QLA_SUCCESS) {
2280 DEBUG2(qla_printk(KERN_WARNING, ha,
2281 "scsi(%ld) Vendor request %s failed\n", vha->host_no, type));
2282 rval = 0;
2283 bsg_job->reply->result = (DID_ERROR << 16);
2284 bsg_job->reply->reply_payload_rcv_len = 0;
2285 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
2286 memcpy(fw_sts_ptr, response, sizeof(response));
2287 fw_sts_ptr += sizeof(response);
2288 *fw_sts_ptr = command_sent;
2289 } else {
2290 DEBUG2(qla_printk(KERN_WARNING, ha,
2291 "scsi(%ld) Vendor request %s completed\n", vha->host_no, type));
2292 rval = bsg_job->reply->result = 0;
2293 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(response) + sizeof(uint8_t);
2294 bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
2295 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
2296 memcpy(fw_sts_ptr, response, sizeof(response));
2297 fw_sts_ptr += sizeof(response);
2298 *fw_sts_ptr = command_sent;
2299 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2300 bsg_job->reply_payload.sg_cnt, rsp_data,
2301 rsp_data_len);
2302 }
2303 bsg_job->job_done(bsg_job);
2304
2305done_unmap_sg:
2306
2307 if (req_data)
2308 dma_free_coherent(&ha->pdev->dev, req_data_len,
2309 req_data, req_data_dma);
2310 dma_unmap_sg(&ha->pdev->dev,
2311 bsg_job->request_payload.sg_list,
2312 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2313 dma_unmap_sg(&ha->pdev->dev,
2314 bsg_job->reply_payload.sg_list,
2315 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2316
2317done:
2318 return rval;
2319}
2320
2321static int
2322qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
2323{
2324 int ret = -EINVAL;
2325
2326 switch (bsg_job->request->msgcode) {
2327 case FC_BSG_RPT_ELS:
2328 case FC_BSG_HST_ELS_NOLOGIN:
2329 ret = qla2x00_process_els(bsg_job);
2330 break;
2331 case FC_BSG_HST_CT:
2332 ret = qla2x00_process_ct(bsg_job);
2333 break;
2334 case FC_BSG_HST_VENDOR:
2335 ret = qla2x00_process_vendor_specific(bsg_job);
2336 break;
2337 case FC_BSG_HST_ADD_RPORT:
2338 case FC_BSG_HST_DEL_RPORT:
2339 case FC_BSG_RPT_CT:
2340 default:
2341 DEBUG2(printk("qla2xxx: unsupported BSG request\n"));
2342 break;
2343 }
2344 return ret;
2345}
2346
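
qla24xx_bsg_request() above is a plain dispatcher: a single switch on the BSG message code fans out to the matching handler, with unsupported codes rejected by the default arm. A minimal sketch of that shape, using hypothetical handler names:

#include <errno.h>

enum msgcode { MSG_ELS, MSG_CT, MSG_VENDOR, MSG_OTHER };

static int process_els(void)    { return 0; }
static int process_ct(void)     { return 0; }
static int process_vendor(void) { return 0; }

static int bsg_request(enum msgcode code)
{
	switch (code) {
	case MSG_ELS:
		return process_els();
	case MSG_CT:
		return process_ct();
	case MSG_VENDOR:
		return process_vendor();
	default:
		return -EINVAL;		/* unsupported request */
	}
}

int main(void) { return bsg_request(MSG_OTHER) == -EINVAL ? 0 : 1; }
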
2347static int
2348qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
2349{
2350 scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
2351 struct qla_hw_data *ha = vha->hw;
2352 srb_t *sp;
2353 int cnt, que;
2354 unsigned long flags;
2355 struct req_que *req;
2356 struct srb_bsg *sp_bsg;
2357
2358 /* find the bsg job from the active list of commands */
2359 spin_lock_irqsave(&ha->hardware_lock, flags);
2360 for (que = 0; que < ha->max_req_queues; que++) {
2361 req = ha->req_q_map[que];
2362 if (!req)
2363 continue;
2364
2365 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
2366 sp = req->outstanding_cmds[cnt];
2367
2368 if (sp) {
2369 sp_bsg = (struct srb_bsg *)sp->ctx;
2370
2371 if (((sp_bsg->ctx.type == SRB_CT_CMD) ||
2372 (sp_bsg->ctx.type == SRB_ELS_CMD_RPT) ||
2373 (sp_bsg->ctx.type == SRB_ELS_CMD_HST)) &&
2374 (sp_bsg->bsg_job == bsg_job)) {
2375 if (ha->isp_ops->abort_command(sp)) {
2376 DEBUG2(qla_printk(KERN_INFO, ha,
2377 "scsi(%ld): mbx abort_command failed\n", vha->host_no));
2378 bsg_job->req->errors = bsg_job->reply->result = -EIO;
2379 } else {
2380 DEBUG2(qla_printk(KERN_INFO, ha,
2381 "scsi(%ld): mbx abort_command success\n", vha->host_no));
2382 bsg_job->req->errors = bsg_job->reply->result = 0;
2383 }
2384 goto done;
2385 }
2386 }
2387 }
2388 }
2389 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2390 DEBUG2(qla_printk(KERN_INFO, ha,
2391 "scsi(%ld) SRB not found to abort\n", vha->host_no));
2392 bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
2393 return 0;
2394
2395done:
2396 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2397 if (bsg_job->request->msgcode == FC_BSG_HST_CT)
2398 kfree(sp->fcport);
2399 kfree(sp->ctx);
2400 mempool_free(sp, ha->srb_mempool);
2401 return 0;
2402}
2403
1775struct fc_function_template qla2xxx_transport_functions = { 2404struct fc_function_template qla2xxx_transport_functions = {
1776 2405
1777 .show_host_node_name = 1, 2406 .show_host_node_name = 1,
@@ -1815,6 +2444,8 @@ struct fc_function_template qla2xxx_transport_functions = {
1815 .vport_create = qla24xx_vport_create, 2444 .vport_create = qla24xx_vport_create,
1816 .vport_disable = qla24xx_vport_disable, 2445 .vport_disable = qla24xx_vport_disable,
1817 .vport_delete = qla24xx_vport_delete, 2446 .vport_delete = qla24xx_vport_delete,
2447 .bsg_request = qla24xx_bsg_request,
2448 .bsg_timeout = qla24xx_bsg_timeout,
1818}; 2449};
1819 2450
1820struct fc_function_template qla2xxx_transport_vport_functions = { 2451struct fc_function_template qla2xxx_transport_vport_functions = {
@@ -1855,6 +2486,8 @@ struct fc_function_template qla2xxx_transport_vport_functions = {
1855 .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk, 2486 .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
1856 .terminate_rport_io = qla2x00_terminate_rport_io, 2487 .terminate_rport_io = qla2x00_terminate_rport_io,
1857 .get_fc_host_stats = qla2x00_get_fc_host_stats, 2488 .get_fc_host_stats = qla2x00_get_fc_host_stats,
2489 .bsg_request = qla24xx_bsg_request,
2490 .bsg_timeout = qla24xx_bsg_timeout,
1858}; 2491};
1859 2492
1860void 2493void
@@ -1883,3 +2516,125 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
1883 speed = FC_PORTSPEED_1GBIT; 2516 speed = FC_PORTSPEED_1GBIT;
1884 fc_host_supported_speeds(vha->host) = speed; 2517 fc_host_supported_speeds(vha->host) = speed;
1885} 2518}
2519static int
2520qla84xx_reset(scsi_qla_host_t *ha, struct msg_echo_lb *mreq, struct fc_bsg_job *bsg_job)
2521{
2522 int ret = 0;
2523 int cmd;
2524 uint16_t cmd_status;
2525
2526 DEBUG16(printk("%s(%ld): entered.\n", __func__, ha->host_no));
2527
2528 cmd = (*((bsg_job->request->rqst_data.h_vendor.vendor_cmd) + 2))
2529 == A84_RESET_FLAG_ENABLE_DIAG_FW ?
2530 A84_ISSUE_RESET_DIAG_FW : A84_ISSUE_RESET_OP_FW;
2531 ret = qla84xx_reset_chip(ha, cmd == A84_ISSUE_RESET_DIAG_FW,
2532 &cmd_status);
2533 return ret;
2534}
2535
2536static int
2537qla84xx_mgmt_cmd(scsi_qla_host_t *ha, struct msg_echo_lb *mreq, struct fc_bsg_job *bsg_job)
2538{
2539 struct access_chip_84xx *mn;
2540 dma_addr_t mn_dma, mgmt_dma;
2541 void *mgmt_b = NULL;
2542 int ret = 0;
2543 int rsp_hdr_len, len = 0;
2544 struct qla84_msg_mgmt *ql84_mgmt;
2545
2546 ql84_mgmt = (struct qla84_msg_mgmt *) vmalloc(sizeof(struct qla84_msg_mgmt));
2547 ql84_mgmt->cmd =
2548 *((uint16_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 2));
2549 ql84_mgmt->mgmtp.u.mem.start_addr =
2550 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 3));
2551 ql84_mgmt->len =
2552 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 4));
2553 ql84_mgmt->mgmtp.u.config.id =
2554 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 5));
2555 ql84_mgmt->mgmtp.u.config.param0 =
2556 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 6));
2557 ql84_mgmt->mgmtp.u.config.param1 =
2558 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 7));
2559 ql84_mgmt->mgmtp.u.info.type =
2560 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 8));
2561 ql84_mgmt->mgmtp.u.info.context =
2562 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 9));
2563
2564 rsp_hdr_len = bsg_job->request_payload.payload_len;
2565
2566 mn = dma_pool_alloc(ha->hw->s_dma_pool, GFP_KERNEL, &mn_dma);
2567 if (mn == NULL) {
2568 DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
2569 "failed%lu\n", __func__, ha->host_no));
2570 return -ENOMEM;
2571 }
2572
2573 memset(mn, 0, sizeof (struct access_chip_84xx));
2574
2575 mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
2576 mn->entry_count = 1;
2577
2578 switch (ql84_mgmt->cmd) {
2579 case QLA84_MGMT_READ_MEM:
2580 mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
2581 mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.mem.start_addr);
2582 break;
2583 case QLA84_MGMT_WRITE_MEM:
2584 mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
2585 mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.mem.start_addr);
2586 break;
2587 case QLA84_MGMT_CHNG_CONFIG:
2588 mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
2589 mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.id);
2590 mn->parameter2 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.param0);
2591 mn->parameter3 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.param1);
2592 break;
2593 case QLA84_MGMT_GET_INFO:
2594 mn->options = cpu_to_le16(ACO_REQUEST_INFO);
2595 mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.info.type);
2596 mn->parameter2 = cpu_to_le32(ql84_mgmt->mgmtp.u.info.context);
2597 break;
2598 default:
2599 ret = -EIO;
2600 goto exit_mgmt0;
2601 }
2602
2603 if ((len == ql84_mgmt->len) &&
2604 ql84_mgmt->cmd != QLA84_MGMT_CHNG_CONFIG) {
2605 mgmt_b = dma_alloc_coherent(&ha->hw->pdev->dev, len,
2606 &mgmt_dma, GFP_KERNEL);
2607 if (mgmt_b == NULL) {
2608 DEBUG2(printk(KERN_ERR "%s: dma alloc mgmt_b "
2609 "failed%lu\n", __func__, ha->host_no));
2610 ret = -ENOMEM;
2611 goto exit_mgmt0;
2612 }
2613 mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->len);
2614 mn->dseg_count = cpu_to_le16(1);
2615 mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
2616 mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
2617 mn->dseg_length = cpu_to_le32(len);
2618
2619 if (ql84_mgmt->cmd == QLA84_MGMT_WRITE_MEM) {
2620 memcpy(mgmt_b, ql84_mgmt->payload, len);
2621 }
2622 }
2623
2624 ret = qla2x00_issue_iocb(ha, mn, mn_dma, 0);
2625 if ((ret != QLA_SUCCESS) || (ql84_mgmt->cmd == QLA84_MGMT_WRITE_MEM)
2626 || (ql84_mgmt->cmd == QLA84_MGMT_CHNG_CONFIG)) {
2627 if (ret != QLA_SUCCESS)
2628 DEBUG2(printk(KERN_ERR "%s(%lu): failed\n",
2629 __func__, ha->host_no));
2630 } else if ((ql84_mgmt->cmd == QLA84_MGMT_READ_MEM) ||
2631 (ql84_mgmt->cmd == QLA84_MGMT_GET_INFO)) {
2632 }
2633
2634 if (mgmt_b)
2635 dma_free_coherent(&ha->hw->pdev->dev, len, mgmt_b, mgmt_dma);
2636
2637exit_mgmt0:
2638 dma_pool_free(ha->hw->s_dma_pool, mn, mn_dma);
2639 return ret;
2640}
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index cca8e4ab0372..cb2eca4c26d8 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -377,6 +377,24 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
377 return ptr + sizeof(struct qla2xxx_mq_chain); 377 return ptr + sizeof(struct qla2xxx_mq_chain);
378} 378}
379 379
380static void
381qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)
382{
383 struct qla_hw_data *ha = vha->hw;
384
385 if (rval != QLA_SUCCESS) {
386 qla_printk(KERN_WARNING, ha,
387 "Failed to dump firmware (%x)!!!\n", rval);
388 ha->fw_dumped = 0;
389 } else {
390 qla_printk(KERN_INFO, ha,
391 "Firmware dump saved to temp buffer (%ld/%p).\n",
392 vha->host_no, ha->fw_dump);
393 ha->fw_dumped = 1;
394 qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
395 }
396}
397
380/** 398/**
381 * qla2300_fw_dump() - Dumps binary data from the 2300 firmware. 399 * qla2300_fw_dump() - Dumps binary data from the 2300 firmware.
382 * @ha: HA context 400 * @ha: HA context
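
qla2xxx_dump_post_process() above hoists the identical success/failure block that the following hunks delete from five fw-dump routines into one helper. A sketch of that consolidation:

#include <stdio.h>

static int fw_dumped;

static void dump_post_process(int rval)
{
	/* one copy of the tail that used to be pasted into every routine */
	if (rval != 0) {
		fprintf(stderr, "Failed to dump firmware (%x)!!!\n", rval);
		fw_dumped = 0;
	} else {
		printf("Firmware dump saved to temp buffer.\n");
		fw_dumped = 1;
	}
}

static void fw_dump_2100(void) { dump_post_process(0); }
static void fw_dump_2300(void) { dump_post_process(0); }

int main(void)
{
	fw_dump_2100();
	fw_dump_2300();
	return fw_dumped ? 0 : 1;
}
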
@@ -530,17 +548,7 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
530 if (rval == QLA_SUCCESS) 548 if (rval == QLA_SUCCESS)
531 qla2xxx_copy_queues(ha, nxt); 549 qla2xxx_copy_queues(ha, nxt);
532 550
533 if (rval != QLA_SUCCESS) { 551 qla2xxx_dump_post_process(base_vha, rval);
534 qla_printk(KERN_WARNING, ha,
535 "Failed to dump firmware (%x)!!!\n", rval);
536 ha->fw_dumped = 0;
537
538 } else {
539 qla_printk(KERN_INFO, ha,
540 "Firmware dump saved to temp buffer (%ld/%p).\n",
541 base_vha->host_no, ha->fw_dump);
542 ha->fw_dumped = 1;
543 }
544 552
545qla2300_fw_dump_failed: 553qla2300_fw_dump_failed:
546 if (!hardware_locked) 554 if (!hardware_locked)
@@ -737,17 +745,7 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
737 if (rval == QLA_SUCCESS) 745 if (rval == QLA_SUCCESS)
738 qla2xxx_copy_queues(ha, &fw->risc_ram[cnt]); 746 qla2xxx_copy_queues(ha, &fw->risc_ram[cnt]);
739 747
740 if (rval != QLA_SUCCESS) { 748 qla2xxx_dump_post_process(base_vha, rval);
741 qla_printk(KERN_WARNING, ha,
742 "Failed to dump firmware (%x)!!!\n", rval);
743 ha->fw_dumped = 0;
744
745 } else {
746 qla_printk(KERN_INFO, ha,
747 "Firmware dump saved to temp buffer (%ld/%p).\n",
748 base_vha->host_no, ha->fw_dump);
749 ha->fw_dumped = 1;
750 }
751 749
752qla2100_fw_dump_failed: 750qla2100_fw_dump_failed:
753 if (!hardware_locked) 751 if (!hardware_locked)
@@ -984,17 +982,7 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
984 qla24xx_copy_eft(ha, nxt); 982 qla24xx_copy_eft(ha, nxt);
985 983
986qla24xx_fw_dump_failed_0: 984qla24xx_fw_dump_failed_0:
987 if (rval != QLA_SUCCESS) { 985 qla2xxx_dump_post_process(base_vha, rval);
988 qla_printk(KERN_WARNING, ha,
989 "Failed to dump firmware (%x)!!!\n", rval);
990 ha->fw_dumped = 0;
991
992 } else {
993 qla_printk(KERN_INFO, ha,
994 "Firmware dump saved to temp buffer (%ld/%p).\n",
995 base_vha->host_no, ha->fw_dump);
996 ha->fw_dumped = 1;
997 }
998 986
999qla24xx_fw_dump_failed: 987qla24xx_fw_dump_failed:
1000 if (!hardware_locked) 988 if (!hardware_locked)
@@ -1305,17 +1293,7 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1305 } 1293 }
1306 1294
1307qla25xx_fw_dump_failed_0: 1295qla25xx_fw_dump_failed_0:
1308 if (rval != QLA_SUCCESS) { 1296 qla2xxx_dump_post_process(base_vha, rval);
1309 qla_printk(KERN_WARNING, ha,
1310 "Failed to dump firmware (%x)!!!\n", rval);
1311 ha->fw_dumped = 0;
1312
1313 } else {
1314 qla_printk(KERN_INFO, ha,
1315 "Firmware dump saved to temp buffer (%ld/%p).\n",
1316 base_vha->host_no, ha->fw_dump);
1317 ha->fw_dumped = 1;
1318 }
1319 1297
1320qla25xx_fw_dump_failed: 1298qla25xx_fw_dump_failed:
1321 if (!hardware_locked) 1299 if (!hardware_locked)
@@ -1628,17 +1606,7 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1628 } 1606 }
1629 1607
1630qla81xx_fw_dump_failed_0: 1608qla81xx_fw_dump_failed_0:
1631 if (rval != QLA_SUCCESS) { 1609 qla2xxx_dump_post_process(base_vha, rval);
1632 qla_printk(KERN_WARNING, ha,
1633 "Failed to dump firmware (%x)!!!\n", rval);
1634 ha->fw_dumped = 0;
1635
1636 } else {
1637 qla_printk(KERN_INFO, ha,
1638 "Firmware dump saved to temp buffer (%ld/%p).\n",
1639 base_vha->host_no, ha->fw_dump);
1640 ha->fw_dumped = 1;
1641 }
1642 1610
1643qla81xx_fw_dump_failed: 1611qla81xx_fw_dump_failed:
1644 if (!hardware_locked) 1612 if (!hardware_locked)
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index f660dd70b72e..d6d9c86cb058 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -26,7 +26,7 @@
26/* #define QL_DEBUG_LEVEL_14 */ /* Output RSCN trace msgs */ 26/* #define QL_DEBUG_LEVEL_14 */ /* Output RSCN trace msgs */
27/* #define QL_DEBUG_LEVEL_15 */ /* Output NPIV trace msgs */ 27/* #define QL_DEBUG_LEVEL_15 */ /* Output NPIV trace msgs */
28/* #define QL_DEBUG_LEVEL_16 */ /* Output ISP84XX trace msgs */ 28/* #define QL_DEBUG_LEVEL_16 */ /* Output ISP84XX trace msgs */
29/* #define QL_DEBUG_LEVEL_17 */ /* Output MULTI-Q trace messages */ 29/* #define QL_DEBUG_LEVEL_17 */ /* Output EEH trace messages */
30 30
31/* 31/*
32* Macros use for debugging the driver. 32* Macros use for debugging the driver.
@@ -132,6 +132,13 @@
132#else 132#else
133#define DEBUG16(x) do {} while (0) 133#define DEBUG16(x) do {} while (0)
134#endif 134#endif
135
136#if defined(QL_DEBUG_LEVEL_17)
137#define DEBUG17(x) do {x;} while (0)
138#else
139#define DEBUG17(x) do {} while (0)
140#endif
141
135/* 142/*
136 * Firmware Dump structure definition 143 * Firmware Dump structure definition
137 */ 144 */
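
DEBUG17 above follows the usual conditional-trace idiom: the statement is wrapped in do { ... } while (0) so the macro behaves as a single statement (safe in an unbraced if/else) and compiles to nothing when the level is disabled. A self-contained example:

#include <stdio.h>

#define QL_DEBUG_LEVEL_17		/* comment out to strip DEBUG17 */

#if defined(QL_DEBUG_LEVEL_17)
#define DEBUG17(x)	do { x; } while (0)
#else
#define DEBUG17(x)	do { } while (0)
#endif

int main(void)
{
	int eeh_busy = 1;

	if (eeh_busy)
		DEBUG17(printf("EEH: device busy\n"));	/* safe unbraced */
	else
		printf("device idle\n");
	return 0;
}
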
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 215061861794..afa95614aaf8 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -31,6 +31,7 @@
31#include <scsi/scsi_device.h> 31#include <scsi/scsi_device.h>
32#include <scsi/scsi_cmnd.h> 32#include <scsi/scsi_cmnd.h>
33#include <scsi/scsi_transport_fc.h> 33#include <scsi/scsi_transport_fc.h>
34#include <scsi/scsi_bsg_fc.h>
34 35
35#define QLA2XXX_DRIVER_NAME "qla2xxx" 36#define QLA2XXX_DRIVER_NAME "qla2xxx"
36 37
@@ -228,6 +229,27 @@ struct srb_logio {
228 uint16_t flags; 229 uint16_t flags;
229}; 230};
230 231
232struct srb_bsg_ctx {
233#define SRB_ELS_CMD_RPT 3
234#define SRB_ELS_CMD_HST 4
235#define SRB_CT_CMD 5
236 uint16_t type;
237};
238
239struct srb_bsg {
240 struct srb_bsg_ctx ctx;
241 struct fc_bsg_job *bsg_job;
242};
243
244struct msg_echo_lb {
245 dma_addr_t send_dma;
246 dma_addr_t rcv_dma;
247 uint16_t req_sg_cnt;
248 uint16_t rsp_sg_cnt;
249 uint16_t options;
250 uint32_t transfer_size;
251};
252
231/* 253/*
232 * ISP I/O Register Set structure definitions. 254 * ISP I/O Register Set structure definitions.
233 */ 255 */
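
struct srb_bsg above embeds a type tag (srb_bsg_ctx) as its first member, so completion code can dispatch on one field and then recover the full object. A sketch of the tagged-context pattern; the names are illustrative, not the driver's:

#include <stdio.h>

enum { CMD_ELS_RPT = 3, CMD_ELS_HST = 4, CMD_CT = 5 };

struct ctx { int type; };			/* common header */
struct bsg_ctx { struct ctx ctx; const char *job; };

static void complete(struct ctx *c)
{
	/* cast back is valid because ctx is the first member */
	switch (c->type) {
	case CMD_ELS_RPT:
	case CMD_ELS_HST:
		printf("ELS done: %s\n", ((struct bsg_ctx *)c)->job);
		break;
	case CMD_CT:
		printf("CT done: %s\n", ((struct bsg_ctx *)c)->job);
		break;
	}
}

int main(void)
{
	struct bsg_ctx b = { .ctx = { .type = CMD_CT }, .job = "query" };
	complete(&b.ctx);
	return 0;
}
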
@@ -522,6 +544,8 @@ typedef struct {
522#define MBA_DISCARD_RND_FRAME 0x8048 /* discard RND frame due to error. */ 544#define MBA_DISCARD_RND_FRAME 0x8048 /* discard RND frame due to error. */
523#define MBA_REJECTED_FCP_CMD 0x8049 /* rejected FCP_CMD. */ 545#define MBA_REJECTED_FCP_CMD 0x8049 /* rejected FCP_CMD. */
524 546
547/* ISP mailbox loopback echo diagnostic error code */
548#define MBS_LB_RESET 0x17
525/* 549/*
526 * Firmware options 1, 2, 3. 550 * Firmware options 1, 2, 3.
527 */ 551 */
@@ -1570,9 +1594,6 @@ typedef struct fc_port {
1570 struct fc_rport *rport, *drport; 1594 struct fc_rport *rport, *drport;
1571 u32 supported_classes; 1595 u32 supported_classes;
1572 1596
1573 unsigned long last_queue_full;
1574 unsigned long last_ramp_up;
1575
1576 uint16_t vp_idx; 1597 uint16_t vp_idx;
1577} fc_port_t; 1598} fc_port_t;
1578 1599
@@ -1589,8 +1610,7 @@ typedef struct fc_port {
1589 */ 1610 */
1590#define FCF_FABRIC_DEVICE BIT_0 1611#define FCF_FABRIC_DEVICE BIT_0
1591#define FCF_LOGIN_NEEDED BIT_1 1612#define FCF_LOGIN_NEEDED BIT_1
1592#define FCF_TAPE_PRESENT BIT_2 1613#define FCF_FCP2_DEVICE BIT_2
1593#define FCF_FCP2_DEVICE BIT_3
1594 1614
1595/* No loop ID flag. */ 1615/* No loop ID flag. */
1596#define FC_NO_LOOP_ID 0x1000 1616#define FC_NO_LOOP_ID 0x1000
@@ -2123,6 +2143,7 @@ enum qla_work_type {
2123 QLA_EVT_ASYNC_LOGIN_DONE, 2143 QLA_EVT_ASYNC_LOGIN_DONE,
2124 QLA_EVT_ASYNC_LOGOUT, 2144 QLA_EVT_ASYNC_LOGOUT,
2125 QLA_EVT_ASYNC_LOGOUT_DONE, 2145 QLA_EVT_ASYNC_LOGOUT_DONE,
2146 QLA_EVT_UEVENT,
2126}; 2147};
2127 2148
2128 2149
@@ -2146,6 +2167,10 @@ struct qla_work_evt {
2146#define QLA_LOGIO_LOGIN_RETRIED BIT_0 2167#define QLA_LOGIO_LOGIN_RETRIED BIT_0
2147 u16 data[2]; 2168 u16 data[2];
2148 } logio; 2169 } logio;
2170 struct {
2171 u32 code;
2172#define QLA_UEVENT_CODE_FW_DUMP 0
2173 } uevent;
2149 } u; 2174 } u;
2150}; 2175};
2151 2176
@@ -2229,6 +2254,13 @@ struct req_que {
2229 int max_q_depth; 2254 int max_q_depth;
2230}; 2255};
2231 2256
2257/* Placeholder for FW buffer parameters */
2258struct qlfc_fw {
2259 void *fw_buf;
2260 dma_addr_t fw_dma;
2261 uint32_t len;
2262};
2263
2232/* 2264/*
2233 * Qlogic host adapter specific data structure. 2265 * Qlogic host adapter specific data structure.
2234*/ 2266*/
@@ -2254,12 +2286,15 @@ struct qla_hw_data {
2254 uint32_t disable_serdes :1; 2286 uint32_t disable_serdes :1;
2255 uint32_t gpsc_supported :1; 2287 uint32_t gpsc_supported :1;
2256 uint32_t npiv_supported :1; 2288 uint32_t npiv_supported :1;
2289 uint32_t pci_channel_io_perm_failure :1;
2257 uint32_t fce_enabled :1; 2290 uint32_t fce_enabled :1;
2258 uint32_t fac_supported :1; 2291 uint32_t fac_supported :1;
2259 uint32_t chip_reset_done :1; 2292 uint32_t chip_reset_done :1;
2260 uint32_t port0 :1; 2293 uint32_t port0 :1;
2261 uint32_t running_gold_fw :1; 2294 uint32_t running_gold_fw :1;
2295 uint32_t eeh_busy :1;
2262 uint32_t cpu_affinity_enabled :1; 2296 uint32_t cpu_affinity_enabled :1;
2297 uint32_t disable_msix_handshake :1;
2263 } flags; 2298 } flags;
2264 2299
2265 /* This spinlock is used to protect "io transactions", you must 2300 /* This spinlock is used to protect "io transactions", you must
@@ -2382,6 +2417,7 @@ struct qla_hw_data {
2382#define IS_QLA81XX(ha) (IS_QLA8001(ha)) 2417#define IS_QLA81XX(ha) (IS_QLA8001(ha))
2383#define IS_QLA2XXX_MIDTYPE(ha) (IS_QLA24XX(ha) || IS_QLA84XX(ha) || \ 2418#define IS_QLA2XXX_MIDTYPE(ha) (IS_QLA24XX(ha) || IS_QLA84XX(ha) || \
2384 IS_QLA25XX(ha) || IS_QLA81XX(ha)) 2419 IS_QLA25XX(ha) || IS_QLA81XX(ha))
2420#define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha))
2385#define IS_NOPOLLING_TYPE(ha) ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && \ 2421#define IS_NOPOLLING_TYPE(ha) ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && \
2386 (ha)->flags.msix_enabled) 2422 (ha)->flags.msix_enabled)
2387#define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha)) 2423#define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha))
@@ -2435,11 +2471,11 @@ struct qla_hw_data {
2435 dma_addr_t edc_data_dma; 2471 dma_addr_t edc_data_dma;
2436 uint16_t edc_data_len; 2472 uint16_t edc_data_len;
2437 2473
2438#define XGMAC_DATA_SIZE PAGE_SIZE 2474#define XGMAC_DATA_SIZE 4096
2439 void *xgmac_data; 2475 void *xgmac_data;
2440 dma_addr_t xgmac_data_dma; 2476 dma_addr_t xgmac_data_dma;
2441 2477
2442#define DCBX_TLV_DATA_SIZE PAGE_SIZE 2478#define DCBX_TLV_DATA_SIZE 4096
2443 void *dcbx_tlv; 2479 void *dcbx_tlv;
2444 dma_addr_t dcbx_tlv_dma; 2480 dma_addr_t dcbx_tlv_dma;
2445 2481
@@ -2589,6 +2625,7 @@ struct qla_hw_data {
2589 struct qla_statistics qla_stats; 2625 struct qla_statistics qla_stats;
2590 struct isp_operations *isp_ops; 2626 struct isp_operations *isp_ops;
2591 struct workqueue_struct *wq; 2627 struct workqueue_struct *wq;
2628 struct qlfc_fw fw_buf;
2592}; 2629};
2593 2630
2594/* 2631/*
@@ -2761,4 +2798,127 @@ typedef struct scsi_qla_host {
2761 2798
2762#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr) 2799#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr)
2763 2800
2801/*
2802 * BSG Vendor specific commands
2803 */
2804
2805#define QL_VND_LOOPBACK 0x01
2806#define QLA84_RESET 0x02
2807#define QLA84_UPDATE_FW 0x03
2808#define QLA84_MGMT_CMD 0x04
2809
2810/* BSG definitions for interpreting CommandSent field */
2811#define INT_DEF_LB_LOOPBACK_CMD 0
2812#define INT_DEF_LB_ECHO_CMD 1
2813
2814/* BSG Vendor-specific definitions */
2815typedef struct _A84_RESET {
2816 uint16_t Flags;
2817 uint16_t Reserved;
2818#define A84_RESET_FLAG_ENABLE_DIAG_FW 1
2819} __attribute__((packed)) A84_RESET, *PA84_RESET;
2820
2821#define A84_ISSUE_WRITE_TYPE_CMD 0
2822#define A84_ISSUE_READ_TYPE_CMD 1
2823#define A84_CLEANUP_CMD 2
2824#define A84_ISSUE_RESET_OP_FW 3
2825#define A84_ISSUE_RESET_DIAG_FW 4
2826#define A84_ISSUE_UPDATE_OPFW_CMD 5
2827#define A84_ISSUE_UPDATE_DIAGFW_CMD 6
2828
2829struct qla84_mgmt_param {
2830 union {
2831 struct {
2832 uint32_t start_addr;
2833 } mem; /* for QLA84_MGMT_READ/WRITE_MEM */
2834 struct {
2835 uint32_t id;
2836#define QLA84_MGMT_CONFIG_ID_UIF 1
2837#define QLA84_MGMT_CONFIG_ID_FCOE_COS 2
2838#define QLA84_MGMT_CONFIG_ID_PAUSE 3
2839#define QLA84_MGMT_CONFIG_ID_TIMEOUTS 4
2840
2841 uint32_t param0;
2842 uint32_t param1;
2843 } config; /* for QLA84_MGMT_CHNG_CONFIG */
2844
2845 struct {
2846 uint32_t type;
2847#define QLA84_MGMT_INFO_CONFIG_LOG_DATA 1 /* Get Config Log Data */
2848#define QLA84_MGMT_INFO_LOG_DATA 2 /* Get Log Data */
2849#define QLA84_MGMT_INFO_PORT_STAT 3 /* Get Port Statistics */
2850#define QLA84_MGMT_INFO_LIF_STAT 4 /* Get LIF Statistics */
2851#define QLA84_MGMT_INFO_ASIC_STAT 5 /* Get ASIC Statistics */
2852#define QLA84_MGMT_INFO_CONFIG_PARAMS 6 /* Get Config Parameters */
2853#define QLA84_MGMT_INFO_PANIC_LOG 7 /* Get Panic Log */
2854
2855 uint32_t context;
2856/*
2857* context definitions for QLA84_MGMT_INFO_CONFIG_LOG_DATA
2858*/
2859#define IC_LOG_DATA_LOG_ID_DEBUG_LOG 0
2860#define IC_LOG_DATA_LOG_ID_LEARN_LOG 1
2861#define IC_LOG_DATA_LOG_ID_FC_ACL_INGRESS_LOG 2
2862#define IC_LOG_DATA_LOG_ID_FC_ACL_EGRESS_LOG 3
2863#define IC_LOG_DATA_LOG_ID_ETHERNET_ACL_INGRESS_LOG 4
2864#define IC_LOG_DATA_LOG_ID_ETHERNET_ACL_EGRESS_LOG 5
2865#define IC_LOG_DATA_LOG_ID_MESSAGE_TRANSMIT_LOG 6
2866#define IC_LOG_DATA_LOG_ID_MESSAGE_RECEIVE_LOG 7
2867#define IC_LOG_DATA_LOG_ID_LINK_EVENT_LOG 8
2868#define IC_LOG_DATA_LOG_ID_DCX_LOG 9
2869
2870/*
2871* context definitions for QLA84_MGMT_INFO_PORT_STAT
2872*/
2873#define IC_PORT_STATISTICS_PORT_NUMBER_ETHERNET_PORT0 0
2874#define IC_PORT_STATISTICS_PORT_NUMBER_ETHERNET_PORT1 1
2875#define IC_PORT_STATISTICS_PORT_NUMBER_NSL_PORT0 2
2876#define IC_PORT_STATISTICS_PORT_NUMBER_NSL_PORT1 3
2877#define IC_PORT_STATISTICS_PORT_NUMBER_FC_PORT0 4
2878#define IC_PORT_STATISTICS_PORT_NUMBER_FC_PORT1 5
2879
2880
2881/*
2882* context definitions for QLA84_MGMT_INFO_LIF_STAT
2883*/
2884#define IC_LIF_STATISTICS_LIF_NUMBER_ETHERNET_PORT0 0
2885#define IC_LIF_STATISTICS_LIF_NUMBER_ETHERNET_PORT1 1
2886#define IC_LIF_STATISTICS_LIF_NUMBER_FC_PORT0 2
2887#define IC_LIF_STATISTICS_LIF_NUMBER_FC_PORT1 3
2888#define IC_LIF_STATISTICS_LIF_NUMBER_CPU 6
2889
2890 } info; /* for QLA84_MGMT_GET_INFO */
2891 } u;
2892};
2893
2894struct qla84_msg_mgmt {
2895 uint16_t cmd;
2896#define QLA84_MGMT_READ_MEM 0x00
2897#define QLA84_MGMT_WRITE_MEM 0x01
2898#define QLA84_MGMT_CHNG_CONFIG 0x02
2899#define QLA84_MGMT_GET_INFO 0x03
2900 uint16_t rsrvd;
2901 struct qla84_mgmt_param mgmtp;/* parameters for cmd */
2902 uint32_t len; /* bytes in payload following this struct */
2903 uint8_t payload[0]; /* payload for cmd */
2904};
2905
2906struct msg_update_fw {
2907 /*
2908 * diag_fw = 0 operational fw
2909 * otherwise diagnostic fw
2910 * offset, len, fw_len are present to overcome the current limitation
2911 * of the 128 KB transfer size. The fw is sent in smaller chunks. Each chunk
2912 * specifies the byte "offset" where it fits in the fw buffer. The
2913 * number of bytes in each chunk is specified in "len". "fw_len"
2914 * is the total size of fw. The first chunk should start at offset = 0.
2915 * When offset+len == fw_len, the fw is written to the HBA.
2916 */
2917 uint32_t diag_fw;
2918 uint32_t offset;/* start offset */
2919 uint32_t len; /* num bytes in cur xfer */
2920 uint32_t fw_len; /* size of fw in bytes */
2921 uint8_t fw_bytes[0];
2922};
2923
2764#endif 2924#endif
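
The msg_update_fw comment above defines a chunked transfer contract: each piece carries its byte offset, its length, and the total fw_len, and the write to the HBA fires when offset + len == fw_len. A sketch of a sender honoring that contract; CHUNK and the sizes below are illustrative only.

#include <stdio.h>

#define CHUNK 131072			/* 128 KB transfer limit */

static void send_chunk(unsigned off, unsigned len, unsigned fw_len)
{
	printf("chunk @%u, %u bytes%s\n", off, len,
	       off + len == fw_len ? " -> commit to HBA" : "");
}

int main(void)
{
	unsigned fw_len = 300000;	/* total image size */

	for (unsigned off = 0; off < fw_len; off += CHUNK) {
		unsigned len = fw_len - off < CHUNK ? fw_len - off : CHUNK;
		send_chunk(off, len, fw_len);
	}
	return 0;
}
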
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 66a8da5d7d08..42c5587cc50c 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -627,6 +627,39 @@ struct els_entry_24xx {
627 uint32_t rx_len; /* Data segment 1 length. */ 627 uint32_t rx_len; /* Data segment 1 length. */
628}; 628};
629 629
630struct els_sts_entry_24xx {
631 uint8_t entry_type; /* Entry type. */
632 uint8_t entry_count; /* Entry count. */
633 uint8_t sys_define; /* System Defined. */
634 uint8_t entry_status; /* Entry Status. */
635
636 uint32_t handle; /* System handle. */
637
638 uint16_t comp_status;
639
640 uint16_t nport_handle; /* N_PORT handle. */
641
642 uint16_t reserved_1;
643
644 uint8_t vp_index;
645 uint8_t sof_type;
646
647 uint32_t rx_xchg_address; /* Receive exchange address. */
648 uint16_t reserved_2;
649
650 uint8_t opcode;
651 uint8_t reserved_3;
652
653 uint8_t port_id[3];
654 uint8_t reserved_4;
655
656 uint16_t reserved_5;
657
658 uint16_t control_flags; /* Control flags. */
659 uint32_t total_byte_count;
660 uint32_t error_subcode_1;
661 uint32_t error_subcode_2;
662};
630/* 663/*
631 * ISP queue - Mailbox Command entry structure definition. 664 * ISP queue - Mailbox Command entry structure definition.
632 */ 665 */
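
els_sts_entry_24xx above pins a hardware response layout with fixed-width fields and explicit reserved_* padding. A sketch of how such a layout can be guarded at build time with C11's _Static_assert; the 16-byte demo_entry is hypothetical, not the real 24xx entry.

#include <stdint.h>

struct demo_entry {
	uint8_t  entry_type;
	uint8_t  entry_count;
	uint8_t  sys_define;
	uint8_t  entry_status;
	uint32_t handle;
	uint16_t comp_status;
	uint16_t reserved_1;		/* explicit padding, named */
	uint32_t reserved_2;
};

/* any accidental size change now fails the build instead of the wire */
_Static_assert(sizeof(struct demo_entry) == 16,
	       "demo_entry must match the 16-byte wire layout");

int main(void) { return 0; }
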
@@ -1559,10 +1592,22 @@ struct nvram_81xx {
1559 1592
1560 /* Offset 384. */ 1593 /* Offset 384. */
1561 uint8_t reserved_21[16]; 1594 uint8_t reserved_21[16];
1562 uint16_t reserved_22[8]; 1595 uint16_t reserved_22[3];
1596
1597 /*
1598 * BIT 0 = Extended BB credits for LR
1599 * BIT 1 = Virtual Fabric Enable
1600 * BIT 2 = Enhanced Features Unused
1601 * BIT 3-7 = Enhanced Features Reserved
1602 */
1603 /* Enhanced Features */
1604 uint8_t enhanced_features;
1605
1606 uint8_t reserved_23;
1607 uint16_t reserved_24[4];
1563 1608
1564 /* Offset 416. */ 1609 /* Offset 416. */
1565 uint16_t reserved_23[32]; 1610 uint16_t reserved_25[32];
1566 1611
1567 /* Offset 480. */ 1612 /* Offset 480. */
1568 uint8_t model_name[16]; 1613 uint8_t model_name[16];
@@ -1570,7 +1615,7 @@ struct nvram_81xx {
1570 /* Offset 496. */ 1615 /* Offset 496. */
1571 uint16_t feature_mask_l; 1616 uint16_t feature_mask_l;
1572 uint16_t feature_mask_h; 1617 uint16_t feature_mask_h;
1573 uint16_t reserved_24[2]; 1618 uint16_t reserved_26[2];
1574 1619
1575 uint16_t subsystem_vendor_id; 1620 uint16_t subsystem_vendor_id;
1576 uint16_t subsystem_device_id; 1621 uint16_t subsystem_device_id;
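The enhanced_features byte introduced in the nvram_81xx hunk above is consumed later in this same patch: qla2x00_execute_fw in the qla_mbx.c hunk masks it with EXTENDED_BB_CREDITS (BIT 0). A minimal decode sketch, assuming only the two bits the comment defines:

#include <stdint.h>
#include <stdio.h>

#define EF_EXTENDED_BB_CREDITS  (1u << 0)       /* extended BB credits for LR */
#define EF_VIRTUAL_FABRIC       (1u << 1)       /* virtual fabric enable */

static void print_enhanced_features(uint8_t ef)
{
        printf("extended BB credits (LR): %s\n",
            (ef & EF_EXTENDED_BB_CREDITS) ? "on" : "off");
        printf("virtual fabric enable:    %s\n",
            (ef & EF_VIRTUAL_FABRIC) ? "on" : "off");
        /* bits 2-7 are unused/reserved per the NVRAM comment */
}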
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index f3d1d1afa95b..3a89bc514e2b 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -60,6 +60,8 @@ extern int qla2x00_async_login_done(struct scsi_qla_host *, fc_port_t *,
60extern int qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *, 60extern int qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *,
61 uint16_t *); 61 uint16_t *);
62 62
63extern fc_port_t *
64qla2x00_alloc_fcport(scsi_qla_host_t *, gfp_t);
63/* 65/*
64 * Global Data in qla_os.c source file. 66 * Global Data in qla_os.c source file.
65 */ 67 */
@@ -72,12 +74,11 @@ extern int ql2xloginretrycount;
72extern int ql2xfdmienable; 74extern int ql2xfdmienable;
73extern int ql2xallocfwdump; 75extern int ql2xallocfwdump;
74extern int ql2xextended_error_logging; 76extern int ql2xextended_error_logging;
75extern int ql2xqfullrampup;
76extern int ql2xqfulltracking;
77extern int ql2xiidmaenable; 77extern int ql2xiidmaenable;
78extern int ql2xmaxqueues; 78extern int ql2xmaxqueues;
79extern int ql2xmultique_tag; 79extern int ql2xmultique_tag;
80extern int ql2xfwloadbin; 80extern int ql2xfwloadbin;
81extern int ql2xetsenable;
81 82
82extern int qla2x00_loop_reset(scsi_qla_host_t *); 83extern int qla2x00_loop_reset(scsi_qla_host_t *);
83extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); 84extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -92,10 +93,10 @@ extern int qla2x00_post_async_logout_work(struct scsi_qla_host *, fc_port_t *,
92 uint16_t *); 93 uint16_t *);
93extern int qla2x00_post_async_logout_done_work(struct scsi_qla_host *, 94extern int qla2x00_post_async_logout_done_work(struct scsi_qla_host *,
94 fc_port_t *, uint16_t *); 95 fc_port_t *, uint16_t *);
96extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
95 97
96extern int qla81xx_restart_mpi_firmware(scsi_qla_host_t *); 98extern int qla81xx_restart_mpi_firmware(scsi_qla_host_t *);
97 99
98extern void qla2x00_abort_fcport_cmds(fc_port_t *);
99extern struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *, 100extern struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *,
100 struct qla_hw_data *); 101 struct qla_hw_data *);
101extern void qla2x00_free_host(struct scsi_qla_host *); 102extern void qla2x00_free_host(struct scsi_qla_host *);
@@ -155,6 +156,7 @@ int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
155int __qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *, 156int __qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
156 uint16_t, uint16_t, uint8_t); 157 uint16_t, uint16_t, uint8_t);
157extern int qla2x00_start_sp(srb_t *); 158extern int qla2x00_start_sp(srb_t *);
159extern void qla2x00_ctx_sp_free(srb_t *);
158 160
159/* 161/*
160 * Global Function Prototypes in qla_mbx.c source file. 162 * Global Function Prototypes in qla_mbx.c source file.
@@ -246,7 +248,7 @@ qla2x00_get_id_list(scsi_qla_host_t *, void *, dma_addr_t, uint16_t *);
246 248
247extern int 249extern int
248qla2x00_get_resource_cnts(scsi_qla_host_t *, uint16_t *, uint16_t *, 250qla2x00_get_resource_cnts(scsi_qla_host_t *, uint16_t *, uint16_t *,
249 uint16_t *, uint16_t *, uint16_t *); 251 uint16_t *, uint16_t *, uint16_t *, uint16_t *);
250 252
251extern int 253extern int
252qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map); 254qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map);
@@ -325,6 +327,7 @@ qla2x00_read_ram_word(scsi_qla_host_t *, uint32_t, uint32_t *);
325extern int 327extern int
326qla2x00_write_ram_word(scsi_qla_host_t *, uint32_t, uint32_t); 328qla2x00_write_ram_word(scsi_qla_host_t *, uint32_t, uint32_t);
327 329
330extern int qla2x00_get_data_rate(scsi_qla_host_t *);
328/* 331/*
329 * Global Function Prototypes in qla_isr.c source file. 332 * Global Function Prototypes in qla_isr.c source file.
330 */ 333 */
@@ -426,6 +429,8 @@ extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
426extern void qla2x00_init_host_attr(scsi_qla_host_t *); 429extern void qla2x00_init_host_attr(scsi_qla_host_t *);
427extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *); 430extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
428extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *); 431extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
432extern int qla2x00_loopback_test(scsi_qla_host_t *, struct msg_echo_lb *, uint16_t *);
433extern int qla2x00_echo_test(scsi_qla_host_t *, struct msg_echo_lb *, uint16_t *);
429 434
430/* 435/*
431 * Global Function Prototypes in qla_dfs.c source file. 436 * Global Function Prototypes in qla_dfs.c source file.
@@ -453,6 +458,5 @@ extern void qla24xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
453extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t); 458extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
454extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); 459extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
455extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); 460extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
456extern struct scsi_qla_host * qla25xx_get_host(struct rsp_que *);
457 461
458#endif /* _QLA_GBL_H */ 462#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 9e3eaac25596..4229bb483c5e 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -8,6 +8,7 @@
8#include "qla_gbl.h" 8#include "qla_gbl.h"
9 9
10#include <linux/delay.h> 10#include <linux/delay.h>
11#include <linux/slab.h>
11#include <linux/vmalloc.h> 12#include <linux/vmalloc.h>
12 13
13#include "qla_devtbl.h" 14#include "qla_devtbl.h"
@@ -62,7 +63,7 @@ qla2x00_ctx_sp_timeout(unsigned long __data)
62 ctx->free(sp); 63 ctx->free(sp);
63} 64}
64 65
65static void 66void
66qla2x00_ctx_sp_free(srb_t *sp) 67qla2x00_ctx_sp_free(srb_t *sp)
67{ 68{
68 struct srb_ctx *ctx = sp->ctx; 69 struct srb_ctx *ctx = sp->ctx;
@@ -205,7 +206,7 @@ qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport,
205 206
206 switch (data[0]) { 207 switch (data[0]) {
207 case MBS_COMMAND_COMPLETE: 208 case MBS_COMMAND_COMPLETE:
208 if (fcport->flags & FCF_TAPE_PRESENT) 209 if (fcport->flags & FCF_FCP2_DEVICE)
209 opts |= BIT_1; 210 opts |= BIT_1;
210 rval = qla2x00_get_port_database(vha, fcport, opts); 211 rval = qla2x00_get_port_database(vha, fcport, opts);
211 if (rval != QLA_SUCCESS) 212 if (rval != QLA_SUCCESS)
@@ -269,6 +270,8 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
269 vha->flags.online = 0; 270 vha->flags.online = 0;
270 ha->flags.chip_reset_done = 0; 271 ha->flags.chip_reset_done = 0;
271 vha->flags.reset_active = 0; 272 vha->flags.reset_active = 0;
273 ha->flags.pci_channel_io_perm_failure = 0;
274 ha->flags.eeh_busy = 0;
272 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 275 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
273 atomic_set(&vha->loop_state, LOOP_DOWN); 276 atomic_set(&vha->loop_state, LOOP_DOWN);
274 vha->device_flags = DFLG_NO_CABLE; 277 vha->device_flags = DFLG_NO_CABLE;
@@ -277,7 +280,6 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
277 vha->marker_needed = 0; 280 vha->marker_needed = 0;
278 ha->isp_abort_cnt = 0; 281 ha->isp_abort_cnt = 0;
279 ha->beacon_blink_led = 0; 282 ha->beacon_blink_led = 0;
280 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
281 283
282 set_bit(0, ha->req_qid_map); 284 set_bit(0, ha->req_qid_map);
283 set_bit(0, ha->rsp_qid_map); 285 set_bit(0, ha->rsp_qid_map);
@@ -337,6 +339,16 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
337 rval = qla2x00_init_rings(vha); 339 rval = qla2x00_init_rings(vha);
338 ha->flags.chip_reset_done = 1; 340 ha->flags.chip_reset_done = 1;
339 341
342 if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
343 /* Issue verify 84xx FW IOCB to complete 84xx initialization */
344 rval = qla84xx_init_chip(vha);
345 if (rval != QLA_SUCCESS) {
346 qla_printk(KERN_ERR, ha,
347 "Unable to initialize ISP84XX.\n");
348 qla84xx_put_chip(vha);
349 }
350 }
351
340 return (rval); 352 return (rval);
341} 353}
342 354
@@ -582,6 +594,9 @@ qla2x00_reset_chip(scsi_qla_host_t *vha)
582 uint32_t cnt; 594 uint32_t cnt;
583 uint16_t cmd; 595 uint16_t cmd;
584 596
597 if (unlikely(pci_channel_offline(ha->pdev)))
598 return;
599
585 ha->isp_ops->disable_intrs(ha); 600 ha->isp_ops->disable_intrs(ha);
586 601
587 spin_lock_irqsave(&ha->hardware_lock, flags); 602 spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -787,6 +802,12 @@ void
787qla24xx_reset_chip(scsi_qla_host_t *vha) 802qla24xx_reset_chip(scsi_qla_host_t *vha)
788{ 803{
789 struct qla_hw_data *ha = vha->hw; 804 struct qla_hw_data *ha = vha->hw;
805
806 if (pci_channel_offline(ha->pdev) &&
807 ha->flags.pci_channel_io_perm_failure) {
808 return;
809 }
810
790 ha->isp_ops->disable_intrs(ha); 811 ha->isp_ops->disable_intrs(ha);
791 812
792 /* Perform RISC reset. */ 813 /* Perform RISC reset. */
@@ -1203,7 +1224,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
1203 } 1224 }
1204 qla2x00_get_resource_cnts(vha, NULL, 1225 qla2x00_get_resource_cnts(vha, NULL,
1205 &ha->fw_xcb_count, NULL, NULL, 1226 &ha->fw_xcb_count, NULL, NULL,
1206 &ha->max_npiv_vports); 1227 &ha->max_npiv_vports, NULL);
1207 1228
1208 if (!fw_major_version && ql2xallocfwdump) 1229 if (!fw_major_version && ql2xallocfwdump)
1209 qla2x00_alloc_fw_dump(vha); 1230 qla2x00_alloc_fw_dump(vha);
@@ -1443,7 +1464,17 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
1443 icb->firmware_options_2 |= 1464 icb->firmware_options_2 |=
1444 __constant_cpu_to_le32(BIT_18); 1465 __constant_cpu_to_le32(BIT_18);
1445 1466
1446 icb->firmware_options_2 &= __constant_cpu_to_le32(~BIT_22); 1467 /* Use Disable MSIX Handshake mode for capable adapters */
1468 if (IS_MSIX_NACK_CAPABLE(ha)) {
1469 icb->firmware_options_2 &=
1470 __constant_cpu_to_le32(~BIT_22);
1471 ha->flags.disable_msix_handshake = 1;
1472 qla_printk(KERN_INFO, ha,
1473 "MSIX Handshake Disable Mode turned on\n");
1474 } else {
1475 icb->firmware_options_2 |=
1476 __constant_cpu_to_le32(BIT_22);
1477 }
1447 icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_23); 1478 icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_23);
1448 1479
1449 WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0); 1480 WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0);
@@ -2196,7 +2227,7 @@ qla2x00_rport_del(void *data)
2196 * 2227 *
2197 * Returns a pointer to the allocated fcport, or NULL, if none available. 2228 * Returns a pointer to the allocated fcport, or NULL, if none available.
2198 */ 2229 */
2199static fc_port_t * 2230fc_port_t *
2200qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags) 2231qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
2201{ 2232{
2202 fc_port_t *fcport; 2233 fc_port_t *fcport;
@@ -2257,6 +2288,8 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
2257 clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 2288 clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
2258 clear_bit(RSCN_UPDATE, &vha->dpc_flags); 2289 clear_bit(RSCN_UPDATE, &vha->dpc_flags);
2259 2290
2291 qla2x00_get_data_rate(vha);
2292
2260 /* Determine what we need to do */ 2293 /* Determine what we need to do */
2261 if (ha->current_topology == ISP_CFG_FL && 2294 if (ha->current_topology == ISP_CFG_FL &&
2262 (test_bit(LOCAL_LOOP_UPDATE, &flags))) { 2295 (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
@@ -2704,7 +2737,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
2704 2737
2705 /* 2738 /*
2706 * Logout all previous fabric devices marked lost, except 2739 * Logout all previous fabric devices marked lost, except
2707 * tape devices. 2740 * FCP2 devices.
2708 */ 2741 */
2709 list_for_each_entry(fcport, &vha->vp_fcports, list) { 2742 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2710 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) 2743 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
@@ -2717,7 +2750,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
2717 qla2x00_mark_device_lost(vha, fcport, 2750 qla2x00_mark_device_lost(vha, fcport,
2718 ql2xplogiabsentdevice, 0); 2751 ql2xplogiabsentdevice, 0);
2719 if (fcport->loop_id != FC_NO_LOOP_ID && 2752 if (fcport->loop_id != FC_NO_LOOP_ID &&
2720 (fcport->flags & FCF_TAPE_PRESENT) == 0 && 2753 (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
2721 fcport->port_type != FCT_INITIATOR && 2754 fcport->port_type != FCT_INITIATOR &&
2722 fcport->port_type != FCT_BROADCAST) { 2755 fcport->port_type != FCT_BROADCAST) {
2723 ha->isp_ops->fabric_logout(vha, 2756 ha->isp_ops->fabric_logout(vha,
@@ -2878,8 +2911,13 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
2878 if (qla2x00_is_reserved_id(vha, loop_id)) 2911 if (qla2x00_is_reserved_id(vha, loop_id))
2879 continue; 2912 continue;
2880 2913
2881 if (atomic_read(&vha->loop_down_timer) || LOOP_TRANSITION(vha)) 2914 if (atomic_read(&vha->loop_down_timer) ||
2915 LOOP_TRANSITION(vha)) {
2916 atomic_set(&vha->loop_down_timer, 0);
2917 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
2918 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
2882 break; 2919 break;
2920 }
2883 2921
2884 if (swl != NULL) { 2922 if (swl != NULL) {
2885 if (last_dev) { 2923 if (last_dev) {
@@ -2996,7 +3034,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
2996 fcport->d_id.b24 = new_fcport->d_id.b24; 3034 fcport->d_id.b24 = new_fcport->d_id.b24;
2997 fcport->flags |= FCF_LOGIN_NEEDED; 3035 fcport->flags |= FCF_LOGIN_NEEDED;
2998 if (fcport->loop_id != FC_NO_LOOP_ID && 3036 if (fcport->loop_id != FC_NO_LOOP_ID &&
2999 (fcport->flags & FCF_TAPE_PRESENT) == 0 && 3037 (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
3000 fcport->port_type != FCT_INITIATOR && 3038 fcport->port_type != FCT_INITIATOR &&
3001 fcport->port_type != FCT_BROADCAST) { 3039 fcport->port_type != FCT_BROADCAST) {
3002 ha->isp_ops->fabric_logout(vha, fcport->loop_id, 3040 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
@@ -3250,9 +3288,9 @@ qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport,
3250 3288
3251 rval = qla2x00_fabric_login(vha, fcport, next_loopid); 3289 rval = qla2x00_fabric_login(vha, fcport, next_loopid);
3252 if (rval == QLA_SUCCESS) { 3290 if (rval == QLA_SUCCESS) {
3253 /* Send an ADISC to tape devices.*/ 3291 /* Send an ADISC to FCP2 devices.*/
3254 opts = 0; 3292 opts = 0;
3255 if (fcport->flags & FCF_TAPE_PRESENT) 3293 if (fcport->flags & FCF_FCP2_DEVICE)
3256 opts |= BIT_1; 3294 opts |= BIT_1;
3257 rval = qla2x00_get_port_database(vha, fcport, opts); 3295 rval = qla2x00_get_port_database(vha, fcport, opts);
3258 if (rval != QLA_SUCCESS) { 3296 if (rval != QLA_SUCCESS) {
@@ -3551,6 +3589,13 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
3551 /* Requeue all commands in outstanding command list. */ 3589 /* Requeue all commands in outstanding command list. */
3552 qla2x00_abort_all_cmds(vha, DID_RESET << 16); 3590 qla2x00_abort_all_cmds(vha, DID_RESET << 16);
3553 3591
3592 if (unlikely(pci_channel_offline(ha->pdev) &&
3593 ha->flags.pci_channel_io_perm_failure)) {
3594 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
3595 status = 0;
3596 return status;
3597 }
3598
3554 ha->isp_ops->get_flash_version(vha, req->ring); 3599 ha->isp_ops->get_flash_version(vha, req->ring);
3555 3600
3556 ha->isp_ops->nvram_config(vha); 3601 ha->isp_ops->nvram_config(vha);
@@ -3573,6 +3618,15 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
3573 ha->isp_abort_cnt = 0; 3618 ha->isp_abort_cnt = 0;
3574 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 3619 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
3575 3620
3621 if (IS_QLA81XX(ha))
3622 qla2x00_get_fw_version(vha,
3623 &ha->fw_major_version,
3624 &ha->fw_minor_version,
3625 &ha->fw_subminor_version,
3626 &ha->fw_attributes, &ha->fw_memory_size,
3627 ha->mpi_version, &ha->mpi_capabilities,
3628 ha->phy_version);
3629
3576 if (ha->fce) { 3630 if (ha->fce) {
3577 ha->flags.fce_enabled = 1; 3631 ha->flags.fce_enabled = 1;
3578 memset(ha->fce, 0, 3632 memset(ha->fce, 0,
@@ -4440,6 +4494,8 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
4440 int ret, retries; 4494 int ret, retries;
4441 struct qla_hw_data *ha = vha->hw; 4495 struct qla_hw_data *ha = vha->hw;
4442 4496
4497 if (ha->flags.pci_channel_io_perm_failure)
4498 return;
4443 if (!IS_FWI2_CAPABLE(ha)) 4499 if (!IS_FWI2_CAPABLE(ha))
4444 return; 4500 return;
4445 if (!ha->fw_major_version) 4501 if (!ha->fw_major_version)
@@ -4837,6 +4893,15 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
4837} 4893}
4838 4894
4839void 4895void
4840qla81xx_update_fw_options(scsi_qla_host_t *ha) 4896qla81xx_update_fw_options(scsi_qla_host_t *vha)
4841{ 4897{
4898 struct qla_hw_data *ha = vha->hw;
4899
4900 if (!ql2xetsenable)
4901 return;
4902
4903 /* Enable ETS Burst. */
4904 memset(ha->fw_options, 0, sizeof(ha->fw_options));
4905 ha->fw_options[2] |= BIT_9;
4906 qla2x00_set_fw_options(vha, ha->fw_options);
4842} 4907}
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index c5ccac0bef76..8299a9891bfe 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -1025,6 +1025,119 @@ qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
1025 /* Implicit: mbx->mbx10 = 0. */ 1025 /* Implicit: mbx->mbx10 = 0. */
1026} 1026}
1027 1027
1028static void
1029qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
1030{
1031 struct fc_bsg_job *bsg_job = ((struct srb_bsg *)sp->ctx)->bsg_job;
1032
1033 els_iocb->entry_type = ELS_IOCB_TYPE;
1034 els_iocb->entry_count = 1;
1035 els_iocb->sys_define = 0;
1036 els_iocb->entry_status = 0;
1037 els_iocb->handle = sp->handle;
1038 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1039 els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
1040 els_iocb->vp_index = sp->fcport->vp_idx;
1041 els_iocb->sof_type = EST_SOFI3;
1042 els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
1043
1044 els_iocb->opcode = (((struct srb_bsg *)sp->ctx)->ctx.type == SRB_ELS_CMD_RPT) ?
1045 bsg_job->request->rqst_data.r_els.els_code : bsg_job->request->rqst_data.h_els.command_code;
1046 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
1047 els_iocb->port_id[1] = sp->fcport->d_id.b.area;
1048 els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
1049 els_iocb->control_flags = 0;
1050 els_iocb->rx_byte_count =
1051 cpu_to_le32(bsg_job->reply_payload.payload_len);
1052 els_iocb->tx_byte_count =
1053 cpu_to_le32(bsg_job->request_payload.payload_len);
1054
1055 els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
1056 (bsg_job->request_payload.sg_list)));
1057 els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
1058 (bsg_job->request_payload.sg_list)));
1059 els_iocb->tx_len = cpu_to_le32(sg_dma_len
1060 (bsg_job->request_payload.sg_list));
1061
1062 els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
1063 (bsg_job->reply_payload.sg_list)));
1064 els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
1065 (bsg_job->reply_payload.sg_list)));
1066 els_iocb->rx_len = cpu_to_le32(sg_dma_len
1067 (bsg_job->reply_payload.sg_list));
1068}
1069
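qla24xx_els_iocb() above stores each 64-bit DMA address as two little-endian 32-bit words, low dword first, via the driver's LSD()/MSD() macros. A standalone sketch of the same idiom (host-endian; the driver additionally wraps each word in cpu_to_le32()):

#include <stdint.h>

#define LSD64(x)        ((uint32_t)((uint64_t)(x) & 0xffffffffu))
#define MSD64(x)        ((uint32_t)(((uint64_t)(x) >> 32) & 0xffffffffu))

static void fill_dsd(uint32_t dsd[3], uint64_t dma, uint32_t len)
{
        dsd[0] = LSD64(dma);    /* address bits 31:0 */
        dsd[1] = MSD64(dma);    /* address bits 63:32 */
        dsd[2] = len;           /* data segment length */
}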
1070static void
1071qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
1072{
1073 uint16_t avail_dsds;
1074 uint32_t *cur_dsd;
1075 struct scatterlist *sg;
1076 int index;
1077 uint16_t tot_dsds;
1078 scsi_qla_host_t *vha = sp->fcport->vha;
1079 struct fc_bsg_job *bsg_job = ((struct srb_bsg *)sp->ctx)->bsg_job;
1080 int loop_iteration = 0;
1081 int cont_iocb_prsnt = 0;
1082 int entry_count = 1;
1083
1084 ct_iocb->entry_type = CT_IOCB_TYPE;
1085 ct_iocb->entry_status = 0;
1086 ct_iocb->sys_define = 0;
1087 ct_iocb->handle = sp->handle;
1088
1089 ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1090 ct_iocb->vp_index = sp->fcport->vp_idx;
1091 ct_iocb->comp_status = __constant_cpu_to_le16(0);
1092
1093 ct_iocb->cmd_dsd_count =
1094 __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
1095 ct_iocb->timeout = 0;
1096 ct_iocb->rsp_dsd_count =
1097 __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
1098 ct_iocb->rsp_byte_count =
1099 cpu_to_le32(bsg_job->reply_payload.payload_len);
1100 ct_iocb->cmd_byte_count =
1101 cpu_to_le32(bsg_job->request_payload.payload_len);
1102 ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
1103 (bsg_job->request_payload.sg_list)));
1104 ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
1105 (bsg_job->request_payload.sg_list)));
1106 ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
1107 (bsg_job->request_payload.sg_list));
1108
1109 avail_dsds = 1;
1110 cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
1111 index = 0;
1112 tot_dsds = bsg_job->reply_payload.sg_cnt;
1113
1114 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
1115 dma_addr_t sle_dma;
1116 cont_a64_entry_t *cont_pkt;
1117
1118 /* Allocate additional continuation packets? */
1119 if (avail_dsds == 0) {
1120 /*
1121 * Five DSDs are available in the Cont.
1122 * Type 1 IOCB.
1123 */
1124 cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
1125 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
1126 avail_dsds = 5;
1127 cont_iocb_prsnt = 1;
1128 entry_count++;
1129 }
1130
1131 sle_dma = sg_dma_address(sg);
1132 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1133 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1134 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1135 loop_iteration++;
1136 avail_dsds--;
1137 }
1138 ct_iocb->entry_count = entry_count;
1139}
1140
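The loop in qla24xx_ct_iocb() packs the first reply data segment into the command IOCB itself and spills the rest into Continuation Type 1 IOCBs, five DSDs apiece, bumping entry_count once per continuation. The resulting entry count, as a sketch:

#include <assert.h>

static int ct_entry_count(int sg_cnt)
{
        if (sg_cnt <= 1)
                return 1;                       /* fits in the command IOCB */
        return 1 + (sg_cnt - 1 + 4) / 5;        /* + ceil((n - 1) / 5) continuations */
}

int main(void)
{
        assert(ct_entry_count(1) == 1);
        assert(ct_entry_count(6) == 2);         /* 1 in cmd IOCB + 5 in one cont. */
        assert(ct_entry_count(7) == 3);
        return 0;
}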
1028int 1141int
1029qla2x00_start_sp(srb_t *sp) 1142qla2x00_start_sp(srb_t *sp)
1030{ 1143{
@@ -1052,6 +1165,13 @@ qla2x00_start_sp(srb_t *sp)
1052 qla24xx_logout_iocb(sp, pkt): 1165 qla24xx_logout_iocb(sp, pkt):
1053 qla2x00_logout_iocb(sp, pkt); 1166 qla2x00_logout_iocb(sp, pkt);
1054 break; 1167 break;
1168 case SRB_ELS_CMD_RPT:
1169 case SRB_ELS_CMD_HST:
1170 qla24xx_els_iocb(sp, pkt);
1171 break;
1172 case SRB_CT_CMD:
1173 qla24xx_ct_iocb(sp, pkt);
1174 break;
1055 default: 1175 default:
1056 break; 1176 break;
1057 } 1177 }
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index b20a7169aac2..db539b0c3dae 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -7,7 +7,9 @@
7#include "qla_def.h" 7#include "qla_def.h"
8 8
9#include <linux/delay.h> 9#include <linux/delay.h>
10#include <linux/slab.h>
10#include <scsi/scsi_tcq.h> 11#include <scsi/scsi_tcq.h>
12#include <scsi/scsi_bsg_fc.h>
11 13
12static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t); 14static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
13static void qla2x00_process_completed_request(struct scsi_qla_host *, 15static void qla2x00_process_completed_request(struct scsi_qla_host *,
@@ -152,7 +154,7 @@ qla2300_intr_handler(int irq, void *dev_id)
152 for (iter = 50; iter--; ) { 154 for (iter = 50; iter--; ) {
153 stat = RD_REG_DWORD(&reg->u.isp2300.host_status); 155 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
154 if (stat & HSR_RISC_PAUSED) { 156 if (stat & HSR_RISC_PAUSED) {
155 if (pci_channel_offline(ha->pdev)) 157 if (unlikely(pci_channel_offline(ha->pdev)))
156 break; 158 break;
157 159
158 hccr = RD_REG_WORD(&reg->hccr); 160 hccr = RD_REG_WORD(&reg->hccr);
@@ -313,10 +315,11 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
313 static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" }; 315 static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
314 char *link_speed; 316 char *link_speed;
315 uint16_t handle_cnt; 317 uint16_t handle_cnt;
316 uint16_t cnt; 318 uint16_t cnt, mbx;
317 uint32_t handles[5]; 319 uint32_t handles[5];
318 struct qla_hw_data *ha = vha->hw; 320 struct qla_hw_data *ha = vha->hw;
319 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 321 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
322 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
320 uint32_t rscn_entry, host_pid; 323 uint32_t rscn_entry, host_pid;
321 uint8_t rscn_queue_index; 324 uint8_t rscn_queue_index;
322 unsigned long flags; 325 unsigned long flags;
@@ -395,9 +398,10 @@ skip_rio:
395 break; 398 break;
396 399
397 case MBA_SYSTEM_ERR: /* System Error */ 400 case MBA_SYSTEM_ERR: /* System Error */
401 mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox7) : 0;
398 qla_printk(KERN_INFO, ha, 402 qla_printk(KERN_INFO, ha,
399 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n", 403 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
400 mb[1], mb[2], mb[3]); 404 "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
401 405
402 ha->isp_ops->fw_dump(vha, 1); 406 ha->isp_ops->fw_dump(vha, 1);
403 407
@@ -419,9 +423,10 @@ skip_rio:
419 break; 423 break;
420 424
421 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ 425 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
422 DEBUG2(printk("scsi(%ld): ISP Request Transfer Error.\n", 426 DEBUG2(printk("scsi(%ld): ISP Request Transfer Error (%x).\n",
423 vha->host_no)); 427 vha->host_no, mb[1]));
424 qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n"); 428 qla_printk(KERN_WARNING, ha,
429 "ISP Request Transfer Error (%x).\n", mb[1]);
425 430
426 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 431 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
427 break; 432 break;
@@ -485,10 +490,13 @@ skip_rio:
485 break; 490 break;
486 491
487 case MBA_LOOP_DOWN: /* Loop Down Event */ 492 case MBA_LOOP_DOWN: /* Loop Down Event */
493 mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox4) : 0;
488 DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN " 494 DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN "
489 "(%x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3])); 495 "(%x %x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3],
490 qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x %x %x).\n", 496 mbx));
491 mb[1], mb[2], mb[3]); 497 qla_printk(KERN_INFO, ha,
498 "LOOP DOWN detected (%x %x %x %x).\n", mb[1], mb[2], mb[3],
499 mbx);
492 500
493 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 501 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
494 atomic_set(&vha->loop_state, LOOP_DOWN); 502 atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -613,11 +621,10 @@ skip_rio:
613 * vp_idx does not match 621 * vp_idx does not match
614 * Event is not global, vp_idx does not match 622 * Event is not global, vp_idx does not match
615 */ 623 */
616 if ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) 624 if (IS_QLA2XXX_MIDTYPE(ha) &&
617 || (mb[1] != 0xffff)) { 625 ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
618 if (vha->vp_idx != (mb[3] & 0xff)) 626 (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
619 break; 627 break;
620 }
621 628
622 /* Global event -- port logout or port unavailable. */ 629 /* Global event -- port logout or port unavailable. */
623 if (mb[1] == 0xffff && mb[2] == 0x7) { 630 if (mb[1] == 0xffff && mb[2] == 0x7) {
@@ -805,78 +812,6 @@ skip_rio:
805 qla2x00_alert_all_vps(rsp, mb); 812 qla2x00_alert_all_vps(rsp, mb);
806} 813}
807 814
808static void
809qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
810{
811 fc_port_t *fcport = data;
812 struct scsi_qla_host *vha = fcport->vha;
813 struct qla_hw_data *ha = vha->hw;
814 struct req_que *req = NULL;
815
816 if (!ql2xqfulltracking)
817 return;
818
819 req = vha->req;
820 if (!req)
821 return;
822 if (req->max_q_depth <= sdev->queue_depth)
823 return;
824
825 if (sdev->ordered_tags)
826 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
827 sdev->queue_depth + 1);
828 else
829 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
830 sdev->queue_depth + 1);
831
832 fcport->last_ramp_up = jiffies;
833
834 DEBUG2(qla_printk(KERN_INFO, ha,
835 "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
836 fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
837 sdev->queue_depth));
838}
839
840static void
841qla2x00_adjust_sdev_qdepth_down(struct scsi_device *sdev, void *data)
842{
843 fc_port_t *fcport = data;
844
845 if (!scsi_track_queue_full(sdev, sdev->queue_depth - 1))
846 return;
847
848 DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw,
849 "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
850 fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
851 sdev->queue_depth));
852}
853
854static inline void
855qla2x00_ramp_up_queue_depth(scsi_qla_host_t *vha, struct req_que *req,
856 srb_t *sp)
857{
858 fc_port_t *fcport;
859 struct scsi_device *sdev;
860
861 if (!ql2xqfulltracking)
862 return;
863
864 sdev = sp->cmd->device;
865 if (sdev->queue_depth >= req->max_q_depth)
866 return;
867
868 fcport = sp->fcport;
869 if (time_before(jiffies,
870 fcport->last_ramp_up + ql2xqfullrampup * HZ))
871 return;
872 if (time_before(jiffies,
873 fcport->last_queue_full + ql2xqfullrampup * HZ))
874 return;
875
876 starget_for_each_device(sdev->sdev_target, fcport,
877 qla2x00_adjust_sdev_qdepth_up);
878}
879
880/** 815/**
881 * qla2x00_process_completed_request() - Process a Fast Post response. 816 * qla2x00_process_completed_request() - Process a Fast Post response.
882 * @ha: SCSI driver HA context 817 * @ha: SCSI driver HA context
@@ -907,8 +842,6 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
907 842
908 /* Save ISP completion status */ 843 /* Save ISP completion status */
909 sp->cmd->result = DID_OK << 16; 844 sp->cmd->result = DID_OK << 16;
910
911 qla2x00_ramp_up_queue_depth(vha, req, sp);
912 qla2x00_sp_compl(ha, sp); 845 qla2x00_sp_compl(ha, sp);
913 } else { 846 } else {
914 DEBUG2(printk("scsi(%ld) Req:%d: Invalid ISP SCSI completion" 847 DEBUG2(printk("scsi(%ld) Req:%d: Invalid ISP SCSI completion"
@@ -949,7 +882,9 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
949 index); 882 index);
950 return NULL; 883 return NULL;
951 } 884 }
885
952 req->outstanding_cmds[index] = NULL; 886 req->outstanding_cmds[index] = NULL;
887
953done: 888done:
954 return sp; 889 return sp;
955} 890}
@@ -1050,6 +985,100 @@ done_post_logio_done_work:
1050} 985}
1051 986
1052static void 987static void
988qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
989 struct sts_entry_24xx *pkt, int iocb_type)
990{
991 const char func[] = "ELS_CT_IOCB";
992 const char *type;
993 struct qla_hw_data *ha = vha->hw;
994 srb_t *sp;
995 struct srb_bsg *sp_bsg;
996 struct fc_bsg_job *bsg_job;
997 uint16_t comp_status;
998 uint32_t fw_status[3];
999 uint8_t *fw_sts_ptr;
1000
1001 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1002 if (!sp)
1003 return;
1004 sp_bsg = (struct srb_bsg *)sp->ctx;
1005 bsg_job = sp_bsg->bsg_job;
1006
1007 type = NULL;
1008 switch (sp_bsg->ctx.type) {
1009 case SRB_ELS_CMD_RPT:
1010 case SRB_ELS_CMD_HST:
1011 type = "els";
1012 break;
1013 case SRB_CT_CMD:
1014 type = "ct pass-through";
1015 break;
1016 default:
1017 qla_printk(KERN_WARNING, ha,
1018 "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp,
1019 sp_bsg->ctx.type);
1020 return;
1021 }
1022
1023 comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
1024 fw_status[1] = le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
1025 fw_status[2] = le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);
1026
1027 /* Return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
1028 * FC payload to the caller.
1029 */
1030 bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1031 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
1032
1033 if (comp_status != CS_COMPLETE) {
1034 if (comp_status == CS_DATA_UNDERRUN) {
1035 bsg_job->reply->result = DID_OK << 16;
1036 bsg_job->reply->reply_payload_rcv_len =
1037 le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);
1038
1039 DEBUG2(qla_printk(KERN_WARNING, ha,
1040 "scsi(%ld:0x%x): ELS-CT pass-through-%s error comp_status=0x%x "
1041 "error subcode 1=0x%x error subcode 2=0x%x total_byte_count=0x%x.\n",
1042 vha->host_no, sp->handle, type, comp_status, fw_status[1], fw_status[2],
1043 le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count)));
1044 fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1045 memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
1046 }
1047 else {
1048 DEBUG2(qla_printk(KERN_WARNING, ha,
1049 "scsi(%ld:0x%x): ELS-CT pass-through-%s error comp_status-status=0x%x "
1050 "error subcode 1=0x%x error subcode 2=0x%x.\n",
1051 vha->host_no, sp->handle, type, comp_status,
1052 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1),
1053 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2)));
1054 bsg_job->reply->result = DID_ERROR << 16;
1055 bsg_job->reply->reply_payload_rcv_len = 0;
1056 fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1057 memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
1058 }
1059 DEBUG2(qla2x00_dump_buffer((uint8_t *)pkt, sizeof(*pkt)));
1060 }
1061 else {
1062 bsg_job->reply->result = DID_OK << 16;
1063 bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
1064 bsg_job->reply_len = 0;
1065 }
1066
1067 dma_unmap_sg(&ha->pdev->dev,
1068 bsg_job->request_payload.sg_list,
1069 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1070 dma_unmap_sg(&ha->pdev->dev,
1071 bsg_job->reply_payload.sg_list,
1072 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1073 if ((sp_bsg->ctx.type == SRB_ELS_CMD_HST) ||
1074 (sp_bsg->ctx.type == SRB_CT_CMD))
1075 kfree(sp->fcport);
1076 kfree(sp->ctx);
1077 mempool_free(sp, ha->srb_mempool);
1078 bsg_job->job_done(bsg_job);
1079}
1080
1081static void
1053qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, 1082qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1054 struct logio_entry_24xx *logio) 1083 struct logio_entry_24xx *logio)
1055{ 1084{
@@ -1347,16 +1376,22 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1347 1376
1348 sense_len = rsp_info_len = resid_len = fw_resid_len = 0; 1377 sense_len = rsp_info_len = resid_len = fw_resid_len = 0;
1349 if (IS_FWI2_CAPABLE(ha)) { 1378 if (IS_FWI2_CAPABLE(ha)) {
1350 sense_len = le32_to_cpu(sts24->sense_len); 1379 if (scsi_status & SS_SENSE_LEN_VALID)
1351 rsp_info_len = le32_to_cpu(sts24->rsp_data_len); 1380 sense_len = le32_to_cpu(sts24->sense_len);
1352 resid_len = le32_to_cpu(sts24->rsp_residual_count); 1381 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
1353 fw_resid_len = le32_to_cpu(sts24->residual_len); 1382 rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
1383 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
1384 resid_len = le32_to_cpu(sts24->rsp_residual_count);
1385 if (comp_status == CS_DATA_UNDERRUN)
1386 fw_resid_len = le32_to_cpu(sts24->residual_len);
1354 rsp_info = sts24->data; 1387 rsp_info = sts24->data;
1355 sense_data = sts24->data; 1388 sense_data = sts24->data;
1356 host_to_fcp_swap(sts24->data, sizeof(sts24->data)); 1389 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
1357 } else { 1390 } else {
1358 sense_len = le16_to_cpu(sts->req_sense_length); 1391 if (scsi_status & SS_SENSE_LEN_VALID)
1359 rsp_info_len = le16_to_cpu(sts->rsp_info_len); 1392 sense_len = le16_to_cpu(sts->req_sense_length);
1393 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
1394 rsp_info_len = le16_to_cpu(sts->rsp_info_len);
1360 resid_len = le32_to_cpu(sts->residual_length); 1395 resid_len = le32_to_cpu(sts->residual_length);
1361 rsp_info = sts->rsp_info; 1396 rsp_info = sts->rsp_info;
1362 sense_data = sts->req_sense_data; 1397 sense_data = sts->req_sense_data;
@@ -1423,13 +1458,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1423 "scsi(%ld): QUEUE FULL status detected " 1458 "scsi(%ld): QUEUE FULL status detected "
1424 "0x%x-0x%x.\n", vha->host_no, comp_status, 1459 "0x%x-0x%x.\n", vha->host_no, comp_status,
1425 scsi_status)); 1460 scsi_status));
1426
1427 /* Adjust queue depth for all luns on the port. */
1428 if (!ql2xqfulltracking)
1429 break;
1430 fcport->last_queue_full = jiffies;
1431 starget_for_each_device(cp->device->sdev_target,
1432 fcport, qla2x00_adjust_sdev_qdepth_down);
1433 break; 1461 break;
1434 } 1462 }
1435 if (lscsi_status != SS_CHECK_CONDITION) 1463 if (lscsi_status != SS_CHECK_CONDITION)
@@ -1443,54 +1471,67 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1443 break; 1471 break;
1444 1472
1445 case CS_DATA_UNDERRUN: 1473 case CS_DATA_UNDERRUN:
1446 resid = resid_len; 1474 DEBUG2(printk(KERN_INFO
1475 "scsi(%ld:%d:%d) UNDERRUN status detected 0x%x-0x%x. "
1476 "resid=0x%x fw_resid=0x%x cdb=0x%x os_underflow=0x%x\n",
1477 vha->host_no, cp->device->id, cp->device->lun, comp_status,
1478 scsi_status, resid_len, fw_resid_len, cp->cmnd[0],
1479 cp->underflow));
1480
1447 /* Use F/W calculated residual length. */ 1481 /* Use F/W calculated residual length. */
1448 if (IS_FWI2_CAPABLE(ha)) { 1482 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
1449 if (!(scsi_status & SS_RESIDUAL_UNDER)) { 1483 scsi_set_resid(cp, resid);
1450 lscsi_status = 0; 1484 if (scsi_status & SS_RESIDUAL_UNDER) {
1451 } else if (resid != fw_resid_len) { 1485 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
1452 scsi_status &= ~SS_RESIDUAL_UNDER; 1486 DEBUG2(printk(
1453 lscsi_status = 0; 1487 "scsi(%ld:%d:%d:%d) Dropped frame(s) "
1488 "detected (%x of %x bytes)...residual "
1489 "length mismatch...retrying command.\n",
1490 vha->host_no, cp->device->channel,
1491 cp->device->id, cp->device->lun, resid,
1492 scsi_bufflen(cp)));
1493
1494 cp->result = DID_ERROR << 16 | lscsi_status;
1495 break;
1454 } 1496 }
1455 resid = fw_resid_len;
1456 }
1457 1497
1458 if (scsi_status & SS_RESIDUAL_UNDER) { 1498 if (!lscsi_status &&
1459 scsi_set_resid(cp, resid); 1499 ((unsigned)(scsi_bufflen(cp) - resid) <
1460 } else { 1500 cp->underflow)) {
1461 DEBUG2(printk(KERN_INFO 1501 qla_printk(KERN_INFO, ha,
1462 "scsi(%ld:%d:%d) UNDERRUN status detected " 1502 "scsi(%ld:%d:%d:%d): Mid-layer underflow "
1463 "0x%x-0x%x. resid=0x%x fw_resid=0x%x cdb=0x%x " 1503 "detected (%x of %x bytes)...returning "
1464 "os_underflow=0x%x\n", vha->host_no, 1504 "error status.\n", vha->host_no,
1465 cp->device->id, cp->device->lun, comp_status, 1505 cp->device->channel, cp->device->id,
1466 scsi_status, resid_len, resid, cp->cmnd[0], 1506 cp->device->lun, resid, scsi_bufflen(cp));
1467 cp->underflow));
1468 1507
1508 cp->result = DID_ERROR << 16;
1509 break;
1510 }
1511 } else if (!lscsi_status) {
1512 DEBUG2(printk(
1513 "scsi(%ld:%d:%d:%d) Dropped frame(s) detected "
1514 "(%x of %x bytes)...firmware reported underrun..."
1515 "retrying command.\n", vha->host_no,
1516 cp->device->channel, cp->device->id,
1517 cp->device->lun, resid, scsi_bufflen(cp)));
1518
1519 cp->result = DID_ERROR << 16;
1520 break;
1469 } 1521 }
1470 1522
1523 cp->result = DID_OK << 16 | lscsi_status;
1524
1471 /* 1525 /*
1472 * Check to see if SCSI Status is non zero. If so report SCSI 1526 * Check to see if SCSI Status is non zero. If so report SCSI
1473 * Status. 1527 * Status.
1474 */ 1528 */
1475 if (lscsi_status != 0) { 1529 if (lscsi_status != 0) {
1476 cp->result = DID_OK << 16 | lscsi_status;
1477
1478 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 1530 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1479 DEBUG2(printk(KERN_INFO 1531 DEBUG2(printk(KERN_INFO
1480 "scsi(%ld): QUEUE FULL status detected " 1532 "scsi(%ld): QUEUE FULL status detected "
1481 "0x%x-0x%x.\n", vha->host_no, comp_status, 1533 "0x%x-0x%x.\n", vha->host_no, comp_status,
1482 scsi_status)); 1534 scsi_status));
1483
1484 /*
1485 * Adjust queue depth for all luns on the
1486 * port.
1487 */
1488 if (!ql2xqfulltracking)
1489 break;
1490 fcport->last_queue_full = jiffies;
1491 starget_for_each_device(
1492 cp->device->sdev_target, fcport,
1493 qla2x00_adjust_sdev_qdepth_down);
1494 break; 1535 break;
1495 } 1536 }
1496 if (lscsi_status != SS_CHECK_CONDITION) 1537 if (lscsi_status != SS_CHECK_CONDITION)
@@ -1501,42 +1542,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1501 break; 1542 break;
1502 1543
1503 qla2x00_handle_sense(sp, sense_data, sense_len, rsp); 1544 qla2x00_handle_sense(sp, sense_data, sense_len, rsp);
1504 } else {
1505 /*
1506 * If RISC reports underrun and target does not report
1507 * it then we must have a lost frame, so tell upper
1508 * layer to retry it by reporting an error.
1509 */
1510 if (!(scsi_status & SS_RESIDUAL_UNDER)) {
1511 DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped "
1512 "frame(s) detected (%x of %x bytes)..."
1513 "retrying command.\n",
1514 vha->host_no, cp->device->channel,
1515 cp->device->id, cp->device->lun, resid,
1516 scsi_bufflen(cp)));
1517
1518 scsi_set_resid(cp, resid);
1519 cp->result = DID_ERROR << 16;
1520 break;
1521 }
1522
1523 /* Handle mid-layer underflow */
1524 if ((unsigned)(scsi_bufflen(cp) - resid) <
1525 cp->underflow) {
1526 qla_printk(KERN_INFO, ha,
1527 "scsi(%ld:%d:%d:%d): Mid-layer underflow "
1528 "detected (%x of %x bytes)...returning "
1529 "error status.\n", vha->host_no,
1530 cp->device->channel, cp->device->id,
1531 cp->device->lun, resid,
1532 scsi_bufflen(cp));
1533
1534 cp->result = DID_ERROR << 16;
1535 break;
1536 }
1537
1538 /* Everybody online, looking good... */
1539 cp->result = DID_OK << 16;
1540 } 1545 }
1541 break; 1546 break;
1542 1547
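The reworked CS_DATA_UNDERRUN branch above reduces to a small decision tree: trust the firmware residual on FWI2 parts, retry (DID_ERROR) when firmware and status block disagree about the residual or when the firmware reports an underrun the target never acknowledged, fail on a mid-layer underflow, and otherwise complete with DID_OK plus the SCSI status. A sketch of that policy (illustrative names, not driver code):

enum verdict { RETRY_DID_ERROR, FAIL_DID_ERROR, COMPLETE_DID_OK };

static enum verdict underrun_verdict(int residual_under, int fw_resid_mismatch,
    int lscsi_status, unsigned int xferred, unsigned int underflow)
{
        if (residual_under) {
                if (fw_resid_mismatch)
                        return RETRY_DID_ERROR;         /* dropped frame(s) */
                if (!lscsi_status && xferred < underflow)
                        return FAIL_DID_ERROR;          /* mid-layer underflow */
        } else if (!lscsi_status) {
                return RETRY_DID_ERROR;                 /* fw-only underrun */
        }
        return COMPLETE_DID_OK;                         /* DID_OK | lscsi_status */
}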
@@ -1841,6 +1846,13 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
1841 qla24xx_logio_entry(vha, rsp->req, 1846 qla24xx_logio_entry(vha, rsp->req,
1842 (struct logio_entry_24xx *)pkt); 1847 (struct logio_entry_24xx *)pkt);
1843 break; 1848 break;
1849 case CT_IOCB_TYPE:
1850 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
1851 clear_bit(MBX_INTERRUPT, &vha->hw->mbx_cmd_flags);
1852 break;
1853 case ELS_IOCB_TYPE:
1854 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
1855 break;
1844 default: 1856 default:
1845 /* Type Not Supported. */ 1857 /* Type Not Supported. */
1846 DEBUG4(printk(KERN_WARNING 1858 DEBUG4(printk(KERN_WARNING
@@ -1938,12 +1950,15 @@ qla24xx_intr_handler(int irq, void *dev_id)
1938 reg = &ha->iobase->isp24; 1950 reg = &ha->iobase->isp24;
1939 status = 0; 1951 status = 0;
1940 1952
1953 if (unlikely(pci_channel_offline(ha->pdev)))
1954 return IRQ_HANDLED;
1955
1941 spin_lock_irqsave(&ha->hardware_lock, flags); 1956 spin_lock_irqsave(&ha->hardware_lock, flags);
1942 vha = pci_get_drvdata(ha->pdev); 1957 vha = pci_get_drvdata(ha->pdev);
1943 for (iter = 50; iter--; ) { 1958 for (iter = 50; iter--; ) {
1944 stat = RD_REG_DWORD(&reg->host_status); 1959 stat = RD_REG_DWORD(&reg->host_status);
1945 if (stat & HSRX_RISC_PAUSED) { 1960 if (stat & HSRX_RISC_PAUSED) {
1946 if (pci_channel_offline(ha->pdev)) 1961 if (unlikely(pci_channel_offline(ha->pdev)))
1947 break; 1962 break;
1948 1963
1949 hccr = RD_REG_DWORD(&reg->hccr); 1964 hccr = RD_REG_DWORD(&reg->hccr);
@@ -2006,6 +2021,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
2006 struct rsp_que *rsp; 2021 struct rsp_que *rsp;
2007 struct device_reg_24xx __iomem *reg; 2022 struct device_reg_24xx __iomem *reg;
2008 struct scsi_qla_host *vha; 2023 struct scsi_qla_host *vha;
2024 unsigned long flags;
2009 2025
2010 rsp = (struct rsp_que *) dev_id; 2026 rsp = (struct rsp_que *) dev_id;
2011 if (!rsp) { 2027 if (!rsp) {
@@ -2016,15 +2032,15 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
2016 ha = rsp->hw; 2032 ha = rsp->hw;
2017 reg = &ha->iobase->isp24; 2033 reg = &ha->iobase->isp24;
2018 2034
2019 spin_lock_irq(&ha->hardware_lock); 2035 spin_lock_irqsave(&ha->hardware_lock, flags);
2020 2036
2021 vha = qla25xx_get_host(rsp); 2037 vha = pci_get_drvdata(ha->pdev);
2022 qla24xx_process_response_queue(vha, rsp); 2038 qla24xx_process_response_queue(vha, rsp);
2023 if (!ha->mqenable) { 2039 if (!ha->flags.disable_msix_handshake) {
2024 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 2040 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2025 RD_REG_DWORD_RELAXED(&reg->hccr); 2041 RD_REG_DWORD_RELAXED(&reg->hccr);
2026 } 2042 }
2027 spin_unlock_irq(&ha->hardware_lock); 2043 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2028 2044
2029 return IRQ_HANDLED; 2045 return IRQ_HANDLED;
2030} 2046}
@@ -2034,6 +2050,8 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
2034{ 2050{
2035 struct qla_hw_data *ha; 2051 struct qla_hw_data *ha;
2036 struct rsp_que *rsp; 2052 struct rsp_que *rsp;
2053 struct device_reg_24xx __iomem *reg;
2054 unsigned long flags;
2037 2055
2038 rsp = (struct rsp_que *) dev_id; 2056 rsp = (struct rsp_que *) dev_id;
2039 if (!rsp) { 2057 if (!rsp) {
@@ -2043,6 +2061,14 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
2043 } 2061 }
2044 ha = rsp->hw; 2062 ha = rsp->hw;
2045 2063
2064 /* Clear the interrupt, if enabled, for this response queue */
2065 if (rsp->options & ~BIT_6) {
2066 reg = &ha->iobase->isp24;
2067 spin_lock_irqsave(&ha->hardware_lock, flags);
2068 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2069 RD_REG_DWORD_RELAXED(&reg->hccr);
2070 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2071 }
2046 queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work); 2072 queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
2047 2073
2048 return IRQ_HANDLED; 2074 return IRQ_HANDLED;
@@ -2059,6 +2085,7 @@ qla24xx_msix_default(int irq, void *dev_id)
2059 uint32_t stat; 2085 uint32_t stat;
2060 uint32_t hccr; 2086 uint32_t hccr;
2061 uint16_t mb[4]; 2087 uint16_t mb[4];
2088 unsigned long flags;
2062 2089
2063 rsp = (struct rsp_que *) dev_id; 2090 rsp = (struct rsp_que *) dev_id;
2064 if (!rsp) { 2091 if (!rsp) {
@@ -2070,12 +2097,12 @@ qla24xx_msix_default(int irq, void *dev_id)
2070 reg = &ha->iobase->isp24; 2097 reg = &ha->iobase->isp24;
2071 status = 0; 2098 status = 0;
2072 2099
2073 spin_lock_irq(&ha->hardware_lock); 2100 spin_lock_irqsave(&ha->hardware_lock, flags);
2074 vha = pci_get_drvdata(ha->pdev); 2101 vha = pci_get_drvdata(ha->pdev);
2075 do { 2102 do {
2076 stat = RD_REG_DWORD(&reg->host_status); 2103 stat = RD_REG_DWORD(&reg->host_status);
2077 if (stat & HSRX_RISC_PAUSED) { 2104 if (stat & HSRX_RISC_PAUSED) {
2078 if (pci_channel_offline(ha->pdev)) 2105 if (unlikely(pci_channel_offline(ha->pdev)))
2079 break; 2106 break;
2080 2107
2081 hccr = RD_REG_DWORD(&reg->hccr); 2108 hccr = RD_REG_DWORD(&reg->hccr);
@@ -2119,14 +2146,13 @@ qla24xx_msix_default(int irq, void *dev_id)
2119 } 2146 }
2120 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 2147 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2121 } while (0); 2148 } while (0);
2122 spin_unlock_irq(&ha->hardware_lock); 2149 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2123 2150
2124 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && 2151 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2125 (status & MBX_INTERRUPT) && ha->flags.mbox_int) { 2152 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
2126 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 2153 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
2127 complete(&ha->mbx_intr_comp); 2154 complete(&ha->mbx_intr_comp);
2128 } 2155 }
2129
2130 return IRQ_HANDLED; 2156 return IRQ_HANDLED;
2131} 2157}
2132 2158
@@ -2246,30 +2272,28 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
2246 2272
2247 /* If possible, enable MSI-X. */ 2273 /* If possible, enable MSI-X. */
2248 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && 2274 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
2249 !IS_QLA8432(ha) && !IS_QLA8001(ha)) 2275 !IS_QLA8432(ha) && !IS_QLA8001(ha))
2250 goto skip_msix; 2276 goto skip_msi;
2277
2278 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
2279 (ha->pdev->subsystem_device == 0x7040 ||
2280 ha->pdev->subsystem_device == 0x7041 ||
2281 ha->pdev->subsystem_device == 0x1705)) {
2282 DEBUG2(qla_printk(KERN_WARNING, ha,
2283 "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X,0x%X).\n",
2284 ha->pdev->subsystem_vendor,
2285 ha->pdev->subsystem_device));
2286 goto skip_msi;
2287 }
2251 2288
2252 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX || 2289 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX ||
2253 !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) { 2290 !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
2254 DEBUG2(qla_printk(KERN_WARNING, ha, 2291 DEBUG2(qla_printk(KERN_WARNING, ha,
2255 "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n", 2292 "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
2256 ha->pdev->revision, ha->fw_attributes)); 2293 ha->pdev->revision, ha->fw_attributes));
2257
2258 goto skip_msix; 2294 goto skip_msix;
2259 } 2295 }
2260 2296
2261 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
2262 (ha->pdev->subsystem_device == 0x7040 ||
2263 ha->pdev->subsystem_device == 0x7041 ||
2264 ha->pdev->subsystem_device == 0x1705)) {
2265 DEBUG2(qla_printk(KERN_WARNING, ha,
2266 "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X, 0x%X).\n",
2267 ha->pdev->subsystem_vendor,
2268 ha->pdev->subsystem_device));
2269
2270 goto skip_msi;
2271 }
2272
2273 ret = qla24xx_enable_msix(ha, rsp); 2297 ret = qla24xx_enable_msix(ha, rsp);
2274 if (!ret) { 2298 if (!ret) {
2275 DEBUG2(qla_printk(KERN_INFO, ha, 2299 DEBUG2(qla_printk(KERN_INFO, ha,
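Note the reordering above: the HP SSVID/SSDID quirk is now tested before the ISP2432 revision check and routes to skip_msi rather than skip_msix, so the listed boards avoid MSI as well as MSI-X. The same check, table-driven, as a sketch (0x103c is PCI_VENDOR_ID_HP):

#include <stdint.h>

struct ssid_quirk { uint16_t ssvid, ssdid; };

static const struct ssid_quirk no_msi_boards[] = {
        { 0x103c, 0x7040 },
        { 0x103c, 0x7041 },
        { 0x103c, 0x1705 },
};

static int board_denies_msi(uint16_t ssvid, uint16_t ssdid)
{
        unsigned int i;

        for (i = 0; i < sizeof(no_msi_boards) / sizeof(no_msi_boards[0]); i++)
                if (no_msi_boards[i].ssvid == ssvid &&
                    no_msi_boards[i].ssdid == ssdid)
                        return 1;
        return 0;
}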
@@ -2332,10 +2356,11 @@ qla2x00_free_irqs(scsi_qla_host_t *vha)
2332 2356
2333 if (ha->flags.msix_enabled) 2357 if (ha->flags.msix_enabled)
2334 qla24xx_disable_msix(ha); 2358 qla24xx_disable_msix(ha);
2335 else if (ha->flags.inta_enabled) { 2359 else if (ha->flags.msi_enabled) {
2336 free_irq(ha->pdev->irq, rsp); 2360 free_irq(ha->pdev->irq, rsp);
2337 pci_disable_msi(ha->pdev); 2361 pci_disable_msi(ha->pdev);
2338 } 2362 } else
2363 free_irq(ha->pdev->irq, rsp);
2339} 2364}
2340 2365
2341 2366
@@ -2357,30 +2382,3 @@ int qla25xx_request_irq(struct rsp_que *rsp)
2357 msix->rsp = rsp; 2382 msix->rsp = rsp;
2358 return ret; 2383 return ret;
2359} 2384}
2360
2361struct scsi_qla_host *
2362qla25xx_get_host(struct rsp_que *rsp)
2363{
2364 srb_t *sp;
2365 struct qla_hw_data *ha = rsp->hw;
2366 struct scsi_qla_host *vha = NULL;
2367 struct sts_entry_24xx *pkt;
2368 struct req_que *req;
2369 uint16_t que;
2370 uint32_t handle;
2371
2372 pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
2373 que = MSW(pkt->handle);
2374 handle = (uint32_t) LSW(pkt->handle);
2375 req = ha->req_q_map[que];
2376 if (handle < MAX_OUTSTANDING_COMMANDS) {
2377 sp = req->outstanding_cmds[handle];
2378 if (sp)
2379 return sp->fcport->vha;
2380 else
2381 goto base_que;
2382 }
2383base_que:
2384 vha = pci_get_drvdata(ha->pdev);
2385 return vha;
2386}
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index b6202fe118ac..42eb7ffd5942 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -7,6 +7,7 @@
7#include "qla_def.h" 7#include "qla_def.h"
8 8
9#include <linux/delay.h> 9#include <linux/delay.h>
10#include <linux/gfp.h>
10 11
11 12
12/* 13/*
@@ -56,6 +57,12 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
56 57
57 DEBUG11(printk("%s(%ld): entered.\n", __func__, base_vha->host_no)); 58 DEBUG11(printk("%s(%ld): entered.\n", __func__, base_vha->host_no));
58 59
60 if (ha->flags.pci_channel_io_perm_failure) {
61 DEBUG(printk("%s(%ld): Perm failure on EEH, timeout MBX "
62 "Exiting.\n", __func__, vha->host_no));
63 return QLA_FUNCTION_TIMEOUT;
64 }
65
59 /* 66 /*
60 * Wait for active mailbox commands to finish by waiting at most tov 67 * Wait for active mailbox commands to finish by waiting at most tov
61 * seconds. This is to serialize actual issuing of mailbox cmds during 68 * seconds. This is to serialize actual issuing of mailbox cmds during
@@ -154,10 +161,14 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
154 /* Check for pending interrupts. */ 161 /* Check for pending interrupts. */
155 qla2x00_poll(ha->rsp_q_map[0]); 162 qla2x00_poll(ha->rsp_q_map[0]);
156 163
157 if (command != MBC_LOAD_RISC_RAM_EXTENDED && 164 if (!ha->flags.mbox_int &&
158 !ha->flags.mbox_int) 165 !(IS_QLA2200(ha) &&
166 command == MBC_LOAD_RISC_RAM_EXTENDED))
159 msleep(10); 167 msleep(10);
160 } /* while */ 168 } /* while */
169 DEBUG17(qla_printk(KERN_WARNING, ha,
170 "Waited %d sec\n",
171 (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ)));
161 } 172 }
162 173
163 /* Check whether we timed out */ 174 /* Check whether we timed out */
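The DEBUG17 line added above recovers the elapsed poll time from jiffies: assuming wait_time was primed to jiffies + mcp->tov * HZ before the poll loop (as the subtraction implies), the start time is wait_time - mcp->tov * HZ. A sketch of the arithmetic:

static unsigned int elapsed_seconds(unsigned long now_jiffies,
    unsigned long wait_time, unsigned int tov, unsigned int hz)
{
        unsigned long start = wait_time - (unsigned long)tov * hz;

        return (unsigned int)((now_jiffies - start) / hz);      /* whole seconds */
}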
@@ -227,7 +238,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
227 238
228 if (rval == QLA_FUNCTION_TIMEOUT && 239 if (rval == QLA_FUNCTION_TIMEOUT &&
229 mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) { 240 mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
230 if (!io_lock_on || (mcp->flags & IOCTL_CMD)) { 241 if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
242 ha->flags.eeh_busy) {
231 /* not in dpc. schedule it for dpc to take over. */ 243 /* not in dpc. schedule it for dpc to take over. */
232 DEBUG(printk("%s(%ld): timeout schedule " 244 DEBUG(printk("%s(%ld): timeout schedule "
233 "isp_abort_needed.\n", __func__, 245 "isp_abort_needed.\n", __func__,
@@ -237,7 +249,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
237 base_vha->host_no)); 249 base_vha->host_no));
238 qla_printk(KERN_WARNING, ha, 250 qla_printk(KERN_WARNING, ha,
239 "Mailbox command timeout occurred. Scheduling ISP " 251 "Mailbox command timeout occurred. Scheduling ISP "
240 "abort.\n"); 252 "abort. eeh_busy: 0x%x\n", ha->flags.eeh_busy);
241 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); 253 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
242 qla2xxx_wake_dpc(vha); 254 qla2xxx_wake_dpc(vha);
243 } else if (!abort_active) { 255 } else if (!abort_active) {
@@ -328,6 +340,7 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
328 return rval; 340 return rval;
329} 341}
330 342
343#define EXTENDED_BB_CREDITS BIT_0
331/* 344/*
332 * qla2x00_execute_fw 345 * qla2x00_execute_fw
333 * Start adapter firmware. 346 * Start adapter firmware.
@@ -360,7 +373,12 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
360 mcp->mb[1] = MSW(risc_addr); 373 mcp->mb[1] = MSW(risc_addr);
361 mcp->mb[2] = LSW(risc_addr); 374 mcp->mb[2] = LSW(risc_addr);
362 mcp->mb[3] = 0; 375 mcp->mb[3] = 0;
363 mcp->mb[4] = 0; 376 if (IS_QLA81XX(ha)) {
377 struct nvram_81xx *nv = ha->nvram;
378 mcp->mb[4] = (nv->enhanced_features &
379 EXTENDED_BB_CREDITS);
380 } else
381 mcp->mb[4] = 0;
364 mcp->out_mb |= MBX_4|MBX_3|MBX_2|MBX_1; 382 mcp->out_mb |= MBX_4|MBX_3|MBX_2|MBX_1;
365 mcp->in_mb |= MBX_1; 383 mcp->in_mb |= MBX_1;
366 } else { 384 } else {
@@ -2006,7 +2024,7 @@ qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
2006int 2024int
2007qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt, 2025qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
2008 uint16_t *orig_xchg_cnt, uint16_t *cur_iocb_cnt, 2026 uint16_t *orig_xchg_cnt, uint16_t *cur_iocb_cnt,
2009 uint16_t *orig_iocb_cnt, uint16_t *max_npiv_vports) 2027 uint16_t *orig_iocb_cnt, uint16_t *max_npiv_vports, uint16_t *max_fcfs)
2010{ 2028{
2011 int rval; 2029 int rval;
2012 mbx_cmd_t mc; 2030 mbx_cmd_t mc;
@@ -2017,6 +2035,8 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
2017 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS; 2035 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
2018 mcp->out_mb = MBX_0; 2036 mcp->out_mb = MBX_0;
2019 mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 2037 mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2038 if (IS_QLA81XX(vha->hw))
2039 mcp->in_mb |= MBX_12;
2020 mcp->tov = MBX_TOV_SECONDS; 2040 mcp->tov = MBX_TOV_SECONDS;
2021 mcp->flags = 0; 2041 mcp->flags = 0;
2022 rval = qla2x00_mailbox_command(vha, mcp); 2042 rval = qla2x00_mailbox_command(vha, mcp);
@@ -2027,9 +2047,10 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
2027 vha->host_no, mcp->mb[0])); 2047 vha->host_no, mcp->mb[0]));
2028 } else { 2048 } else {
2029 DEBUG11(printk("%s(%ld): done. mb1=%x mb2=%x mb3=%x mb6=%x " 2049 DEBUG11(printk("%s(%ld): done. mb1=%x mb2=%x mb3=%x mb6=%x "
2030 "mb7=%x mb10=%x mb11=%x.\n", __func__, vha->host_no, 2050 "mb7=%x mb10=%x mb11=%x mb12=%x.\n", __func__,
2031 mcp->mb[1], mcp->mb[2], mcp->mb[3], mcp->mb[6], mcp->mb[7], 2051 vha->host_no, mcp->mb[1], mcp->mb[2], mcp->mb[3],
2032 mcp->mb[10], mcp->mb[11])); 2052 mcp->mb[6], mcp->mb[7], mcp->mb[10], mcp->mb[11],
2053 mcp->mb[12]));
2033 2054
2034 if (cur_xchg_cnt) 2055 if (cur_xchg_cnt)
2035 *cur_xchg_cnt = mcp->mb[3]; 2056 *cur_xchg_cnt = mcp->mb[3];
@@ -2041,6 +2062,8 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
2041 *orig_iocb_cnt = mcp->mb[10]; 2062 *orig_iocb_cnt = mcp->mb[10];
2042 if (vha->hw->flags.npiv_supported && max_npiv_vports) 2063 if (vha->hw->flags.npiv_supported && max_npiv_vports)
2043 *max_npiv_vports = mcp->mb[11]; 2064 *max_npiv_vports = mcp->mb[11];
2065 if (IS_QLA81XX(vha->hw) && max_fcfs)
2066 *max_fcfs = mcp->mb[12];
2044 } 2067 }
2045 2068
2046 return (rval); 2069 return (rval);
@@ -2313,6 +2336,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2313{ 2336{
2314 int rval, rval2; 2337 int rval, rval2;
2315 struct tsk_mgmt_cmd *tsk; 2338 struct tsk_mgmt_cmd *tsk;
2339 struct sts_entry_24xx *sts;
2316 dma_addr_t tsk_dma; 2340 dma_addr_t tsk_dma;
2317 scsi_qla_host_t *vha; 2341 scsi_qla_host_t *vha;
2318 struct qla_hw_data *ha; 2342 struct qla_hw_data *ha;
@@ -2352,20 +2376,37 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2352 sizeof(tsk->p.tsk.lun)); 2376 sizeof(tsk->p.tsk.lun));
2353 } 2377 }
2354 2378
2379 sts = &tsk->p.sts;
2355 rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0); 2380 rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
2356 if (rval != QLA_SUCCESS) { 2381 if (rval != QLA_SUCCESS) {
2357 DEBUG2_3_11(printk("%s(%ld): failed to issue %s Reset IOCB " 2382 DEBUG2_3_11(printk("%s(%ld): failed to issue %s Reset IOCB "
2358 "(%x).\n", __func__, vha->host_no, name, rval)); 2383 "(%x).\n", __func__, vha->host_no, name, rval));
2359 } else if (tsk->p.sts.entry_status != 0) { 2384 } else if (sts->entry_status != 0) {
2360 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2385 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
2361 "-- error status (%x).\n", __func__, vha->host_no, 2386 "-- error status (%x).\n", __func__, vha->host_no,
2362 tsk->p.sts.entry_status)); 2387 sts->entry_status));
2363 rval = QLA_FUNCTION_FAILED; 2388 rval = QLA_FUNCTION_FAILED;
2364 } else if (tsk->p.sts.comp_status != 2389 } else if (sts->comp_status !=
2365 __constant_cpu_to_le16(CS_COMPLETE)) { 2390 __constant_cpu_to_le16(CS_COMPLETE)) {
2366 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2391 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
2367 "-- completion status (%x).\n", __func__, 2392 "-- completion status (%x).\n", __func__,
2368 vha->host_no, le16_to_cpu(tsk->p.sts.comp_status))); 2393 vha->host_no, le16_to_cpu(sts->comp_status)));
2394 rval = QLA_FUNCTION_FAILED;
2395 } else if (!(le16_to_cpu(sts->scsi_status) &
2396 SS_RESPONSE_INFO_LEN_VALID)) {
2397 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
2398 "-- no response info (%x).\n", __func__, vha->host_no,
2399 le16_to_cpu(sts->scsi_status)));
2400 rval = QLA_FUNCTION_FAILED;
2401 } else if (le32_to_cpu(sts->rsp_data_len) < 4) {
2402 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
2403 "-- not enough response info (%d).\n", __func__,
2404 vha->host_no, le32_to_cpu(sts->rsp_data_len)));
2405 rval = QLA_FUNCTION_FAILED;
2406 } else if (sts->data[3]) {
2407 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
2408 "-- response (%x).\n", __func__,
2409 vha->host_no, sts->data[3]));
2369 rval = QLA_FUNCTION_FAILED; 2410 rval = QLA_FUNCTION_FAILED;
2370 } 2411 }
2371 2412
@@ -2507,6 +2548,9 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
2507 if (!IS_FWI2_CAPABLE(vha->hw)) 2548 if (!IS_FWI2_CAPABLE(vha->hw))
2508 return QLA_FUNCTION_FAILED; 2549 return QLA_FUNCTION_FAILED;
2509 2550
2551 if (unlikely(pci_channel_offline(vha->hw->pdev)))
2552 return QLA_FUNCTION_FAILED;
2553
2510 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2554 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2511 2555
2512 mcp->mb[0] = MBC_TRACE_CONTROL; 2556 mcp->mb[0] = MBC_TRACE_CONTROL;
@@ -2542,6 +2586,9 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
2542 if (!IS_FWI2_CAPABLE(vha->hw)) 2586 if (!IS_FWI2_CAPABLE(vha->hw))
2543 return QLA_FUNCTION_FAILED; 2587 return QLA_FUNCTION_FAILED;
2544 2588
2589 if (unlikely(pci_channel_offline(vha->hw->pdev)))
2590 return QLA_FUNCTION_FAILED;
2591
2545 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2592 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2546 2593
2547 mcp->mb[0] = MBC_TRACE_CONTROL; 2594 mcp->mb[0] = MBC_TRACE_CONTROL;
@@ -2572,6 +2619,9 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
2572 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw)) 2619 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw))
2573 return QLA_FUNCTION_FAILED; 2620 return QLA_FUNCTION_FAILED;
2574 2621
2622 if (unlikely(pci_channel_offline(vha->hw->pdev)))
2623 return QLA_FUNCTION_FAILED;
2624
2575 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2625 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2576 2626
2577 mcp->mb[0] = MBC_TRACE_CONTROL; 2627 mcp->mb[0] = MBC_TRACE_CONTROL;
@@ -2616,6 +2666,9 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
2616 if (!IS_FWI2_CAPABLE(vha->hw)) 2666 if (!IS_FWI2_CAPABLE(vha->hw))
2617 return QLA_FUNCTION_FAILED; 2667 return QLA_FUNCTION_FAILED;
2618 2668
2669 if (unlikely(pci_channel_offline(vha->hw->pdev)))
2670 return QLA_FUNCTION_FAILED;
2671
2619 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2672 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2620 2673
2621 mcp->mb[0] = MBC_TRACE_CONTROL; 2674 mcp->mb[0] = MBC_TRACE_CONTROL;
@@ -2759,8 +2812,10 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
2759 vp_idx, MSB(stat), 2812 vp_idx, MSB(stat),
2760 rptid_entry->port_id[2], rptid_entry->port_id[1], 2813 rptid_entry->port_id[2], rptid_entry->port_id[1],
2761 rptid_entry->port_id[0])); 2814 rptid_entry->port_id[0]));
2762 if (vp_idx == 0) 2815
2763 return; 2816 vp = vha;
2817 if (vp_idx == 0 && (MSB(stat) != 1))
2818 goto reg_needed;
2764 2819
2765 if (MSB(stat) == 1) { 2820 if (MSB(stat) == 1) {
2766 DEBUG2(printk("scsi(%ld): Could not acquire ID for " 2821 DEBUG2(printk("scsi(%ld): Could not acquire ID for "
@@ -2783,8 +2838,11 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
2783 * response queue. Handle it in dpc context. 2838 * response queue. Handle it in dpc context.
2784 */ 2839 */
2785 set_bit(VP_IDX_ACQUIRED, &vp->vp_flags); 2840 set_bit(VP_IDX_ACQUIRED, &vp->vp_flags);
2786 set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
2787 2841
2842reg_needed:
2843 set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags);
2844 set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags);
2845 set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
2788 qla2xxx_wake_dpc(vha); 2846 qla2xxx_wake_dpc(vha);
2789 } 2847 }
2790} 2848}
@@ -3585,6 +3643,157 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
3585} 3643}
3586 3644
3587int 3645int
3646qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *mresp)
3647{
3648 int rval;
3649 mbx_cmd_t mc;
3650 mbx_cmd_t *mcp = &mc;
3651 uint32_t iter_cnt = 0x1;
3652
3653 DEBUG11(printk("scsi(%ld): entered.\n", vha->host_no));
3654
 3655 memset(mcp->mb, 0, sizeof(mcp->mb));
3656 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
 3657 mcp->mb[1] = mreq->options | BIT_6; /* BIT_6 specifies 64-bit addressing */
3658
3659 /* transfer count */
3660 mcp->mb[10] = LSW(mreq->transfer_size);
3661 mcp->mb[11] = MSW(mreq->transfer_size);
3662
3663 /* send data address */
3664 mcp->mb[14] = LSW(mreq->send_dma);
3665 mcp->mb[15] = MSW(mreq->send_dma);
3666 mcp->mb[20] = LSW(MSD(mreq->send_dma));
3667 mcp->mb[21] = MSW(MSD(mreq->send_dma));
3668
 3669 /* receive data address */
3670 mcp->mb[16] = LSW(mreq->rcv_dma);
3671 mcp->mb[17] = MSW(mreq->rcv_dma);
3672 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
3673 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
3674
3675 /* Iteration count */
3676 mcp->mb[18] = LSW(iter_cnt);
3677 mcp->mb[19] = MSW(iter_cnt);
3678
3679 mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
3680 MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
3681 if (IS_QLA81XX(vha->hw))
3682 mcp->out_mb |= MBX_2;
3683 mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;
3684
3685 mcp->buf_size = mreq->transfer_size;
3686 mcp->tov = MBX_TOV_SECONDS;
3687 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
3688
3689 rval = qla2x00_mailbox_command(vha, mcp);
3690
3691 if (rval != QLA_SUCCESS) {
3692 DEBUG2(printk(KERN_WARNING
3693 "(%ld): failed=%x mb[0]=0x%x "
3694 "mb[1]=0x%x mb[2]=0x%x mb[3]=0x%x mb[18]=0x%x mb[19]=0x%x. \n", vha->host_no, rval,
3695 mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], mcp->mb[18], mcp->mb[19]));
3696 } else {
3697 DEBUG2(printk(KERN_WARNING
3698 "scsi(%ld): done.\n", vha->host_no));
3699 }
3700
3701 /* Copy mailbox information */
 3702 memcpy(mresp, mcp->mb, 64);
3703 mresp[3] = mcp->mb[18];
3704 mresp[4] = mcp->mb[19];
3705 return rval;
3706}
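
The four-way LSW/MSW spread above is how a 64-bit DMA address is packed into 16-bit mailbox registers: mb14/mb15 take the low dword, mb20/mb21 the high dword. A self-contained check of the layout (macro bodies are a sketch of the driver's, not copied from it):

    #include <stdint.h>
    #include <stdio.h>

    #define LSW(x) ((uint16_t)(x))
    #define MSW(x) ((uint16_t)((uint32_t)(x) >> 16))
    #define MSD(x) ((uint32_t)((uint64_t)(x) >> 32))

    int main(void)
    {
        uint64_t dma = 0x1122334455667788ULL;

        /* Matches the send-address layout in the hunk above. */
        printf("mb14=%04x mb15=%04x mb20=%04x mb21=%04x\n",
               LSW(dma), MSW(dma), LSW(MSD(dma)), MSW(MSD(dma)));
        return 0; /* prints mb14=7788 mb15=5566 mb20=3344 mb21=1122 */
    }
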
3707
3708int
3709qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *mresp)
3710{
3711 int rval;
3712 mbx_cmd_t mc;
3713 mbx_cmd_t *mcp = &mc;
3714 struct qla_hw_data *ha = vha->hw;
3715
3716 DEBUG11(printk("scsi(%ld): entered.\n", vha->host_no));
3717
 3718 memset(mcp->mb, 0, sizeof(mcp->mb));
3719 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
 3720 mcp->mb[1] = mreq->options | BIT_6; /* BIT_6 specifies 64-bit addressing */
3721 if (IS_QLA81XX(ha))
3722 mcp->mb[1] |= BIT_15;
3723 mcp->mb[2] = IS_QLA81XX(ha) ? vha->fcoe_fcf_idx : 0;
3724 mcp->mb[16] = LSW(mreq->rcv_dma);
3725 mcp->mb[17] = MSW(mreq->rcv_dma);
3726 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
3727 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
3728
3729 mcp->mb[10] = LSW(mreq->transfer_size);
3730
3731 mcp->mb[14] = LSW(mreq->send_dma);
3732 mcp->mb[15] = MSW(mreq->send_dma);
3733 mcp->mb[20] = LSW(MSD(mreq->send_dma));
3734 mcp->mb[21] = MSW(MSD(mreq->send_dma));
3735
3736 mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
3737 MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
3738 if (IS_QLA81XX(ha))
3739 mcp->out_mb |= MBX_2;
3740
3741 mcp->in_mb = MBX_0;
3742 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha))
3743 mcp->in_mb |= MBX_1;
3744 if (IS_QLA81XX(ha))
3745 mcp->in_mb |= MBX_3;
3746
3747 mcp->tov = MBX_TOV_SECONDS;
3748 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
3749 mcp->buf_size = mreq->transfer_size;
3750
3751 rval = qla2x00_mailbox_command(vha, mcp);
3752
3753 if (rval != QLA_SUCCESS) {
3754 DEBUG2(printk(KERN_WARNING
3755 "(%ld): failed=%x mb[0]=0x%x mb[1]=0x%x.\n",
3756 vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
3757 } else {
3758 DEBUG2(printk(KERN_WARNING
3759 "scsi(%ld): done.\n", vha->host_no));
3760 }
3761
3762 /* Copy mailbox information */
 3763 memcpy(mresp, mcp->mb, 32);
3764 return rval;
3765}
3766int
3767qla84xx_reset_chip(scsi_qla_host_t *ha, uint16_t enable_diagnostic,
3768 uint16_t *cmd_status)
3769{
3770 int rval;
3771 mbx_cmd_t mc;
3772 mbx_cmd_t *mcp = &mc;
3773
3774 DEBUG16(printk("%s(%ld): enable_diag=%d entered.\n", __func__,
3775 ha->host_no, enable_diagnostic));
3776
3777 mcp->mb[0] = MBC_ISP84XX_RESET;
3778 mcp->mb[1] = enable_diagnostic;
3779 mcp->out_mb = MBX_1|MBX_0;
3780 mcp->in_mb = MBX_1|MBX_0;
3781 mcp->tov = MBX_TOV_SECONDS;
3782 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
3783 rval = qla2x00_mailbox_command(ha, mcp);
3784
3785 /* Return mailbox statuses. */
3786 *cmd_status = mcp->mb[0];
3787 if (rval != QLA_SUCCESS)
3788 DEBUG16(printk("%s(%ld): failed=%x.\n", __func__, ha->host_no,
3789 rval));
3790 else
3791 DEBUG16(printk("%s(%ld): done.\n", __func__, ha->host_no));
3792
3793 return rval;
3794}
3795
3796int
3588qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data) 3797qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
3589{ 3798{
3590 int rval; 3799 int rval;
@@ -3615,3 +3824,36 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
3615 3824
3616 return rval; 3825 return rval;
3617} 3826}
3827
3828int
3829qla2x00_get_data_rate(scsi_qla_host_t *vha)
3830{
3831 int rval;
3832 mbx_cmd_t mc;
3833 mbx_cmd_t *mcp = &mc;
3834 struct qla_hw_data *ha = vha->hw;
3835
3836 if (!IS_FWI2_CAPABLE(ha))
3837 return QLA_FUNCTION_FAILED;
3838
3839 DEBUG11(printk(KERN_INFO "%s(%ld): entered.\n", __func__, vha->host_no));
3840
3841 mcp->mb[0] = MBC_DATA_RATE;
3842 mcp->mb[1] = 0;
3843 mcp->out_mb = MBX_1|MBX_0;
3844 mcp->in_mb = MBX_2|MBX_1|MBX_0;
3845 mcp->tov = MBX_TOV_SECONDS;
3846 mcp->flags = 0;
3847 rval = qla2x00_mailbox_command(vha, mcp);
3848 if (rval != QLA_SUCCESS) {
3849 DEBUG2_3_11(printk(KERN_INFO "%s(%ld): failed=%x mb[0]=%x.\n",
3850 __func__, vha->host_no, rval, mcp->mb[0]));
3851 } else {
3852 DEBUG11(printk(KERN_INFO
3853 "%s(%ld): done.\n", __func__, vha->host_no));
3854 if (mcp->mb[1] != 0x7)
3855 ha->link_data_rate = mcp->mb[1];
3856 }
3857
3858 return rval;
3859}
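
For context on the mb[1] check above: firmware reports the negotiated link rate in mailbox 1, and 0x7 is treated as "no usable rate", leaving ha->link_data_rate untouched. An illustrative decode, with rate codes that should be confirmed against the ISP firmware spec rather than taken from this sketch:

    static const char *link_rate_name(unsigned int mb1)
    {
        switch (mb1) {  /* illustrative mapping only */
        case 0x0: return "1 Gb/s";
        case 0x1: return "2 Gb/s";
        case 0x3: return "4 Gb/s";
        case 0x4: return "8 Gb/s";
        default:  return "unknown";
        }
    }
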
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index e07b3617f019..8220e7b9799b 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -9,6 +9,7 @@
9 9
10#include <linux/moduleparam.h> 10#include <linux/moduleparam.h>
11#include <linux/vmalloc.h> 11#include <linux/vmalloc.h>
12#include <linux/slab.h>
12#include <linux/list.h> 13#include <linux/list.h>
13 14
14#include <scsi/scsi_tcq.h> 15#include <scsi/scsi_tcq.h>
@@ -382,8 +383,6 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
382 vha->mgmt_svr_loop_id = 10 + vha->vp_idx; 383 vha->mgmt_svr_loop_id = 10 + vha->vp_idx;
383 384
384 vha->dpc_flags = 0L; 385 vha->dpc_flags = 0L;
385 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
386 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
387 386
388 /* 387 /*
389 * To fix the issue of processing a parent's RSCN for the vport before 388 * To fix the issue of processing a parent's RSCN for the vport before
@@ -638,11 +637,15 @@ failed:
638 637
639static void qla_do_work(struct work_struct *work) 638static void qla_do_work(struct work_struct *work)
640{ 639{
640 unsigned long flags;
641 struct rsp_que *rsp = container_of(work, struct rsp_que, q_work); 641 struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
642 struct scsi_qla_host *vha; 642 struct scsi_qla_host *vha;
643 struct qla_hw_data *ha = rsp->hw;
643 644
644 vha = qla25xx_get_host(rsp); 645 spin_lock_irqsave(&rsp->hw->hardware_lock, flags);
646 vha = pci_get_drvdata(ha->pdev);
645 qla24xx_process_response_queue(vha, rsp); 647 qla24xx_process_response_queue(vha, rsp);
648 spin_unlock_irqrestore(&rsp->hw->hardware_lock, flags);
646} 649}
647 650
648/* create response queue */ 651/* create response queue */
@@ -698,6 +701,10 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
698 /* Use alternate PCI devfn */ 701 /* Use alternate PCI devfn */
699 if (LSB(rsp->rid)) 702 if (LSB(rsp->rid))
700 options |= BIT_5; 703 options |= BIT_5;
 704 /* Enable MSI-X handshake mode for adapters lacking NACK capability */
705 if (!IS_MSIX_NACK_CAPABLE(ha))
706 options |= BIT_6;
707
701 rsp->options = options; 708 rsp->options = options;
702 rsp->id = que_id; 709 rsp->id = que_id;
703 reg = ISP_QUE_REG(ha, que_id); 710 reg = ISP_QUE_REG(ha, que_id);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index b79fca7d461b..48c37e38ed01 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -11,6 +11,8 @@
11#include <linux/delay.h> 11#include <linux/delay.h>
12#include <linux/kthread.h> 12#include <linux/kthread.h>
13#include <linux/mutex.h> 13#include <linux/mutex.h>
14#include <linux/kobject.h>
15#include <linux/slab.h>
14 16
15#include <scsi/scsi_tcq.h> 17#include <scsi/scsi_tcq.h>
16#include <scsi/scsicam.h> 18#include <scsi/scsicam.h>
@@ -77,21 +79,6 @@ module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
77MODULE_PARM_DESC(ql2xmaxqdepth, 79MODULE_PARM_DESC(ql2xmaxqdepth,
78 "Maximum queue depth to report for target devices."); 80 "Maximum queue depth to report for target devices.");
79 81
80int ql2xqfulltracking = 1;
81module_param(ql2xqfulltracking, int, S_IRUGO|S_IWUSR);
82MODULE_PARM_DESC(ql2xqfulltracking,
83 "Controls whether the driver tracks queue full status "
84 "returns and dynamically adjusts a scsi device's queue "
85 "depth. Default is 1, perform tracking. Set to 0 to "
86 "disable dynamic tracking and adjustment of queue depth.");
87
88int ql2xqfullrampup = 120;
89module_param(ql2xqfullrampup, int, S_IRUGO|S_IWUSR);
90MODULE_PARM_DESC(ql2xqfullrampup,
91 "Number of seconds to wait to begin to ramp-up the queue "
92 "depth for a device after a queue-full condition has been "
93 "detected. Default is 120 seconds.");
94
95int ql2xiidmaenable=1; 82int ql2xiidmaenable=1;
96module_param(ql2xiidmaenable, int, S_IRUGO|S_IRUSR); 83module_param(ql2xiidmaenable, int, S_IRUGO|S_IRUSR);
97MODULE_PARM_DESC(ql2xiidmaenable, 84MODULE_PARM_DESC(ql2xiidmaenable,
@@ -121,6 +108,12 @@ MODULE_PARM_DESC(ql2xfwloadbin,
121 " 1 -- load firmware from flash.\n" 108 " 1 -- load firmware from flash.\n"
122 " 0 -- use default semantics.\n"); 109 " 0 -- use default semantics.\n");
123 110
111int ql2xetsenable;
112module_param(ql2xetsenable, int, S_IRUGO|S_IRUSR);
113MODULE_PARM_DESC(ql2xetsenable,
114 "Enables firmware ETS burst."
115 "Default is 0 - skip ETS enablement.");
116
124/* 117/*
125 * SCSI host template entry points 118 * SCSI host template entry points
126 */ 119 */
@@ -137,7 +130,7 @@ static int qla2xxx_eh_target_reset(struct scsi_cmnd *);
137static int qla2xxx_eh_bus_reset(struct scsi_cmnd *); 130static int qla2xxx_eh_bus_reset(struct scsi_cmnd *);
138static int qla2xxx_eh_host_reset(struct scsi_cmnd *); 131static int qla2xxx_eh_host_reset(struct scsi_cmnd *);
139 132
140static int qla2x00_change_queue_depth(struct scsi_device *, int); 133static int qla2x00_change_queue_depth(struct scsi_device *, int, int);
141static int qla2x00_change_queue_type(struct scsi_device *, int); 134static int qla2x00_change_queue_type(struct scsi_device *, int);
142 135
143struct scsi_host_template qla2xxx_driver_template = { 136struct scsi_host_template qla2xxx_driver_template = {
@@ -489,11 +482,11 @@ qla2xxx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
489 srb_t *sp; 482 srb_t *sp;
490 int rval; 483 int rval;
491 484
492 if (unlikely(pci_channel_offline(ha->pdev))) { 485 if (ha->flags.eeh_busy) {
493 if (ha->pdev->error_state == pci_channel_io_frozen) 486 if (ha->flags.pci_channel_io_perm_failure)
494 cmd->result = DID_REQUEUE << 16;
495 else
496 cmd->result = DID_NO_CONNECT << 16; 487 cmd->result = DID_NO_CONNECT << 16;
488 else
489 cmd->result = DID_REQUEUE << 16;
497 goto qc24_fail_command; 490 goto qc24_fail_command;
498 } 491 }
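
The swapped branches above change queuecommand's fast-fail policy: a permanent PCI failure now fails the command outright, while any other EEH-busy state asks the midlayer to requeue. The decision in isolation, with DID_* values as found in include/scsi/scsi.h:

    #define DID_NO_CONNECT 0x01  /* per include/scsi/scsi.h */
    #define DID_REQUEUE    0x0d

    /* Returns 0 on the normal path, otherwise a cmd->result disposition. */
    static int eeh_disposition(int eeh_busy, int perm_failure)
    {
        if (!eeh_busy)
            return 0;
        return (perm_failure ? DID_NO_CONNECT : DID_REQUEUE) << 16;
    }
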
499 492
@@ -566,8 +559,15 @@ qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
566#define ABORT_POLLING_PERIOD 1000 559#define ABORT_POLLING_PERIOD 1000
567#define ABORT_WAIT_ITER ((10 * 1000) / (ABORT_POLLING_PERIOD)) 560#define ABORT_WAIT_ITER ((10 * 1000) / (ABORT_POLLING_PERIOD))
568 unsigned long wait_iter = ABORT_WAIT_ITER; 561 unsigned long wait_iter = ABORT_WAIT_ITER;
562 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
563 struct qla_hw_data *ha = vha->hw;
569 int ret = QLA_SUCCESS; 564 int ret = QLA_SUCCESS;
570 565
566 if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) {
567 DEBUG17(qla_printk(KERN_WARNING, ha, "return:eh_wait\n"));
568 return ret;
569 }
570
571 while (CMD_SP(cmd) && wait_iter--) { 571 while (CMD_SP(cmd) && wait_iter--) {
572 msleep(ABORT_POLLING_PERIOD); 572 msleep(ABORT_POLLING_PERIOD);
573 } 573 }
@@ -689,61 +689,6 @@ qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha)
689 return (return_status); 689 return (return_status);
690} 690}
691 691
692void
693qla2x00_abort_fcport_cmds(fc_port_t *fcport)
694{
695 int cnt;
696 unsigned long flags;
697 srb_t *sp;
698 scsi_qla_host_t *vha = fcport->vha;
699 struct qla_hw_data *ha = vha->hw;
700 struct req_que *req;
701
702 spin_lock_irqsave(&ha->hardware_lock, flags);
703 req = vha->req;
704 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
705 sp = req->outstanding_cmds[cnt];
706 if (!sp)
707 continue;
708 if (sp->fcport != fcport)
709 continue;
710 if (sp->ctx)
711 continue;
712
713 spin_unlock_irqrestore(&ha->hardware_lock, flags);
714 if (ha->isp_ops->abort_command(sp)) {
715 DEBUG2(qla_printk(KERN_WARNING, ha,
716 "Abort failed -- %lx\n",
717 sp->cmd->serial_number));
718 } else {
719 if (qla2x00_eh_wait_on_command(sp->cmd) !=
720 QLA_SUCCESS)
721 DEBUG2(qla_printk(KERN_WARNING, ha,
722 "Abort failed while waiting -- %lx\n",
723 sp->cmd->serial_number));
724 }
725 spin_lock_irqsave(&ha->hardware_lock, flags);
726 }
727 spin_unlock_irqrestore(&ha->hardware_lock, flags);
728}
729
730static void
731qla2x00_block_error_handler(struct scsi_cmnd *cmnd)
732{
733 struct Scsi_Host *shost = cmnd->device->host;
734 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
735 unsigned long flags;
736
737 spin_lock_irqsave(shost->host_lock, flags);
738 while (rport->port_state == FC_PORTSTATE_BLOCKED) {
739 spin_unlock_irqrestore(shost->host_lock, flags);
740 msleep(1000);
741 spin_lock_irqsave(shost->host_lock, flags);
742 }
743 spin_unlock_irqrestore(shost->host_lock, flags);
744 return;
745}
746
747/************************************************************************** 692/**************************************************************************
748* qla2xxx_eh_abort 693* qla2xxx_eh_abort
749* 694*
@@ -773,7 +718,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
773 struct req_que *req = vha->req; 718 struct req_que *req = vha->req;
774 srb_t *spt; 719 srb_t *spt;
775 720
776 qla2x00_block_error_handler(cmd); 721 fc_block_scsi_eh(cmd);
777 722
778 if (!CMD_SP(cmd)) 723 if (!CMD_SP(cmd))
779 return SUCCESS; 724 return SUCCESS;
@@ -904,7 +849,7 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
904 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 849 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
905 int err; 850 int err;
906 851
907 qla2x00_block_error_handler(cmd); 852 fc_block_scsi_eh(cmd);
908 853
909 if (!fcport) 854 if (!fcport)
910 return FAILED; 855 return FAILED;
@@ -984,7 +929,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
984 unsigned long serial; 929 unsigned long serial;
985 srb_t *sp = (srb_t *) CMD_SP(cmd); 930 srb_t *sp = (srb_t *) CMD_SP(cmd);
986 931
987 qla2x00_block_error_handler(cmd); 932 fc_block_scsi_eh(cmd);
988 933
989 id = cmd->device->id; 934 id = cmd->device->id;
990 lun = cmd->device->lun; 935 lun = cmd->device->lun;
@@ -1047,7 +992,7 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
1047 srb_t *sp = (srb_t *) CMD_SP(cmd); 992 srb_t *sp = (srb_t *) CMD_SP(cmd);
1048 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 993 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
1049 994
1050 qla2x00_block_error_handler(cmd); 995 fc_block_scsi_eh(cmd);
1051 996
1052 id = cmd->device->id; 997 id = cmd->device->id;
1053 lun = cmd->device->lun; 998 lun = cmd->device->lun;
@@ -1119,6 +1064,20 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
1119 struct fc_port *fcport; 1064 struct fc_port *fcport;
1120 struct qla_hw_data *ha = vha->hw; 1065 struct qla_hw_data *ha = vha->hw;
1121 1066
1067 if (ha->flags.enable_target_reset) {
1068 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1069 if (fcport->port_type != FCT_TARGET)
1070 continue;
1071
1072 ret = ha->isp_ops->target_reset(fcport, 0, 0);
1073 if (ret != QLA_SUCCESS) {
1074 DEBUG2_3(printk("%s(%ld): bus_reset failed: "
1075 "target_reset=%d d_id=%x.\n", __func__,
1076 vha->host_no, ret, fcport->d_id.b24));
1077 }
1078 }
1079 }
1080
1122 if (ha->flags.enable_lip_full_login && !IS_QLA81XX(ha)) { 1081 if (ha->flags.enable_lip_full_login && !IS_QLA81XX(ha)) {
1123 ret = qla2x00_full_login_lip(vha); 1082 ret = qla2x00_full_login_lip(vha);
1124 if (ret != QLA_SUCCESS) { 1083 if (ret != QLA_SUCCESS) {
@@ -1141,19 +1100,6 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
1141 qla2x00_wait_for_loop_ready(vha); 1100 qla2x00_wait_for_loop_ready(vha);
1142 } 1101 }
1143 1102
1144 if (ha->flags.enable_target_reset) {
1145 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1146 if (fcport->port_type != FCT_TARGET)
1147 continue;
1148
1149 ret = ha->isp_ops->target_reset(fcport, 0, 0);
1150 if (ret != QLA_SUCCESS) {
1151 DEBUG2_3(printk("%s(%ld): bus_reset failed: "
1152 "target_reset=%d d_id=%x.\n", __func__,
1153 vha->host_no, ret, fcport->d_id.b24));
1154 }
1155 }
1156 }
1157 /* Issue marker command only when we are going to start the I/O */ 1103 /* Issue marker command only when we are going to start the I/O */
1158 vha->marker_needed = 1; 1104 vha->marker_needed = 1;
1159 1105
@@ -1184,8 +1130,19 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1184 qla2x00_sp_compl(ha, sp); 1130 qla2x00_sp_compl(ha, sp);
1185 } else { 1131 } else {
1186 ctx = sp->ctx; 1132 ctx = sp->ctx;
1187 del_timer_sync(&ctx->timer); 1133 if (ctx->type == SRB_LOGIN_CMD || ctx->type == SRB_LOGOUT_CMD) {
1188 ctx->free(sp); 1134 del_timer_sync(&ctx->timer);
1135 ctx->free(sp);
1136 } else {
 1137 struct srb_bsg *sp_bsg = (struct srb_bsg *)sp->ctx;
1138 if (sp_bsg->bsg_job->request->msgcode == FC_BSG_HST_CT)
1139 kfree(sp->fcport);
1140 sp_bsg->bsg_job->req->errors = 0;
1141 sp_bsg->bsg_job->reply->result = res;
1142 sp_bsg->bsg_job->job_done(sp_bsg->bsg_job);
1143 kfree(sp->ctx);
1144 mempool_free(sp, ha->srb_mempool);
1145 }
1189 } 1146 }
1190 } 1147 }
1191 } 1148 }
@@ -1212,7 +1169,6 @@ qla2xxx_slave_configure(struct scsi_device *sdev)
1212 scsi_qla_host_t *vha = shost_priv(sdev->host); 1169 scsi_qla_host_t *vha = shost_priv(sdev->host);
1213 struct qla_hw_data *ha = vha->hw; 1170 struct qla_hw_data *ha = vha->hw;
1214 struct fc_rport *rport = starget_to_rport(sdev->sdev_target); 1171 struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
1215 fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
1216 struct req_que *req = vha->req; 1172 struct req_que *req = vha->req;
1217 1173
1218 if (sdev->tagged_supported) 1174 if (sdev->tagged_supported)
@@ -1221,8 +1177,6 @@ qla2xxx_slave_configure(struct scsi_device *sdev)
1221 scsi_deactivate_tcq(sdev, req->max_q_depth); 1177 scsi_deactivate_tcq(sdev, req->max_q_depth);
1222 1178
1223 rport->dev_loss_tmo = ha->port_down_retry_count; 1179 rport->dev_loss_tmo = ha->port_down_retry_count;
1224 if (sdev->type == TYPE_TAPE)
1225 fcport->flags |= FCF_TAPE_PRESENT;
1226 1180
1227 return 0; 1181 return 0;
1228} 1182}
@@ -1233,10 +1187,61 @@ qla2xxx_slave_destroy(struct scsi_device *sdev)
1233 sdev->hostdata = NULL; 1187 sdev->hostdata = NULL;
1234} 1188}
1235 1189
1190static void qla2x00_handle_queue_full(struct scsi_device *sdev, int qdepth)
1191{
1192 fc_port_t *fcport = (struct fc_port *) sdev->hostdata;
1193
1194 if (!scsi_track_queue_full(sdev, qdepth))
1195 return;
1196
1197 DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw,
1198 "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
1199 fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
1200 sdev->queue_depth));
1201}
1202
1203static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
1204{
1205 fc_port_t *fcport = sdev->hostdata;
1206 struct scsi_qla_host *vha = fcport->vha;
1207 struct qla_hw_data *ha = vha->hw;
1208 struct req_que *req = NULL;
1209
1210 req = vha->req;
1211 if (!req)
1212 return;
1213
1214 if (req->max_q_depth <= sdev->queue_depth || req->max_q_depth < qdepth)
1215 return;
1216
1217 if (sdev->ordered_tags)
1218 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, qdepth);
1219 else
1220 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth);
1221
1222 DEBUG2(qla_printk(KERN_INFO, ha,
1223 "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
1224 fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
1225 sdev->queue_depth));
1226}
1227
1236static int 1228static int
1237qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth) 1229qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
1238{ 1230{
1239 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); 1231 switch (reason) {
1232 case SCSI_QDEPTH_DEFAULT:
1233 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
1234 break;
1235 case SCSI_QDEPTH_QFULL:
1236 qla2x00_handle_queue_full(sdev, qdepth);
1237 break;
1238 case SCSI_QDEPTH_RAMP_UP:
1239 qla2x00_adjust_sdev_qdepth_up(sdev, qdepth);
1240 break;
1241 default:
1242 return -EOPNOTSUPP;
1243 }
1244
1240 return sdev->queue_depth; 1245 return sdev->queue_depth;
1241} 1246}
1242 1247
@@ -1672,9 +1677,11 @@ skip_pio:
1672 1677
1673 /* Determine queue resources */ 1678 /* Determine queue resources */
1674 ha->max_req_queues = ha->max_rsp_queues = 1; 1679 ha->max_req_queues = ha->max_rsp_queues = 1;
1675 if ((ql2xmaxqueues <= 1 || ql2xmultique_tag < 1) && 1680 if ((ql2xmaxqueues <= 1 && !ql2xmultique_tag) ||
1681 (ql2xmaxqueues > 1 && ql2xmultique_tag) ||
1676 (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))) 1682 (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
1677 goto mqiobase_exit; 1683 goto mqiobase_exit;
1684
1678 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3), 1685 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
1679 pci_resource_len(ha->pdev, 3)); 1686 pci_resource_len(ha->pdev, 3));
1680 if (ha->mqiobase) { 1687 if (ha->mqiobase) {
@@ -1790,6 +1797,12 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1790 1797
1791 /* Set ISP-type information. */ 1798 /* Set ISP-type information. */
1792 qla2x00_set_isp_flags(ha); 1799 qla2x00_set_isp_flags(ha);
1800
 1801 /* Set EEH reset type to fundamental if required by the HBA */
 1802 if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha)) {
1803 pdev->needs_freset = 1;
1804 }
1805
1793 /* Configure PCI I/O space */ 1806 /* Configure PCI I/O space */
1794 ret = qla2x00_iospace_config(ha); 1807 ret = qla2x00_iospace_config(ha);
1795 if (ret) 1808 if (ret)
@@ -1939,11 +1952,15 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1939 host->max_channel = MAX_BUSES - 1; 1952 host->max_channel = MAX_BUSES - 1;
1940 host->max_lun = MAX_LUNS; 1953 host->max_lun = MAX_LUNS;
1941 host->transportt = qla2xxx_transport_template; 1954 host->transportt = qla2xxx_transport_template;
1955 sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC);
1942 1956
1943 /* Set up the irqs */ 1957 /* Set up the irqs */
1944 ret = qla2x00_request_irqs(ha, rsp); 1958 ret = qla2x00_request_irqs(ha, rsp);
1945 if (ret) 1959 if (ret)
1946 goto probe_init_failed; 1960 goto probe_init_failed;
1961
1962 pci_save_state(pdev);
1963
1947 /* Alloc arrays of request and response ring ptrs */ 1964 /* Alloc arrays of request and response ring ptrs */
1948que_init: 1965que_init:
1949 if (!qla2x00_alloc_queues(ha)) { 1966 if (!qla2x00_alloc_queues(ha)) {
@@ -2016,13 +2033,13 @@ skip_dpc:
2016 DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n", 2033 DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n",
2017 base_vha->host_no, ha)); 2034 base_vha->host_no, ha));
2018 2035
2019 base_vha->flags.init_done = 1;
2020 base_vha->flags.online = 1;
2021
2022 ret = scsi_add_host(host, &pdev->dev); 2036 ret = scsi_add_host(host, &pdev->dev);
2023 if (ret) 2037 if (ret)
2024 goto probe_failed; 2038 goto probe_failed;
2025 2039
2040 base_vha->flags.init_done = 1;
2041 base_vha->flags.online = 1;
2042
2026 ha->isp_ops->enable_intrs(ha); 2043 ha->isp_ops->enable_intrs(ha);
2027 2044
2028 scsi_scan_host(host); 2045 scsi_scan_host(host);
@@ -2145,6 +2162,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
2145 kfree(ha); 2162 kfree(ha);
2146 ha = NULL; 2163 ha = NULL;
2147 2164
2165 pci_disable_pcie_error_reporting(pdev);
2166
2148 pci_disable_device(pdev); 2167 pci_disable_device(pdev);
2149 pci_set_drvdata(pdev, NULL); 2168 pci_set_drvdata(pdev, NULL);
2150} 2169}
@@ -2154,6 +2173,24 @@ qla2x00_free_device(scsi_qla_host_t *vha)
2154{ 2173{
2155 struct qla_hw_data *ha = vha->hw; 2174 struct qla_hw_data *ha = vha->hw;
2156 2175
2176 qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
2177
2178 /* Disable timer */
2179 if (vha->timer_active)
2180 qla2x00_stop_timer(vha);
2181
2182 /* Kill the kernel thread for this host */
2183 if (ha->dpc_thread) {
2184 struct task_struct *t = ha->dpc_thread;
2185
2186 /*
2187 * qla2xxx_wake_dpc checks for ->dpc_thread
2188 * so we need to zero it out.
2189 */
2190 ha->dpc_thread = NULL;
2191 kthread_stop(t);
2192 }
2193
2157 qla25xx_delete_queues(vha); 2194 qla25xx_delete_queues(vha);
2158 2195
2159 if (ha->flags.fce_enabled) 2196 if (ha->flags.fce_enabled)
@@ -2165,6 +2202,8 @@ qla2x00_free_device(scsi_qla_host_t *vha)
2165 /* Stop currently executing firmware. */ 2202 /* Stop currently executing firmware. */
2166 qla2x00_try_to_stop_firmware(vha); 2203 qla2x00_try_to_stop_firmware(vha);
2167 2204
2205 vha->flags.online = 0;
2206
2168 /* turn-off interrupts on the card */ 2207 /* turn-off interrupts on the card */
2169 if (ha->interrupts_on) 2208 if (ha->interrupts_on)
2170 ha->isp_ops->disable_intrs(ha); 2209 ha->isp_ops->disable_intrs(ha);
@@ -2653,6 +2692,37 @@ qla2x00_post_async_work(login_done, QLA_EVT_ASYNC_LOGIN_DONE);
2653qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT); 2692qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
2654qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE); 2693qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE);
2655 2694
2695int
2696qla2x00_post_uevent_work(struct scsi_qla_host *vha, u32 code)
2697{
2698 struct qla_work_evt *e;
2699
2700 e = qla2x00_alloc_work(vha, QLA_EVT_UEVENT);
2701 if (!e)
2702 return QLA_FUNCTION_FAILED;
2703
2704 e->u.uevent.code = code;
2705 return qla2x00_post_work(vha, e);
2706}
2707
2708static void
2709qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code)
2710{
2711 char event_string[40];
2712 char *envp[] = { event_string, NULL };
2713
2714 switch (code) {
2715 case QLA_UEVENT_CODE_FW_DUMP:
2716 snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld",
2717 vha->host_no);
2718 break;
2719 default:
2720 /* do nothing */
2721 break;
2722 }
2723 kobject_uevent_env(&vha->hw->pdev->dev.kobj, KOBJ_CHANGE, envp);
2724}
2725
2656void 2726void
2657qla2x00_do_work(struct scsi_qla_host *vha) 2727qla2x00_do_work(struct scsi_qla_host *vha)
2658{ 2728{
@@ -2690,6 +2760,9 @@ qla2x00_do_work(struct scsi_qla_host *vha)
2690 qla2x00_async_logout_done(vha, e->u.logio.fcport, 2760 qla2x00_async_logout_done(vha, e->u.logio.fcport,
2691 e->u.logio.data); 2761 e->u.logio.data);
2692 break; 2762 break;
2763 case QLA_EVT_UEVENT:
2764 qla2x00_uevent_emit(vha, e->u.uevent.code);
2765 break;
2693 } 2766 }
2694 if (e->flags & QLA_EVT_FLAG_FREE) 2767 if (e->flags & QLA_EVT_FLAG_FREE)
2695 kfree(e); 2768 kfree(e);
@@ -2717,7 +2790,7 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
2717 2790
2718 fcport->login_retry--; 2791 fcport->login_retry--;
2719 if (fcport->flags & FCF_FABRIC_DEVICE) { 2792 if (fcport->flags & FCF_FABRIC_DEVICE) {
2720 if (fcport->flags & FCF_TAPE_PRESENT) 2793 if (fcport->flags & FCF_FCP2_DEVICE)
2721 ha->isp_ops->fabric_logout(vha, 2794 ha->isp_ops->fabric_logout(vha,
2722 fcport->loop_id, 2795 fcport->loop_id,
2723 fcport->d_id.b.domain, 2796 fcport->d_id.b.domain,
@@ -2805,6 +2878,13 @@ qla2x00_do_dpc(void *data)
2805 if (!base_vha->flags.init_done) 2878 if (!base_vha->flags.init_done)
2806 continue; 2879 continue;
2807 2880
2881 if (ha->flags.eeh_busy) {
2882 DEBUG17(qla_printk(KERN_WARNING, ha,
2883 "qla2x00_do_dpc: dpc_flags: %lx\n",
2884 base_vha->dpc_flags));
2885 continue;
2886 }
2887
2808 DEBUG3(printk("scsi(%ld): DPC handler\n", base_vha->host_no)); 2888 DEBUG3(printk("scsi(%ld): DPC handler\n", base_vha->host_no));
2809 2889
2810 ha->dpc_active = 1; 2890 ha->dpc_active = 1;
@@ -2995,8 +3075,13 @@ qla2x00_timer(scsi_qla_host_t *vha)
2995 int index; 3075 int index;
2996 srb_t *sp; 3076 srb_t *sp;
2997 int t; 3077 int t;
3078 uint16_t w;
2998 struct qla_hw_data *ha = vha->hw; 3079 struct qla_hw_data *ha = vha->hw;
2999 struct req_que *req; 3080 struct req_que *req;
3081
3082 /* Hardware read to raise pending EEH errors during mailbox waits. */
3083 if (!pci_channel_offline(ha->pdev))
3084 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
3000 /* 3085 /*
3001 * Ports - Port down timer. 3086 * Ports - Port down timer.
3002 * 3087 *
@@ -3041,7 +3126,10 @@ qla2x00_timer(scsi_qla_host_t *vha)
3041 if (!IS_QLA2100(ha) && vha->link_down_timeout) 3126 if (!IS_QLA2100(ha) && vha->link_down_timeout)
3042 atomic_set(&vha->loop_state, LOOP_DEAD); 3127 atomic_set(&vha->loop_state, LOOP_DEAD);
3043 3128
3044 /* Schedule an ISP abort to return any tape commands. */ 3129 /*
3130 * Schedule an ISP abort to return any FCP2-device
3131 * commands.
3132 */
3045 /* NPIV - scan physical port only */ 3133 /* NPIV - scan physical port only */
3046 if (!vha->vp_idx) { 3134 if (!vha->vp_idx) {
3047 spin_lock_irqsave(&ha->hardware_lock, 3135 spin_lock_irqsave(&ha->hardware_lock,
@@ -3058,7 +3146,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
3058 if (sp->ctx) 3146 if (sp->ctx)
3059 continue; 3147 continue;
3060 sfcp = sp->fcport; 3148 sfcp = sp->fcport;
3061 if (!(sfcp->flags & FCF_TAPE_PRESENT)) 3149 if (!(sfcp->flags & FCF_FCP2_DEVICE))
3062 continue; 3150 continue;
3063 3151
3064 set_bit(ISP_ABORT_NEEDED, 3152 set_bit(ISP_ABORT_NEEDED,
@@ -3198,16 +3286,24 @@ qla2x00_release_firmware(void)
3198static pci_ers_result_t 3286static pci_ers_result_t
3199qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 3287qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
3200{ 3288{
3201 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); 3289 scsi_qla_host_t *vha = pci_get_drvdata(pdev);
3290 struct qla_hw_data *ha = vha->hw;
3291
3292 DEBUG2(qla_printk(KERN_WARNING, ha, "error_detected:state %x\n",
3293 state));
3202 3294
3203 switch (state) { 3295 switch (state) {
3204 case pci_channel_io_normal: 3296 case pci_channel_io_normal:
3297 ha->flags.eeh_busy = 0;
3205 return PCI_ERS_RESULT_CAN_RECOVER; 3298 return PCI_ERS_RESULT_CAN_RECOVER;
3206 case pci_channel_io_frozen: 3299 case pci_channel_io_frozen:
3300 ha->flags.eeh_busy = 1;
3301 qla2x00_free_irqs(vha);
3207 pci_disable_device(pdev); 3302 pci_disable_device(pdev);
3208 return PCI_ERS_RESULT_NEED_RESET; 3303 return PCI_ERS_RESULT_NEED_RESET;
3209 case pci_channel_io_perm_failure: 3304 case pci_channel_io_perm_failure:
3210 qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16); 3305 ha->flags.pci_channel_io_perm_failure = 1;
3306 qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
3211 return PCI_ERS_RESULT_DISCONNECT; 3307 return PCI_ERS_RESULT_DISCONNECT;
3212 } 3308 }
3213 return PCI_ERS_RESULT_NEED_RESET; 3309 return PCI_ERS_RESULT_NEED_RESET;
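
The error_detected hunk above maps each PCI channel state to a recovery verdict while tracking driver-side flags. The same shape as a stand-alone sketch (the enums are stand-ins for pci_channel_state_t and pci_ers_result_t):

    enum chan_state { CHAN_NORMAL, CHAN_FROZEN, CHAN_PERM_FAILURE };
    enum ers_result { ERS_CAN_RECOVER, ERS_NEED_RESET, ERS_DISCONNECT };

    struct eeh_flags { int eeh_busy; int perm_failure; };

    static enum ers_result error_detected_sketch(enum chan_state s,
                                                 struct eeh_flags *f)
    {
        switch (s) {
        case CHAN_NORMAL:       /* transient glitch: clear busy, recover */
            f->eeh_busy = 0;
            return ERS_CAN_RECOVER;
        case CHAN_FROZEN:       /* hold off I/O until slot reset */
            f->eeh_busy = 1;
            return ERS_NEED_RESET;
        case CHAN_PERM_FAILURE: /* dead slot: fail outstanding commands */
            f->perm_failure = 1;
            return ERS_DISCONNECT;
        }
        return ERS_NEED_RESET;
    }
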
@@ -3256,7 +3352,23 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
3256 pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT; 3352 pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
3257 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); 3353 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
3258 struct qla_hw_data *ha = base_vha->hw; 3354 struct qla_hw_data *ha = base_vha->hw;
3259 int rc; 3355 struct rsp_que *rsp;
3356 int rc, retries = 10;
3357
3358 DEBUG17(qla_printk(KERN_WARNING, ha, "slot_reset\n"));
3359
 3360 /* Workaround: the qla2xxx driver, which accesses hardware early in
 3361 * slot reset, needs the error state to be pci_channel_io_normal.
 3362 * Otherwise mailbox commands time out.
3363 */
3364 pdev->error_state = pci_channel_io_normal;
3365
3366 pci_restore_state(pdev);
3367
 3368 /* pci_restore_state() clears the saved_state flag of the device,
 3369 * so save the state again to keep it available for a later restore.
3370 */
3371 pci_save_state(pdev);
3260 3372
3261 if (ha->mem_only) 3373 if (ha->mem_only)
3262 rc = pci_enable_device_mem(pdev); 3374 rc = pci_enable_device_mem(pdev);
@@ -3266,19 +3378,29 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
3266 if (rc) { 3378 if (rc) {
3267 qla_printk(KERN_WARNING, ha, 3379 qla_printk(KERN_WARNING, ha,
3268 "Can't re-enable PCI device after reset.\n"); 3380 "Can't re-enable PCI device after reset.\n");
3269
3270 return ret; 3381 return ret;
3271 } 3382 }
3272 pci_set_master(pdev); 3383
3384 rsp = ha->rsp_q_map[0];
3385 if (qla2x00_request_irqs(ha, rsp))
3386 return ret;
3273 3387
3274 if (ha->isp_ops->pci_config(base_vha)) 3388 if (ha->isp_ops->pci_config(base_vha))
3275 return ret; 3389 return ret;
3276 3390
3391 while (ha->flags.mbox_busy && retries--)
3392 msleep(1000);
3393
3277 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 3394 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
3278 if (qla2x00_abort_isp(base_vha) == QLA_SUCCESS) 3395 if (qla2x00_abort_isp(base_vha) == QLA_SUCCESS)
3279 ret = PCI_ERS_RESULT_RECOVERED; 3396 ret = PCI_ERS_RESULT_RECOVERED;
3280 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 3397 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
3281 3398
3399 pci_cleanup_aer_uncorrect_error_status(pdev);
3400
3401 DEBUG17(qla_printk(KERN_WARNING, ha,
3402 "slot_reset-return:ret=%x\n", ret));
3403
3282 return ret; 3404 return ret;
3283} 3405}
3284 3406
@@ -3289,13 +3411,16 @@ qla2xxx_pci_resume(struct pci_dev *pdev)
3289 struct qla_hw_data *ha = base_vha->hw; 3411 struct qla_hw_data *ha = base_vha->hw;
3290 int ret; 3412 int ret;
3291 3413
3414 DEBUG17(qla_printk(KERN_WARNING, ha, "pci_resume\n"));
3415
3292 ret = qla2x00_wait_for_hba_online(base_vha); 3416 ret = qla2x00_wait_for_hba_online(base_vha);
3293 if (ret != QLA_SUCCESS) { 3417 if (ret != QLA_SUCCESS) {
3294 qla_printk(KERN_ERR, ha, 3418 qla_printk(KERN_ERR, ha,
3295 "the device failed to resume I/O " 3419 "the device failed to resume I/O "
3296 "from slot/link_reset"); 3420 "from slot/link_reset");
3297 } 3421 }
3298 pci_cleanup_aer_uncorrect_error_status(pdev); 3422
3423 ha->flags.eeh_busy = 0;
3299} 3424}
3300 3425
3301static struct pci_error_handlers qla2xxx_err_handler = { 3426static struct pci_error_handlers qla2xxx_err_handler = {
@@ -3408,4 +3533,3 @@ MODULE_FIRMWARE(FW_FILE_ISP2300);
3408MODULE_FIRMWARE(FW_FILE_ISP2322); 3533MODULE_FIRMWARE(FW_FILE_ISP2322);
3409MODULE_FIRMWARE(FW_FILE_ISP24XX); 3534MODULE_FIRMWARE(FW_FILE_ISP24XX);
3410MODULE_FIRMWARE(FW_FILE_ISP25XX); 3535MODULE_FIRMWARE(FW_FILE_ISP25XX);
3411MODULE_FIRMWARE(FW_FILE_ISP81XX);
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 010e69b29afe..8b3de4e54c28 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -7,6 +7,7 @@
7#include "qla_def.h" 7#include "qla_def.h"
8 8
9#include <linux/delay.h> 9#include <linux/delay.h>
10#include <linux/slab.h>
10#include <linux/vmalloc.h> 11#include <linux/vmalloc.h>
11#include <asm/uaccess.h> 12#include <asm/uaccess.h>
12 13
@@ -2292,11 +2293,14 @@ qla25xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
2292 uint32_t faddr, left, burst; 2293 uint32_t faddr, left, burst;
2293 struct qla_hw_data *ha = vha->hw; 2294 struct qla_hw_data *ha = vha->hw;
2294 2295
2296 if (IS_QLA25XX(ha) || IS_QLA81XX(ha))
2297 goto try_fast;
2295 if (offset & 0xfff) 2298 if (offset & 0xfff)
2296 goto slow_read; 2299 goto slow_read;
2297 if (length < OPTROM_BURST_SIZE) 2300 if (length < OPTROM_BURST_SIZE)
2298 goto slow_read; 2301 goto slow_read;
2299 2302
2303try_fast:
2300 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, 2304 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
2301 &optrom_dma, GFP_KERNEL); 2305 &optrom_dma, GFP_KERNEL);
2302 if (!optrom) { 2306 if (!optrom) {
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index ac107a2c34a4..109068df933f 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.03.01-k6" 10#define QLA2XXX_VERSION "8.03.02-k2"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 3 13#define QLA_DRIVER_MINOR_VER 3
14#define QLA_DRIVER_PATCH_VER 1 14#define QLA_DRIVER_PATCH_VER 2
15#define QLA_DRIVER_BETA_VER 0 15#define QLA_DRIVER_BETA_VER 2
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index af8c3233e8ae..92329a461c68 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -844,10 +844,10 @@ static int qla4xxx_config_nvram(struct scsi_qla_host *ha)
844 DEBUG2(printk("scsi%ld: %s: Get EEProm parameters \n", ha->host_no, 844 DEBUG2(printk("scsi%ld: %s: Get EEProm parameters \n", ha->host_no,
845 __func__)); 845 __func__));
846 if (ql4xxx_lock_flash(ha) != QLA_SUCCESS) 846 if (ql4xxx_lock_flash(ha) != QLA_SUCCESS)
847 return (QLA_ERROR); 847 return QLA_ERROR;
848 if (ql4xxx_lock_nvram(ha) != QLA_SUCCESS) { 848 if (ql4xxx_lock_nvram(ha) != QLA_SUCCESS) {
849 ql4xxx_unlock_flash(ha); 849 ql4xxx_unlock_flash(ha);
850 return (QLA_ERROR); 850 return QLA_ERROR;
851 } 851 }
852 852
853 /* Get EEPRom Parameters from NVRAM and validate */ 853 /* Get EEPRom Parameters from NVRAM and validate */
@@ -858,20 +858,18 @@ static int qla4xxx_config_nvram(struct scsi_qla_host *ha)
858 rd_nvram_word(ha, eeprom_ext_hw_conf_offset(ha)); 858 rd_nvram_word(ha, eeprom_ext_hw_conf_offset(ha));
859 spin_unlock_irqrestore(&ha->hardware_lock, flags); 859 spin_unlock_irqrestore(&ha->hardware_lock, flags);
860 } else { 860 } else {
861 /*
862 * QLogic adapters should always have a valid NVRAM.
863 * If not valid, do not load.
864 */
865 dev_warn(&ha->pdev->dev, 861 dev_warn(&ha->pdev->dev,
866 "scsi%ld: %s: EEProm checksum invalid. " 862 "scsi%ld: %s: EEProm checksum invalid. "
867 "Please update your EEPROM\n", ha->host_no, 863 "Please update your EEPROM\n", ha->host_no,
868 __func__); 864 __func__);
869 865
870 /* set defaults */ 866 /* Attempt to set defaults */
871 if (is_qla4010(ha)) 867 if (is_qla4010(ha))
872 extHwConfig.Asuint32_t = 0x1912; 868 extHwConfig.Asuint32_t = 0x1912;
873 else if (is_qla4022(ha) | is_qla4032(ha)) 869 else if (is_qla4022(ha) | is_qla4032(ha))
874 extHwConfig.Asuint32_t = 0x0023; 870 extHwConfig.Asuint32_t = 0x0023;
871 else
872 return QLA_ERROR;
875 } 873 }
876 DEBUG(printk("scsi%ld: %s: Setting extHwConfig to 0xFFFF%04x\n", 874 DEBUG(printk("scsi%ld: %s: Setting extHwConfig to 0xFFFF%04x\n",
877 ha->host_no, __func__, extHwConfig.Asuint32_t)); 875 ha->host_no, __func__, extHwConfig.Asuint32_t));
@@ -884,7 +882,7 @@ static int qla4xxx_config_nvram(struct scsi_qla_host *ha)
884 ql4xxx_unlock_nvram(ha); 882 ql4xxx_unlock_nvram(ha);
885 ql4xxx_unlock_flash(ha); 883 ql4xxx_unlock_flash(ha);
886 884
887 return (QLA_SUCCESS); 885 return QLA_SUCCESS;
888} 886}
889 887
890static void qla4x00_pci_config(struct scsi_qla_host *ha) 888static void qla4x00_pci_config(struct scsi_qla_host *ha)
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 09d6d4b76f39..caeb7d10ae04 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -467,7 +467,7 @@ int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
467 if (conn_err_detail) 467 if (conn_err_detail)
468 *conn_err_detail = mbox_sts[5]; 468 *conn_err_detail = mbox_sts[5];
469 if (tcp_source_port_num) 469 if (tcp_source_port_num)
470 *tcp_source_port_num = (uint16_t) mbox_sts[6] >> 16; 470 *tcp_source_port_num = (uint16_t) (mbox_sts[6] >> 16);
471 if (connection_id) 471 if (connection_id)
472 *connection_id = (uint16_t) mbox_sts[6] & 0x00FF; 472 *connection_id = (uint16_t) mbox_sts[6] & 0x00FF;
473 status = QLA_SUCCESS; 473 status = QLA_SUCCESS;
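
The one-character-looking change above fixes an operator-precedence bug: a cast binds tighter than `>>`, so the old expression truncated to 16 bits before shifting and always produced 0. A quick demonstration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t mbox = 0xABCD1234;
        uint16_t old_port = (uint16_t) mbox >> 16;    /* 0x1234 >> 16 == 0 */
        uint16_t new_port = (uint16_t) (mbox >> 16);  /* 0xABCD, as intended */

        printf("old=%#x new=%#x\n", old_port, new_port);
        return 0;
    }
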
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 83c8b5e4fc8b..2ccad36bee9f 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -5,6 +5,7 @@
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
7#include <linux/moduleparam.h> 7#include <linux/moduleparam.h>
8#include <linux/slab.h>
8 9
9#include <scsi/scsi_tcq.h> 10#include <scsi/scsi_tcq.h>
10#include <scsi/scsicam.h> 11#include <scsi/scsicam.h>
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index fa34b92850a6..aa406497eebc 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -16,7 +16,7 @@
16#include <linux/delay.h> 16#include <linux/delay.h>
17#include <linux/types.h> 17#include <linux/types.h>
18#include <linux/string.h> 18#include <linux/string.h>
19#include <linux/slab.h> 19#include <linux/gfp.h>
20#include <linux/blkdev.h> 20#include <linux/blkdev.h>
21#include <linux/proc_fs.h> 21#include <linux/proc_fs.h>
22#include <linux/stat.h> 22#include <linux/stat.h>
@@ -738,7 +738,7 @@ static int __devinit qpti_register_irq(struct qlogicpti *qpti)
738 * sanely maintain. 738 * sanely maintain.
739 */ 739 */
740 if (request_irq(qpti->irq, qpti_intr, 740 if (request_irq(qpti->irq, qpti_intr,
741 IRQF_SHARED, "Qlogic/PTI", qpti)) 741 IRQF_SHARED, "QlogicPTI", qpti))
742 goto fail; 742 goto fail;
743 743
744 printk("qlogicpti%d: IRQ %d ", qpti->qpti_id, qpti->irq); 744 printk("qlogicpti%d: IRQ %d ", qpti->qpti_id, qpti->irq);
diff --git a/drivers/scsi/qlogicpti.h b/drivers/scsi/qlogicpti.h
index 9c053bbaa877..e3c74d1ee2db 100644
--- a/drivers/scsi/qlogicpti.h
+++ b/drivers/scsi/qlogicpti.h
@@ -43,7 +43,7 @@
43 * determined for each queue request anew. 43 * determined for each queue request anew.
44 */ 44 */
45#define QLOGICPTI_REQ_QUEUE_LEN 255 /* must be power of two - 1 */ 45#define QLOGICPTI_REQ_QUEUE_LEN 255 /* must be power of two - 1 */
46#define QLOGICPTI_MAX_SG(ql) (4 + ((ql) > 0) ? 7*((ql) - 1) : 0) 46#define QLOGICPTI_MAX_SG(ql) (4 + (((ql) > 0) ? 7*((ql) - 1) : 0))
47 47
48/* mailbox command complete status codes */ 48/* mailbox command complete status codes */
49#define MBOX_COMMAND_COMPLETE 0x4000 49#define MBOX_COMMAND_COMPLETE 0x4000
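
The header fix above is another precedence repair: `+` binds tighter than `?:`, so in the old macro the condition was (4 + ((ql) > 0)), which is always nonzero, and the macro always evaluated to 7*(ql - 1), going negative for ql == 0. A compile-and-run check:

    #include <stdio.h>

    #define OLD_MAX_SG(ql) (4 + ((ql) > 0) ? 7*((ql) - 1) : 0)
    #define NEW_MAX_SG(ql) (4 + (((ql) > 0) ? 7*((ql) - 1) : 0))

    int main(void)
    {
        printf("ql=0:   old=%d new=%d\n", OLD_MAX_SG(0), NEW_MAX_SG(0));
        /* ql=0:   old=-7   new=4    */
        printf("ql=255: old=%d new=%d\n", OLD_MAX_SG(255), NEW_MAX_SG(255));
        /* ql=255: old=1778 new=1782 */
        return 0;
    }
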
diff --git a/drivers/scsi/raid_class.c b/drivers/scsi/raid_class.c
index 8e5c169b03fb..2c146b44d95f 100644
--- a/drivers/scsi/raid_class.c
+++ b/drivers/scsi/raid_class.c
@@ -63,6 +63,7 @@ static int raid_match(struct attribute_container *cont, struct device *dev)
63 * emulated RAID devices, so start with SCSI */ 63 * emulated RAID devices, so start with SCSI */
64 struct raid_internal *i = ac_to_raid_internal(cont); 64 struct raid_internal *i = ac_to_raid_internal(cont);
65 65
66#if defined(CONFIG_SCSI) || defined(CONFIG_SCSI_MODULE)
66 if (scsi_is_sdev_device(dev)) { 67 if (scsi_is_sdev_device(dev)) {
67 struct scsi_device *sdev = to_scsi_device(dev); 68 struct scsi_device *sdev = to_scsi_device(dev);
68 69
@@ -71,6 +72,7 @@ static int raid_match(struct attribute_container *cont, struct device *dev)
71 72
72 return i->f->is_raid(dev); 73 return i->f->is_raid(dev);
73 } 74 }
75#endif
74 /* FIXME: look at other subsystems too */ 76 /* FIXME: look at other subsystems too */
75 return 0; 77 return 0;
76} 78}
@@ -149,6 +151,7 @@ static struct {
149 { RAID_LEVEL_0, "raid0" }, 151 { RAID_LEVEL_0, "raid0" },
150 { RAID_LEVEL_1, "raid1" }, 152 { RAID_LEVEL_1, "raid1" },
151 { RAID_LEVEL_10, "raid10" }, 153 { RAID_LEVEL_10, "raid10" },
154 { RAID_LEVEL_1E, "raid1e" },
152 { RAID_LEVEL_3, "raid3" }, 155 { RAID_LEVEL_3, "raid3" },
153 { RAID_LEVEL_4, "raid4" }, 156 { RAID_LEVEL_4, "raid4" },
154 { RAID_LEVEL_5, "raid5" }, 157 { RAID_LEVEL_5, "raid5" },
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index dd098cad337b..1c08f6164658 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -940,10 +940,16 @@ EXPORT_SYMBOL(scsi_adjust_queue_depth);
940 */ 940 */
941int scsi_track_queue_full(struct scsi_device *sdev, int depth) 941int scsi_track_queue_full(struct scsi_device *sdev, int depth)
942{ 942{
943 if ((jiffies >> 4) == sdev->last_queue_full_time) 943
944 /*
 945 * Don't count QUEUE_FULLs that land in the same
 946 * (jiffies >> 4) window; they could all stem from
 947 * the same event.
948 */
949 if ((jiffies >> 4) == (sdev->last_queue_full_time >> 4))
944 return 0; 950 return 0;
945 951
946 sdev->last_queue_full_time = (jiffies >> 4); 952 sdev->last_queue_full_time = jiffies;
947 if (sdev->last_queue_full_depth != depth) { 953 if (sdev->last_queue_full_depth != depth) {
948 sdev->last_queue_full_count = 1; 954 sdev->last_queue_full_count = 1;
949 sdev->last_queue_full_depth = depth; 955 sdev->last_queue_full_depth = depth;
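
The scsi_track_queue_full() change above switches to storing raw jiffies while still comparing 16-tick buckets, so two QUEUE FULLs inside the same (jiffies >> 4) window count once and the stored timestamp stays usable as a real time value. A toy model of the bucketing, with jiffies faked as a plain counter:

    #include <stdio.h>

    static unsigned long jiffies;  /* stand-in for the kernel tick */
    static unsigned long last_queue_full_time;

    static int track_queue_full(void)
    {
        if ((jiffies >> 4) == (last_queue_full_time >> 4))
            return 0;                    /* same bucket: ignore */
        last_queue_full_time = jiffies;  /* store raw ticks, not shifted */
        return 1;
    }

    int main(void)
    {
        jiffies = 100; printf("%d\n", track_queue_full()); /* 1: new bucket */
        jiffies = 105; printf("%d\n", track_queue_full()); /* 0: 100>>4 == 105>>4 */
        jiffies = 130; printf("%d\n", track_queue_full()); /* 1: bucket changed */
        return 0;
    }
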
@@ -1012,6 +1018,8 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
1012 * scsi_get_vpd_page - Get Vital Product Data from a SCSI device 1018 * scsi_get_vpd_page - Get Vital Product Data from a SCSI device
1013 * @sdev: The device to ask 1019 * @sdev: The device to ask
1014 * @page: Which Vital Product Data to return 1020 * @page: Which Vital Product Data to return
1021 * @buf: where to store the VPD
1022 * @buf_len: number of bytes in the VPD buffer area
1015 * 1023 *
1016 * SCSI devices may optionally supply Vital Product Data. Each 'page' 1024 * SCSI devices may optionally supply Vital Product Data. Each 'page'
1017 * of VPD is defined in the appropriate SCSI document (eg SPC, SBC). 1025 * of VPD is defined in the appropriate SCSI document (eg SPC, SBC).
@@ -1020,55 +1028,39 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
1020 * responsible for calling kfree() on this pointer when it is no longer 1028 * responsible for calling kfree() on this pointer when it is no longer
1021 * needed. If we cannot retrieve the VPD page this routine returns %NULL. 1029 * needed. If we cannot retrieve the VPD page this routine returns %NULL.
1022 */ 1030 */
1023unsigned char *scsi_get_vpd_page(struct scsi_device *sdev, u8 page) 1031int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
1032 int buf_len)
1024{ 1033{
1025 int i, result; 1034 int i, result;
1026 unsigned int len;
1027 const unsigned int init_vpd_len = 255;
1028 unsigned char *buf = kmalloc(init_vpd_len, GFP_KERNEL);
1029
1030 if (!buf)
1031 return NULL;
1032 1035
1033 /* Ask for all the pages supported by this device */ 1036 /* Ask for all the pages supported by this device */
1034 result = scsi_vpd_inquiry(sdev, buf, 0, init_vpd_len); 1037 result = scsi_vpd_inquiry(sdev, buf, 0, buf_len);
1035 if (result) 1038 if (result)
1036 goto fail; 1039 goto fail;
1037 1040
1038 /* If the user actually wanted this page, we can skip the rest */ 1041 /* If the user actually wanted this page, we can skip the rest */
1039 if (page == 0) 1042 if (page == 0)
1040 return buf; 1043 return -EINVAL;
1041 1044
1042 for (i = 0; i < buf[3]; i++) 1045 for (i = 0; i < min((int)buf[3], buf_len - 4); i++)
1043 if (buf[i + 4] == page) 1046 if (buf[i + 4] == page)
1044 goto found; 1047 goto found;
1048
1049 if (i < buf[3] && i > buf_len)
 1050 /* ran off the end of the buffer; give it the benefit of the doubt */
1051 goto found;
1045 /* The device claims it doesn't support the requested page */ 1052 /* The device claims it doesn't support the requested page */
1046 goto fail; 1053 goto fail;
1047 1054
1048 found: 1055 found:
1049 result = scsi_vpd_inquiry(sdev, buf, page, 255); 1056 result = scsi_vpd_inquiry(sdev, buf, page, buf_len);
1050 if (result)
1051 goto fail;
1052
1053 /*
1054 * Some pages are longer than 255 bytes. The actual length of
1055 * the page is returned in the header.
1056 */
1057 len = ((buf[2] << 8) | buf[3]) + 4;
1058 if (len <= init_vpd_len)
1059 return buf;
1060
1061 kfree(buf);
1062 buf = kmalloc(len, GFP_KERNEL);
1063 result = scsi_vpd_inquiry(sdev, buf, page, len);
1064 if (result) 1057 if (result)
1065 goto fail; 1058 goto fail;
1066 1059
1067 return buf; 1060 return 0;
1068 1061
1069 fail: 1062 fail:
1070 kfree(buf); 1063 return -EINVAL;
1071 return NULL;
1072} 1064}
1073EXPORT_SYMBOL_GPL(scsi_get_vpd_page); 1065EXPORT_SYMBOL_GPL(scsi_get_vpd_page);
1074 1066
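
With the reworked API above, allocation moves to the caller and the return value becomes 0 / -EINVAL rather than a kmalloc'd pointer or NULL. Hypothetical usage under the new signature (0x80 is the unit-serial VPD page; the function name is an example, not from the source):

    static int read_unit_serial(struct scsi_device *sdev)
    {
        unsigned char vpd[255];

        /* 0 on success; the page lands in the caller's buffer and there
         * is no kfree() to pair with it any more. */
        if (scsi_get_vpd_page(sdev, 0x80, vpd, sizeof(vpd)))
            return -1;
        return vpd[3];  /* page length byte, just as an example use */
    }
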
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index c4103bef41b5..3a5bfd10b2cb 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -30,6 +30,7 @@
30#include <linux/kernel.h> 30#include <linux/kernel.h>
31#include <linux/errno.h> 31#include <linux/errno.h>
32#include <linux/timer.h> 32#include <linux/timer.h>
33#include <linux/slab.h>
33#include <linux/types.h> 34#include <linux/types.h>
34#include <linux/string.h> 35#include <linux/string.h>
35#include <linux/genhd.h> 36#include <linux/genhd.h>
@@ -44,6 +45,8 @@
44 45
45#include <net/checksum.h> 46#include <net/checksum.h>
46 47
48#include <asm/unaligned.h>
49
47#include <scsi/scsi.h> 50#include <scsi/scsi.h>
48#include <scsi/scsi_cmnd.h> 51#include <scsi/scsi_cmnd.h>
49#include <scsi/scsi_device.h> 52#include <scsi/scsi_device.h>
@@ -105,6 +108,10 @@ static const char * scsi_debug_version_date = "20070104";
105#define DEF_ATO 1 108#define DEF_ATO 1
106#define DEF_PHYSBLK_EXP 0 109#define DEF_PHYSBLK_EXP 0
107#define DEF_LOWEST_ALIGNED 0 110#define DEF_LOWEST_ALIGNED 0
111#define DEF_UNMAP_MAX_BLOCKS 0
112#define DEF_UNMAP_MAX_DESC 0
113#define DEF_UNMAP_GRANULARITY 0
114#define DEF_UNMAP_ALIGNMENT 0
108 115
109/* bit mask values for scsi_debug_opts */ 116/* bit mask values for scsi_debug_opts */
110#define SCSI_DEBUG_OPT_NOISE 1 117#define SCSI_DEBUG_OPT_NOISE 1
@@ -162,6 +169,10 @@ static int scsi_debug_guard = DEF_GUARD;
162static int scsi_debug_ato = DEF_ATO; 169static int scsi_debug_ato = DEF_ATO;
163static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP; 170static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
164static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED; 171static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
172static int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
173static int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
174static int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
175static int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
165 176
166static int scsi_debug_cmnd_count = 0; 177static int scsi_debug_cmnd_count = 0;
167 178
@@ -223,7 +234,9 @@ static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
223 234
224static unsigned char * fake_storep; /* ramdisk storage */ 235static unsigned char * fake_storep; /* ramdisk storage */
225static unsigned char *dif_storep; /* protection info */ 236static unsigned char *dif_storep; /* protection info */
237static void *map_storep; /* provisioning map */
226 238
239static unsigned long map_size;
227static int num_aborts = 0; 240static int num_aborts = 0;
228static int num_dev_resets = 0; 241static int num_dev_resets = 0;
229static int num_bus_resets = 0; 242static int num_bus_resets = 0;
@@ -317,6 +330,7 @@ static void get_data_transfer_info(unsigned char *cmd,
317 (u32)cmd[28] << 24; 330 (u32)cmd[28] << 24;
318 break; 331 break;
319 332
333 case WRITE_SAME_16:
320 case WRITE_16: 334 case WRITE_16:
321 case READ_16: 335 case READ_16:
322 *lba = (u64)cmd[9] | (u64)cmd[8] << 8 | 336 *lba = (u64)cmd[9] | (u64)cmd[8] << 8 |
@@ -335,6 +349,7 @@ static void get_data_transfer_info(unsigned char *cmd,
335 *num = (u32)cmd[9] | (u32)cmd[8] << 8 | (u32)cmd[7] << 16 | 349 *num = (u32)cmd[9] | (u32)cmd[8] << 8 | (u32)cmd[7] << 16 |
336 (u32)cmd[6] << 24; 350 (u32)cmd[6] << 24;
337 break; 351 break;
352 case WRITE_SAME:
338 case WRITE_10: 353 case WRITE_10:
339 case READ_10: 354 case READ_10:
340 case XDWRITEREAD_10: 355 case XDWRITEREAD_10:
@@ -671,10 +686,12 @@ static int inquiry_evpd_89(unsigned char * arr)
671} 686}
672 687
673 688
689/* Block limits VPD page (SBC-3) */
674static unsigned char vpdb0_data[] = { 690static unsigned char vpdb0_data[] = {
675 /* from 4th byte */ 0,0,0,4, 691 /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
676 0,0,0x4,0, 692 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
677 0,0,0,64, 693 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
694 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
678}; 695};
679 696
680static int inquiry_evpd_b0(unsigned char * arr) 697static int inquiry_evpd_b0(unsigned char * arr)
@@ -691,14 +708,40 @@ static int inquiry_evpd_b0(unsigned char * arr)
691 arr[6] = (sdebug_store_sectors >> 8) & 0xff; 708 arr[6] = (sdebug_store_sectors >> 8) & 0xff;
692 arr[7] = sdebug_store_sectors & 0xff; 709 arr[7] = sdebug_store_sectors & 0xff;
693 } 710 }
711
712 if (scsi_debug_unmap_max_desc) {
713 unsigned int blocks;
714
715 if (scsi_debug_unmap_max_blocks)
716 blocks = scsi_debug_unmap_max_blocks;
717 else
718 blocks = 0xffffffff;
719
720 put_unaligned_be32(blocks, &arr[16]);
721 put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
722 }
723
724 if (scsi_debug_unmap_alignment) {
725 put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
726 arr[28] |= 0x80; /* UGAVALID */
727 }
728
729 if (scsi_debug_unmap_granularity) {
730 put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
731 return 0x3c; /* Mandatory page length for thin provisioning */
732 }
733
694 return sizeof(vpdb0_data); 734 return sizeof(vpdb0_data);
695} 735}
696 736
737/* Block device characteristics VPD page (SBC-3) */
697static int inquiry_evpd_b1(unsigned char *arr) 738static int inquiry_evpd_b1(unsigned char *arr)
698{ 739{
699 memset(arr, 0, 0x3c); 740 memset(arr, 0, 0x3c);
700 arr[0] = 0; 741 arr[0] = 0;
701 arr[1] = 1; 742 arr[1] = 1; /* non rotating medium (e.g. solid state) */
743 arr[2] = 0;
744 arr[3] = 5; /* less than 1.8" */
702 745
703 return 0x3c; 746 return 0x3c;
704} 747}
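In inquiry_evpd_b0() above, arr points at the page payload (the vpdb0_data comment says "from 4th byte"), so arr[16] lands at byte 20 of the full page, which SBC-3 defines as MAXIMUM UNMAP LBA COUNT. A decode sketch from the initiator's side, assuming a buffer holding the whole Block Limits page including its 4-byte header:

#include <asm/unaligned.h>

/* Offsets are full-page offsets per SBC-3 Block Limits (VPD page 0xb0). */
static void read_unmap_limits(const unsigned char *page_b0,
			      u32 *max_unmap_lbas, u32 *max_unmap_descs,
			      u32 *granularity)
{
	*max_unmap_lbas  = get_unaligned_be32(&page_b0[20]);
	*max_unmap_descs = get_unaligned_be32(&page_b0[24]);
	*granularity     = get_unaligned_be32(&page_b0[28]);
}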
@@ -914,7 +957,8 @@ static int resp_start_stop(struct scsi_cmnd * scp,
914static sector_t get_sdebug_capacity(void) 957static sector_t get_sdebug_capacity(void)
915{ 958{
916 if (scsi_debug_virtual_gb > 0) 959 if (scsi_debug_virtual_gb > 0)
917 return 2048 * 1024 * (sector_t)scsi_debug_virtual_gb; 960 return (sector_t)scsi_debug_virtual_gb *
961 (1073741824 / scsi_debug_sector_size);
918 else 962 else
919 return sdebug_store_sectors; 963 return sdebug_store_sectors;
920} 964}
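The old expression hard-coded 512-byte sectors: 2048 * 1024 sectors is exactly 2^30 / 512, one gigabyte's worth. The replacement derives the count from the configured sector size; as arithmetic:

/* Sectors per simulated gigabyte for any logical block size. */
static sector_t sdebug_sectors_per_gb(unsigned int sector_size)
{
	return 1073741824U / sector_size;  /* 2097152 @ 512 B, 262144 @ 4 KiB */
}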
@@ -974,6 +1018,10 @@ static int resp_readcap16(struct scsi_cmnd * scp,
974 arr[11] = scsi_debug_sector_size & 0xff; 1018 arr[11] = scsi_debug_sector_size & 0xff;
975 arr[13] = scsi_debug_physblk_exp & 0xf; 1019 arr[13] = scsi_debug_physblk_exp & 0xf;
976 arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f; 1020 arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
1021
1022 if (scsi_debug_unmap_granularity)
1023 arr[14] |= 0x80; /* TPE */
1024
977 arr[15] = scsi_debug_lowest_aligned & 0xff; 1025 arr[15] = scsi_debug_lowest_aligned & 0xff;
978 1026
979 if (scsi_debug_dif) { 1027 if (scsi_debug_dif) {
@@ -1887,6 +1935,70 @@ out:
1887 return ret; 1935 return ret;
1888} 1936}
1889 1937
1938static unsigned int map_state(sector_t lba, unsigned int *num)
1939{
1940 unsigned int granularity, alignment, mapped;
1941 sector_t block, next, end;
1942
1943 granularity = scsi_debug_unmap_granularity;
1944 alignment = granularity - scsi_debug_unmap_alignment;
1945 block = lba + alignment;
1946 do_div(block, granularity);
1947
1948 mapped = test_bit(block, map_storep);
1949
1950 if (mapped)
1951 next = find_next_zero_bit(map_storep, map_size, block);
1952 else
1953 next = find_next_bit(map_storep, map_size, block);
1954
1955 end = next * granularity - scsi_debug_unmap_alignment;
1956 *num = end - lba;
1957
1958 return mapped;
1959}
1960
1961static void map_region(sector_t lba, unsigned int len)
1962{
1963 unsigned int granularity, alignment;
1964 sector_t end = lba + len;
1965
1966 granularity = scsi_debug_unmap_granularity;
1967 alignment = granularity - scsi_debug_unmap_alignment;
1968
1969 while (lba < end) {
1970 sector_t block, rem;
1971
1972 block = lba + alignment;
1973 rem = do_div(block, granularity);
1974
1975 set_bit(block, map_storep);
1976
1977 lba += granularity - rem;
1978 }
1979}
1980
1981static void unmap_region(sector_t lba, unsigned int len)
1982{
1983 unsigned int granularity, alignment;
1984 sector_t end = lba + len;
1985
1986 granularity = scsi_debug_unmap_granularity;
1987 alignment = granularity - scsi_debug_unmap_alignment;
1988
1989 while (lba < end) {
1990 sector_t block, rem;
1991
1992 block = lba + alignment;
1993 rem = do_div(block, granularity);
1994
1995 if (rem == 0 && lba + granularity <= end)
1996 clear_bit(block, map_storep);
1997
1998 lba += granularity - rem;
1999 }
2000}
2001
1890static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba, 2002static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
1891 unsigned int num, struct sdebug_dev_info *devip, 2003 unsigned int num, struct sdebug_dev_info *devip,
1892 u32 ei_lba) 2004 u32 ei_lba)
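The three helpers above track provisioning state with one bit per unmap_granularity-sized chunk, on a grid shifted so chunk boundaries fall at unmap_alignment + n * granularity. do_div() is used instead of plain division because sector_t may be 64 bits on a 32-bit kernel; it divides its first argument in place and returns the remainder. The index arithmetic they share, restated as a sketch:

static unsigned long lba_to_map_index(sector_t lba)
{
	sector_t block = lba + (scsi_debug_unmap_granularity -
				scsi_debug_unmap_alignment);

	do_div(block, scsi_debug_unmap_granularity);	/* block /= granularity */
	return (unsigned long)block;
}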
@@ -1910,6 +2022,8 @@ static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
1910 2022
1911 write_lock_irqsave(&atomic_rw, iflags); 2023 write_lock_irqsave(&atomic_rw, iflags);
1912 ret = do_device_access(SCpnt, devip, lba, num, 1); 2024 ret = do_device_access(SCpnt, devip, lba, num, 1);
2025 if (scsi_debug_unmap_granularity)
2026 map_region(lba, num);
1913 write_unlock_irqrestore(&atomic_rw, iflags); 2027 write_unlock_irqrestore(&atomic_rw, iflags);
1914 if (-1 == ret) 2028 if (-1 == ret)
1915 return (DID_ERROR << 16); 2029 return (DID_ERROR << 16);
@@ -1917,9 +2031,143 @@ static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
1917 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) 2031 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
1918 printk(KERN_INFO "scsi_debug: write: cdb indicated=%u, " 2032 printk(KERN_INFO "scsi_debug: write: cdb indicated=%u, "
1919 " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret); 2033 " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
2034
2035 return 0;
2036}
2037
2038static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
2039 unsigned int num, struct sdebug_dev_info *devip,
2040 u32 ei_lba, unsigned int unmap)
2041{
2042 unsigned long iflags;
2043 unsigned long long i;
2044 int ret;
2045
2046 ret = check_device_access_params(devip, lba, num);
2047 if (ret)
2048 return ret;
2049
2050 write_lock_irqsave(&atomic_rw, iflags);
2051
2052 if (unmap && scsi_debug_unmap_granularity) {
2053 unmap_region(lba, num);
2054 goto out;
2055 }
2056
2057 /* Else fetch one logical block */
2058 ret = fetch_to_dev_buffer(scmd,
2059 fake_storep + (lba * scsi_debug_sector_size),
2060 scsi_debug_sector_size);
2061
2062 if (-1 == ret) {
2063 write_unlock_irqrestore(&atomic_rw, iflags);
2064 return (DID_ERROR << 16);
2065 } else if ((ret < (num * scsi_debug_sector_size)) &&
2066 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2067 printk(KERN_INFO "scsi_debug: write same: cdb indicated=%u, "
2068 " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
2069
2070 /* Copy first sector to remaining blocks */
2071 for (i = 1 ; i < num ; i++)
2072 memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size),
2073 fake_storep + (lba * scsi_debug_sector_size),
2074 scsi_debug_sector_size);
2075
2076 if (scsi_debug_unmap_granularity)
2077 map_region(lba, num);
2078out:
2079 write_unlock_irqrestore(&atomic_rw, iflags);
2080
1920 return 0; 2081 return 0;
1921} 2082}
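resp_write_same() above implements both flavors of the command: with the UNMAP bit set (and thin provisioning configured) it deallocates the range, otherwise it fetches the single data-out block and replicates it across the extent. The bit lives in CDB byte 1, bit 3, matching the queuecommand dispatch further down:

/* CDB decode sketch for WRITE SAME(16), SBC-3 layout. */
static bool ws16_unmap_requested(const unsigned char *cdb)
{
	return cdb[0] == WRITE_SAME_16 && (cdb[1] & 0x08);
}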
1922 2083
2084struct unmap_block_desc {
2085 __be64 lba;
2086 __be32 blocks;
2087 __be32 __reserved;
2088};
2089
2090static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)
2091{
2092 unsigned char *buf;
2093 struct unmap_block_desc *desc;
2094 unsigned int i, payload_len, descriptors;
2095 int ret;
2096
2097 ret = check_readiness(scmd, 1, devip);
2098 if (ret)
2099 return ret;
2100
2101 payload_len = get_unaligned_be16(&scmd->cmnd[7]);
2102 BUG_ON(scsi_bufflen(scmd) != payload_len);
2103
2104 descriptors = (payload_len - 8) / 16;
2105
2106 buf = kmalloc(scsi_bufflen(scmd), GFP_ATOMIC);
2107 if (!buf)
2108 return check_condition_result;
2109
2110 scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));
2111
2112 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
2113 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
2114
2115 desc = (void *)&buf[8];
2116
2117 for (i = 0 ; i < descriptors ; i++) {
2118 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
2119 unsigned int num = get_unaligned_be32(&desc[i].blocks);
2120
2121 ret = check_device_access_params(devip, lba, num);
2122 if (ret)
2123 goto out;
2124
2125 unmap_region(lba, num);
2126 }
2127
2128 ret = 0;
2129
2130out:
2131 kfree(buf);
2132
2133 return ret;
2134}
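resp_unmap() expects the SPC-4 UNMAP parameter list: an 8-byte header whose first two big-endian fields give the remaining data length and the descriptor-data length, followed by 16-byte descriptors (8-byte LBA, 4-byte block count, 4 reserved bytes). A worked one-descriptor payload, consistent with the two BUG_ON() checks above:

/* 24 bytes total: the header says 22 bytes follow, 16 of them descriptors. */
static const unsigned char example_unmap_payload[24] = {
	0x00, 0x16,			/* UNMAP DATA LENGTH = 22 */
	0x00, 0x10,			/* BLOCK DESCRIPTOR DATA LENGTH = 16 */
	0, 0, 0, 0,			/* reserved */
	0, 0, 0, 0, 0, 0, 0x10, 0x00,	/* LBA = 0x1000 */
	0x00, 0x00, 0x01, 0x00,		/* 256 blocks */
	0, 0, 0, 0			/* reserved */
};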
2135
2136#define SDEBUG_GET_LBA_STATUS_LEN 32
2137
2138static int resp_get_lba_status(struct scsi_cmnd * scmd,
2139 struct sdebug_dev_info * devip)
2140{
2141 unsigned long long lba;
2142 unsigned int alloc_len, mapped, num;
2143 unsigned char arr[SDEBUG_GET_LBA_STATUS_LEN];
2144 int ret;
2145
2146 ret = check_readiness(scmd, 1, devip);
2147 if (ret)
2148 return ret;
2149
2150 lba = get_unaligned_be64(&scmd->cmnd[2]);
2151 alloc_len = get_unaligned_be32(&scmd->cmnd[10]);
2152
2153 if (alloc_len < 24)
2154 return 0;
2155
2156 ret = check_device_access_params(devip, lba, 1);
2157 if (ret)
2158 return ret;
2159
2160 mapped = map_state(lba, &num);
2161
2162 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
2163 put_unaligned_be32(16, &arr[0]); /* Parameter Data Length */
2164 put_unaligned_be64(lba, &arr[8]); /* LBA */
2165 put_unaligned_be32(num, &arr[16]); /* Number of blocks */
2166 arr[20] = !mapped; /* mapped = 0, unmapped = 1 */
2167
2168 return fill_from_dev_buffer(scmd, arr, SDEBUG_GET_LBA_STATUS_LEN);
2169}
2170
1923#define SDEBUG_RLUN_ARR_SZ 256 2171#define SDEBUG_RLUN_ARR_SZ 256
1924 2172
1925static int resp_report_luns(struct scsi_cmnd * scp, 2173static int resp_report_luns(struct scsi_cmnd * scp,
@@ -2430,6 +2678,10 @@ module_param_named(guard, scsi_debug_guard, int, S_IRUGO);
2430module_param_named(ato, scsi_debug_ato, int, S_IRUGO); 2678module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
2431module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO); 2679module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
2432module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO); 2680module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
2681module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
2682module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
2683module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
2684module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
2433 2685
2434MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert"); 2686MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
2435MODULE_DESCRIPTION("SCSI debug adapter driver"); 2687MODULE_DESCRIPTION("SCSI debug adapter driver");
@@ -2458,6 +2710,10 @@ MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
2458MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)"); 2710MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
2459MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)"); 2711MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
2460MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)"); 2712MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
2713MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0)");
2714MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=0)");
2715MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=0)");
2716MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
2461 2717
2462static char sdebug_info[256]; 2718static char sdebug_info[256];
2463 2719
@@ -2816,6 +3072,23 @@ static ssize_t sdebug_ato_show(struct device_driver *ddp, char *buf)
2816} 3072}
2817DRIVER_ATTR(ato, S_IRUGO, sdebug_ato_show, NULL); 3073DRIVER_ATTR(ato, S_IRUGO, sdebug_ato_show, NULL);
2818 3074
3075static ssize_t sdebug_map_show(struct device_driver *ddp, char *buf)
3076{
3077 ssize_t count;
3078
3079 if (scsi_debug_unmap_granularity == 0)
3080 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
3081 sdebug_store_sectors);
3082
3083 count = bitmap_scnlistprintf(buf, PAGE_SIZE, map_storep, map_size);
3084
3085 buf[count++] = '\n';
3086 buf[count++] = 0;
3087
3088 return count;
3089}
3090DRIVER_ATTR(map, S_IRUGO, sdebug_map_show, NULL);
3091
2819 3092
2820/* Note: The following function creates attribute files in the 3093/* Note: The following function creates attribute files in the
2821 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these 3094 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
@@ -2847,11 +3120,13 @@ static int do_create_driverfs_files(void)
2847 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dif); 3120 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dif);
2848 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_guard); 3121 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_guard);
2849 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ato); 3122 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ato);
3123 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_map);
2850 return ret; 3124 return ret;
2851} 3125}
2852 3126
2853static void do_remove_driverfs_files(void) 3127static void do_remove_driverfs_files(void)
2854{ 3128{
3129 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_map);
2855 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ato); 3130 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ato);
2856 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_guard); 3131 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_guard);
2857 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dif); 3132 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dif);
@@ -2989,6 +3264,36 @@ static int __init scsi_debug_init(void)
2989 memset(dif_storep, 0xff, dif_size); 3264 memset(dif_storep, 0xff, dif_size);
2990 } 3265 }
2991 3266
3267 if (scsi_debug_unmap_granularity) {
3268 unsigned int map_bytes;
3269
3270 if (scsi_debug_unmap_granularity < scsi_debug_unmap_alignment) {
3271 printk(KERN_ERR
3272 "%s: ERR: unmap_granularity < unmap_alignment\n",
3273 __func__);
3274 return -EINVAL;
3275 }
3276
3277 map_size = (sdebug_store_sectors / scsi_debug_unmap_granularity);
3278 map_bytes = map_size >> 3;
3279 map_storep = vmalloc(map_bytes);
3280
3281 printk(KERN_INFO "scsi_debug_init: %lu provisioning blocks\n",
3282 map_size);
3283
3284 if (map_storep == NULL) {
3285 printk(KERN_ERR "scsi_debug_init: out of mem. (MAP)\n");
3286 ret = -ENOMEM;
3287 goto free_vm;
3288 }
3289
3290 memset(map_storep, 0x0, map_bytes);
3291
3292 /* Map first 1KB for partition table */
3293 if (scsi_debug_num_parts)
3294 map_region(0, 2);
3295 }
3296
2992 ret = device_register(&pseudo_primary); 3297 ret = device_register(&pseudo_primary);
2993 if (ret < 0) { 3298 if (ret < 0) {
2994 printk(KERN_WARNING "scsi_debug: device_register error: %d\n", 3299 printk(KERN_WARNING "scsi_debug: device_register error: %d\n",
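A sizing example for the bitmap allocated above, assuming the default 512-byte sectors and a 1 GiB virtual store:

/* 2097152 sectors / unmap_granularity 8 = 262144 bits (map_size);
 * 262144 >> 3 = 32768 bytes of vmalloc'd bitmap (map_bytes).
 * map_region(0, 2) pre-maps sectors 0-1, i.e. the 1 KiB holding the
 * partition table, so it reads back as allocated. */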
@@ -3041,6 +3346,8 @@ bus_unreg:
3041dev_unreg: 3346dev_unreg:
3042 device_unregister(&pseudo_primary); 3347 device_unregister(&pseudo_primary);
3043free_vm: 3348free_vm:
3349 if (map_storep)
3350 vfree(map_storep);
3044 if (dif_storep) 3351 if (dif_storep)
3045 vfree(dif_storep); 3352 vfree(dif_storep);
3046 vfree(fake_storep); 3353 vfree(fake_storep);
@@ -3167,6 +3474,7 @@ int scsi_debug_queuecommand(struct scsi_cmnd *SCpnt, done_funct_t done)
3167 int inj_dif = 0; 3474 int inj_dif = 0;
3168 int inj_dix = 0; 3475 int inj_dix = 0;
3169 int delay_override = 0; 3476 int delay_override = 0;
3477 int unmap = 0;
3170 3478
3171 scsi_set_resid(SCpnt, 0); 3479 scsi_set_resid(SCpnt, 0);
3172 if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmd) { 3480 if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmd) {
@@ -3272,13 +3580,21 @@ int scsi_debug_queuecommand(struct scsi_cmnd *SCpnt, done_funct_t done)
3272 errsts = resp_readcap(SCpnt, devip); 3580 errsts = resp_readcap(SCpnt, devip);
3273 break; 3581 break;
3274 case SERVICE_ACTION_IN: 3582 case SERVICE_ACTION_IN:
3275 if (SAI_READ_CAPACITY_16 != cmd[1]) { 3583 if (cmd[1] == SAI_READ_CAPACITY_16)
3584 errsts = resp_readcap16(SCpnt, devip);
3585 else if (cmd[1] == SAI_GET_LBA_STATUS) {
3586
3587 if (scsi_debug_unmap_max_desc == 0) {
3588 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3589 INVALID_COMMAND_OPCODE, 0);
3590 errsts = check_condition_result;
3591 } else
3592 errsts = resp_get_lba_status(SCpnt, devip);
3593 } else {
3276 mk_sense_buffer(devip, ILLEGAL_REQUEST, 3594 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3277 INVALID_OPCODE, 0); 3595 INVALID_OPCODE, 0);
3278 errsts = check_condition_result; 3596 errsts = check_condition_result;
3279 break;
3280 } 3597 }
3281 errsts = resp_readcap16(SCpnt, devip);
3282 break; 3598 break;
3283 case MAINTENANCE_IN: 3599 case MAINTENANCE_IN:
3284 if (MI_REPORT_TARGET_PGS != cmd[1]) { 3600 if (MI_REPORT_TARGET_PGS != cmd[1]) {
@@ -3378,6 +3694,29 @@ write:
3378 errsts = illegal_condition_result; 3694 errsts = illegal_condition_result;
3379 } 3695 }
3380 break; 3696 break;
3697 case WRITE_SAME_16:
3698 if (cmd[1] & 0x8)
3699 unmap = 1;
3700 /* fall through */
3701 case WRITE_SAME:
3702 errsts = check_readiness(SCpnt, 0, devip);
3703 if (errsts)
3704 break;
3705 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3706 errsts = resp_write_same(SCpnt, lba, num, devip, ei_lba, unmap);
3707 break;
3708 case UNMAP:
3709 errsts = check_readiness(SCpnt, 0, devip);
3710 if (errsts)
3711 break;
3712
3713 if (scsi_debug_unmap_max_desc == 0) {
3714 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3715 INVALID_COMMAND_OPCODE, 0);
3716 errsts = check_condition_result;
3717 } else
3718 errsts = resp_unmap(SCpnt, devip);
3719 break;
3381 case MODE_SENSE: 3720 case MODE_SENSE:
3382 case MODE_SENSE_10: 3721 case MODE_SENSE_10:
3383 errsts = resp_mode_sense(SCpnt, target, devip); 3722 errsts = resp_mode_sense(SCpnt, target, devip);
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 93c2622cb969..43fad4c09beb 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -6,6 +6,7 @@
6#include <linux/moduleparam.h> 6#include <linux/moduleparam.h>
7#include <linux/proc_fs.h> 7#include <linux/proc_fs.h>
8#include <linux/seq_file.h> 8#include <linux/seq_file.h>
9#include <linux/slab.h>
9 10
10#include <scsi/scsi_device.h> 11#include <scsi/scsi_device.h>
11#include <scsi/scsi_devinfo.h> 12#include <scsi/scsi_devinfo.h>
@@ -168,11 +169,10 @@ static struct {
168 {"Generic", "USB SD Reader", "1.00", BLIST_FORCELUN | BLIST_INQUIRY_36}, 169 {"Generic", "USB SD Reader", "1.00", BLIST_FORCELUN | BLIST_INQUIRY_36},
169 {"Generic", "USB Storage-SMC", "0180", BLIST_FORCELUN | BLIST_INQUIRY_36}, 170 {"Generic", "USB Storage-SMC", "0180", BLIST_FORCELUN | BLIST_INQUIRY_36},
170 {"Generic", "USB Storage-SMC", "0207", BLIST_FORCELUN | BLIST_INQUIRY_36}, 171 {"Generic", "USB Storage-SMC", "0207", BLIST_FORCELUN | BLIST_INQUIRY_36},
171 {"HITACHI", "DF400", "*", BLIST_SPARSELUN}, 172 {"HITACHI", "DF400", "*", BLIST_REPORTLUN2},
172 {"HITACHI", "DF500", "*", BLIST_SPARSELUN}, 173 {"HITACHI", "DF500", "*", BLIST_REPORTLUN2},
173 {"HITACHI", "DF600", "*", BLIST_SPARSELUN}, 174 {"HITACHI", "DISK-SUBSYSTEM", "*", BLIST_REPORTLUN2},
174 {"HITACHI", "DISK-SUBSYSTEM", "*", BLIST_ATTACH_PQ3 | BLIST_SPARSELUN | BLIST_LARGELUN}, 175 {"HITACHI", "OPEN-", "*", BLIST_REPORTLUN2},
175 {"HITACHI", "OPEN-E", "*", BLIST_ATTACH_PQ3 | BLIST_SPARSELUN | BLIST_LARGELUN},
176 {"HITACHI", "OP-C-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, 176 {"HITACHI", "OP-C-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
177 {"HITACHI", "3380-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, 177 {"HITACHI", "3380-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
178 {"HITACHI", "3390-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, 178 {"HITACHI", "3390-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
@@ -454,7 +454,7 @@ int scsi_get_device_flags(struct scsi_device *sdev,
454 454
455 455
456/** 456/**
457 * get_device_flags_keyed - get device specific flags from the dynamic device list. 457 * scsi_get_device_flags_keyed - get device specific flags from the dynamic device list
458 * @sdev: &scsi_device to get flags for 458 * @sdev: &scsi_device to get flags for
459 * @vendor: vendor name 459 * @vendor: vendor name
460 * @model: model name 460 * @model: model name
@@ -685,7 +685,7 @@ MODULE_PARM_DESC(default_dev_flags,
685 "scsi default device flag integer value"); 685 "scsi default device flag integer value");
686 686
687/** 687/**
688 * scsi_dev_info_list_delete - called from scsi.c:exit_scsi to remove the scsi_dev_info_list. 688 * scsi_exit_devinfo - remove /proc/scsi/device_info & the scsi_dev_info_list
689 **/ 689 **/
690void scsi_exit_devinfo(void) 690void scsi_exit_devinfo(void)
691{ 691{
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 1b0060b791e8..7ad53fa42766 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -16,6 +16,7 @@
16 16
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/sched.h> 18#include <linux/sched.h>
19#include <linux/gfp.h>
19#include <linux/timer.h> 20#include <linux/timer.h>
20#include <linux/string.h> 21#include <linux/string.h>
21#include <linux/kernel.h> 22#include <linux/kernel.h>
@@ -301,7 +302,20 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
301 if (scmd->device->allow_restart && 302 if (scmd->device->allow_restart &&
302 (sshdr.asc == 0x04) && (sshdr.ascq == 0x02)) 303 (sshdr.asc == 0x04) && (sshdr.ascq == 0x02))
303 return FAILED; 304 return FAILED;
304 return SUCCESS; 305
306 if (blk_barrier_rq(scmd->request))
307 /*
308 * barrier requests should always retry on UA
309 * otherwise block will get a spurious error
310 */
311 return NEEDS_RETRY;
312 else
313 /*
314 * for normal (non barrier) commands, pass the
315 * UA upwards for a determination in the
316 * completion functions
317 */
318 return SUCCESS;
305 319
306 /* these three are not supported */ 320 /* these three are not supported */
307 case COPY_ABORTED: 321 case COPY_ABORTED:
@@ -331,6 +345,64 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
331 } 345 }
332} 346}
333 347
348static void scsi_handle_queue_ramp_up(struct scsi_device *sdev)
349{
350 struct scsi_host_template *sht = sdev->host->hostt;
351 struct scsi_device *tmp_sdev;
352
353 if (!sht->change_queue_depth ||
354 sdev->queue_depth >= sdev->max_queue_depth)
355 return;
356
357 if (time_before(jiffies,
358 sdev->last_queue_ramp_up + sdev->queue_ramp_up_period))
359 return;
360
361 if (time_before(jiffies,
362 sdev->last_queue_full_time + sdev->queue_ramp_up_period))
363 return;
364
365 /*
366 * Walk all devices of a target and do
367 * ramp up on them.
368 */
369 shost_for_each_device(tmp_sdev, sdev->host) {
370 if (tmp_sdev->channel != sdev->channel ||
371 tmp_sdev->id != sdev->id ||
372 tmp_sdev->queue_depth == sdev->max_queue_depth)
373 continue;
374 /*
375 * call back into LLD to increase queue_depth by one
376 * with ramp up reason code.
377 */
378 sht->change_queue_depth(tmp_sdev, tmp_sdev->queue_depth + 1,
379 SCSI_QDEPTH_RAMP_UP);
380 sdev->last_queue_ramp_up = jiffies;
381 }
382}
383
384static void scsi_handle_queue_full(struct scsi_device *sdev)
385{
386 struct scsi_host_template *sht = sdev->host->hostt;
387 struct scsi_device *tmp_sdev;
388
389 if (!sht->change_queue_depth)
390 return;
391
392 shost_for_each_device(tmp_sdev, sdev->host) {
393 if (tmp_sdev->channel != sdev->channel ||
394 tmp_sdev->id != sdev->id)
395 continue;
396 /*
397 * We do not know the number of commands that were at
398 * the device when we got the queue full so we start
399 * from the highest possible value and work our way down.
400 */
401 sht->change_queue_depth(tmp_sdev, tmp_sdev->queue_depth - 1,
402 SCSI_QDEPTH_QFULL);
403 }
404}
405
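Both helpers above drive the host template's change_queue_depth() callback, which this series extends with a reason argument: a QUEUE FULL walks every device on the target down one slot, while a sustained run of GOOD completions ramps them back up after queue_ramp_up_period. A hypothetical LLD callback under the three-argument API; the body is a sketch, not taken from the patch:

static int example_change_queue_depth(struct scsi_device *sdev,
				      int qdepth, int reason)
{
	switch (reason) {
	case SCSI_QDEPTH_DEFAULT:	/* explicit sysfs write */
	case SCSI_QDEPTH_QFULL:		/* target returned QUEUE FULL */
	case SCSI_QDEPTH_RAMP_UP:	/* midlayer probing back up */
		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
		return sdev->queue_depth;
	default:
		return -EOPNOTSUPP;
	}
}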
334/** 406/**
335 * scsi_eh_completed_normally - Disposition a eh cmd on return from LLD. 407 * scsi_eh_completed_normally - Disposition a eh cmd on return from LLD.
336 * @scmd: SCSI cmd to examine. 408 * @scmd: SCSI cmd to examine.
@@ -371,6 +443,7 @@ static int scsi_eh_completed_normally(struct scsi_cmnd *scmd)
371 */ 443 */
372 switch (status_byte(scmd->result)) { 444 switch (status_byte(scmd->result)) {
373 case GOOD: 445 case GOOD:
446 scsi_handle_queue_ramp_up(scmd->device);
374 case COMMAND_TERMINATED: 447 case COMMAND_TERMINATED:
375 return SUCCESS; 448 return SUCCESS;
376 case CHECK_CONDITION: 449 case CHECK_CONDITION:
@@ -387,8 +460,10 @@ static int scsi_eh_completed_normally(struct scsi_cmnd *scmd)
387 * let issuer deal with this, it could be just fine 460 * let issuer deal with this, it could be just fine
388 */ 461 */
389 return SUCCESS; 462 return SUCCESS;
390 case BUSY:
391 case QUEUE_FULL: 463 case QUEUE_FULL:
464 scsi_handle_queue_full(scmd->device);
465 /* fall through */
466 case BUSY:
392 default: 467 default:
393 return FAILED; 468 return FAILED;
394 } 469 }
@@ -1387,6 +1462,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
1387 */ 1462 */
1388 switch (status_byte(scmd->result)) { 1463 switch (status_byte(scmd->result)) {
1389 case QUEUE_FULL: 1464 case QUEUE_FULL:
1465 scsi_handle_queue_full(scmd->device);
1390 /* 1466 /*
1391 * the case of trying to send too many commands to a 1467 * the case of trying to send too many commands to a
1392 * tagged queueing device. 1468 * tagged queueing device.
@@ -1400,6 +1476,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
1400 */ 1476 */
1401 return ADD_TO_MLQUEUE; 1477 return ADD_TO_MLQUEUE;
1402 case GOOD: 1478 case GOOD:
1479 scsi_handle_queue_ramp_up(scmd->device);
1403 case COMMAND_TERMINATED: 1480 case COMMAND_TERMINATED:
1404 return SUCCESS; 1481 return SUCCESS;
1405 case TASK_ABORTED: 1482 case TASK_ABORTED:
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index b98f763931c5..d9564fb04f62 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -308,6 +308,9 @@ int scsi_nonblockable_ioctl(struct scsi_device *sdev, int cmd,
308 case SG_SCSI_RESET_DEVICE: 308 case SG_SCSI_RESET_DEVICE:
309 val = SCSI_TRY_RESET_DEVICE; 309 val = SCSI_TRY_RESET_DEVICE;
310 break; 310 break;
311 case SG_SCSI_RESET_TARGET:
312 val = SCSI_TRY_RESET_TARGET;
313 break;
311 case SG_SCSI_RESET_BUS: 314 case SG_SCSI_RESET_BUS:
312 val = SCSI_TRY_RESET_BUS; 315 val = SCSI_TRY_RESET_BUS;
313 break; 316 break;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 5987da857103..1646fe7cbd4b 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -749,9 +749,9 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
749 */ 749 */
750 req->next_rq->resid_len = scsi_in(cmd)->resid; 750 req->next_rq->resid_len = scsi_in(cmd)->resid;
751 751
752 scsi_release_buffers(cmd);
752 blk_end_request_all(req, 0); 753 blk_end_request_all(req, 0);
753 754
754 scsi_release_buffers(cmd);
755 scsi_next_command(cmd); 755 scsi_next_command(cmd);
756 return; 756 return;
757 } 757 }
@@ -773,8 +773,14 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
773 * we already took a copy of the original into rq->errors which 773 * we already took a copy of the original into rq->errors which
774 * is what gets returned to the user 774 * is what gets returned to the user
775 */ 775 */
776 if (sense_valid && sshdr.sense_key == RECOVERED_ERROR) { 776 if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
777 if (!(req->cmd_flags & REQ_QUIET)) 777 /* if ATA PASS-THROUGH INFORMATION AVAILABLE skip
778 * print since caller wants ATA registers. Only occurs on
779 * SCSI ATA PASS_THROUGH commands when CK_COND=1
780 */
781 if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
782 ;
783 else if (!(req->cmd_flags & REQ_QUIET))
778 scsi_print_sense("", cmd); 784 scsi_print_sense("", cmd);
779 result = 0; 785 result = 0;
780 /* BLOCK_PC may have set error */ 786 /* BLOCK_PC may have set error */
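The new test above special-cases additional sense 0x00/0x1d, "ATA PASS-THROUGH INFORMATION AVAILABLE": when a SAT pass-through command sets CK_COND=1, the recovered-error sense exists only to hand back the ATA registers in a descriptor, so printing it would be noise rather than an error report. The same check as a predicate:

static bool sense_is_ata_passthru_regs(const struct scsi_sense_hdr *sshdr)
{
	return sshdr->sense_key == RECOVERED_ERROR &&
	       sshdr->asc == 0x00 && sshdr->ascq == 0x1d;
}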
@@ -859,6 +865,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
859 case 0x07: /* operation in progress */ 865 case 0x07: /* operation in progress */
860 case 0x08: /* Long write in progress */ 866 case 0x08: /* Long write in progress */
861 case 0x09: /* self test in progress */ 867 case 0x09: /* self test in progress */
868 case 0x14: /* space allocation in progress */
862 action = ACTION_DELAYED_RETRY; 869 action = ACTION_DELAYED_RETRY;
863 break; 870 break;
864 default: 871 default:
@@ -898,7 +905,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
898 scsi_print_sense("", cmd); 905 scsi_print_sense("", cmd);
899 scsi_print_command(cmd); 906 scsi_print_command(cmd);
900 } 907 }
901 if (blk_end_request_err(req, -EIO)) 908 if (blk_end_request_err(req, error))
902 scsi_requeue_command(q, cmd); 909 scsi_requeue_command(q, cmd);
903 else 910 else
904 scsi_next_command(cmd); 911 scsi_next_command(cmd);
@@ -1359,9 +1366,9 @@ static int scsi_lld_busy(struct request_queue *q)
1359static void scsi_kill_request(struct request *req, struct request_queue *q) 1366static void scsi_kill_request(struct request *req, struct request_queue *q)
1360{ 1367{
1361 struct scsi_cmnd *cmd = req->special; 1368 struct scsi_cmnd *cmd = req->special;
1362 struct scsi_device *sdev = cmd->device; 1369 struct scsi_device *sdev;
1363 struct scsi_target *starget = scsi_target(sdev); 1370 struct scsi_target *starget;
1364 struct Scsi_Host *shost = sdev->host; 1371 struct Scsi_Host *shost;
1365 1372
1366 blk_start_request(req); 1373 blk_start_request(req);
1367 1374
@@ -1371,6 +1378,9 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
1371 BUG(); 1378 BUG();
1372 } 1379 }
1373 1380
1381 sdev = cmd->device;
1382 starget = scsi_target(sdev);
1383 shost = sdev->host;
1374 scsi_init_cmd_errh(cmd); 1384 scsi_init_cmd_errh(cmd);
1375 cmd->result = DID_NO_CONNECT << 16; 1385 cmd->result = DID_NO_CONNECT << 16;
1376 atomic_inc(&cmd->device->iorequest_cnt); 1386 atomic_inc(&cmd->device->iorequest_cnt);
@@ -1620,10 +1630,10 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
1620 /* 1630 /*
1621 * this limit is imposed by hardware restrictions 1631 * this limit is imposed by hardware restrictions
1622 */ 1632 */
1623 blk_queue_max_hw_segments(q, shost->sg_tablesize); 1633 blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
1624 blk_queue_max_phys_segments(q, SCSI_MAX_SG_CHAIN_SEGMENTS); 1634 SCSI_MAX_SG_CHAIN_SEGMENTS));
1625 1635
1626 blk_queue_max_sectors(q, shost->max_sectors); 1636 blk_queue_max_hw_sectors(q, shost->max_sectors);
1627 blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost)); 1637 blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
1628 blk_queue_segment_boundary(q, shost->dma_boundary); 1638 blk_queue_segment_boundary(q, shost->dma_boundary);
1629 dma_set_seg_boundary(dev, shost->dma_boundary); 1639 dma_set_seg_boundary(dev, shost->dma_boundary);
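This hunk tracks the 2.6.34 block-layer API renames rather than changing behavior:

/* old call                          -> new call
 * blk_queue_max_hw_segments(q, n)   -> blk_queue_max_segments(q, n)
 * blk_queue_max_phys_segments(q, n) -> merged into that single segment
 *                                      limit, hence the min_t() of the
 *                                      two former values
 * blk_queue_max_sectors(q, n)       -> blk_queue_max_hw_sectors(q, n)
 */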
diff --git a/drivers/scsi/scsi_lib_dma.c b/drivers/scsi/scsi_lib_dma.c
index ac6855cd2657..dcd128583b89 100644
--- a/drivers/scsi/scsi_lib_dma.c
+++ b/drivers/scsi/scsi_lib_dma.c
@@ -23,7 +23,7 @@ int scsi_dma_map(struct scsi_cmnd *cmd)
23 int nseg = 0; 23 int nseg = 0;
24 24
25 if (scsi_sg_count(cmd)) { 25 if (scsi_sg_count(cmd)) {
26 struct device *dev = cmd->device->host->shost_gendev.parent; 26 struct device *dev = cmd->device->host->dma_dev;
27 27
28 nseg = dma_map_sg(dev, scsi_sglist(cmd), scsi_sg_count(cmd), 28 nseg = dma_map_sg(dev, scsi_sglist(cmd), scsi_sg_count(cmd),
29 cmd->sc_data_direction); 29 cmd->sc_data_direction);
@@ -41,7 +41,7 @@ EXPORT_SYMBOL(scsi_dma_map);
41void scsi_dma_unmap(struct scsi_cmnd *cmd) 41void scsi_dma_unmap(struct scsi_cmnd *cmd)
42{ 42{
43 if (scsi_sg_count(cmd)) { 43 if (scsi_sg_count(cmd)) {
44 struct device *dev = cmd->device->host->shost_gendev.parent; 44 struct device *dev = cmd->device->host->dma_dev;
45 45
46 dma_unmap_sg(dev, scsi_sglist(cmd), scsi_sg_count(cmd), 46 dma_unmap_sg(dev, scsi_sglist(cmd), scsi_sg_count(cmd),
47 cmd->sc_data_direction); 47 cmd->sc_data_direction);
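scsi_dma_map()/scsi_dma_unmap() now use an explicit shost->dma_dev instead of assuming the sysfs parent is the DMA-capable device. A registration sketch; pdev and dma_pdev are hypothetical names, and plain scsi_add_host() keeps the old behavior by defaulting dma_dev to the parent device:

static int example_attach(struct Scsi_Host *shost, struct pci_dev *pdev,
			  struct pci_dev *dma_pdev)
{
	/* DMA mappings will be made against dma_pdev, sysfs parenting
	 * against pdev */
	return scsi_add_host_with_dma(shost, &pdev->dev, &dma_pdev->dev);
}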
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c
index 723fdecd91bd..d53e6503c6d5 100644
--- a/drivers/scsi/scsi_netlink.c
+++ b/drivers/scsi/scsi_netlink.c
@@ -22,6 +22,7 @@
22#include <linux/jiffies.h> 22#include <linux/jiffies.h>
23#include <linux/security.h> 23#include <linux/security.h>
24#include <linux/delay.h> 24#include <linux/delay.h>
25#include <linux/slab.h>
25#include <net/sock.h> 26#include <net/sock.h>
26#include <net/netlink.h> 27#include <net/netlink.h>
27 28
@@ -613,7 +614,7 @@ EXPORT_SYMBOL_GPL(scsi_nl_send_transport_msg);
613 * @data_buf: pointer to vendor unique data buffer 614 * @data_buf: pointer to vendor unique data buffer
614 * 615 *
615 * Returns: 616 * Returns:
616 * 0 on succesful return 617 * 0 on successful return
617 * otherwise, failing error code 618 * otherwise, failing error code
618 * 619 *
619 * Notes: 620 * Notes:
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index 77fbddb507fd..c99da926fdac 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -20,12 +20,12 @@
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/string.h> 21#include <linux/string.h>
22#include <linux/mm.h> 22#include <linux/mm.h>
23#include <linux/slab.h>
24#include <linux/proc_fs.h> 23#include <linux/proc_fs.h>
25#include <linux/errno.h> 24#include <linux/errno.h>
26#include <linux/blkdev.h> 25#include <linux/blkdev.h>
27#include <linux/seq_file.h> 26#include <linux/seq_file.h>
28#include <linux/mutex.h> 27#include <linux/mutex.h>
28#include <linux/gfp.h>
29#include <asm/uaccess.h> 29#include <asm/uaccess.h>
30 30
31#include <scsi/scsi.h> 31#include <scsi/scsi.h>
diff --git a/drivers/scsi/scsi_sas_internal.h b/drivers/scsi/scsi_sas_internal.h
index 998cb5be6833..6266a5d73d0f 100644
--- a/drivers/scsi/scsi_sas_internal.h
+++ b/drivers/scsi/scsi_sas_internal.h
@@ -5,7 +5,7 @@
5#define SAS_PHY_ATTRS 17 5#define SAS_PHY_ATTRS 17
6#define SAS_PORT_ATTRS 1 6#define SAS_PORT_ATTRS 1
7#define SAS_RPORT_ATTRS 7 7#define SAS_RPORT_ATTRS 7
8#define SAS_END_DEV_ATTRS 3 8#define SAS_END_DEV_ATTRS 5
9#define SAS_EXPANDER_ATTRS 7 9#define SAS_EXPANDER_ATTRS 7
10 10
11struct sas_internal { 11struct sas_internal {
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 47291bcff0d5..38518b088073 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -33,6 +33,7 @@
33#include <linux/kthread.h> 33#include <linux/kthread.h>
34#include <linux/spinlock.h> 34#include <linux/spinlock.h>
35#include <linux/async.h> 35#include <linux/async.h>
36#include <linux/slab.h>
36 37
37#include <scsi/scsi.h> 38#include <scsi/scsi.h>
38#include <scsi/scsi_cmnd.h> 39#include <scsi/scsi_cmnd.h>
@@ -251,6 +252,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
251 sdev->model = scsi_null_device_strs; 252 sdev->model = scsi_null_device_strs;
252 sdev->rev = scsi_null_device_strs; 253 sdev->rev = scsi_null_device_strs;
253 sdev->host = shost; 254 sdev->host = shost;
255 sdev->queue_ramp_up_period = SCSI_DEFAULT_RAMP_UP_PERIOD;
254 sdev->id = starget->id; 256 sdev->id = starget->id;
255 sdev->lun = lun; 257 sdev->lun = lun;
256 sdev->channel = starget->channel; 258 sdev->channel = starget->channel;
@@ -878,7 +880,7 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
878 * broken RA4x00 Compaq Disk Array 880 * broken RA4x00 Compaq Disk Array
879 */ 881 */
880 if (*bflags & BLIST_MAX_512) 882 if (*bflags & BLIST_MAX_512)
881 blk_queue_max_sectors(sdev->request_queue, 512); 883 blk_queue_max_hw_sectors(sdev->request_queue, 512);
882 884
883 /* 885 /*
884 * Some devices may not want to have a start command automatically 886 * Some devices may not want to have a start command automatically
@@ -941,6 +943,8 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
941 } 943 }
942 } 944 }
943 945
946 sdev->max_queue_depth = sdev->queue_depth;
947
944 /* 948 /*
945 * Ok, the device is now all set up, we can 949 * Ok, the device is now all set up, we can
946 * register it and tell the rest of the kernel 950 * register it and tell the rest of the kernel
@@ -1336,8 +1340,10 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
1336 sdev = scsi_alloc_sdev(starget, 0, NULL); 1340 sdev = scsi_alloc_sdev(starget, 0, NULL);
1337 if (!sdev) 1341 if (!sdev)
1338 return 0; 1342 return 0;
1339 if (scsi_device_get(sdev)) 1343 if (scsi_device_get(sdev)) {
1344 __scsi_remove_device(sdev);
1340 return 0; 1345 return 0;
1346 }
1341 } 1347 }
1342 1348
1343 sprintf(devname, "host %d channel %d id %d", 1349 sprintf(devname, "host %d channel %d id %d",
@@ -1904,10 +1910,9 @@ struct scsi_device *scsi_get_host_dev(struct Scsi_Host *shost)
1904 goto out; 1910 goto out;
1905 1911
1906 sdev = scsi_alloc_sdev(starget, 0, NULL); 1912 sdev = scsi_alloc_sdev(starget, 0, NULL);
1907 if (sdev) { 1913 if (sdev)
1908 sdev->sdev_gendev.parent = get_device(&starget->dev);
1909 sdev->borken = 0; 1914 sdev->borken = 0;
1910 } else 1915 else
1911 scsi_target_reap(starget); 1916 scsi_target_reap(starget);
1912 put_device(&starget->dev); 1917 put_device(&starget->dev);
1913 out: 1918 out:
diff --git a/drivers/scsi/scsi_sysctl.c b/drivers/scsi/scsi_sysctl.c
index 63a30f566f3a..2b6b93f7d8ef 100644
--- a/drivers/scsi/scsi_sysctl.c
+++ b/drivers/scsi/scsi_sysctl.c
@@ -13,26 +13,23 @@
13 13
14 14
15static ctl_table scsi_table[] = { 15static ctl_table scsi_table[] = {
16 { .ctl_name = DEV_SCSI_LOGGING_LEVEL, 16 { .procname = "logging_level",
17 .procname = "logging_level",
18 .data = &scsi_logging_level, 17 .data = &scsi_logging_level,
19 .maxlen = sizeof(scsi_logging_level), 18 .maxlen = sizeof(scsi_logging_level),
20 .mode = 0644, 19 .mode = 0644,
21 .proc_handler = &proc_dointvec }, 20 .proc_handler = proc_dointvec },
22 { } 21 { }
23}; 22};
24 23
25static ctl_table scsi_dir_table[] = { 24static ctl_table scsi_dir_table[] = {
26 { .ctl_name = DEV_SCSI, 25 { .procname = "scsi",
27 .procname = "scsi",
28 .mode = 0555, 26 .mode = 0555,
29 .child = scsi_table }, 27 .child = scsi_table },
30 { } 28 { }
31}; 29};
32 30
33static ctl_table scsi_root_table[] = { 31static ctl_table scsi_root_table[] = {
34 { .ctl_name = CTL_DEV, 32 { .procname = "dev",
35 .procname = "dev",
36 .mode = 0555, 33 .mode = 0555,
37 .child = scsi_dir_table }, 34 .child = scsi_dir_table },
38 { } 35 { }
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 392d8db33905..429c9b73e3e4 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -7,6 +7,7 @@
7 */ 7 */
8 8
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/slab.h>
10#include <linux/init.h> 11#include <linux/init.h>
11#include <linux/blkdev.h> 12#include <linux/blkdev.h>
12#include <linux/device.h> 13#include <linux/device.h>
@@ -766,10 +767,13 @@ sdev_store_queue_depth_rw(struct device *dev, struct device_attribute *attr,
766 if (depth < 1) 767 if (depth < 1)
767 return -EINVAL; 768 return -EINVAL;
768 769
769 retval = sht->change_queue_depth(sdev, depth); 770 retval = sht->change_queue_depth(sdev, depth,
771 SCSI_QDEPTH_DEFAULT);
770 if (retval < 0) 772 if (retval < 0)
771 return retval; 773 return retval;
772 774
775 sdev->max_queue_depth = sdev->queue_depth;
776
773 return count; 777 return count;
774} 778}
775 779
@@ -778,6 +782,37 @@ static struct device_attribute sdev_attr_queue_depth_rw =
778 sdev_store_queue_depth_rw); 782 sdev_store_queue_depth_rw);
779 783
780static ssize_t 784static ssize_t
785sdev_show_queue_ramp_up_period(struct device *dev,
786 struct device_attribute *attr,
787 char *buf)
788{
789 struct scsi_device *sdev;
790 sdev = to_scsi_device(dev);
791 return snprintf(buf, 20, "%u\n",
792 jiffies_to_msecs(sdev->queue_ramp_up_period));
793}
794
795static ssize_t
796sdev_store_queue_ramp_up_period(struct device *dev,
797 struct device_attribute *attr,
798 const char *buf, size_t count)
799{
800 struct scsi_device *sdev = to_scsi_device(dev);
801 unsigned long period;
802
803 if (strict_strtoul(buf, 10, &period))
804 return -EINVAL;
805
806 sdev->queue_ramp_up_period = msecs_to_jiffies(period);
807 return period;
808}
809
810static struct device_attribute sdev_attr_queue_ramp_up_period =
811 __ATTR(queue_ramp_up_period, S_IRUGO | S_IWUSR,
812 sdev_show_queue_ramp_up_period,
813 sdev_store_queue_ramp_up_period);
814
815static ssize_t
781sdev_store_queue_type_rw(struct device *dev, struct device_attribute *attr, 816sdev_store_queue_type_rw(struct device *dev, struct device_attribute *attr,
782 const char *buf, size_t count) 817 const char *buf, size_t count)
783{ 818{
@@ -813,6 +848,8 @@ static int scsi_target_add(struct scsi_target *starget)
813 if (starget->state != STARGET_CREATED) 848 if (starget->state != STARGET_CREATED)
814 return 0; 849 return 0;
815 850
851 device_enable_async_suspend(&starget->dev);
852
816 error = device_add(&starget->dev); 853 error = device_add(&starget->dev);
817 if (error) { 854 if (error) {
818 dev_err(&starget->dev, "target device_add failed, error %d\n", error); 855 dev_err(&starget->dev, "target device_add failed, error %d\n", error);
@@ -844,7 +881,8 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
844 struct request_queue *rq = sdev->request_queue; 881 struct request_queue *rq = sdev->request_queue;
845 struct scsi_target *starget = sdev->sdev_target; 882 struct scsi_target *starget = sdev->sdev_target;
846 883
847 if ((error = scsi_device_set_state(sdev, SDEV_RUNNING)) != 0) 884 error = scsi_device_set_state(sdev, SDEV_RUNNING);
885 if (error)
848 return error; 886 return error;
849 887
850 error = scsi_target_add(starget); 888 error = scsi_target_add(starget);
@@ -852,34 +890,40 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
852 return error; 890 return error;
853 891
854 transport_configure_device(&starget->dev); 892 transport_configure_device(&starget->dev);
893 device_enable_async_suspend(&sdev->sdev_gendev);
855 error = device_add(&sdev->sdev_gendev); 894 error = device_add(&sdev->sdev_gendev);
856 if (error) { 895 if (error) {
857 printk(KERN_INFO "error 1\n"); 896 printk(KERN_INFO "error 1\n");
858 goto out_remove; 897 return error;
859 } 898 }
899 device_enable_async_suspend(&sdev->sdev_dev);
860 error = device_add(&sdev->sdev_dev); 900 error = device_add(&sdev->sdev_dev);
861 if (error) { 901 if (error) {
862 printk(KERN_INFO "error 2\n"); 902 printk(KERN_INFO "error 2\n");
863 device_del(&sdev->sdev_gendev); 903 device_del(&sdev->sdev_gendev);
864 goto out_remove; 904 return error;
865 } 905 }
866 transport_add_device(&sdev->sdev_gendev); 906 transport_add_device(&sdev->sdev_gendev);
867 sdev->is_visible = 1; 907 sdev->is_visible = 1;
868 908
869 /* create queue files, which may be writable, depending on the host */ 909 /* create queue files, which may be writable, depending on the host */
870 if (sdev->host->hostt->change_queue_depth) 910 if (sdev->host->hostt->change_queue_depth) {
871 error = device_create_file(&sdev->sdev_gendev, &sdev_attr_queue_depth_rw); 911 error = device_create_file(&sdev->sdev_gendev,
912 &sdev_attr_queue_depth_rw);
913 error = device_create_file(&sdev->sdev_gendev,
914 &sdev_attr_queue_ramp_up_period);
915 }
872 else 916 else
873 error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_depth); 917 error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_depth);
874 if (error) 918 if (error)
875 goto out_remove; 919 return error;
876 920
877 if (sdev->host->hostt->change_queue_type) 921 if (sdev->host->hostt->change_queue_type)
878 error = device_create_file(&sdev->sdev_gendev, &sdev_attr_queue_type_rw); 922 error = device_create_file(&sdev->sdev_gendev, &sdev_attr_queue_type_rw);
879 else 923 else
880 error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_type); 924 error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_type);
881 if (error) 925 if (error)
882 goto out_remove; 926 return error;
883 927
884 error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL, NULL); 928 error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL, NULL);
885 929
@@ -895,16 +939,11 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
895 error = device_create_file(&sdev->sdev_gendev, 939 error = device_create_file(&sdev->sdev_gendev,
896 sdev->host->hostt->sdev_attrs[i]); 940 sdev->host->hostt->sdev_attrs[i]);
897 if (error) 941 if (error)
898 goto out_remove; 942 return error;
899 } 943 }
900 } 944 }
901 945
902 return 0;
903
904 out_remove:
905 __scsi_remove_device(sdev);
906 return error; 946 return error;
907
908} 947}
909 948
910void __scsi_remove_device(struct scsi_device *sdev) 949void __scsi_remove_device(struct scsi_device *sdev)
diff --git a/drivers/scsi/scsi_tgt_if.c b/drivers/scsi/scsi_tgt_if.c
index 0e9533f7aabc..a87e21c35ef2 100644
--- a/drivers/scsi/scsi_tgt_if.c
+++ b/drivers/scsi/scsi_tgt_if.c
@@ -20,6 +20,7 @@
20 * 02110-1301 USA 20 * 02110-1301 USA
21 */ 21 */
22#include <linux/miscdevice.h> 22#include <linux/miscdevice.h>
23#include <linux/gfp.h>
23#include <linux/file.h> 24#include <linux/file.h>
24#include <linux/smp_lock.h> 25#include <linux/smp_lock.h>
25#include <net/tcp.h> 26#include <net/tcp.h>
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index 10303272ba45..66241dd525ae 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -23,6 +23,7 @@
23#include <linux/hash.h> 23#include <linux/hash.h>
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/pagemap.h> 25#include <linux/pagemap.h>
26#include <linux/slab.h>
26#include <scsi/scsi.h> 27#include <scsi/scsi.h>
27#include <scsi/scsi_cmnd.h> 28#include <scsi/scsi_cmnd.h>
28#include <scsi/scsi_device.h> 29#include <scsi/scsi_device.h>
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index c6f70dae9b2e..6cfffc88022a 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -27,6 +27,8 @@
27 */ 27 */
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/init.h> 29#include <linux/init.h>
30#include <linux/slab.h>
31#include <linux/delay.h>
30#include <scsi/scsi_device.h> 32#include <scsi/scsi_device.h>
31#include <scsi/scsi_host.h> 33#include <scsi/scsi_host.h>
32#include <scsi/scsi_transport.h> 34#include <scsi/scsi_transport.h>
@@ -474,7 +476,8 @@ MODULE_PARM_DESC(dev_loss_tmo,
474 "Maximum number of seconds that the FC transport should" 476 "Maximum number of seconds that the FC transport should"
475 " insulate the loss of a remote port. Once this value is" 477 " insulate the loss of a remote port. Once this value is"
476 " exceeded, the scsi target is removed. Value should be" 478 " exceeded, the scsi target is removed. Value should be"
477 " between 1 and SCSI_DEVICE_BLOCK_MAX_TIMEOUT."); 479 " between 1 and SCSI_DEVICE_BLOCK_MAX_TIMEOUT if"
480 " fast_io_fail_tmo is not set.");
478 481
479/* 482/*
480 * Netlink Infrastructure 483 * Netlink Infrastructure
@@ -648,11 +651,22 @@ static __init int fc_transport_init(void)
648 return error; 651 return error;
649 error = transport_class_register(&fc_vport_class); 652 error = transport_class_register(&fc_vport_class);
650 if (error) 653 if (error)
651 return error; 654 goto unreg_host_class;
652 error = transport_class_register(&fc_rport_class); 655 error = transport_class_register(&fc_rport_class);
653 if (error) 656 if (error)
654 return error; 657 goto unreg_vport_class;
655 return transport_class_register(&fc_transport_class); 658 error = transport_class_register(&fc_transport_class);
659 if (error)
660 goto unreg_rport_class;
661 return 0;
662
663unreg_rport_class:
664 transport_class_unregister(&fc_rport_class);
665unreg_vport_class:
666 transport_class_unregister(&fc_vport_class);
667unreg_host_class:
668 transport_class_unregister(&fc_host_class);
669 return error;
656} 670}
657 671
658static void __exit fc_transport_exit(void) 672static void __exit fc_transport_exit(void)
@@ -830,9 +844,17 @@ store_fc_rport_dev_loss_tmo(struct device *dev, struct device_attribute *attr,
830 (rport->port_state == FC_PORTSTATE_NOTPRESENT)) 844 (rport->port_state == FC_PORTSTATE_NOTPRESENT))
831 return -EBUSY; 845 return -EBUSY;
832 val = simple_strtoul(buf, &cp, 0); 846 val = simple_strtoul(buf, &cp, 0);
833 if ((*cp && (*cp != '\n')) || 847 if ((*cp && (*cp != '\n')) || (val < 0))
834 (val < 0) || (val > SCSI_DEVICE_BLOCK_MAX_TIMEOUT))
835 return -EINVAL; 848 return -EINVAL;
849
850 /*
851 * If fast_io_fail is off we have to cap
852 * dev_loss_tmo at SCSI_DEVICE_BLOCK_MAX_TIMEOUT
853 */
854 if (rport->fast_io_fail_tmo == -1 &&
855 val > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
856 return -EINVAL;
857
836 i->f->set_rport_dev_loss_tmo(rport, val); 858 i->f->set_rport_dev_loss_tmo(rport, val);
837 return count; 859 return count;
838} 860}
@@ -913,9 +935,16 @@ store_fc_rport_fast_io_fail_tmo(struct device *dev,
913 rport->fast_io_fail_tmo = -1; 935 rport->fast_io_fail_tmo = -1;
914 else { 936 else {
915 val = simple_strtoul(buf, &cp, 0); 937 val = simple_strtoul(buf, &cp, 0);
916 if ((*cp && (*cp != '\n')) || 938 if ((*cp && (*cp != '\n')) || (val < 0))
917 (val < 0) || (val >= rport->dev_loss_tmo))
918 return -EINVAL; 939 return -EINVAL;
940 /*
941 * Cap fast_io_fail by dev_loss_tmo or
942 * SCSI_DEVICE_BLOCK_MAX_TIMEOUT.
943 */
944 if ((val >= rport->dev_loss_tmo) ||
945 (val > SCSI_DEVICE_BLOCK_MAX_TIMEOUT))
946 return -EINVAL;
947
919 rport->fast_io_fail_tmo = val; 948 rport->fast_io_fail_tmo = val;
920 } 949 }
921 return count; 950 return count;
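Taken together, the two stores above enforce one invariant; restated as a single predicate (illustrative only):

static bool fc_tmo_values_valid(int fast_io_fail_tmo,
				unsigned long dev_loss_tmo)
{
	if (fast_io_fail_tmo == -1)	/* fast fail off: block limit caps dev_loss */
		return dev_loss_tmo <= SCSI_DEVICE_BLOCK_MAX_TIMEOUT;

	/* fast fail on: it must fire before dev_loss and within the block
	 * limit, while dev_loss itself may now exceed the block limit */
	return fast_io_fail_tmo < dev_loss_tmo &&
	       fast_io_fail_tmo <= SCSI_DEVICE_BLOCK_MAX_TIMEOUT;
}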
@@ -1204,6 +1233,15 @@ store_fc_vport_delete(struct device *dev, struct device_attribute *attr,
1204{ 1233{
1205 struct fc_vport *vport = transport_class_to_vport(dev); 1234 struct fc_vport *vport = transport_class_to_vport(dev);
1206 struct Scsi_Host *shost = vport_to_shost(vport); 1235 struct Scsi_Host *shost = vport_to_shost(vport);
1236 unsigned long flags;
1237
1238 spin_lock_irqsave(shost->host_lock, flags);
1239 if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) {
1240 spin_unlock_irqrestore(shost->host_lock, flags);
1241 return -EBUSY;
1242 }
1243 vport->flags |= FC_VPORT_DELETING;
1244 spin_unlock_irqrestore(shost->host_lock, flags);
1207 1245
1208 fc_queue_work(shost, &vport->vport_delete_work); 1246 fc_queue_work(shost, &vport->vport_delete_work);
1209 return count; 1247 return count;
@@ -1793,6 +1831,9 @@ store_fc_host_vport_delete(struct device *dev, struct device_attribute *attr,
1793 list_for_each_entry(vport, &fc_host->vports, peers) { 1831 list_for_each_entry(vport, &fc_host->vports, peers) {
1794 if ((vport->channel == 0) && 1832 if ((vport->channel == 0) &&
1795 (vport->port_name == wwpn) && (vport->node_name == wwnn)) { 1833 (vport->port_name == wwpn) && (vport->node_name == wwnn)) {
1834 if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))
1835 break;
1836 vport->flags |= FC_VPORT_DELETING;
1796 match = 1; 1837 match = 1;
1797 break; 1838 break;
1798 } 1839 }
@@ -2384,6 +2425,7 @@ fc_rport_final_delete(struct work_struct *work)
2384 struct Scsi_Host *shost = rport_to_shost(rport); 2425 struct Scsi_Host *shost = rport_to_shost(rport);
2385 struct fc_internal *i = to_fc_internal(shost->transportt); 2426 struct fc_internal *i = to_fc_internal(shost->transportt);
2386 unsigned long flags; 2427 unsigned long flags;
2428 int do_callback = 0;
2387 2429
2388 /* 2430 /*
2389 * if a scan is pending, flush the SCSI Host work_q so that 2431 * if a scan is pending, flush the SCSI Host work_q so that
@@ -2422,8 +2464,15 @@ fc_rport_final_delete(struct work_struct *work)
2422 * Avoid this call if we already called it when we preserved the 2464 * Avoid this call if we already called it when we preserved the
2423 * rport for the binding. 2465 * rport for the binding.
2424 */ 2466 */
2467 spin_lock_irqsave(shost->host_lock, flags);
2425 if (!(rport->flags & FC_RPORT_DEVLOSS_CALLBK_DONE) && 2468 if (!(rport->flags & FC_RPORT_DEVLOSS_CALLBK_DONE) &&
2426 (i->f->dev_loss_tmo_callbk)) 2469 (i->f->dev_loss_tmo_callbk)) {
2470 rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE;
2471 do_callback = 1;
2472 }
2473 spin_unlock_irqrestore(shost->host_lock, flags);
2474
2475 if (do_callback)
2427 i->f->dev_loss_tmo_callbk(rport); 2476 i->f->dev_loss_tmo_callbk(rport);
2428 2477
2429 fc_bsg_remove(rport->rqst_q); 2478 fc_bsg_remove(rport->rqst_q);
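The do_callback dance above (repeated in fc_timeout_deleted_rport() below) is a claim-then-call pattern: FC_RPORT_DEVLOSS_CALLBK_DONE is tested and set under host_lock so dev_loss_tmo_callbk() fires exactly once, and the call happens only after the spinlock is dropped because the LLD callback may sleep. Reduced to its essentials (a simplified sketch, not the exact flag handling of either site):

int do_callback = 0;

spin_lock_irqsave(shost->host_lock, flags);
if (!(rport->flags & FC_RPORT_DEVLOSS_CALLBK_DONE)) {
	rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE;
	do_callback = 1;			/* we won the claim */
}
spin_unlock_irqrestore(shost->host_lock, flags);

if (do_callback && i->f->dev_loss_tmo_callbk)
	i->f->dev_loss_tmo_callbk(rport);	/* may sleep; lock released */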
@@ -2970,6 +3019,7 @@ fc_timeout_deleted_rport(struct work_struct *work)
2970 struct fc_internal *i = to_fc_internal(shost->transportt); 3019 struct fc_internal *i = to_fc_internal(shost->transportt);
2971 struct fc_host_attrs *fc_host = shost_to_fc_host(shost); 3020 struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
2972 unsigned long flags; 3021 unsigned long flags;
3022 int do_callback = 0;
2973 3023
2974 spin_lock_irqsave(shost->host_lock, flags); 3024 spin_lock_irqsave(shost->host_lock, flags);
2975 3025
@@ -3035,7 +3085,6 @@ fc_timeout_deleted_rport(struct work_struct *work)
3035 rport->roles = FC_PORT_ROLE_UNKNOWN; 3085 rport->roles = FC_PORT_ROLE_UNKNOWN;
3036 rport->port_state = FC_PORTSTATE_NOTPRESENT; 3086 rport->port_state = FC_PORTSTATE_NOTPRESENT;
3037 rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT; 3087 rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT;
3038 rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE;
3039 3088
3040 /* 3089 /*
3041 * Pre-emptively kill I/O rather than waiting for the work queue 3090 * Pre-emptively kill I/O rather than waiting for the work queue
@@ -3045,32 +3094,40 @@ fc_timeout_deleted_rport(struct work_struct *work)
3045 spin_unlock_irqrestore(shost->host_lock, flags); 3094 spin_unlock_irqrestore(shost->host_lock, flags);
3046 fc_terminate_rport_io(rport); 3095 fc_terminate_rport_io(rport);
3047 3096
3048 BUG_ON(rport->port_state != FC_PORTSTATE_NOTPRESENT); 3097 spin_lock_irqsave(shost->host_lock, flags);
3049 3098
3050 /* remove the identifiers that aren't used in the consisting binding */ 3099 if (rport->port_state == FC_PORTSTATE_NOTPRESENT) { /* still missing */
3051 switch (fc_host->tgtid_bind_type) { 3100
3052 case FC_TGTID_BIND_BY_WWPN: 3101 /* remove the identifiers that aren't used in the consisting binding */
3053 rport->node_name = -1; 3102 switch (fc_host->tgtid_bind_type) {
3054 rport->port_id = -1; 3103 case FC_TGTID_BIND_BY_WWPN:
3055 break; 3104 rport->node_name = -1;
3056 case FC_TGTID_BIND_BY_WWNN: 3105 rport->port_id = -1;
3057 rport->port_name = -1; 3106 break;
3058 rport->port_id = -1; 3107 case FC_TGTID_BIND_BY_WWNN:
3059 break; 3108 rport->port_name = -1;
3060 case FC_TGTID_BIND_BY_ID: 3109 rport->port_id = -1;
3061 rport->node_name = -1; 3110 break;
3062 rport->port_name = -1; 3111 case FC_TGTID_BIND_BY_ID:
3063 break; 3112 rport->node_name = -1;
3064 case FC_TGTID_BIND_NONE: /* to keep compiler happy */ 3113 rport->port_name = -1;
3065 break; 3114 break;
3115 case FC_TGTID_BIND_NONE: /* to keep compiler happy */
3116 break;
3117 }
3118
3119 /*
3120 * As this only occurs if the remote port (scsi target)
3121 * went away and didn't come back - we'll remove
3122 * all attached scsi devices.
3123 */
3124 rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE;
3125 fc_queue_work(shost, &rport->stgt_delete_work);
3126
3127 do_callback = 1;
3066 } 3128 }
3067 3129
3068 /* 3130 spin_unlock_irqrestore(shost->host_lock, flags);
3069 * As this only occurs if the remote port (scsi target)
3070 * went away and didn't come back - we'll remove
3071 * all attached scsi devices.
3072 */
3073 fc_queue_work(shost, &rport->stgt_delete_work);
3074 3131
3075 /* 3132 /*
3076 * Notify the driver that the rport is now dead. The LLDD will 3133 * Notify the driver that the rport is now dead. The LLDD will
@@ -3078,7 +3135,7 @@ fc_timeout_deleted_rport(struct work_struct *work)
3078 * 3135 *
3079 * Note: we set the CALLBK_DONE flag above to correspond 3136 * Note: we set the CALLBK_DONE flag above to correspond
3080 */ 3137 */
3081 if (i->f->dev_loss_tmo_callbk) 3138 if (do_callback && i->f->dev_loss_tmo_callbk)
3082 i->f->dev_loss_tmo_callbk(rport); 3139 i->f->dev_loss_tmo_callbk(rport);
3083} 3140}
3084 3141
@@ -3128,6 +3185,31 @@ fc_scsi_scan_rport(struct work_struct *work)
3128 spin_unlock_irqrestore(shost->host_lock, flags); 3185 spin_unlock_irqrestore(shost->host_lock, flags);
3129} 3186}
3130 3187
3188/**
3189 * fc_block_scsi_eh - Block SCSI eh thread for blocked fc_rport
3190 * @cmnd: SCSI command that scsi_eh is trying to recover
3191 *
 3192 * This routine can be called from an FC LLD scsi_eh callback. It
 3193 * blocks the scsi_eh thread until the fc_rport leaves the
 3194 * FC_PORTSTATE_BLOCKED state. This is necessary to avoid the scsi_eh
 3195 * failing recovery actions for blocked rports, which would lead to
3196 * offlined SCSI devices.
3197 */
3198void fc_block_scsi_eh(struct scsi_cmnd *cmnd)
3199{
3200 struct Scsi_Host *shost = cmnd->device->host;
3201 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
3202 unsigned long flags;
3203
3204 spin_lock_irqsave(shost->host_lock, flags);
3205 while (rport->port_state == FC_PORTSTATE_BLOCKED) {
3206 spin_unlock_irqrestore(shost->host_lock, flags);
3207 msleep(1000);
3208 spin_lock_irqsave(shost->host_lock, flags);
3209 }
3210 spin_unlock_irqrestore(shost->host_lock, flags);
3211}
3212EXPORT_SYMBOL(fc_block_scsi_eh);
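The helper busy-waits by repeatedly dropping host_lock, sleeping a second, and re-checking the port state. A hedged sketch of how an LLD's error-handler callback might use it (hypothetical driver code, not part of this patch):

static int example_lld_eh_device_reset(struct scsi_cmnd *cmnd)
{
    /* Park the eh thread while the rport is FC_PORTSTATE_BLOCKED;
     * recovering too early would fail and offline the device. */
    fc_block_scsi_eh(cmnd);

    /* ... issue the actual device reset here ... */
    return SUCCESS;
}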
3131 3213
3132/** 3214/**
3133 * fc_vport_setup - allocates and creates a FC virtual port. 3215 * fc_vport_setup - allocates and creates a FC virtual port.
@@ -3301,18 +3383,6 @@ fc_vport_terminate(struct fc_vport *vport)
3301 unsigned long flags; 3383 unsigned long flags;
3302 int stat; 3384 int stat;
3303 3385
3304 spin_lock_irqsave(shost->host_lock, flags);
3305 if (vport->flags & FC_VPORT_CREATING) {
3306 spin_unlock_irqrestore(shost->host_lock, flags);
3307 return -EBUSY;
3308 }
3309 if (vport->flags & (FC_VPORT_DEL)) {
3310 spin_unlock_irqrestore(shost->host_lock, flags);
3311 return -EALREADY;
3312 }
3313 vport->flags |= FC_VPORT_DELETING;
3314 spin_unlock_irqrestore(shost->host_lock, flags);
3315
3316 if (i->f->vport_delete) 3386 if (i->f->vport_delete)
3317 stat = i->f->vport_delete(vport); 3387 stat = i->f->vport_delete(vport);
3318 else 3388 else
@@ -3474,7 +3544,10 @@ fc_bsg_job_timeout(struct request *req)
3474 if (!done && i->f->bsg_timeout) { 3544 if (!done && i->f->bsg_timeout) {
3475 /* call LLDD to abort the i/o as it has timed out */ 3545 /* call LLDD to abort the i/o as it has timed out */
3476 err = i->f->bsg_timeout(job); 3546 err = i->f->bsg_timeout(job);
3477 if (err) 3547 if (err == -EAGAIN) {
3548 job->ref_cnt--;
3549 return BLK_EH_RESET_TIMER;
3550 } else if (err)
3478 printk(KERN_ERR "ERROR: FC BSG request timeout - LLD " 3551 printk(KERN_ERR "ERROR: FC BSG request timeout - LLD "
3479 "abort failed with status %d\n", err); 3552 "abort failed with status %d\n", err);
3480 } 3553 }
@@ -3769,8 +3842,9 @@ fc_bsg_request_handler(struct request_queue *q, struct Scsi_Host *shost,
3769 return; 3842 return;
3770 3843
3771 while (!blk_queue_plugged(q)) { 3844 while (!blk_queue_plugged(q)) {
3772 if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED)) 3845 if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED) &&
3773 break; 3846 !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT))
3847 break;
3774 3848
3775 req = blk_fetch_request(q); 3849 req = blk_fetch_request(q);
3776 if (!req) 3850 if (!req)
@@ -3779,7 +3853,7 @@ fc_bsg_request_handler(struct request_queue *q, struct Scsi_Host *shost,
3779 if (rport && (rport->port_state != FC_PORTSTATE_ONLINE)) { 3853 if (rport && (rport->port_state != FC_PORTSTATE_ONLINE)) {
3780 req->errors = -ENXIO; 3854 req->errors = -ENXIO;
3781 spin_unlock_irq(q->queue_lock); 3855 spin_unlock_irq(q->queue_lock);
3782 blk_end_request(req, -ENXIO, blk_rq_bytes(req)); 3856 blk_end_request_all(req, -ENXIO);
3783 spin_lock_irq(q->queue_lock); 3857 spin_lock_irq(q->queue_lock);
3784 continue; 3858 continue;
3785 } 3859 }
@@ -3789,7 +3863,7 @@ fc_bsg_request_handler(struct request_queue *q, struct Scsi_Host *shost,
3789 ret = fc_req_to_bsgjob(shost, rport, req); 3863 ret = fc_req_to_bsgjob(shost, rport, req);
3790 if (ret) { 3864 if (ret) {
3791 req->errors = ret; 3865 req->errors = ret;
3792 blk_end_request(req, ret, blk_rq_bytes(req)); 3866 blk_end_request_all(req, ret);
3793 spin_lock_irq(q->queue_lock); 3867 spin_lock_irq(q->queue_lock);
3794 continue; 3868 continue;
3795 } 3869 }
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index ad897df36615..1e6d4793542c 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -22,6 +22,7 @@
22 */ 22 */
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/mutex.h> 24#include <linux/mutex.h>
25#include <linux/slab.h>
25#include <net/tcp.h> 26#include <net/tcp.h>
26#include <scsi/scsi.h> 27#include <scsi/scsi.h>
27#include <scsi/scsi_host.h> 28#include <scsi/scsi_host.h>
@@ -30,7 +31,7 @@
30#include <scsi/scsi_transport_iscsi.h> 31#include <scsi/scsi_transport_iscsi.h>
31#include <scsi/iscsi_if.h> 32#include <scsi/iscsi_if.h>
32 33
33#define ISCSI_SESSION_ATTRS 21 34#define ISCSI_SESSION_ATTRS 22
34#define ISCSI_CONN_ATTRS 13 35#define ISCSI_CONN_ATTRS 13
35#define ISCSI_HOST_ATTRS 4 36#define ISCSI_HOST_ATTRS 4
36 37
@@ -627,8 +628,10 @@ static void __iscsi_block_session(struct work_struct *work)
627 spin_unlock_irqrestore(&session->lock, flags); 628 spin_unlock_irqrestore(&session->lock, flags);
628 scsi_target_block(&session->dev); 629 scsi_target_block(&session->dev);
629 ISCSI_DBG_TRANS_SESSION(session, "Completed SCSI target blocking\n"); 630 ISCSI_DBG_TRANS_SESSION(session, "Completed SCSI target blocking\n");
630 queue_delayed_work(iscsi_eh_timer_workq, &session->recovery_work, 631 if (session->recovery_tmo >= 0)
631 session->recovery_tmo * HZ); 632 queue_delayed_work(iscsi_eh_timer_workq,
633 &session->recovery_work,
634 session->recovery_tmo * HZ);
632} 635}
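Together with the iscsi_set_param hunk below, which stops filtering out non-positive values, this gives recovery_tmo a sentinel meaning: a negative timeout never arms the unblock timer, so the session stays blocked until userspace acts. A trivial runnable illustration of the sentinel check (names are illustrative):

#include <stdio.h>

static void arm_recovery_timer(int recovery_tmo_secs)
{
    if (recovery_tmo_secs < 0) {
        puts("timer not armed; session stays blocked");
        return;                 /* mirrors the new >= 0 guard above */
    }
    printf("unblock scheduled in %d s\n", recovery_tmo_secs);
}

int main(void)
{
    arm_recovery_timer(120);
    arm_recovery_timer(-1);
    return 0;
}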
633 636
634void iscsi_block_session(struct iscsi_cls_session *session) 637void iscsi_block_session(struct iscsi_cls_session *session)
@@ -1348,8 +1351,7 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
1348 switch (ev->u.set_param.param) { 1351 switch (ev->u.set_param.param) {
1349 case ISCSI_PARAM_SESS_RECOVERY_TMO: 1352 case ISCSI_PARAM_SESS_RECOVERY_TMO:
1350 sscanf(data, "%d", &value); 1353 sscanf(data, "%d", &value);
1351 if (value != 0) 1354 session->recovery_tmo = value;
1352 session->recovery_tmo = value;
1353 break; 1355 break;
1354 default: 1356 default:
1355 err = transport->set_param(conn, ev->u.set_param.param, 1357 err = transport->set_param(conn, ev->u.set_param.param,
@@ -1759,6 +1761,7 @@ iscsi_session_attr(password_in, ISCSI_PARAM_PASSWORD_IN, 1);
1759iscsi_session_attr(fast_abort, ISCSI_PARAM_FAST_ABORT, 0); 1761iscsi_session_attr(fast_abort, ISCSI_PARAM_FAST_ABORT, 0);
1760iscsi_session_attr(abort_tmo, ISCSI_PARAM_ABORT_TMO, 0); 1762iscsi_session_attr(abort_tmo, ISCSI_PARAM_ABORT_TMO, 0);
1761iscsi_session_attr(lu_reset_tmo, ISCSI_PARAM_LU_RESET_TMO, 0); 1763iscsi_session_attr(lu_reset_tmo, ISCSI_PARAM_LU_RESET_TMO, 0);
1764iscsi_session_attr(tgt_reset_tmo, ISCSI_PARAM_TGT_RESET_TMO, 0);
1762iscsi_session_attr(ifacename, ISCSI_PARAM_IFACE_NAME, 0); 1765iscsi_session_attr(ifacename, ISCSI_PARAM_IFACE_NAME, 0);
1763iscsi_session_attr(initiatorname, ISCSI_PARAM_INITIATOR_NAME, 0) 1766iscsi_session_attr(initiatorname, ISCSI_PARAM_INITIATOR_NAME, 0)
1764 1767
@@ -2000,6 +2003,7 @@ iscsi_register_transport(struct iscsi_transport *tt)
2000 SETUP_SESSION_RD_ATTR(fast_abort, ISCSI_FAST_ABORT); 2003 SETUP_SESSION_RD_ATTR(fast_abort, ISCSI_FAST_ABORT);
2001 SETUP_SESSION_RD_ATTR(abort_tmo, ISCSI_ABORT_TMO); 2004 SETUP_SESSION_RD_ATTR(abort_tmo, ISCSI_ABORT_TMO);
2002 SETUP_SESSION_RD_ATTR(lu_reset_tmo,ISCSI_LU_RESET_TMO); 2005 SETUP_SESSION_RD_ATTR(lu_reset_tmo,ISCSI_LU_RESET_TMO);
2006 SETUP_SESSION_RD_ATTR(tgt_reset_tmo,ISCSI_TGT_RESET_TMO);
2003 SETUP_SESSION_RD_ATTR(ifacename, ISCSI_IFACE_NAME); 2007 SETUP_SESSION_RD_ATTR(ifacename, ISCSI_IFACE_NAME);
2004 SETUP_SESSION_RD_ATTR(initiatorname, ISCSI_INITIATOR_NAME); 2008 SETUP_SESSION_RD_ATTR(initiatorname, ISCSI_INITIATOR_NAME);
2005 SETUP_PRIV_SESSION_RD_ATTR(recovery_tmo); 2009 SETUP_PRIV_SESSION_RD_ATTR(recovery_tmo);
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index fd47cb1bee1b..927e99cb7225 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -155,6 +155,17 @@ static struct {
155sas_bitfield_name_search(linkspeed, sas_linkspeed_names) 155sas_bitfield_name_search(linkspeed, sas_linkspeed_names)
156sas_bitfield_name_set(linkspeed, sas_linkspeed_names) 156sas_bitfield_name_set(linkspeed, sas_linkspeed_names)
157 157
158static struct sas_end_device *sas_sdev_to_rdev(struct scsi_device *sdev)
159{
160 struct sas_rphy *rphy = target_to_rphy(sdev->sdev_target);
161 struct sas_end_device *rdev;
162
163 BUG_ON(rphy->identify.device_type != SAS_END_DEVICE);
164
165 rdev = rphy_to_end_device(rphy);
166 return rdev;
167}
168
158static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost, 169static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost,
159 struct sas_rphy *rphy) 170 struct sas_rphy *rphy)
160{ 171{
@@ -358,6 +369,85 @@ void sas_remove_host(struct Scsi_Host *shost)
358} 369}
359EXPORT_SYMBOL(sas_remove_host); 370EXPORT_SYMBOL(sas_remove_host);
360 371
372/**
 373 * sas_tlr_supported - checking TLR bit in VPD page 0x90
 374 * @sdev: scsi device struct
 375 *
 376 * Check whether Transport Layer Retries are supported.
 377 * If VPD page 0x90 is present, TLR is supported.
378 *
379 */
380unsigned int
381sas_tlr_supported(struct scsi_device *sdev)
382{
383 const int vpd_len = 32;
384 struct sas_end_device *rdev = sas_sdev_to_rdev(sdev);
385 char *buffer = kzalloc(vpd_len, GFP_KERNEL);
386 int ret = 0;
387
388 if (scsi_get_vpd_page(sdev, 0x90, buffer, vpd_len))
389 goto out;
390
391 /*
 392 * Magic numbers: the VPD Protocol page (0x90)
 393 * has a 4-byte header and then one entry per device port;
 394 * the TLR bit is at offset 8 in each port entry, so
 395 * for the first port that's at total offset 12.
396 */
397 ret = buffer[12] & 0x01;
398
399 out:
400 kfree(buffer);
401 rdev->tlr_supported = ret;
402 return ret;
403
404}
405EXPORT_SYMBOL_GPL(sas_tlr_supported);
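The byte layout the comment describes is easy to verify in isolation. A self-contained user-space sketch applying the same offsets to a raw VPD 0x90 buffer (the helper name is made up; the kernel path above obtains the buffer via scsi_get_vpd_page()):

#include <stddef.h>
#include <stdio.h>

/* The page has a 4-byte header and one descriptor per port, with the
 * TLR bit at offset 8 of each descriptor, so the first port's bit
 * sits at absolute offset 12. */
static int vpd90_first_port_tlr(const unsigned char *vpd, size_t len)
{
    if (len < 13)
        return 0;               /* page too short to carry the bit */
    return vpd[12] & 0x01;
}

int main(void)
{
    unsigned char page[32] = { 0 };

    page[12] = 0x01;            /* pretend the target reports TLR */
    printf("TLR supported: %d\n", vpd90_first_port_tlr(page, sizeof(page)));
    return 0;
}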
406
407/**
408 * sas_disable_tlr - setting TLR flags
409 * @sdev: scsi device struct
410 *
 411 * Setting the tlr_enabled flag to 0.
412 *
413 */
414void
415sas_disable_tlr(struct scsi_device *sdev)
416{
417 struct sas_end_device *rdev = sas_sdev_to_rdev(sdev);
418
419 rdev->tlr_enabled = 0;
420}
421EXPORT_SYMBOL_GPL(sas_disable_tlr);
422
423/**
424 * sas_enable_tlr - setting TLR flags
425 * @sdev: scsi device struct
426 *
 427 * Setting the tlr_enabled flag to 1.
428 *
429 */
430void sas_enable_tlr(struct scsi_device *sdev)
431{
432 unsigned int tlr_supported = 0;
433 tlr_supported = sas_tlr_supported(sdev);
434
435 if (tlr_supported) {
436 struct sas_end_device *rdev = sas_sdev_to_rdev(sdev);
437
438 rdev->tlr_enabled = 1;
439 }
440
441 return;
442}
443EXPORT_SYMBOL_GPL(sas_enable_tlr);
444
445unsigned int sas_is_tlr_enabled(struct scsi_device *sdev)
446{
447 struct sas_end_device *rdev = sas_sdev_to_rdev(sdev);
448 return rdev->tlr_enabled;
449}
450EXPORT_SYMBOL_GPL(sas_is_tlr_enabled);
361 451
362/* 452/*
363 * SAS Phy attributes 453 * SAS Phy attributes
@@ -666,7 +756,7 @@ EXPORT_SYMBOL(sas_phy_add);
666 * 756 *
667 * Note: 757 * Note:
668 * This function must only be called on a PHY that has not 758 * This function must only be called on a PHY that has not
669 * sucessfully been added using sas_phy_add(). 759 * successfully been added using sas_phy_add().
670 */ 760 */
671void sas_phy_free(struct sas_phy *phy) 761void sas_phy_free(struct sas_phy *phy)
672{ 762{
@@ -896,7 +986,7 @@ EXPORT_SYMBOL(sas_port_add);
896 * 986 *
897 * Note: 987 * Note:
898 * This function must only be called on a PORT that has not 988 * This function must only be called on a PORT that has not
899 * sucessfully been added using sas_port_add(). 989 * successfully been added using sas_port_add().
900 */ 990 */
901void sas_port_free(struct sas_port *port) 991void sas_port_free(struct sas_port *port)
902{ 992{
@@ -1146,15 +1236,10 @@ sas_rphy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8);
1146int sas_read_port_mode_page(struct scsi_device *sdev) 1236int sas_read_port_mode_page(struct scsi_device *sdev)
1147{ 1237{
1148 char *buffer = kzalloc(BUF_SIZE, GFP_KERNEL), *msdata; 1238 char *buffer = kzalloc(BUF_SIZE, GFP_KERNEL), *msdata;
1149 struct sas_rphy *rphy = target_to_rphy(sdev->sdev_target); 1239 struct sas_end_device *rdev = sas_sdev_to_rdev(sdev);
1150 struct sas_end_device *rdev;
1151 struct scsi_mode_data mode_data; 1240 struct scsi_mode_data mode_data;
1152 int res, error; 1241 int res, error;
1153 1242
1154 BUG_ON(rphy->identify.device_type != SAS_END_DEVICE);
1155
1156 rdev = rphy_to_end_device(rphy);
1157
1158 if (!buffer) 1243 if (!buffer)
1159 return -ENOMEM; 1244 return -ENOMEM;
1160 1245
@@ -1207,6 +1292,10 @@ sas_end_dev_simple_attr(I_T_nexus_loss_timeout, I_T_nexus_loss_timeout,
1207 "%d\n", int); 1292 "%d\n", int);
1208sas_end_dev_simple_attr(initiator_response_timeout, initiator_response_timeout, 1293sas_end_dev_simple_attr(initiator_response_timeout, initiator_response_timeout,
1209 "%d\n", int); 1294 "%d\n", int);
1295sas_end_dev_simple_attr(tlr_supported, tlr_supported,
1296 "%d\n", int);
1297sas_end_dev_simple_attr(tlr_enabled, tlr_enabled,
1298 "%d\n", int);
1210 1299
1211static DECLARE_TRANSPORT_CLASS(sas_expander_class, 1300static DECLARE_TRANSPORT_CLASS(sas_expander_class,
1212 "sas_expander", NULL, NULL, NULL); 1301 "sas_expander", NULL, NULL, NULL);
@@ -1476,7 +1565,7 @@ EXPORT_SYMBOL(sas_rphy_add);
1476 * 1565 *
1477 * Note: 1566 * Note:
1478 * This function must only be called on a remote 1567 * This function must only be called on a remote
1479 * PHY that has not sucessfully been added using 1568 * PHY that has not successfully been added using
1480 * sas_rphy_add() (or has been sas_rphy_remove()'d) 1569 * sas_rphy_add() (or has been sas_rphy_remove()'d)
1481 */ 1570 */
1482void sas_rphy_free(struct sas_rphy *rphy) 1571void sas_rphy_free(struct sas_rphy *rphy)
@@ -1733,6 +1822,8 @@ sas_attach_transport(struct sas_function_template *ft)
1733 SETUP_END_DEV_ATTRIBUTE(end_dev_ready_led_meaning); 1822 SETUP_END_DEV_ATTRIBUTE(end_dev_ready_led_meaning);
1734 SETUP_END_DEV_ATTRIBUTE(end_dev_I_T_nexus_loss_timeout); 1823 SETUP_END_DEV_ATTRIBUTE(end_dev_I_T_nexus_loss_timeout);
1735 SETUP_END_DEV_ATTRIBUTE(end_dev_initiator_response_timeout); 1824 SETUP_END_DEV_ATTRIBUTE(end_dev_initiator_response_timeout);
1825 SETUP_END_DEV_ATTRIBUTE(end_dev_tlr_supported);
1826 SETUP_END_DEV_ATTRIBUTE(end_dev_tlr_enabled);
1736 i->end_dev_attrs[count] = NULL; 1827 i->end_dev_attrs[count] = NULL;
1737 1828
1738 count = 0; 1829 count = 0;
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index c25bd9a34e02..8a172d4f4564 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -25,6 +25,7 @@
25#include <linux/blkdev.h> 25#include <linux/blkdev.h>
26#include <linux/mutex.h> 26#include <linux/mutex.h>
27#include <linux/sysfs.h> 27#include <linux/sysfs.h>
28#include <linux/slab.h>
28#include <scsi/scsi.h> 29#include <scsi/scsi.h>
29#include "scsi_priv.h" 30#include "scsi_priv.h"
30#include <scsi/scsi_device.h> 31#include <scsi/scsi_device.h>
diff --git a/drivers/scsi/scsicam.c b/drivers/scsi/scsicam.c
index 3f21bc65e8c6..6803b1e26ecc 100644
--- a/drivers/scsi/scsicam.c
+++ b/drivers/scsi/scsicam.c
@@ -11,6 +11,7 @@
11 */ 11 */
12 12
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/slab.h>
14#include <linux/fs.h> 15#include <linux/fs.h>
15#include <linux/genhd.h> 16#include <linux/genhd.h>
16#include <linux/kernel.h> 17#include <linux/kernel.h>
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 9093c7261f33..de6c60320f6f 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -49,6 +49,7 @@
49#include <linux/mutex.h> 49#include <linux/mutex.h>
50#include <linux/string_helpers.h> 50#include <linux/string_helpers.h>
51#include <linux/async.h> 51#include <linux/async.h>
52#include <linux/slab.h>
52#include <asm/uaccess.h> 53#include <asm/uaccess.h>
53#include <asm/unaligned.h> 54#include <asm/unaligned.h>
54 55
@@ -264,6 +265,15 @@ sd_show_app_tag_own(struct device *dev, struct device_attribute *attr,
264 return snprintf(buf, 20, "%u\n", sdkp->ATO); 265 return snprintf(buf, 20, "%u\n", sdkp->ATO);
265} 266}
266 267
268static ssize_t
269sd_show_thin_provisioning(struct device *dev, struct device_attribute *attr,
270 char *buf)
271{
272 struct scsi_disk *sdkp = to_scsi_disk(dev);
273
274 return snprintf(buf, 20, "%u\n", sdkp->thin_provisioning);
275}
276
267static struct device_attribute sd_disk_attrs[] = { 277static struct device_attribute sd_disk_attrs[] = {
268 __ATTR(cache_type, S_IRUGO|S_IWUSR, sd_show_cache_type, 278 __ATTR(cache_type, S_IRUGO|S_IWUSR, sd_show_cache_type,
269 sd_store_cache_type), 279 sd_store_cache_type),
@@ -274,6 +284,7 @@ static struct device_attribute sd_disk_attrs[] = {
274 sd_store_manage_start_stop), 284 sd_store_manage_start_stop),
275 __ATTR(protection_type, S_IRUGO, sd_show_protection_type, NULL), 285 __ATTR(protection_type, S_IRUGO, sd_show_protection_type, NULL),
276 __ATTR(app_tag_own, S_IRUGO, sd_show_app_tag_own, NULL), 286 __ATTR(app_tag_own, S_IRUGO, sd_show_app_tag_own, NULL),
287 __ATTR(thin_provisioning, S_IRUGO, sd_show_thin_provisioning, NULL),
277 __ATTR_NULL, 288 __ATTR_NULL,
278}; 289};
279 290
@@ -399,6 +410,57 @@ static void sd_prot_op(struct scsi_cmnd *scmd, unsigned int dif)
399} 410}
400 411
401/** 412/**
413 * sd_prepare_discard - unmap blocks on thinly provisioned device
414 * @rq: Request to prepare
415 *
 416 * Will issue either UNMAP or WRITE SAME(16) depending on the preference
 417 * indicated by the target device.
418 **/
419static int sd_prepare_discard(struct request *rq)
420{
421 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
422 struct bio *bio = rq->bio;
423 sector_t sector = bio->bi_sector;
424 unsigned int num = bio_sectors(bio);
425
426 if (sdkp->device->sector_size == 4096) {
427 sector >>= 3;
428 num >>= 3;
429 }
430
431 rq->cmd_type = REQ_TYPE_BLOCK_PC;
432 rq->timeout = SD_TIMEOUT;
433
434 memset(rq->cmd, 0, rq->cmd_len);
435
436 if (sdkp->unmap) {
437 char *buf = kmap_atomic(bio_page(bio), KM_USER0);
438
439 rq->cmd[0] = UNMAP;
440 rq->cmd[8] = 24;
441 rq->cmd_len = 10;
442
443 /* Ensure that data length matches payload */
444 rq->__data_len = bio->bi_size = bio->bi_io_vec->bv_len = 24;
445
446 put_unaligned_be16(6 + 16, &buf[0]);
447 put_unaligned_be16(16, &buf[2]);
448 put_unaligned_be64(sector, &buf[8]);
449 put_unaligned_be32(num, &buf[16]);
450
451 kunmap_atomic(buf, KM_USER0);
452 } else {
453 rq->cmd[0] = WRITE_SAME_16;
454 rq->cmd[1] = 0x8; /* UNMAP */
455 put_unaligned_be64(sector, &rq->cmd[2]);
456 put_unaligned_be32(num, &rq->cmd[10]);
457 rq->cmd_len = 16;
458 }
459
460 return BLKPREP_OK;
461}
462
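The UNMAP branch above packs a 6-byte parameter-list header plus one 16-byte block descriptor into the bio's first page. A self-contained user-space sketch of that 24-byte payload, with stand-ins for the kernel's put_unaligned_be*() helpers:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Big-endian store helpers analogous to put_unaligned_be*(). */
static void put_be16(uint16_t v, uint8_t *p) { p[0] = v >> 8; p[1] = v; }
static void put_be32(uint32_t v, uint8_t *p)
{
    for (int i = 0; i < 4; i++)
        p[i] = v >> (24 - 8 * i);
}
static void put_be64(uint64_t v, uint8_t *p)
{
    for (int i = 0; i < 8; i++)
        p[i] = v >> (56 - 8 * i);
}

/* Build the 24-byte UNMAP parameter list sd_prepare_discard() writes:
 * a 6-byte header followed by one 16-byte block descriptor. */
static void build_unmap_payload(uint8_t buf[24], uint64_t lba, uint32_t nblocks)
{
    memset(buf, 0, 24);
    put_be16(6 + 16, &buf[0]);   /* UNMAP data length (bytes 2..23) */
    put_be16(16, &buf[2]);       /* block descriptor data length */
    put_be64(lba, &buf[8]);      /* first LBA of the descriptor */
    put_be32(nblocks, &buf[16]); /* number of blocks to unmap */
}

int main(void)
{
    uint8_t buf[24];

    build_unmap_payload(buf, 123456, 8);
    for (int i = 0; i < 24; i++)
        printf("%02x%c", buf[i], (i % 8 == 7) ? '\n' : ' ');
    return 0;
}

Byte 8 of the CDB then carries the parameter-list length (24), matching rq->cmd[8] above.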
463/**
402 * sd_init_command - build a scsi (read or write) command from 464 * sd_init_command - build a scsi (read or write) command from
403 * information in the request structure. 465 * information in the request structure.
404 * @SCpnt: pointer to mid-level's per scsi command structure that 466 * @SCpnt: pointer to mid-level's per scsi command structure that
@@ -418,6 +480,13 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
418 int ret, host_dif; 480 int ret, host_dif;
419 unsigned char protect; 481 unsigned char protect;
420 482
483 /*
 484 * Discard requests come in as REQ_TYPE_FS, but we turn them into
485 * block PC requests to make life easier.
486 */
487 if (blk_discard_rq(rq))
488 ret = sd_prepare_discard(rq);
489
421 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { 490 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
422 ret = scsi_setup_blk_pc_cmnd(sdp, rq); 491 ret = scsi_setup_blk_pc_cmnd(sdp, rq);
423 goto out; 492 goto out;
@@ -971,6 +1040,7 @@ static void sd_prepare_flush(struct request_queue *q, struct request *rq)
971{ 1040{
972 rq->cmd_type = REQ_TYPE_BLOCK_PC; 1041 rq->cmd_type = REQ_TYPE_BLOCK_PC;
973 rq->timeout = SD_TIMEOUT; 1042 rq->timeout = SD_TIMEOUT;
1043 rq->retries = SD_MAX_RETRIES;
974 rq->cmd[0] = SYNCHRONIZE_CACHE; 1044 rq->cmd[0] = SYNCHRONIZE_CACHE;
975 rq->cmd_len = 10; 1045 rq->cmd_len = 10;
976} 1046}
@@ -1128,19 +1198,10 @@ static int sd_done(struct scsi_cmnd *SCpnt)
1128 SCpnt->result = 0; 1198 SCpnt->result = 0;
1129 memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 1199 memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1130 break; 1200 break;
1131 case ABORTED_COMMAND: 1201 case ABORTED_COMMAND: /* DIF: Target detected corruption */
1132 if (sshdr.asc == 0x10) { /* DIF: Disk detected corruption */ 1202 case ILLEGAL_REQUEST: /* DIX: Host detected corruption */
1133 scsi_print_result(SCpnt); 1203 if (sshdr.asc == 0x10)
1134 scsi_print_sense("sd", SCpnt);
1135 good_bytes = sd_completed_bytes(SCpnt); 1204 good_bytes = sd_completed_bytes(SCpnt);
1136 }
1137 break;
1138 case ILLEGAL_REQUEST:
1139 if (sshdr.asc == 0x10) { /* DIX: HBA detected corruption */
1140 scsi_print_result(SCpnt);
1141 scsi_print_sense("sd", SCpnt);
1142 good_bytes = sd_completed_bytes(SCpnt);
1143 }
1144 break; 1205 break;
1145 default: 1206 default:
1146 break; 1207 break;
@@ -1150,8 +1211,19 @@ static int sd_done(struct scsi_cmnd *SCpnt)
1150 sd_dif_complete(SCpnt, good_bytes); 1211 sd_dif_complete(SCpnt, good_bytes);
1151 1212
1152 if (scsi_host_dif_capable(sdkp->device->host, sdkp->protection_type) 1213 if (scsi_host_dif_capable(sdkp->device->host, sdkp->protection_type)
1153 == SD_DIF_TYPE2_PROTECTION && SCpnt->cmnd != SCpnt->request->cmd) 1214 == SD_DIF_TYPE2_PROTECTION && SCpnt->cmnd != SCpnt->request->cmd) {
1215
1216 /* We have to print a failed command here as the
1217 * extended CDB gets freed before scsi_io_completion()
1218 * is called.
1219 */
1220 if (result)
1221 scsi_print_command(SCpnt);
1222
1154 mempool_free(SCpnt->cmnd, sd_cdb_pool); 1223 mempool_free(SCpnt->cmnd, sd_cdb_pool);
1224 SCpnt->cmnd = NULL;
1225 SCpnt->cmd_len = 0;
1226 }
1155 1227
1156 return good_bytes; 1228 return good_bytes;
1157} 1229}
@@ -1432,6 +1504,19 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
1432 sd_printk(KERN_NOTICE, sdkp, 1504 sd_printk(KERN_NOTICE, sdkp,
1433 "physical block alignment offset: %u\n", alignment); 1505 "physical block alignment offset: %u\n", alignment);
1434 1506
1507 if (buffer[14] & 0x80) { /* TPE */
1508 struct request_queue *q = sdp->request_queue;
1509
1510 sdkp->thin_provisioning = 1;
1511 q->limits.discard_granularity = sdkp->hw_sector_size;
1512 q->limits.max_discard_sectors = 0xffffffff;
1513
1514 if (buffer[14] & 0x40) /* TPRZ */
1515 q->limits.discard_zeroes_data = 1;
1516
1517 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
1518 }
1519
1435 sdkp->capacity = lba + 1; 1520 sdkp->capacity = lba + 1;
1436 return sector_size; 1521 return sector_size;
1437} 1522}
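The TPE and TPRZ bits tested above live in byte 14 of the READ CAPACITY(16) response. A self-contained sketch of the same decode:

#include <stdio.h>

struct tp_info { int enabled; int zeroes; };

/* Byte 14 carries TPE (0x80, thin provisioning enabled) and TPRZ
 * (0x40, unmapped blocks read back as zeroes), exactly as tested in
 * read_capacity_16() above. */
static struct tp_info decode_tp(const unsigned char *rc16)
{
    struct tp_info tp;

    tp.enabled = !!(rc16[14] & 0x80);   /* TPE */
    tp.zeroes  = !!(rc16[14] & 0x40);   /* TPRZ */
    return tp;
}

int main(void)
{
    unsigned char rc16[32] = { 0 };
    struct tp_info tp;

    rc16[14] = 0x80 | 0x40;
    tp = decode_tp(rc16);
    printf("TPE=%d TPRZ=%d\n", tp.enabled, tp.zeroes);
    return 0;
}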
@@ -1863,20 +1948,47 @@ void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
1863 */ 1948 */
1864static void sd_read_block_limits(struct scsi_disk *sdkp) 1949static void sd_read_block_limits(struct scsi_disk *sdkp)
1865{ 1950{
1951 struct request_queue *q = sdkp->disk->queue;
1866 unsigned int sector_sz = sdkp->device->sector_size; 1952 unsigned int sector_sz = sdkp->device->sector_size;
1867 char *buffer; 1953 const int vpd_len = 64;
1954 unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL);
1868 1955
1869 /* Block Limits VPD */ 1956 if (!buffer ||
1870 buffer = scsi_get_vpd_page(sdkp->device, 0xb0); 1957 /* Block Limits VPD */
1871 1958 scsi_get_vpd_page(sdkp->device, 0xb0, buffer, vpd_len))
1872 if (buffer == NULL) 1959 goto out;
1873 return;
1874 1960
1875 blk_queue_io_min(sdkp->disk->queue, 1961 blk_queue_io_min(sdkp->disk->queue,
1876 get_unaligned_be16(&buffer[6]) * sector_sz); 1962 get_unaligned_be16(&buffer[6]) * sector_sz);
1877 blk_queue_io_opt(sdkp->disk->queue, 1963 blk_queue_io_opt(sdkp->disk->queue,
1878 get_unaligned_be32(&buffer[12]) * sector_sz); 1964 get_unaligned_be32(&buffer[12]) * sector_sz);
1879 1965
1966 /* Thin provisioning enabled and page length indicates TP support */
1967 if (sdkp->thin_provisioning && buffer[3] == 0x3c) {
1968 unsigned int lba_count, desc_count, granularity;
1969
1970 lba_count = get_unaligned_be32(&buffer[20]);
1971 desc_count = get_unaligned_be32(&buffer[24]);
1972
1973 if (lba_count) {
1974 q->limits.max_discard_sectors =
1975 lba_count * sector_sz >> 9;
1976
1977 if (desc_count)
1978 sdkp->unmap = 1;
1979 }
1980
1981 granularity = get_unaligned_be32(&buffer[28]);
1982
1983 if (granularity)
1984 q->limits.discard_granularity = granularity * sector_sz;
1985
1986 if (buffer[32] & 0x80)
1987 q->limits.discard_alignment =
1988 get_unaligned_be32(&buffer[32]) & ~(1 << 31);
1989 }
1990
1991 out:
1880 kfree(buffer); 1992 kfree(buffer);
1881} 1993}
1882 1994
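The offsets used above come from the Block Limits VPD page (0xb0): maximum unmap LBA count at byte 20, maximum descriptor count at 24, optimal granularity at 28, and the alignment field at 32 with its top bit acting as a validity flag. A self-contained sketch of the same parse:

#include <stdio.h>
#include <stdint.h>

static uint32_t get_be32(const uint8_t *p)
{
    return ((uint32_t)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
}

/* Pull the discard-related fields out of a Block Limits VPD buffer,
 * mirroring the offsets sd_read_block_limits() uses. */
static void dump_discard_limits(const uint8_t *vpd)
{
    if (vpd[3] != 0x3c) {           /* page length must indicate TP fields */
        puts("no thin-provisioning fields");
        return;
    }
    printf("max unmap LBA count:   %u\n", get_be32(&vpd[20]));
    printf("max unmap descriptors: %u\n", get_be32(&vpd[24]));
    printf("unmap granularity:     %u\n", get_be32(&vpd[28]));
    if (vpd[32] & 0x80)             /* alignment-valid bit */
        printf("unmap alignment:       %u\n",
               get_be32(&vpd[32]) & ~(1u << 31));
}

int main(void)
{
    uint8_t vpd[64] = { 0 };

    vpd[3] = 0x3c;
    vpd[23] = 0x08;                 /* max unmap LBA count = 8 */
    dump_discard_limits(vpd);
    return 0;
}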
@@ -1886,20 +1998,23 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
1886 */ 1998 */
1887static void sd_read_block_characteristics(struct scsi_disk *sdkp) 1999static void sd_read_block_characteristics(struct scsi_disk *sdkp)
1888{ 2000{
1889 char *buffer; 2001 unsigned char *buffer;
1890 u16 rot; 2002 u16 rot;
2003 const int vpd_len = 64;
1891 2004
1892 /* Block Device Characteristics VPD */ 2005 buffer = kmalloc(vpd_len, GFP_KERNEL);
1893 buffer = scsi_get_vpd_page(sdkp->device, 0xb1);
1894 2006
1895 if (buffer == NULL) 2007 if (!buffer ||
1896 return; 2008 /* Block Device Characteristics VPD */
2009 scsi_get_vpd_page(sdkp->device, 0xb1, buffer, vpd_len))
2010 goto out;
1897 2011
1898 rot = get_unaligned_be16(&buffer[4]); 2012 rot = get_unaligned_be16(&buffer[4]);
1899 2013
1900 if (rot == 1) 2014 if (rot == 1)
1901 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, sdkp->disk->queue); 2015 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, sdkp->disk->queue);
1902 2016
2017 out:
1903 kfree(buffer); 2018 kfree(buffer);
1904} 2019}
1905 2020
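The rotation-rate decode above is two big-endian bytes with one magic value. A self-contained sketch:

#include <stdio.h>
#include <stdint.h>

/* Medium rotation rate lives in bytes 4-5 of the Block Device
 * Characteristics VPD (0xb1); the value 1 means non-rotational,
 * which is what flips QUEUE_FLAG_NONROT above. */
static int is_non_rotational(const uint8_t *vpd)
{
    uint16_t rot = ((uint16_t)vpd[4] << 8) | vpd[5];

    return rot == 1;
}

int main(void)
{
    uint8_t vpd[64] = { 0 };

    vpd[5] = 1;                     /* report an SSD */
    printf("non-rotational: %d\n", is_non_rotational(vpd));
    return 0;
}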
@@ -1998,7 +2113,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
1998 * which is followed by sdaaa. 2113 * which is followed by sdaaa.
1999 * 2114 *
2000 * This is basically 26 base counting with one extra 'nil' entry 2115 * This is basically 26 base counting with one extra 'nil' entry
2001 * at the beggining from the second digit on and can be 2116 * at the beginning from the second digit on and can be
2002 * determined using similar method as 26 base conversion with the 2117 * determined using similar method as 26 base conversion with the
2003 * index shifted -1 after each digit is computed. 2118 * index shifted -1 after each digit is computed.
2004 * 2119 *
@@ -2072,7 +2187,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
2072 blk_queue_prep_rq(sdp->request_queue, sd_prep_fn); 2187 blk_queue_prep_rq(sdp->request_queue, sd_prep_fn);
2073 2188
2074 gd->driverfs_dev = &sdp->sdev_gendev; 2189 gd->driverfs_dev = &sdp->sdev_gendev;
2075 gd->flags = GENHD_FL_EXT_DEVT | GENHD_FL_DRIVERFS; 2190 gd->flags = GENHD_FL_EXT_DEVT;
2076 if (sdp->removable) 2191 if (sdp->removable)
2077 gd->flags |= GENHD_FL_REMOVABLE; 2192 gd->flags |= GENHD_FL_REMOVABLE;
2078 2193
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index e374804d26fb..43d3caf268ef 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -60,6 +60,8 @@ struct scsi_disk {
60 unsigned RCD : 1; /* state of disk RCD bit, unused */ 60 unsigned RCD : 1; /* state of disk RCD bit, unused */
61 unsigned DPOFUA : 1; /* state of disk DPOFUA bit */ 61 unsigned DPOFUA : 1; /* state of disk DPOFUA bit */
62 unsigned first_scan : 1; 62 unsigned first_scan : 1;
63 unsigned thin_provisioning : 1;
64 unsigned unmap : 1;
63}; 65};
64#define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev) 66#define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev)
65 67
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 55b034b72708..7f5a6a86f820 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -21,6 +21,7 @@
21**----------------------------------------------------------------------------- 21**-----------------------------------------------------------------------------
22*/ 22*/
23 23
24#include <linux/slab.h>
24#include <linux/module.h> 25#include <linux/module.h>
25#include <linux/kernel.h> 26#include <linux/kernel.h>
26#include <linux/enclosure.h> 27#include <linux/enclosure.h>
@@ -448,13 +449,17 @@ static void ses_match_to_enclosure(struct enclosure_device *edev,
448 .addr = 0, 449 .addr = 0,
449 }; 450 };
450 451
451 buf = scsi_get_vpd_page(sdev, 0x83); 452 buf = kmalloc(INIT_ALLOC_SIZE, GFP_KERNEL);
452 if (!buf) 453 if (!buf || scsi_get_vpd_page(sdev, 0x83, buf, INIT_ALLOC_SIZE))
453 return; 454 goto free;
454 455
455 ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0); 456 ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0);
456 457
457 vpd_len = ((buf[2] << 8) | buf[3]) + 4; 458 vpd_len = ((buf[2] << 8) | buf[3]) + 4;
459 kfree(buf);
460 buf = kmalloc(vpd_len, GFP_KERNEL);
 461 if (!buf || scsi_get_vpd_page(sdev, 0x83, buf, vpd_len))
462 goto free;
458 463
459 desc = buf + 4; 464 desc = buf + 4;
460 while (desc < buf + vpd_len) { 465 while (desc < buf + vpd_len) {
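The reworked ses code replaces the old auto-sizing scsi_get_vpd_page() with a two-pass fetch: read a small prefix, recover the real page length from bytes 2-3 plus the 4-byte header, then reallocate and fetch the whole page. A user-space sketch of the pattern, where get_vpd() is a made-up stand-in for scsi_get_vpd_page():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative stand-in: serves a canned 12-byte VPD 0x83 page. */
static int get_vpd(unsigned char *buf, size_t len)
{
    static const unsigned char page[] = { 0, 0x83, 0x00, 0x08,
                                          1, 2, 3, 4, 5, 6, 7, 8 };

    memcpy(buf, page, len < sizeof(page) ? len : sizeof(page));
    return 0;
}

int main(void)
{
    size_t init = 4, full = 0;
    unsigned char *buf = malloc(init);

    if (!buf || get_vpd(buf, init))
        goto out;
    full = ((buf[2] << 8) | buf[3]) + 4;    /* real page length */
    free(buf);
    buf = malloc(full);
    if (!buf || get_vpd(buf, full))
        goto out;
    printf("fetched %zu-byte VPD page\n", full);
out:
    free(buf);
    return 0;
}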
@@ -591,8 +596,6 @@ static int ses_intf_add(struct device *cdev,
591 ses_dev->page10_len = len; 596 ses_dev->page10_len = len;
592 buf = NULL; 597 buf = NULL;
593 } 598 }
594 kfree(hdr_buf);
595
596 scomp = kzalloc(sizeof(struct ses_component) * components, GFP_KERNEL); 599 scomp = kzalloc(sizeof(struct ses_component) * components, GFP_KERNEL);
597 if (!scomp) 600 if (!scomp)
598 goto err_free; 601 goto err_free;
@@ -604,6 +607,8 @@ static int ses_intf_add(struct device *cdev,
604 goto err_free; 607 goto err_free;
605 } 608 }
606 609
610 kfree(hdr_buf);
611
607 edev->scratch = ses_dev; 612 edev->scratch = ses_dev;
608 for (i = 0; i < components; i++) 613 for (i = 0; i < components; i++)
609 edev->component[i].scratch = scomp + i; 614 edev->component[i].scratch = scomp + i;
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 040f751809ea..dee1c96288d4 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -38,6 +38,7 @@ static int sg_version_num = 30534; /* 2 digits for each component */
38#include <linux/errno.h> 38#include <linux/errno.h>
39#include <linux/mtio.h> 39#include <linux/mtio.h>
40#include <linux/ioctl.h> 40#include <linux/ioctl.h>
41#include <linux/slab.h>
41#include <linux/fcntl.h> 42#include <linux/fcntl.h>
42#include <linux/init.h> 43#include <linux/init.h>
43#include <linux/poll.h> 44#include <linux/poll.h>
@@ -287,8 +288,7 @@ sg_open(struct inode *inode, struct file *filp)
287 if (list_empty(&sdp->sfds)) { /* no existing opens on this device */ 288 if (list_empty(&sdp->sfds)) { /* no existing opens on this device */
288 sdp->sgdebug = 0; 289 sdp->sgdebug = 0;
289 q = sdp->device->request_queue; 290 q = sdp->device->request_queue;
290 sdp->sg_tablesize = min(queue_max_hw_segments(q), 291 sdp->sg_tablesize = queue_max_segments(q);
291 queue_max_phys_segments(q));
292 } 292 }
293 if ((sfp = sg_add_sfp(sdp, dev))) 293 if ((sfp = sg_add_sfp(sdp, dev)))
294 filp->private_data = sfp; 294 filp->private_data = sfp;
@@ -1376,8 +1376,7 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
1376 sdp->device = scsidp; 1376 sdp->device = scsidp;
1377 INIT_LIST_HEAD(&sdp->sfds); 1377 INIT_LIST_HEAD(&sdp->sfds);
1378 init_waitqueue_head(&sdp->o_excl_wait); 1378 init_waitqueue_head(&sdp->o_excl_wait);
1379 sdp->sg_tablesize = min(queue_max_hw_segments(q), 1379 sdp->sg_tablesize = queue_max_segments(q);
1380 queue_max_phys_segments(q));
1381 sdp->index = k; 1380 sdp->index = k;
1382 kref_init(&sdp->d_ref); 1381 kref_init(&sdp->d_ref);
1383 1382
diff --git a/drivers/scsi/sgiwd93.c b/drivers/scsi/sgiwd93.c
index 0807b260268b..fef0e3c75b16 100644
--- a/drivers/scsi/sgiwd93.c
+++ b/drivers/scsi/sgiwd93.c
@@ -226,7 +226,7 @@ static struct scsi_host_template sgiwd93_template = {
226 .use_clustering = DISABLE_CLUSTERING, 226 .use_clustering = DISABLE_CLUSTERING,
227}; 227};
228 228
229static int __init sgiwd93_probe(struct platform_device *pdev) 229static int __devinit sgiwd93_probe(struct platform_device *pdev)
230{ 230{
231 struct sgiwd93_platform_data *pd = pdev->dev.platform_data; 231 struct sgiwd93_platform_data *pd = pdev->dev.platform_data;
232 unsigned char *wdregs = pd->wdregs; 232 unsigned char *wdregs = pd->wdregs;
diff --git a/drivers/scsi/sim710.c b/drivers/scsi/sim710.c
index 6dc8b846c112..8ac6ce792b69 100644
--- a/drivers/scsi/sim710.c
+++ b/drivers/scsi/sim710.c
@@ -27,6 +27,7 @@
27 */ 27 */
28 28
29#include <linux/module.h> 29#include <linux/module.h>
30#include <linux/slab.h>
30 31
31#include <linux/blkdev.h> 32#include <linux/blkdev.h>
32#include <linux/device.h> 33#include <linux/device.h>
diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c
index 37b3359e863e..9acc2b2a3601 100644
--- a/drivers/scsi/sni_53c710.c
+++ b/drivers/scsi/sni_53c710.c
@@ -30,6 +30,7 @@
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/init.h> 31#include <linux/init.h>
32#include <linux/types.h> 32#include <linux/types.h>
33#include <linux/slab.h>
33#include <linux/stat.h> 34#include <linux/stat.h>
34#include <linux/mm.h> 35#include <linux/mm.h>
35#include <linux/blkdev.h> 36#include <linux/blkdev.h>
@@ -64,7 +65,7 @@ static struct scsi_host_template snirm710_template = {
64 .module = THIS_MODULE, 65 .module = THIS_MODULE,
65}; 66};
66 67
67static int __init snirm710_probe(struct platform_device *dev) 68static int __devinit snirm710_probe(struct platform_device *dev)
68{ 69{
69 unsigned long base; 70 unsigned long base;
70 struct NCR_700_Host_Parameters *hostdata; 71 struct NCR_700_Host_Parameters *hostdata;
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index d6f340f48a3b..0a90abc7f140 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -44,6 +44,7 @@
44#include <linux/init.h> 44#include <linux/init.h>
45#include <linux/blkdev.h> 45#include <linux/blkdev.h>
46#include <linux/mutex.h> 46#include <linux/mutex.h>
47#include <linux/slab.h>
47#include <asm/uaccess.h> 48#include <asm/uaccess.h>
48 49
49#include <scsi/scsi.h> 50#include <scsi/scsi.h>
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index 291236e6e435..cbb38c5197fa 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -7,6 +7,7 @@
7#include <linux/blkpg.h> 7#include <linux/blkpg.h>
8#include <linux/cdrom.h> 8#include <linux/cdrom.h>
9#include <linux/delay.h> 9#include <linux/delay.h>
10#include <linux/slab.h>
10#include <asm/io.h> 11#include <asm/io.h>
11#include <asm/uaccess.h> 12#include <asm/uaccess.h>
12 13
diff --git a/drivers/scsi/sr_vendor.c b/drivers/scsi/sr_vendor.c
index 4ad3e017213f..92cc2efb25d7 100644
--- a/drivers/scsi/sr_vendor.c
+++ b/drivers/scsi/sr_vendor.c
@@ -39,6 +39,7 @@
39#include <linux/string.h> 39#include <linux/string.h>
40#include <linux/bcd.h> 40#include <linux/bcd.h>
41#include <linux/blkdev.h> 41#include <linux/blkdev.h>
42#include <linux/slab.h>
42 43
43#include <scsi/scsi.h> 44#include <scsi/scsi.h>
44#include <scsi/scsi_cmnd.h> 45#include <scsi/scsi_cmnd.h>
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 12d58a7ed6bc..3ea1a713ef25 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -27,6 +27,7 @@ static const char *verstr = "20081215";
27#include <linux/mm.h> 27#include <linux/mm.h>
28#include <linux/init.h> 28#include <linux/init.h>
29#include <linux/string.h> 29#include <linux/string.h>
30#include <linux/slab.h>
30#include <linux/errno.h> 31#include <linux/errno.h>
31#include <linux/mtio.h> 32#include <linux/mtio.h>
32#include <linux/cdrom.h> 33#include <linux/cdrom.h>
@@ -552,13 +553,15 @@ st_do_scsi(struct st_request * SRpnt, struct scsi_tape * STp, unsigned char *cmd
552 SRpnt->waiting = waiting; 553 SRpnt->waiting = waiting;
553 554
554 if (STp->buffer->do_dio) { 555 if (STp->buffer->do_dio) {
556 mdata->page_order = 0;
555 mdata->nr_entries = STp->buffer->sg_segs; 557 mdata->nr_entries = STp->buffer->sg_segs;
556 mdata->pages = STp->buffer->mapped_pages; 558 mdata->pages = STp->buffer->mapped_pages;
557 } else { 559 } else {
560 mdata->page_order = STp->buffer->reserved_page_order;
558 mdata->nr_entries = 561 mdata->nr_entries =
559 DIV_ROUND_UP(bytes, PAGE_SIZE << mdata->page_order); 562 DIV_ROUND_UP(bytes, PAGE_SIZE << mdata->page_order);
560 STp->buffer->map_data.pages = STp->buffer->reserved_pages; 563 mdata->pages = STp->buffer->reserved_pages;
561 STp->buffer->map_data.offset = 0; 564 mdata->offset = 0;
562 } 565 }
563 566
564 memcpy(SRpnt->cmd, cmd, sizeof(SRpnt->cmd)); 567 memcpy(SRpnt->cmd, cmd, sizeof(SRpnt->cmd));
@@ -2280,7 +2283,8 @@ static int st_set_options(struct scsi_tape *STp, long options)
2280 } else if (code == MT_ST_SET_CLN) { 2283 } else if (code == MT_ST_SET_CLN) {
2281 value = (options & ~MT_ST_OPTIONS) & 0xff; 2284 value = (options & ~MT_ST_OPTIONS) & 0xff;
2282 if (value != 0 && 2285 if (value != 0 &&
2283 value < EXTENDED_SENSE_START && value >= SCSI_SENSE_BUFFERSIZE) 2286 (value < EXTENDED_SENSE_START ||
2287 value >= SCSI_SENSE_BUFFERSIZE))
2284 return (-EINVAL); 2288 return (-EINVAL);
2285 STp->cln_mode = value; 2289 STp->cln_mode = value;
2286 STp->cln_sense_mask = (options >> 8) & 0xff; 2290 STp->cln_sense_mask = (options >> 8) & 0xff;
@@ -3718,7 +3722,7 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm
3718 priority |= __GFP_ZERO; 3722 priority |= __GFP_ZERO;
3719 3723
3720 if (STbuffer->frp_segs) { 3724 if (STbuffer->frp_segs) {
3721 order = STbuffer->map_data.page_order; 3725 order = STbuffer->reserved_page_order;
3722 b_size = PAGE_SIZE << order; 3726 b_size = PAGE_SIZE << order;
3723 } else { 3727 } else {
3724 for (b_size = PAGE_SIZE, order = 0; 3728 for (b_size = PAGE_SIZE, order = 0;
@@ -3751,7 +3755,7 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm
3751 segs++; 3755 segs++;
3752 } 3756 }
3753 STbuffer->b_data = page_address(STbuffer->reserved_pages[0]); 3757 STbuffer->b_data = page_address(STbuffer->reserved_pages[0]);
3754 STbuffer->map_data.page_order = order; 3758 STbuffer->reserved_page_order = order;
3755 3759
3756 return 1; 3760 return 1;
3757} 3761}
@@ -3764,7 +3768,7 @@ static void clear_buffer(struct st_buffer * st_bp)
3764 3768
3765 for (i=0; i < st_bp->frp_segs; i++) 3769 for (i=0; i < st_bp->frp_segs; i++)
3766 memset(page_address(st_bp->reserved_pages[i]), 0, 3770 memset(page_address(st_bp->reserved_pages[i]), 0,
3767 PAGE_SIZE << st_bp->map_data.page_order); 3771 PAGE_SIZE << st_bp->reserved_page_order);
3768 st_bp->cleared = 1; 3772 st_bp->cleared = 1;
3769} 3773}
3770 3774
@@ -3772,7 +3776,7 @@ static void clear_buffer(struct st_buffer * st_bp)
3772/* Release the extra buffer */ 3776/* Release the extra buffer */
3773static void normalize_buffer(struct st_buffer * STbuffer) 3777static void normalize_buffer(struct st_buffer * STbuffer)
3774{ 3778{
3775 int i, order = STbuffer->map_data.page_order; 3779 int i, order = STbuffer->reserved_page_order;
3776 3780
3777 for (i = 0; i < STbuffer->frp_segs; i++) { 3781 for (i = 0; i < STbuffer->frp_segs; i++) {
3778 __free_pages(STbuffer->reserved_pages[i], order); 3782 __free_pages(STbuffer->reserved_pages[i], order);
@@ -3780,7 +3784,7 @@ static void normalize_buffer(struct st_buffer * STbuffer)
3780 } 3784 }
3781 STbuffer->frp_segs = 0; 3785 STbuffer->frp_segs = 0;
3782 STbuffer->sg_segs = 0; 3786 STbuffer->sg_segs = 0;
3783 STbuffer->map_data.page_order = 0; 3787 STbuffer->reserved_page_order = 0;
3784 STbuffer->map_data.offset = 0; 3788 STbuffer->map_data.offset = 0;
3785} 3789}
3786 3790
@@ -3790,7 +3794,7 @@ static void normalize_buffer(struct st_buffer * STbuffer)
3790static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, int do_count) 3794static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, int do_count)
3791{ 3795{
3792 int i, cnt, res, offset; 3796 int i, cnt, res, offset;
3793 int length = PAGE_SIZE << st_bp->map_data.page_order; 3797 int length = PAGE_SIZE << st_bp->reserved_page_order;
3794 3798
3795 for (i = 0, offset = st_bp->buffer_bytes; 3799 for (i = 0, offset = st_bp->buffer_bytes;
3796 i < st_bp->frp_segs && offset >= length; i++) 3800 i < st_bp->frp_segs && offset >= length; i++)
@@ -3822,7 +3826,7 @@ static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, in
3822static int from_buffer(struct st_buffer * st_bp, char __user *ubp, int do_count) 3826static int from_buffer(struct st_buffer * st_bp, char __user *ubp, int do_count)
3823{ 3827{
3824 int i, cnt, res, offset; 3828 int i, cnt, res, offset;
3825 int length = PAGE_SIZE << st_bp->map_data.page_order; 3829 int length = PAGE_SIZE << st_bp->reserved_page_order;
3826 3830
3827 for (i = 0, offset = st_bp->read_pointer; 3831 for (i = 0, offset = st_bp->read_pointer;
3828 i < st_bp->frp_segs && offset >= length; i++) 3832 i < st_bp->frp_segs && offset >= length; i++)
@@ -3855,7 +3859,7 @@ static void move_buffer_data(struct st_buffer * st_bp, int offset)
3855{ 3859{
3856 int src_seg, dst_seg, src_offset = 0, dst_offset; 3860 int src_seg, dst_seg, src_offset = 0, dst_offset;
3857 int count, total; 3861 int count, total;
3858 int length = PAGE_SIZE << st_bp->map_data.page_order; 3862 int length = PAGE_SIZE << st_bp->reserved_page_order;
3859 3863
3860 if (offset == 0) 3864 if (offset == 0)
3861 return; 3865 return;
@@ -3980,8 +3984,7 @@ static int st_probe(struct device *dev)
3980 return -ENODEV; 3984 return -ENODEV;
3981 } 3985 }
3982 3986
3983 i = min(queue_max_hw_segments(SDp->request_queue), 3987 i = queue_max_segments(SDp->request_queue);
3984 queue_max_phys_segments(SDp->request_queue));
3985 if (st_max_sg_segs < i) 3988 if (st_max_sg_segs < i)
3986 i = st_max_sg_segs; 3989 i = st_max_sg_segs;
3987 buffer = new_tape_buffer((SDp->host)->unchecked_isa_dma, i); 3990 buffer = new_tape_buffer((SDp->host)->unchecked_isa_dma, i);
@@ -4577,7 +4580,6 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
4577 } 4580 }
4578 4581
4579 mdata->offset = uaddr & ~PAGE_MASK; 4582 mdata->offset = uaddr & ~PAGE_MASK;
4580 mdata->page_order = 0;
4581 STbp->mapped_pages = pages; 4583 STbp->mapped_pages = pages;
4582 4584
4583 return nr_pages; 4585 return nr_pages;
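Throughout the st changes, every reserved segment is sized PAGE_SIZE << reserved_page_order, and the buffer walkers (append_to_buffer, from_buffer, move_buffer_data) turn a linear offset into a (segment, offset) pair against that length. A self-contained sketch of the arithmetic (PAGE_SIZE hard-coded here purely for illustration):

#include <stdio.h>

#define PAGE_SIZE 4096u

static void locate(unsigned int order, unsigned long off,
                   unsigned int *seg, unsigned long *seg_off)
{
    unsigned long length = (unsigned long)PAGE_SIZE << order;

    *seg = off / length;        /* which reserved page group */
    *seg_off = off % length;    /* offset within that group */
}

int main(void)
{
    unsigned int seg;
    unsigned long off;

    locate(2, 40000, &seg, &off);   /* order 2 => 16 KiB segments */
    printf("segment %u, offset %lu\n", seg, off);
    return 0;
}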
diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
index 544dc6b1f548..f91a67c6d968 100644
--- a/drivers/scsi/st.h
+++ b/drivers/scsi/st.h
@@ -46,6 +46,7 @@ struct st_buffer {
46 struct st_request *last_SRpnt; 46 struct st_request *last_SRpnt;
47 struct st_cmdstatus cmdstat; 47 struct st_cmdstatus cmdstat;
48 struct page **reserved_pages; 48 struct page **reserved_pages;
49 int reserved_page_order;
49 struct page **mapped_pages; 50 struct page **mapped_pages;
50 struct rq_map_data map_data; 51 struct rq_map_data map_data;
51 unsigned char *b_data; 52 unsigned char *b_data;
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 09fa8861fc58..9c73dbda3bbb 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -17,6 +17,7 @@
17#include <linux/errno.h> 17#include <linux/errno.h>
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/delay.h> 19#include <linux/delay.h>
20#include <linux/slab.h>
20#include <linux/time.h> 21#include <linux/time.h>
21#include <linux/pci.h> 22#include <linux/pci.h>
22#include <linux/blkdev.h> 23#include <linux/blkdev.h>
@@ -36,11 +37,11 @@
36#include <scsi/scsi_eh.h> 37#include <scsi/scsi_eh.h>
37 38
38#define DRV_NAME "stex" 39#define DRV_NAME "stex"
39#define ST_DRIVER_VERSION "4.6.0000.3" 40#define ST_DRIVER_VERSION "4.6.0000.4"
40#define ST_VER_MAJOR 4 41#define ST_VER_MAJOR 4
41#define ST_VER_MINOR 6 42#define ST_VER_MINOR 6
42#define ST_OEM 0 43#define ST_OEM 0
43#define ST_BUILD_VER 3 44#define ST_BUILD_VER 4
44 45
45enum { 46enum {
46 /* MU register offset */ 47 /* MU register offset */
@@ -64,24 +65,24 @@ enum {
64 YH2I_REQ_HI = 0xc4, 65 YH2I_REQ_HI = 0xc4,
65 66
66 /* MU register value */ 67 /* MU register value */
67 MU_INBOUND_DOORBELL_HANDSHAKE = 1, 68 MU_INBOUND_DOORBELL_HANDSHAKE = (1 << 0),
68 MU_INBOUND_DOORBELL_REQHEADCHANGED = 2, 69 MU_INBOUND_DOORBELL_REQHEADCHANGED = (1 << 1),
69 MU_INBOUND_DOORBELL_STATUSTAILCHANGED = 4, 70 MU_INBOUND_DOORBELL_STATUSTAILCHANGED = (1 << 2),
70 MU_INBOUND_DOORBELL_HMUSTOPPED = 8, 71 MU_INBOUND_DOORBELL_HMUSTOPPED = (1 << 3),
71 MU_INBOUND_DOORBELL_RESET = 16, 72 MU_INBOUND_DOORBELL_RESET = (1 << 4),
72 73
73 MU_OUTBOUND_DOORBELL_HANDSHAKE = 1, 74 MU_OUTBOUND_DOORBELL_HANDSHAKE = (1 << 0),
74 MU_OUTBOUND_DOORBELL_REQUESTTAILCHANGED = 2, 75 MU_OUTBOUND_DOORBELL_REQUESTTAILCHANGED = (1 << 1),
75 MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED = 4, 76 MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED = (1 << 2),
76 MU_OUTBOUND_DOORBELL_BUSCHANGE = 8, 77 MU_OUTBOUND_DOORBELL_BUSCHANGE = (1 << 3),
77 MU_OUTBOUND_DOORBELL_HASEVENT = 16, 78 MU_OUTBOUND_DOORBELL_HASEVENT = (1 << 4),
79 MU_OUTBOUND_DOORBELL_REQUEST_RESET = (1 << 27),
78 80
79 /* MU status code */ 81 /* MU status code */
80 MU_STATE_STARTING = 1, 82 MU_STATE_STARTING = 1,
81 MU_STATE_FMU_READY_FOR_HANDSHAKE = 2, 83 MU_STATE_STARTED = 2,
82 MU_STATE_SEND_HANDSHAKE_FRAME = 3, 84 MU_STATE_RESETTING = 3,
83 MU_STATE_STARTED = 4, 85 MU_STATE_FAILED = 4,
84 MU_STATE_RESETTING = 5,
85 86
86 MU_MAX_DELAY = 120, 87 MU_MAX_DELAY = 120,
87 MU_HANDSHAKE_SIGNATURE = 0x55aaaa55, 88 MU_HANDSHAKE_SIGNATURE = 0x55aaaa55,
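Rewriting the doorbell values as explicit shifts makes room for the new high bit (bit 27) in the same enum and keeps the mask tests readable. A self-contained sketch of the test the interrupt handlers below perform:

#include <stdio.h>

enum {
    DOORBELL_HANDSHAKE     = (1 << 0),
    DOORBELL_HASEVENT      = (1 << 4),
    DOORBELL_REQUEST_RESET = (1 << 27),
};

int main(void)
{
    unsigned int data = DOORBELL_HASEVENT | DOORBELL_REQUEST_RESET;

    if (data & DOORBELL_REQUEST_RESET)
        puts("firmware asked the host to reset");
    return 0;
}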
@@ -111,6 +112,8 @@ enum {
111 112
112 SS_H2I_INT_RESET = 0x100, 113 SS_H2I_INT_RESET = 0x100,
113 114
115 SS_I2H_REQUEST_RESET = 0x2000,
116
114 SS_MU_OPERATIONAL = 0x80000000, 117 SS_MU_OPERATIONAL = 0x80000000,
115 118
116 STEX_CDB_LENGTH = 16, 119 STEX_CDB_LENGTH = 16,
@@ -160,6 +163,7 @@ enum {
160 INQUIRY_EVPD = 0x01, 163 INQUIRY_EVPD = 0x01,
161 164
162 ST_ADDITIONAL_MEM = 0x200000, 165 ST_ADDITIONAL_MEM = 0x200000,
166 ST_ADDITIONAL_MEM_MIN = 0x80000,
163}; 167};
164 168
165struct st_sgitem { 169struct st_sgitem {
@@ -311,6 +315,10 @@ struct st_hba {
311 struct st_ccb *wait_ccb; 315 struct st_ccb *wait_ccb;
312 __le32 *scratch; 316 __le32 *scratch;
313 317
318 char work_q_name[20];
319 struct workqueue_struct *work_q;
320 struct work_struct reset_work;
321 wait_queue_head_t reset_waitq;
314 unsigned int mu_status; 322 unsigned int mu_status;
315 unsigned int cardtype; 323 unsigned int cardtype;
316 int msi_enabled; 324 int msi_enabled;
@@ -577,6 +585,9 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
577 lun = cmd->device->lun; 585 lun = cmd->device->lun;
578 hba = (struct st_hba *) &host->hostdata[0]; 586 hba = (struct st_hba *) &host->hostdata[0];
579 587
588 if (unlikely(hba->mu_status == MU_STATE_RESETTING))
589 return SCSI_MLQUEUE_HOST_BUSY;
590
580 switch (cmd->cmnd[0]) { 591 switch (cmd->cmnd[0]) {
581 case MODE_SENSE_10: 592 case MODE_SENSE_10:
582 { 593 {
@@ -613,6 +624,11 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
613 } 624 }
614 break; 625 break;
615 case INQUIRY: 626 case INQUIRY:
627 if (lun >= host->max_lun) {
628 cmd->result = DID_NO_CONNECT << 16;
629 done(cmd);
630 return 0;
631 }
616 if (id != host->max_id - 1) 632 if (id != host->max_id - 1)
617 break; 633 break;
618 if (!lun && !cmd->device->channel && 634 if (!lun && !cmd->device->channel &&
@@ -841,7 +857,6 @@ static irqreturn_t stex_intr(int irq, void *__hba)
841 void __iomem *base = hba->mmio_base; 857 void __iomem *base = hba->mmio_base;
842 u32 data; 858 u32 data;
843 unsigned long flags; 859 unsigned long flags;
844 int handled = 0;
845 860
846 spin_lock_irqsave(hba->host->host_lock, flags); 861 spin_lock_irqsave(hba->host->host_lock, flags);
847 862
@@ -852,12 +867,16 @@ static irqreturn_t stex_intr(int irq, void *__hba)
852 writel(data, base + ODBL); 867 writel(data, base + ODBL);
853 readl(base + ODBL); /* flush */ 868 readl(base + ODBL); /* flush */
854 stex_mu_intr(hba, data); 869 stex_mu_intr(hba, data);
855 handled = 1; 870 spin_unlock_irqrestore(hba->host->host_lock, flags);
871 if (unlikely(data & MU_OUTBOUND_DOORBELL_REQUEST_RESET &&
872 hba->cardtype == st_shasta))
873 queue_work(hba->work_q, &hba->reset_work);
874 return IRQ_HANDLED;
856 } 875 }
857 876
858 spin_unlock_irqrestore(hba->host->host_lock, flags); 877 spin_unlock_irqrestore(hba->host->host_lock, flags);
859 878
860 return IRQ_RETVAL(handled); 879 return IRQ_NONE;
861} 880}
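The reshaped handler acknowledges the doorbell in hard-IRQ context, defers the firmware-requested reset to a workqueue where sleeping is allowed, and returns IRQ_HANDLED/IRQ_NONE directly instead of threading a handled flag through. A hedged kernel-style sketch of the shape (struct example_hba and its fields are hypothetical):

static irqreturn_t example_intr(int irq, void *arg)
{
    struct example_hba *hba = arg;          /* hypothetical per-HBA state */
    u32 data = readl(hba->base + ODBL);

    if (data == 0 || data == 0xffffffff)    /* not ours, or device gone */
        return IRQ_NONE;

    writel(data, hba->base + ODBL);         /* ack while still atomic */
    if (data & MU_OUTBOUND_DOORBELL_REQUEST_RESET)
        queue_work(hba->work_q, &hba->reset_work); /* sleepable path */
    return IRQ_HANDLED;
}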
862 881
863static void stex_ss_mu_intr(struct st_hba *hba) 882static void stex_ss_mu_intr(struct st_hba *hba)
@@ -939,7 +958,6 @@ static irqreturn_t stex_ss_intr(int irq, void *__hba)
939 void __iomem *base = hba->mmio_base; 958 void __iomem *base = hba->mmio_base;
940 u32 data; 959 u32 data;
941 unsigned long flags; 960 unsigned long flags;
942 int handled = 0;
943 961
944 spin_lock_irqsave(hba->host->host_lock, flags); 962 spin_lock_irqsave(hba->host->host_lock, flags);
945 963
@@ -948,12 +966,15 @@ static irqreturn_t stex_ss_intr(int irq, void *__hba)
948 /* clear the interrupt */ 966 /* clear the interrupt */
949 writel(data, base + YI2H_INT_C); 967 writel(data, base + YI2H_INT_C);
950 stex_ss_mu_intr(hba); 968 stex_ss_mu_intr(hba);
951 handled = 1; 969 spin_unlock_irqrestore(hba->host->host_lock, flags);
970 if (unlikely(data & SS_I2H_REQUEST_RESET))
971 queue_work(hba->work_q, &hba->reset_work);
972 return IRQ_HANDLED;
952 } 973 }
953 974
954 spin_unlock_irqrestore(hba->host->host_lock, flags); 975 spin_unlock_irqrestore(hba->host->host_lock, flags);
955 976
956 return IRQ_RETVAL(handled); 977 return IRQ_NONE;
957} 978}
958 979
959static int stex_common_handshake(struct st_hba *hba) 980static int stex_common_handshake(struct st_hba *hba)
@@ -1001,7 +1022,7 @@ static int stex_common_handshake(struct st_hba *hba)
1001 h->partner_type = HMU_PARTNER_TYPE; 1022 h->partner_type = HMU_PARTNER_TYPE;
1002 if (hba->extra_offset) { 1023 if (hba->extra_offset) {
1003 h->extra_offset = cpu_to_le32(hba->extra_offset); 1024 h->extra_offset = cpu_to_le32(hba->extra_offset);
1004 h->extra_size = cpu_to_le32(ST_ADDITIONAL_MEM); 1025 h->extra_size = cpu_to_le32(hba->dma_size - hba->extra_offset);
1005 } else 1026 } else
1006 h->extra_offset = h->extra_size = 0; 1027 h->extra_offset = h->extra_size = 0;
1007 1028
@@ -1046,7 +1067,7 @@ static int stex_ss_handshake(struct st_hba *hba)
1046 struct st_msg_header *msg_h; 1067 struct st_msg_header *msg_h;
1047 struct handshake_frame *h; 1068 struct handshake_frame *h;
1048 __le32 *scratch; 1069 __le32 *scratch;
1049 u32 data; 1070 u32 data, scratch_size;
1050 unsigned long before; 1071 unsigned long before;
1051 int ret = 0; 1072 int ret = 0;
1052 1073
@@ -1074,13 +1095,16 @@ static int stex_ss_handshake(struct st_hba *hba)
1074 stex_gettime(&h->hosttime); 1095 stex_gettime(&h->hosttime);
1075 h->partner_type = HMU_PARTNER_TYPE; 1096 h->partner_type = HMU_PARTNER_TYPE;
1076 h->extra_offset = h->extra_size = 0; 1097 h->extra_offset = h->extra_size = 0;
1077 h->scratch_size = cpu_to_le32((hba->sts_count+1)*sizeof(u32)); 1098 scratch_size = (hba->sts_count+1)*sizeof(u32);
1099 h->scratch_size = cpu_to_le32(scratch_size);
1078 1100
1079 data = readl(base + YINT_EN); 1101 data = readl(base + YINT_EN);
1080 data &= ~4; 1102 data &= ~4;
1081 writel(data, base + YINT_EN); 1103 writel(data, base + YINT_EN);
1082 writel((hba->dma_handle >> 16) >> 16, base + YH2I_REQ_HI); 1104 writel((hba->dma_handle >> 16) >> 16, base + YH2I_REQ_HI);
1105 readl(base + YH2I_REQ_HI);
1083 writel(hba->dma_handle, base + YH2I_REQ); 1106 writel(hba->dma_handle, base + YH2I_REQ);
1107 readl(base + YH2I_REQ); /* flush */
1084 1108
1085 scratch = hba->scratch; 1109 scratch = hba->scratch;
1086 before = jiffies; 1110 before = jiffies;
@@ -1096,7 +1120,7 @@ static int stex_ss_handshake(struct st_hba *hba)
1096 msleep(1); 1120 msleep(1);
1097 } 1121 }
1098 1122
1099 *scratch = 0; 1123 memset(scratch, 0, scratch_size);
1100 msg_h->flag = 0; 1124 msg_h->flag = 0;
1101 return ret; 1125 return ret;
1102} 1126}
@@ -1105,19 +1129,24 @@ static int stex_handshake(struct st_hba *hba)
1105{ 1129{
1106 int err; 1130 int err;
1107 unsigned long flags; 1131 unsigned long flags;
1132 unsigned int mu_status;
1108 1133
1109 err = (hba->cardtype == st_yel) ? 1134 err = (hba->cardtype == st_yel) ?
1110 stex_ss_handshake(hba) : stex_common_handshake(hba); 1135 stex_ss_handshake(hba) : stex_common_handshake(hba);
1136 spin_lock_irqsave(hba->host->host_lock, flags);
1137 mu_status = hba->mu_status;
1111 if (err == 0) { 1138 if (err == 0) {
1112 spin_lock_irqsave(hba->host->host_lock, flags);
1113 hba->req_head = 0; 1139 hba->req_head = 0;
1114 hba->req_tail = 0; 1140 hba->req_tail = 0;
1115 hba->status_head = 0; 1141 hba->status_head = 0;
1116 hba->status_tail = 0; 1142 hba->status_tail = 0;
1117 hba->out_req_cnt = 0; 1143 hba->out_req_cnt = 0;
1118 hba->mu_status = MU_STATE_STARTED; 1144 hba->mu_status = MU_STATE_STARTED;
1119 spin_unlock_irqrestore(hba->host->host_lock, flags); 1145 } else
1120 } 1146 hba->mu_status = MU_STATE_FAILED;
1147 if (mu_status == MU_STATE_RESETTING)
1148 wake_up_all(&hba->reset_waitq);
1149 spin_unlock_irqrestore(hba->host->host_lock, flags);
1121 return err; 1150 return err;
1122} 1151}
1123 1152
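stex_handshake() now records the outcome under host_lock and wakes every waiter parked in stex_do_reset()'s wait_event_timeout() loop. A runnable user-space analogue of that state-plus-broadcast handoff using a condition variable:

#include <pthread.h>
#include <stdio.h>

enum { STARTED, RESETTING, FAILED };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;
static int mu_status = RESETTING;

static void finish_handshake(int ok)
{
    pthread_mutex_lock(&lock);
    mu_status = ok ? STARTED : FAILED;  /* record outcome under the lock */
    pthread_cond_broadcast(&done);      /* wake_up_all() analogue */
    pthread_mutex_unlock(&lock);
}

static int wait_for_reset(void)
{
    pthread_mutex_lock(&lock);
    while (mu_status == RESETTING)
        pthread_cond_wait(&done, &lock);
    int ok = (mu_status == STARTED);
    pthread_mutex_unlock(&lock);
    return ok;
}

int main(void)
{
    finish_handshake(1);
    printf("reset ok: %d\n", wait_for_reset());
    return 0;
}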
@@ -1137,17 +1166,11 @@ static int stex_abort(struct scsi_cmnd *cmd)
1137 1166
1138 base = hba->mmio_base; 1167 base = hba->mmio_base;
1139 spin_lock_irqsave(host->host_lock, flags); 1168 spin_lock_irqsave(host->host_lock, flags);
1140 if (tag < host->can_queue && hba->ccb[tag].cmd == cmd) 1169 if (tag < host->can_queue &&
1170 hba->ccb[tag].req && hba->ccb[tag].cmd == cmd)
1141 hba->wait_ccb = &hba->ccb[tag]; 1171 hba->wait_ccb = &hba->ccb[tag];
1142 else { 1172 else
1143 for (tag = 0; tag < host->can_queue; tag++) 1173 goto out;
1144 if (hba->ccb[tag].cmd == cmd) {
1145 hba->wait_ccb = &hba->ccb[tag];
1146 break;
1147 }
1148 if (tag >= host->can_queue)
1149 goto out;
1150 }
1151 1174
1152 if (hba->cardtype == st_yel) { 1175 if (hba->cardtype == st_yel) {
1153 data = readl(base + YI2H_INT); 1176 data = readl(base + YI2H_INT);
@@ -1221,6 +1244,37 @@ static void stex_hard_reset(struct st_hba *hba)
1221 hba->pdev->saved_config_space[i]); 1244 hba->pdev->saved_config_space[i]);
1222} 1245}
1223 1246
1247static int stex_yos_reset(struct st_hba *hba)
1248{
1249 void __iomem *base;
1250 unsigned long flags, before;
1251 int ret = 0;
1252
1253 base = hba->mmio_base;
1254 writel(MU_INBOUND_DOORBELL_RESET, base + IDBL);
1255 readl(base + IDBL); /* flush */
1256 before = jiffies;
1257 while (hba->out_req_cnt > 0) {
1258 if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) {
1259 printk(KERN_WARNING DRV_NAME
1260 "(%s): reset timeout\n", pci_name(hba->pdev));
1261 ret = -1;
1262 break;
1263 }
1264 msleep(1);
1265 }
1266
1267 spin_lock_irqsave(hba->host->host_lock, flags);
1268 if (ret == -1)
1269 hba->mu_status = MU_STATE_FAILED;
1270 else
1271 hba->mu_status = MU_STATE_STARTED;
1272 wake_up_all(&hba->reset_waitq);
1273 spin_unlock_irqrestore(hba->host->host_lock, flags);
1274
1275 return ret;
1276}
1277
1224static void stex_ss_reset(struct st_hba *hba) 1278static void stex_ss_reset(struct st_hba *hba)
1225{ 1279{
1226 writel(SS_H2I_INT_RESET, hba->mmio_base + YH2I_INT); 1280 writel(SS_H2I_INT_RESET, hba->mmio_base + YH2I_INT);
@@ -1228,66 +1282,86 @@ static void stex_ss_reset(struct st_hba *hba)
1228 ssleep(5); 1282 ssleep(5);
1229} 1283}
1230 1284
1231static int stex_reset(struct scsi_cmnd *cmd) 1285static int stex_do_reset(struct st_hba *hba)
1232{ 1286{
1233 struct st_hba *hba; 1287 struct st_ccb *ccb;
1234 void __iomem *base; 1288 unsigned long flags;
1235 unsigned long flags, before; 1289 unsigned int mu_status = MU_STATE_RESETTING;
1290 u16 tag;
1236 1291
1237 hba = (struct st_hba *) &cmd->device->host->hostdata[0]; 1292 spin_lock_irqsave(hba->host->host_lock, flags);
1293 if (hba->mu_status == MU_STATE_STARTING) {
1294 spin_unlock_irqrestore(hba->host->host_lock, flags);
1295 printk(KERN_INFO DRV_NAME "(%s): request reset during init\n",
1296 pci_name(hba->pdev));
1297 return 0;
1298 }
1299 while (hba->mu_status == MU_STATE_RESETTING) {
1300 spin_unlock_irqrestore(hba->host->host_lock, flags);
1301 wait_event_timeout(hba->reset_waitq,
1302 hba->mu_status != MU_STATE_RESETTING,
1303 MU_MAX_DELAY * HZ);
1304 spin_lock_irqsave(hba->host->host_lock, flags);
1305 mu_status = hba->mu_status;
1306 }
1238 1307
1239 printk(KERN_INFO DRV_NAME 1308 if (mu_status != MU_STATE_RESETTING) {
1240 "(%s): resetting host\n", pci_name(hba->pdev)); 1309 spin_unlock_irqrestore(hba->host->host_lock, flags);
1241 scsi_print_command(cmd); 1310 return (mu_status == MU_STATE_STARTED) ? 0 : -1;
1311 }
1242 1312
1243 hba->mu_status = MU_STATE_RESETTING; 1313 hba->mu_status = MU_STATE_RESETTING;
1314 spin_unlock_irqrestore(hba->host->host_lock, flags);
1315
1316 if (hba->cardtype == st_yosemite)
1317 return stex_yos_reset(hba);
1244 1318
1245 if (hba->cardtype == st_shasta) 1319 if (hba->cardtype == st_shasta)
1246 stex_hard_reset(hba); 1320 stex_hard_reset(hba);
1247 else if (hba->cardtype == st_yel) 1321 else if (hba->cardtype == st_yel)
1248 stex_ss_reset(hba); 1322 stex_ss_reset(hba);
1249 1323
1250 if (hba->cardtype != st_yosemite) { 1324 spin_lock_irqsave(hba->host->host_lock, flags);
1251 if (stex_handshake(hba)) { 1325 for (tag = 0; tag < hba->host->can_queue; tag++) {
1252 printk(KERN_WARNING DRV_NAME 1326 ccb = &hba->ccb[tag];
1253 "(%s): resetting: handshake failed\n", 1327 if (ccb->req == NULL)
1254 pci_name(hba->pdev)); 1328 continue;
1255 return FAILED; 1329 ccb->req = NULL;
1330 if (ccb->cmd) {
1331 scsi_dma_unmap(ccb->cmd);
1332 ccb->cmd->result = DID_RESET << 16;
1333 ccb->cmd->scsi_done(ccb->cmd);
1334 ccb->cmd = NULL;
1256 } 1335 }
1257 return SUCCESS;
1258 } 1336 }
1337 spin_unlock_irqrestore(hba->host->host_lock, flags);
1259 1338
1260 /* st_yosemite */ 1339 if (stex_handshake(hba) == 0)
1261 writel(MU_INBOUND_DOORBELL_RESET, hba->mmio_base + IDBL); 1340 return 0;
1262 readl(hba->mmio_base + IDBL); /* flush */
1263 before = jiffies;
1264 while (hba->out_req_cnt > 0) {
1265 if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) {
1266 printk(KERN_WARNING DRV_NAME
1267 "(%s): reset timeout\n", pci_name(hba->pdev));
1268 return FAILED;
1269 }
1270 msleep(1);
1271 }
1272 1341
1273 base = hba->mmio_base; 1342 printk(KERN_WARNING DRV_NAME "(%s): resetting: handshake failed\n",
1274 writel(0, base + IMR0); 1343 pci_name(hba->pdev));
1275 readl(base + IMR0); 1344 return -1;
1276 writel(0, base + OMR0); 1345}
1277 readl(base + OMR0); 1346
1278 writel(0, base + IMR1); 1347static int stex_reset(struct scsi_cmnd *cmd)
1279 readl(base + IMR1); 1348{
1280 writel(0, base + OMR1); 1349 struct st_hba *hba;
1281 readl(base + OMR1); /* flush */ 1350
1282 spin_lock_irqsave(hba->host->host_lock, flags); 1351 hba = (struct st_hba *) &cmd->device->host->hostdata[0];
1283 hba->req_head = 0; 1352
1284 hba->req_tail = 0; 1353 printk(KERN_INFO DRV_NAME
1285 hba->status_head = 0; 1354 "(%s): resetting host\n", pci_name(hba->pdev));
1286 hba->status_tail = 0; 1355 scsi_print_command(cmd);
1287 hba->out_req_cnt = 0; 1356
1288 hba->mu_status = MU_STATE_STARTED; 1357 return stex_do_reset(hba) ? FAILED : SUCCESS;
1289 spin_unlock_irqrestore(hba->host->host_lock, flags); 1358}
1290 return SUCCESS; 1359
1360static void stex_reset_work(struct work_struct *work)
1361{
1362 struct st_hba *hba = container_of(work, struct st_hba, reset_work);
1363
1364 stex_do_reset(hba);
1291} 1365}
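stex_reset_work gives non-EH contexts a way to trigger the same recovery: a caller that cannot sleep queues hba->reset_work on the per-host workqueue created in stex_probe below, rather than running stex_do_reset() inline. A hedged usage sketch; the wrapper shown is illustrative, not part of the patch:

	/* Sketch: defer recovery from atomic context to the work queue. */
	static void stex_request_reset(struct st_hba *hba)
	{
		queue_work(hba->work_q, &hba->reset_work);
	}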
1292 1366
1293static int stex_biosparam(struct scsi_device *sdev, 1367static int stex_biosparam(struct scsi_device *sdev,
@@ -1420,8 +1494,8 @@ static int stex_set_dma_mask(struct pci_dev * pdev)
1420{ 1494{
1421 int ret; 1495 int ret;
1422 1496
1423 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) 1497 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
1424 && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) 1498 && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
1425 return 0; 1499 return 0;
1426 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 1500 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1427 if (!ret) 1501 if (!ret)
@@ -1528,10 +1602,24 @@ stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1528 hba->dma_mem = dma_alloc_coherent(&pdev->dev, 1602 hba->dma_mem = dma_alloc_coherent(&pdev->dev,
1529 hba->dma_size, &hba->dma_handle, GFP_KERNEL); 1603 hba->dma_size, &hba->dma_handle, GFP_KERNEL);
1530 if (!hba->dma_mem) { 1604 if (!hba->dma_mem) {
1531 err = -ENOMEM; 1605 /* Retry minimum coherent mapping for st_seq and st_vsc */
1532 printk(KERN_ERR DRV_NAME "(%s): dma mem alloc failed\n", 1606 if (hba->cardtype == st_seq ||
1533 pci_name(pdev)); 1607 (hba->cardtype == st_vsc && (pdev->subsystem_device & 1))) {
1534 goto out_iounmap; 1608 printk(KERN_WARNING DRV_NAME
1609 "(%s): allocating min buffer for controller\n",
1610 pci_name(pdev));
1611 hba->dma_size = hba->extra_offset
1612 + ST_ADDITIONAL_MEM_MIN;
1613 hba->dma_mem = dma_alloc_coherent(&pdev->dev,
1614 hba->dma_size, &hba->dma_handle, GFP_KERNEL);
1615 }
1616
1617 if (!hba->dma_mem) {
1618 err = -ENOMEM;
1619 printk(KERN_ERR DRV_NAME "(%s): dma mem alloc failed\n",
1620 pci_name(pdev));
1621 goto out_iounmap;
1622 }
1535 } 1623 }
1536 1624
1537 hba->ccb = kcalloc(ci->rq_count, sizeof(struct st_ccb), GFP_KERNEL); 1625 hba->ccb = kcalloc(ci->rq_count, sizeof(struct st_ccb), GFP_KERNEL);
@@ -1568,12 +1656,24 @@ stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1568 1656
1569 hba->host = host; 1657 hba->host = host;
1570 hba->pdev = pdev; 1658 hba->pdev = pdev;
1659 init_waitqueue_head(&hba->reset_waitq);
1660
1661 snprintf(hba->work_q_name, sizeof(hba->work_q_name),
1662 "stex_wq_%d", host->host_no);
1663 hba->work_q = create_singlethread_workqueue(hba->work_q_name);
1664 if (!hba->work_q) {
1665 printk(KERN_ERR DRV_NAME "(%s): create workqueue failed\n",
1666 pci_name(pdev));
1667 err = -ENOMEM;
1668 goto out_ccb_free;
1669 }
1670 INIT_WORK(&hba->reset_work, stex_reset_work);
1571 1671
1572 err = stex_request_irq(hba); 1672 err = stex_request_irq(hba);
1573 if (err) { 1673 if (err) {
1574 printk(KERN_ERR DRV_NAME "(%s): request irq failed\n", 1674 printk(KERN_ERR DRV_NAME "(%s): request irq failed\n",
1575 pci_name(pdev)); 1675 pci_name(pdev));
1576 goto out_ccb_free; 1676 goto out_free_wq;
1577 } 1677 }
1578 1678
1579 err = stex_handshake(hba); 1679 err = stex_handshake(hba);
@@ -1602,6 +1702,8 @@ stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1602 1702
1603out_free_irq: 1703out_free_irq:
1604 stex_free_irq(hba); 1704 stex_free_irq(hba);
1705out_free_wq:
1706 destroy_workqueue(hba->work_q);
1605out_ccb_free: 1707out_ccb_free:
1606 kfree(hba->ccb); 1708 kfree(hba->ccb);
1607out_pci_free: 1709out_pci_free:
@@ -1669,6 +1771,8 @@ static void stex_hba_free(struct st_hba *hba)
1669{ 1771{
1670 stex_free_irq(hba); 1772 stex_free_irq(hba);
1671 1773
1774 destroy_workqueue(hba->work_q);
1775
1672 iounmap(hba->mmio_base); 1776 iounmap(hba->mmio_base);
1673 1777
1674 pci_release_regions(hba->pdev); 1778 pci_release_regions(hba->pdev);
diff --git a/drivers/scsi/sun3_NCR5380.c b/drivers/scsi/sun3_NCR5380.c
index 75da6e58ce55..b5838d547c68 100644
--- a/drivers/scsi/sun3_NCR5380.c
+++ b/drivers/scsi/sun3_NCR5380.c
@@ -645,6 +645,7 @@ __inline__ void NCR5380_print_phase(struct Scsi_Host *instance) { };
645 * interrupt or bottom half. 645 * interrupt or bottom half.
646 */ 646 */
647 647
648#include <linux/gfp.h>
648#include <linux/workqueue.h> 649#include <linux/workqueue.h>
649#include <linux/interrupt.h> 650#include <linux/interrupt.h>
650 651
diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
index 34a99620e5bd..0621037f0271 100644
--- a/drivers/scsi/sun3x_esp.c
+++ b/drivers/scsi/sun3x_esp.c
@@ -4,6 +4,7 @@
4 */ 4 */
5 5
6#include <linux/kernel.h> 6#include <linux/kernel.h>
7#include <linux/gfp.h>
7#include <linux/types.h> 8#include <linux/types.h>
8#include <linux/delay.h> 9#include <linux/delay.h>
9#include <linux/module.h> 10#include <linux/module.h>
diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c
index 3d73aad4bc82..fc23d273fb1a 100644
--- a/drivers/scsi/sun_esp.c
+++ b/drivers/scsi/sun_esp.c
@@ -12,6 +12,7 @@
12#include <linux/dma-mapping.h> 12#include <linux/dma-mapping.h>
13#include <linux/of.h> 13#include <linux/of.h>
14#include <linux/of_device.h> 14#include <linux/of_device.h>
15#include <linux/gfp.h>
15 16
16#include <asm/irq.h> 17#include <asm/irq.h>
17#include <asm/io.h> 18#include <asm/io.h>
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 45374d66d26a..8b955b534a36 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -984,7 +984,7 @@ static void sym_exec_user_command (struct sym_hcb *np, struct sym_usrcmd *uc)
984 } 984 }
985} 985}
986 986
987static int skip_spaces(char *ptr, int len) 987static int sym_skip_spaces(char *ptr, int len)
988{ 988{
989 int cnt, c; 989 int cnt, c;
990 990
@@ -1012,7 +1012,7 @@ static int is_keyword(char *ptr, int len, char *verb)
1012} 1012}
1013 1013
1014#define SKIP_SPACES(ptr, len) \ 1014#define SKIP_SPACES(ptr, len) \
1015 if ((arg_len = skip_spaces(ptr, len)) < 1) \ 1015 if ((arg_len = sym_skip_spaces(ptr, len)) < 1) \
1016 return -EINVAL; \ 1016 return -EINVAL; \
1017 ptr += arg_len; len -= arg_len; 1017 ptr += arg_len; len -= arg_len;
1018 1018
@@ -1864,7 +1864,7 @@ static pci_ers_result_t sym2_io_slot_dump(struct pci_dev *pdev)
1864 * 1864 *
1865 * This routine is similar to sym_set_workarounds(), except 1865 * This routine is similar to sym_set_workarounds(), except
1866 * that, at this point, we already know that the device was 1866 * that, at this point, we already know that the device was
1867 * succesfully intialized at least once before, and so most 1867 * successfully initialized at least once before, and so most
1868 * of the steps taken there are un-needed here. 1868 * of the steps taken there are un-needed here.
1869 */ 1869 */
1870static void sym2_reset_workarounds(struct pci_dev *pdev) 1870static void sym2_reset_workarounds(struct pci_dev *pdev)
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index 297deb817a5d..a7bc8b7b09ac 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -2692,7 +2692,7 @@ static void sym_int_ma (struct sym_hcb *np)
2692 * we force a SIR_NEGO_PROTO interrupt (it is a hack that avoids 2692 * we force a SIR_NEGO_PROTO interrupt (it is a hack that avoids
2693 * bloat for such a should_not_happen situation). 2693 * bloat for such a should_not_happen situation).
2694 * In all other situation, we reset the BUS. 2694 * In all other situation, we reset the BUS.
2695 * Are these assumptions reasonnable ? (Wait and see ...) 2695 * Are these assumptions reasonable ? (Wait and see ...)
2696 */ 2696 */
2697unexpected_phase: 2697unexpected_phase:
2698 dsp -= 8; 2698 dsp -= 8;
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.h b/drivers/scsi/sym53c8xx_2/sym_hipd.h
index 053e63c86822..5a80cbac3f92 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.h
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.h
@@ -54,7 +54,7 @@
54 * 54 *
55 * SYM_OPT_LIMIT_COMMAND_REORDERING 55 * SYM_OPT_LIMIT_COMMAND_REORDERING
56 * When this option is set, the driver tries to limit tagged 56 * When this option is set, the driver tries to limit tagged
57 * command reordering to some reasonnable value. 57 * command reordering to some reasonable value.
58 * (set for Linux) 58 * (set for Linux)
59 */ 59 */
60#if 0 60#if 0
diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c
index 9a4273445c0d..27866b0adfeb 100644
--- a/drivers/scsi/tmscsim.c
+++ b/drivers/scsi/tmscsim.c
@@ -233,6 +233,7 @@
233#include <linux/interrupt.h> 233#include <linux/interrupt.h>
234#include <linux/init.h> 234#include <linux/init.h>
235#include <linux/spinlock.h> 235#include <linux/spinlock.h>
236#include <linux/slab.h>
236#include <asm/io.h> 237#include <asm/io.h>
237 238
238#include <scsi/scsi.h> 239#include <scsi/scsi.h>
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index 54023d41fd15..5d9fdeeb2315 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -420,6 +420,7 @@
420#include <linux/init.h> 420#include <linux/init.h>
421#include <linux/ctype.h> 421#include <linux/ctype.h>
422#include <linux/spinlock.h> 422#include <linux/spinlock.h>
423#include <linux/slab.h>
423#include <asm/dma.h> 424#include <asm/dma.h>
424#include <asm/irq.h> 425#include <asm/irq.h>
425 426
@@ -1070,7 +1071,7 @@ static int option_setup(char *str) {
1070 char *cur = str; 1071 char *cur = str;
1071 int i = 1; 1072 int i = 1;
1072 1073
1073 while (cur && isdigit(*cur) && i <= MAX_INT_PARAM) { 1074 while (cur && isdigit(*cur) && i < MAX_INT_PARAM) {
1074 ints[i++] = simple_strtoul(cur, NULL, 0); 1075 ints[i++] = simple_strtoul(cur, NULL, 0);
1075 1076
1076 if ((cur = strchr(cur, ',')) != NULL) cur++; 1077 if ((cur = strchr(cur, ',')) != NULL) cur++;
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
new file mode 100644
index 000000000000..26894459c37f
--- /dev/null
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -0,0 +1,1409 @@
1/*
2 * Linux driver for VMware's para-virtualized SCSI HBA.
3 *
4 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; version 2 of the License and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more
14 * details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Maintained by: Alok N Kataria <akataria@vmware.com>
21 *
22 */
23
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/interrupt.h>
27#include <linux/slab.h>
28#include <linux/workqueue.h>
29#include <linux/pci.h>
30
31#include <scsi/scsi.h>
32#include <scsi/scsi_host.h>
33#include <scsi/scsi_cmnd.h>
34#include <scsi/scsi_device.h>
35
36#include "vmw_pvscsi.h"
37
38#define PVSCSI_LINUX_DRIVER_DESC "VMware PVSCSI driver"
39
40MODULE_DESCRIPTION(PVSCSI_LINUX_DRIVER_DESC);
41MODULE_AUTHOR("VMware, Inc.");
42MODULE_LICENSE("GPL");
43MODULE_VERSION(PVSCSI_DRIVER_VERSION_STRING);
44
45#define PVSCSI_DEFAULT_NUM_PAGES_PER_RING 8
46#define PVSCSI_DEFAULT_NUM_PAGES_MSG_RING 1
47#define PVSCSI_DEFAULT_QUEUE_DEPTH 64
48#define SGL_SIZE PAGE_SIZE
49
50struct pvscsi_sg_list {
51 struct PVSCSISGElement sge[PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT];
52};
53
54struct pvscsi_ctx {
55 /*
56 * The index of the context in cmd_map serves as the context ID, giving
57 * a 1-to-1 mapping from completions back to requests.
58 */
59 struct scsi_cmnd *cmd;
60 struct pvscsi_sg_list *sgl;
61 struct list_head list;
62 dma_addr_t dataPA;
63 dma_addr_t sensePA;
64 dma_addr_t sglPA;
65};
66
67struct pvscsi_adapter {
68 char *mmioBase;
69 unsigned int irq;
70 u8 rev;
71 bool use_msi;
72 bool use_msix;
73 bool use_msg;
74
75 spinlock_t hw_lock;
76
77 struct workqueue_struct *workqueue;
78 struct work_struct work;
79
80 struct PVSCSIRingReqDesc *req_ring;
81 unsigned req_pages;
82 unsigned req_depth;
83 dma_addr_t reqRingPA;
84
85 struct PVSCSIRingCmpDesc *cmp_ring;
86 unsigned cmp_pages;
87 dma_addr_t cmpRingPA;
88
89 struct PVSCSIRingMsgDesc *msg_ring;
90 unsigned msg_pages;
91 dma_addr_t msgRingPA;
92
93 struct PVSCSIRingsState *rings_state;
94 dma_addr_t ringStatePA;
95
96 struct pci_dev *dev;
97 struct Scsi_Host *host;
98
99 struct list_head cmd_pool;
100 struct pvscsi_ctx *cmd_map;
101};
102
103
104/* Command line parameters */
105static int pvscsi_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_PER_RING;
106static int pvscsi_msg_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_MSG_RING;
107static int pvscsi_cmd_per_lun = PVSCSI_DEFAULT_QUEUE_DEPTH;
108static bool pvscsi_disable_msi;
109static bool pvscsi_disable_msix;
110static bool pvscsi_use_msg = true;
111
112#define PVSCSI_RW (S_IRUSR | S_IWUSR)
113
114module_param_named(ring_pages, pvscsi_ring_pages, int, PVSCSI_RW);
115MODULE_PARM_DESC(ring_pages, "Number of pages per req/cmp ring - (default="
116 __stringify(PVSCSI_DEFAULT_NUM_PAGES_PER_RING) ")");
117
118module_param_named(msg_ring_pages, pvscsi_msg_ring_pages, int, PVSCSI_RW);
119MODULE_PARM_DESC(msg_ring_pages, "Number of pages for the msg ring - (default="
120 __stringify(PVSCSI_DEFAULT_NUM_PAGES_MSG_RING) ")");
121
122module_param_named(cmd_per_lun, pvscsi_cmd_per_lun, int, PVSCSI_RW);
123MODULE_PARM_DESC(cmd_per_lun, "Maximum commands per lun - (default="
124 __stringify(PVSCSI_DEFAULT_QUEUE_DEPTH) ")");
125
126module_param_named(disable_msi, pvscsi_disable_msi, bool, PVSCSI_RW);
127MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");
128
129module_param_named(disable_msix, pvscsi_disable_msix, bool, PVSCSI_RW);
130MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)");
131
132module_param_named(use_msg, pvscsi_use_msg, bool, PVSCSI_RW);
133MODULE_PARM_DESC(use_msg, "Use msg ring when available - (default=1)");
134
135static const struct pci_device_id pvscsi_pci_tbl[] = {
136 { PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_PVSCSI) },
137 { 0 }
138};
139
140MODULE_DEVICE_TABLE(pci, pvscsi_pci_tbl);
141
142static struct device *
143pvscsi_dev(const struct pvscsi_adapter *adapter)
144{
145 return &(adapter->dev->dev);
146}
147
148static struct pvscsi_ctx *
149pvscsi_find_context(const struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd)
150{
151 struct pvscsi_ctx *ctx, *end;
152
153 end = &adapter->cmd_map[adapter->req_depth];
154 for (ctx = adapter->cmd_map; ctx < end; ctx++)
155 if (ctx->cmd == cmd)
156 return ctx;
157
158 return NULL;
159}
160
161static struct pvscsi_ctx *
162pvscsi_acquire_context(struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd)
163{
164 struct pvscsi_ctx *ctx;
165
166 if (list_empty(&adapter->cmd_pool))
167 return NULL;
168
169 ctx = list_first_entry(&adapter->cmd_pool, struct pvscsi_ctx, list);
170 ctx->cmd = cmd;
171 list_del(&ctx->list);
172
173 return ctx;
174}
175
176static void pvscsi_release_context(struct pvscsi_adapter *adapter,
177 struct pvscsi_ctx *ctx)
178{
179 ctx->cmd = NULL;
180 list_add(&ctx->list, &adapter->cmd_pool);
181}
182
183/*
184 * Map a pvscsi_ctx struct to a context ID field value; we map to a simple
185 * non-zero integer. ctx always points to an entry in the cmd_map array, hence
186 * the return value is always >=1.
187 */
188static u64 pvscsi_map_context(const struct pvscsi_adapter *adapter,
189 const struct pvscsi_ctx *ctx)
190{
191 return ctx - adapter->cmd_map + 1;
192}
193
194static struct pvscsi_ctx *
195pvscsi_get_context(const struct pvscsi_adapter *adapter, u64 context)
196{
197 return &adapter->cmd_map[context - 1];
198}
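The two helpers are exact inverses, so the 64-bit context field in a descriptor identifies exactly one outstanding command. A worked fragment of the round trip, for any index i below req_depth:

	/* Sketch: cmd_map[i] maps to context ID i + 1 and back again;
	 * keeping IDs >= 1 reserves 0 as an invalid-context sentinel. */
	struct pvscsi_ctx *ctx = &adapter->cmd_map[i];
	u64 id = pvscsi_map_context(adapter, ctx);	/* == i + 1 */
	BUG_ON(pvscsi_get_context(adapter, id) != ctx);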
199
200static void pvscsi_reg_write(const struct pvscsi_adapter *adapter,
201 u32 offset, u32 val)
202{
203 writel(val, adapter->mmioBase + offset);
204}
205
206static u32 pvscsi_reg_read(const struct pvscsi_adapter *adapter, u32 offset)
207{
208 return readl(adapter->mmioBase + offset);
209}
210
211static u32 pvscsi_read_intr_status(const struct pvscsi_adapter *adapter)
212{
213 return pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_INTR_STATUS);
214}
215
216static void pvscsi_write_intr_status(const struct pvscsi_adapter *adapter,
217 u32 val)
218{
219 pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_STATUS, val);
220}
221
222static void pvscsi_unmask_intr(const struct pvscsi_adapter *adapter)
223{
224 u32 intr_bits;
225
226 intr_bits = PVSCSI_INTR_CMPL_MASK;
227 if (adapter->use_msg)
228 intr_bits |= PVSCSI_INTR_MSG_MASK;
229
230 pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, intr_bits);
231}
232
233static void pvscsi_mask_intr(const struct pvscsi_adapter *adapter)
234{
235 pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, 0);
236}
237
238static void pvscsi_write_cmd_desc(const struct pvscsi_adapter *adapter,
239 u32 cmd, const void *desc, size_t len)
240{
241 const u32 *ptr = desc;
242 size_t i;
243
244 len /= sizeof(*ptr);
245 pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND, cmd);
246 for (i = 0; i < len; i++)
247 pvscsi_reg_write(adapter,
248 PVSCSI_REG_OFFSET_COMMAND_DATA, ptr[i]);
249}
250
251static void pvscsi_abort_cmd(const struct pvscsi_adapter *adapter,
252 const struct pvscsi_ctx *ctx)
253{
254 struct PVSCSICmdDescAbortCmd cmd = { 0 };
255
256 cmd.target = ctx->cmd->device->id;
257 cmd.context = pvscsi_map_context(adapter, ctx);
258
259 pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ABORT_CMD, &cmd, sizeof(cmd));
260}
261
262static void pvscsi_kick_rw_io(const struct pvscsi_adapter *adapter)
263{
264 pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_RW_IO, 0);
265}
266
267static void pvscsi_process_request_ring(const struct pvscsi_adapter *adapter)
268{
269 pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_NON_RW_IO, 0);
270}
271
272static int scsi_is_rw(unsigned char op)
273{
274 return op == READ_6 || op == WRITE_6 ||
275 op == READ_10 || op == WRITE_10 ||
276 op == READ_12 || op == WRITE_12 ||
277 op == READ_16 || op == WRITE_16;
278}
279
280static void pvscsi_kick_io(const struct pvscsi_adapter *adapter,
281 unsigned char op)
282{
283 if (scsi_is_rw(op))
284 pvscsi_kick_rw_io(adapter);
285 else
286 pvscsi_process_request_ring(adapter);
287}
288
289static void ll_adapter_reset(const struct pvscsi_adapter *adapter)
290{
291 dev_dbg(pvscsi_dev(adapter), "Adapter Reset on %p\n", adapter);
292
293 pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ADAPTER_RESET, NULL, 0);
294}
295
296static void ll_bus_reset(const struct pvscsi_adapter *adapter)
297{
298 dev_dbg(pvscsi_dev(adapter), "Resetting bus on %p\n", adapter);
299
300 pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_BUS, NULL, 0);
301}
302
303static void ll_device_reset(const struct pvscsi_adapter *adapter, u32 target)
304{
305 struct PVSCSICmdDescResetDevice cmd = { 0 };
306
307 dev_dbg(pvscsi_dev(adapter), "Resetting device: target=%u\n", target);
308
309 cmd.target = target;
310
311 pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_DEVICE,
312 &cmd, sizeof(cmd));
313}
314
315static void pvscsi_create_sg(struct pvscsi_ctx *ctx,
316 struct scatterlist *sg, unsigned count)
317{
318 unsigned i;
319 struct PVSCSISGElement *sge;
320
321 BUG_ON(count > PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT);
322
323 sge = &ctx->sgl->sge[0];
324 for (i = 0; i < count; i++, sg++) {
325 sge[i].addr = sg_dma_address(sg);
326 sge[i].length = sg_dma_len(sg);
327 sge[i].flags = 0;
328 }
329}
330
331/*
332 * Map all data buffers for a command into PCI space and
333 * setup the scatter/gather list if needed.
334 */
335static void pvscsi_map_buffers(struct pvscsi_adapter *adapter,
336 struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd,
337 struct PVSCSIRingReqDesc *e)
338{
339 unsigned count;
340 unsigned bufflen = scsi_bufflen(cmd);
341 struct scatterlist *sg;
342
343 e->dataLen = bufflen;
344 e->dataAddr = 0;
345 if (bufflen == 0)
346 return;
347
348 sg = scsi_sglist(cmd);
349 count = scsi_sg_count(cmd);
350 if (count != 0) {
351 int segs = scsi_dma_map(cmd);
352 if (segs > 1) {
353 pvscsi_create_sg(ctx, sg, segs);
354
355 e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;
356 ctx->sglPA = pci_map_single(adapter->dev, ctx->sgl,
357 SGL_SIZE, PCI_DMA_TODEVICE);
358 e->dataAddr = ctx->sglPA;
359 } else
360 e->dataAddr = sg_dma_address(sg);
361 } else {
362 /*
363 * In case there is no S/G list, scsi_sglist points
364 * directly to the buffer.
365 */
366 ctx->dataPA = pci_map_single(adapter->dev, sg, bufflen,
367 cmd->sc_data_direction);
368 e->dataAddr = ctx->dataPA;
369 }
370}
371
372static void pvscsi_unmap_buffers(const struct pvscsi_adapter *adapter,
373 struct pvscsi_ctx *ctx)
374{
375 struct scsi_cmnd *cmd;
376 unsigned bufflen;
377
378 cmd = ctx->cmd;
379 bufflen = scsi_bufflen(cmd);
380
381 if (bufflen != 0) {
382 unsigned count = scsi_sg_count(cmd);
383
384 if (count != 0) {
385 scsi_dma_unmap(cmd);
386 if (ctx->sglPA) {
387 pci_unmap_single(adapter->dev, ctx->sglPA,
388 SGL_SIZE, PCI_DMA_TODEVICE);
389 ctx->sglPA = 0;
390 }
391 } else
392 pci_unmap_single(adapter->dev, ctx->dataPA, bufflen,
393 cmd->sc_data_direction);
394 }
395 if (cmd->sense_buffer)
396 pci_unmap_single(adapter->dev, ctx->sensePA,
397 SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
398}
399
400static int __devinit pvscsi_allocate_rings(struct pvscsi_adapter *adapter)
401{
402 adapter->rings_state = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
403 &adapter->ringStatePA);
404 if (!adapter->rings_state)
405 return -ENOMEM;
406
407 adapter->req_pages = min(PVSCSI_MAX_NUM_PAGES_REQ_RING,
408 pvscsi_ring_pages);
409 adapter->req_depth = adapter->req_pages
410 * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
411 adapter->req_ring = pci_alloc_consistent(adapter->dev,
412 adapter->req_pages * PAGE_SIZE,
413 &adapter->reqRingPA);
414 if (!adapter->req_ring)
415 return -ENOMEM;
416
417 adapter->cmp_pages = min(PVSCSI_MAX_NUM_PAGES_CMP_RING,
418 pvscsi_ring_pages);
419 adapter->cmp_ring = pci_alloc_consistent(adapter->dev,
420 adapter->cmp_pages * PAGE_SIZE,
421 &adapter->cmpRingPA);
422 if (!adapter->cmp_ring)
423 return -ENOMEM;
424
425 BUG_ON(!IS_ALIGNED(adapter->ringStatePA, PAGE_SIZE));
426 BUG_ON(!IS_ALIGNED(adapter->reqRingPA, PAGE_SIZE));
427 BUG_ON(!IS_ALIGNED(adapter->cmpRingPA, PAGE_SIZE));
428
429 if (!adapter->use_msg)
430 return 0;
431
432 adapter->msg_pages = min(PVSCSI_MAX_NUM_PAGES_MSG_RING,
433 pvscsi_msg_ring_pages);
434 adapter->msg_ring = pci_alloc_consistent(adapter->dev,
435 adapter->msg_pages * PAGE_SIZE,
436 &adapter->msgRingPA);
437 if (!adapter->msg_ring)
438 return -ENOMEM;
439 BUG_ON(!IS_ALIGNED(adapter->msgRingPA, PAGE_SIZE));
440
441 return 0;
442}
443
444static void pvscsi_setup_all_rings(const struct pvscsi_adapter *adapter)
445{
446 struct PVSCSICmdDescSetupRings cmd = { 0 };
447 dma_addr_t base;
448 unsigned i;
449
450 cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
451 cmd.reqRingNumPages = adapter->req_pages;
452 cmd.cmpRingNumPages = adapter->cmp_pages;
453
454 base = adapter->reqRingPA;
455 for (i = 0; i < adapter->req_pages; i++) {
456 cmd.reqRingPPNs[i] = base >> PAGE_SHIFT;
457 base += PAGE_SIZE;
458 }
459
460 base = adapter->cmpRingPA;
461 for (i = 0; i < adapter->cmp_pages; i++) {
462 cmd.cmpRingPPNs[i] = base >> PAGE_SHIFT;
463 base += PAGE_SIZE;
464 }
465
466 memset(adapter->rings_state, 0, PAGE_SIZE);
467 memset(adapter->req_ring, 0, adapter->req_pages * PAGE_SIZE);
468 memset(adapter->cmp_ring, 0, adapter->cmp_pages * PAGE_SIZE);
469
470 pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_RINGS,
471 &cmd, sizeof(cmd));
472
473 if (adapter->use_msg) {
474 struct PVSCSICmdDescSetupMsgRing cmd_msg = { 0 };
475
476 cmd_msg.numPages = adapter->msg_pages;
477
478 base = adapter->msgRingPA;
479 for (i = 0; i < adapter->msg_pages; i++) {
480 cmd_msg.ringPPNs[i] = base >> PAGE_SHIFT;
481 base += PAGE_SIZE;
482 }
483 memset(adapter->msg_ring, 0, adapter->msg_pages * PAGE_SIZE);
484
485 pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_MSG_RING,
486 &cmd_msg, sizeof(cmd_msg));
487 }
488}
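Each PPN is simply the physical page number of one ring page; since the rings are allocated contiguously and page-aligned (see the BUG_ONs in pvscsi_allocate_rings), the loops above reduce to a closed form. A sketch for the request ring, using the same fields:

	/* Sketch: consecutive pages have consecutive page numbers. */
	for (i = 0; i < adapter->req_pages; i++)
		cmd.reqRingPPNs[i] = (adapter->reqRingPA >> PAGE_SHIFT) + i;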
489
490/*
491 * Pull a completion descriptor off and pass the completion back
492 * to the SCSI mid layer.
493 */
494static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
495 const struct PVSCSIRingCmpDesc *e)
496{
497 struct pvscsi_ctx *ctx;
498 struct scsi_cmnd *cmd;
499 u32 btstat = e->hostStatus;
500 u32 sdstat = e->scsiStatus;
501
502 ctx = pvscsi_get_context(adapter, e->context);
503 cmd = ctx->cmd;
504 pvscsi_unmap_buffers(adapter, ctx);
505 pvscsi_release_context(adapter, ctx);
506 cmd->result = 0;
507
508 if (sdstat != SAM_STAT_GOOD &&
509 (btstat == BTSTAT_SUCCESS ||
510 btstat == BTSTAT_LINKED_COMMAND_COMPLETED ||
511 btstat == BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG)) {
512 cmd->result = (DID_OK << 16) | sdstat;
513 if (sdstat == SAM_STAT_CHECK_CONDITION && cmd->sense_buffer)
514 cmd->result |= (DRIVER_SENSE << 24);
515 } else
516 switch (btstat) {
517 case BTSTAT_SUCCESS:
518 case BTSTAT_LINKED_COMMAND_COMPLETED:
519 case BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG:
520 /* If everything went fine, let's move on.. */
521 cmd->result = (DID_OK << 16);
522 break;
523
524 case BTSTAT_DATARUN:
525 case BTSTAT_DATA_UNDERRUN:
526 /* Report residual data in underruns */
527 scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
528 cmd->result = (DID_ERROR << 16);
529 break;
530
531 case BTSTAT_SELTIMEO:
532 /* Our emulation returns this for non-connected devs */
533 cmd->result = (DID_BAD_TARGET << 16);
534 break;
535
536 case BTSTAT_LUNMISMATCH:
537 case BTSTAT_TAGREJECT:
538 case BTSTAT_BADMSG:
539 cmd->result = (DRIVER_INVALID << 24);
540 /* fall through */
541
542 case BTSTAT_HAHARDWARE:
543 case BTSTAT_INVPHASE:
544 case BTSTAT_HATIMEOUT:
545 case BTSTAT_NORESPONSE:
546 case BTSTAT_DISCONNECT:
547 case BTSTAT_HASOFTWARE:
548 case BTSTAT_BUSFREE:
549 case BTSTAT_SENSFAILED:
550 cmd->result |= (DID_ERROR << 16);
551 break;
552
553 case BTSTAT_SENTRST:
554 case BTSTAT_RECVRST:
555 case BTSTAT_BUSRESET:
556 cmd->result = (DID_RESET << 16);
557 break;
558
559 case BTSTAT_ABORTQUEUE:
560 cmd->result = (DID_ABORT << 16);
561 break;
562
563 case BTSTAT_SCSIPARITY:
564 cmd->result = (DID_PARITY << 16);
565 break;
566
567 default:
568 cmd->result = (DID_ERROR << 16);
569 scmd_printk(KERN_DEBUG, cmd,
570 "Unknown completion status: 0x%x\n",
571 btstat);
572 }
573
574 dev_dbg(&cmd->device->sdev_gendev,
575 "cmd=%p %x ctx=%p result=0x%x status=0x%x,%x\n",
576 cmd, cmd->cmnd[0], ctx, cmd->result, btstat, sdstat);
577
578 cmd->scsi_done(cmd);
579}
580
581/*
582 * barrier usage : Since the PVSCSI device is emulated, there could be cases
583 * where we may want to serialize some accesses between the driver and the
584 * emulation layer. We use compiler barriers instead of the more expensive
585 * memory barriers because PVSCSI is only supported on X86 which has strong
586 * memory access ordering.
587 */
588static void pvscsi_process_completion_ring(struct pvscsi_adapter *adapter)
589{
590 struct PVSCSIRingsState *s = adapter->rings_state;
591 struct PVSCSIRingCmpDesc *ring = adapter->cmp_ring;
592 u32 cmp_entries = s->cmpNumEntriesLog2;
593
594 while (s->cmpConsIdx != s->cmpProdIdx) {
595 struct PVSCSIRingCmpDesc *e = ring + (s->cmpConsIdx &
596 MASK(cmp_entries));
597 /*
598 * This barrier() ensures that *e is not dereferenced while
599 * the device emulation still writes data into the slot.
600 * Since the device emulation advances s->cmpProdIdx only after
601 * updating the slot, we check cmpProdIdx first.
602 */
603 barrier();
604 pvscsi_complete_request(adapter, e);
605 /*
606 * This barrier() ensures that the compiler doesn't reorder the
607 * write to s->cmpConsIdx before the read of (*e) inside
608 * pvscsi_complete_request. Otherwise, the device emulation may
609 * overwrite *e before we have had a chance to read it.
610 */
611 barrier();
612 s->cmpConsIdx++;
613 }
614}
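The loop above is a single-producer/single-consumer ring: the emulation owns cmpProdIdx, the driver owns cmpConsIdx, and both run freely with the slot picked by masking. The same discipline reduced to a skeleton, with hypothetical prod/cons/consume() names; per the comment block above, barrier() suffices only because x86 orders these accesses strongly:

	/* Sketch: consumer side of an SPSC ring. */
	while (cons != prod) {			/* prod advanced by producer */
		barrier();			/* check index before reading slot */
		consume(&ring[cons & MASK(entries_log2)]);
		barrier();			/* finish read before ... */
		cons++;				/* ... handing the slot back */
	}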
615
616/*
617 * Translate a Linux SCSI request into a request ring entry.
618 */
619static int pvscsi_queue_ring(struct pvscsi_adapter *adapter,
620 struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd)
621{
622 struct PVSCSIRingsState *s;
623 struct PVSCSIRingReqDesc *e;
624 struct scsi_device *sdev;
625 u32 req_entries;
626
627 s = adapter->rings_state;
628 sdev = cmd->device;
629 req_entries = s->reqNumEntriesLog2;
630
631 /*
632 * If this condition holds, we might have room on the request ring, but
633 * we might not have room on the completion ring for the response.
634 * However, we have already ruled out this possibility - we would not
635 * have successfully allocated a context if it were true, since we only
636 * have one context per request entry. Check for it anyway, since it
637 * would be a serious bug.
638 */
639 if (s->reqProdIdx - s->cmpConsIdx >= 1 << req_entries) {
640 scmd_printk(KERN_ERR, cmd, "vmw_pvscsi: "
641 "ring full: reqProdIdx=%d cmpConsIdx=%d\n",
642 s->reqProdIdx, s->cmpConsIdx);
643 return -1;
644 }
645
646 e = adapter->req_ring + (s->reqProdIdx & MASK(req_entries));
647
648 e->bus = sdev->channel;
649 e->target = sdev->id;
650 memset(e->lun, 0, sizeof(e->lun));
651 e->lun[1] = sdev->lun;
652
653 if (cmd->sense_buffer) {
654 ctx->sensePA = pci_map_single(adapter->dev, cmd->sense_buffer,
655 SCSI_SENSE_BUFFERSIZE,
656 PCI_DMA_FROMDEVICE);
657 e->senseAddr = ctx->sensePA;
658 e->senseLen = SCSI_SENSE_BUFFERSIZE;
659 } else {
660 e->senseLen = 0;
661 e->senseAddr = 0;
662 }
663 e->cdbLen = cmd->cmd_len;
664 e->vcpuHint = smp_processor_id();
665 memcpy(e->cdb, cmd->cmnd, e->cdbLen);
666
667 e->tag = SIMPLE_QUEUE_TAG;
668 if (sdev->tagged_supported &&
669 (cmd->tag == HEAD_OF_QUEUE_TAG ||
670 cmd->tag == ORDERED_QUEUE_TAG))
671 e->tag = cmd->tag;
672
673 if (cmd->sc_data_direction == DMA_FROM_DEVICE)
674 e->flags = PVSCSI_FLAG_CMD_DIR_TOHOST;
675 else if (cmd->sc_data_direction == DMA_TO_DEVICE)
676 e->flags = PVSCSI_FLAG_CMD_DIR_TODEVICE;
677 else if (cmd->sc_data_direction == DMA_NONE)
678 e->flags = PVSCSI_FLAG_CMD_DIR_NONE;
679 else
680 e->flags = 0;
681
682 pvscsi_map_buffers(adapter, ctx, cmd, e);
683
684 e->context = pvscsi_map_context(adapter, ctx);
685
686 barrier();
687
688 s->reqProdIdx++;
689
690 return 0;
691}
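The fullness test near the top of pvscsi_queue_ring is modular arithmetic on free-running u32 counters; a worked example with an assumed ring size:

	/* With reqNumEntriesLog2 == 7 the ring holds 1 << 7 == 128 slots.
	 * reqProdIdx - cmpConsIdx is the number of commands in flight even
	 * across u32 wrap-around (unsigned subtraction), so >= 128 means no
	 * free slot -- and since there is one context per slot, context
	 * allocation would have failed first, hence "serious bug" above. */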
692
693static int pvscsi_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
694{
695 struct Scsi_Host *host = cmd->device->host;
696 struct pvscsi_adapter *adapter = shost_priv(host);
697 struct pvscsi_ctx *ctx;
698 unsigned long flags;
699
700 spin_lock_irqsave(&adapter->hw_lock, flags);
701
702 ctx = pvscsi_acquire_context(adapter, cmd);
703 if (!ctx || pvscsi_queue_ring(adapter, ctx, cmd) != 0) {
704 if (ctx)
705 pvscsi_release_context(adapter, ctx);
706 spin_unlock_irqrestore(&adapter->hw_lock, flags);
707 return SCSI_MLQUEUE_HOST_BUSY;
708 }
709
710 cmd->scsi_done = done;
711
712 dev_dbg(&cmd->device->sdev_gendev,
713 "queued cmd %p, ctx %p, op=%x\n", cmd, ctx, cmd->cmnd[0]);
714
715 spin_unlock_irqrestore(&adapter->hw_lock, flags);
716
717 pvscsi_kick_io(adapter, cmd->cmnd[0]);
718
719 return 0;
720}
721
722static int pvscsi_abort(struct scsi_cmnd *cmd)
723{
724 struct pvscsi_adapter *adapter = shost_priv(cmd->device->host);
725 struct pvscsi_ctx *ctx;
726 unsigned long flags;
727
728 scmd_printk(KERN_DEBUG, cmd, "task abort on host %u, %p\n",
729 adapter->host->host_no, cmd);
730
731 spin_lock_irqsave(&adapter->hw_lock, flags);
732
733 /*
734 * Poll the completion ring first - we might be trying to abort
735 * a command that is waiting to be dispatched in the completion ring.
736 */
737 pvscsi_process_completion_ring(adapter);
738
739 /*
740 * If there is no context for the command, it either already succeeded
741 * or else was never properly issued. Not our problem.
742 */
743 ctx = pvscsi_find_context(adapter, cmd);
744 if (!ctx) {
745 scmd_printk(KERN_DEBUG, cmd, "Failed to abort cmd %p\n", cmd);
746 goto out;
747 }
748
749 pvscsi_abort_cmd(adapter, ctx);
750
751 pvscsi_process_completion_ring(adapter);
752
753out:
754 spin_unlock_irqrestore(&adapter->hw_lock, flags);
755 return SUCCESS;
756}
757
758/*
759 * Abort all outstanding requests. This is only safe to use if the completion
760 * ring will never be walked again or the device has been reset, because it
761 * destroys the 1-1 mapping between context field passed to emulation and our
762 * request structure.
763 */
764static void pvscsi_reset_all(struct pvscsi_adapter *adapter)
765{
766 unsigned i;
767
768 for (i = 0; i < adapter->req_depth; i++) {
769 struct pvscsi_ctx *ctx = &adapter->cmd_map[i];
770 struct scsi_cmnd *cmd = ctx->cmd;
771 if (cmd) {
772 scmd_printk(KERN_ERR, cmd,
773 "Forced reset on cmd %p\n", cmd);
774 pvscsi_unmap_buffers(adapter, ctx);
775 pvscsi_release_context(adapter, ctx);
776 cmd->result = (DID_RESET << 16);
777 cmd->scsi_done(cmd);
778 }
779 }
780}
781
782static int pvscsi_host_reset(struct scsi_cmnd *cmd)
783{
784 struct Scsi_Host *host = cmd->device->host;
785 struct pvscsi_adapter *adapter = shost_priv(host);
786 unsigned long flags;
787 bool use_msg;
788
789 scmd_printk(KERN_INFO, cmd, "SCSI Host reset\n");
790
791 spin_lock_irqsave(&adapter->hw_lock, flags);
792
793 use_msg = adapter->use_msg;
794
795 if (use_msg) {
796 adapter->use_msg = 0;
797 spin_unlock_irqrestore(&adapter->hw_lock, flags);
798
799 /*
800 * Now that we know that the ISR won't add more work on the
801 * workqueue we can safely flush any outstanding work.
802 */
803 flush_workqueue(adapter->workqueue);
804 spin_lock_irqsave(&adapter->hw_lock, flags);
805 }
806
807 /*
808 * We're going to tear down the entire ring structure and set it back
809 * up, so we stall new requests until all completions are flushed and
810 * the rings are back in place.
811 */
812
813 pvscsi_process_request_ring(adapter);
814
815 ll_adapter_reset(adapter);
816
817 /*
818 * Now process any completions. Note we do this AFTER adapter reset,
819 * which is strange, but stops races where completions get posted
820 * between processing the ring and issuing the reset. The backend will
821 * not touch the ring memory after reset, so the immediately pre-reset
822 * completion ring state is still valid.
823 */
824 pvscsi_process_completion_ring(adapter);
825
826 pvscsi_reset_all(adapter);
827 adapter->use_msg = use_msg;
828 pvscsi_setup_all_rings(adapter);
829 pvscsi_unmask_intr(adapter);
830
831 spin_unlock_irqrestore(&adapter->hw_lock, flags);
832
833 return SUCCESS;
834}
835
836static int pvscsi_bus_reset(struct scsi_cmnd *cmd)
837{
838 struct Scsi_Host *host = cmd->device->host;
839 struct pvscsi_adapter *adapter = shost_priv(host);
840 unsigned long flags;
841
842 scmd_printk(KERN_INFO, cmd, "SCSI Bus reset\n");
843
844 /*
845 * We don't want to queue new requests for this bus after
846 * flushing all pending requests to emulation, since new
847 * requests could then sneak in during this bus reset phase,
848 * so take the lock now.
849 */
850 spin_lock_irqsave(&adapter->hw_lock, flags);
851
852 pvscsi_process_request_ring(adapter);
853 ll_bus_reset(adapter);
854 pvscsi_process_completion_ring(adapter);
855
856 spin_unlock_irqrestore(&adapter->hw_lock, flags);
857
858 return SUCCESS;
859}
860
861static int pvscsi_device_reset(struct scsi_cmnd *cmd)
862{
863 struct Scsi_Host *host = cmd->device->host;
864 struct pvscsi_adapter *adapter = shost_priv(host);
865 unsigned long flags;
866
867 scmd_printk(KERN_INFO, cmd, "SCSI device reset on scsi%u:%u\n",
868 host->host_no, cmd->device->id);
869
870 /*
871 * We don't want to queue new requests for this device after flushing
872 * all pending requests to emulation, since new requests could then
873 * sneak in during this device reset phase, so take the lock now.
874 */
875 spin_lock_irqsave(&adapter->hw_lock, flags);
876
877 pvscsi_process_request_ring(adapter);
878 ll_device_reset(adapter, cmd->device->id);
879 pvscsi_process_completion_ring(adapter);
880
881 spin_unlock_irqrestore(&adapter->hw_lock, flags);
882
883 return SUCCESS;
884}
885
886static struct scsi_host_template pvscsi_template;
887
888static const char *pvscsi_info(struct Scsi_Host *host)
889{
890 struct pvscsi_adapter *adapter = shost_priv(host);
891 static char buf[256];
892
893 sprintf(buf, "VMware PVSCSI storage adapter rev %d, req/cmp/msg rings: "
894 "%u/%u/%u pages, cmd_per_lun=%u", adapter->rev,
895 adapter->req_pages, adapter->cmp_pages, adapter->msg_pages,
896 pvscsi_template.cmd_per_lun);
897
898 return buf;
899}
900
901static struct scsi_host_template pvscsi_template = {
902 .module = THIS_MODULE,
903 .name = "VMware PVSCSI Host Adapter",
904 .proc_name = "vmw_pvscsi",
905 .info = pvscsi_info,
906 .queuecommand = pvscsi_queue,
907 .this_id = -1,
908 .sg_tablesize = PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT,
909 .dma_boundary = UINT_MAX,
910 .max_sectors = 0xffff,
911 .use_clustering = ENABLE_CLUSTERING,
912 .eh_abort_handler = pvscsi_abort,
913 .eh_device_reset_handler = pvscsi_device_reset,
914 .eh_bus_reset_handler = pvscsi_bus_reset,
915 .eh_host_reset_handler = pvscsi_host_reset,
916};
917
918static void pvscsi_process_msg(const struct pvscsi_adapter *adapter,
919 const struct PVSCSIRingMsgDesc *e)
920{
921 struct PVSCSIRingsState *s = adapter->rings_state;
922 struct Scsi_Host *host = adapter->host;
923 struct scsi_device *sdev;
924
925 printk(KERN_INFO "vmw_pvscsi: msg type: 0x%x - MSG RING: %u/%u (%u)\n",
926 e->type, s->msgProdIdx, s->msgConsIdx, s->msgNumEntriesLog2);
927
928 BUILD_BUG_ON(PVSCSI_MSG_LAST != 2);
929
930 if (e->type == PVSCSI_MSG_DEV_ADDED) {
931 struct PVSCSIMsgDescDevStatusChanged *desc;
932 desc = (struct PVSCSIMsgDescDevStatusChanged *)e;
933
934 printk(KERN_INFO
935 "vmw_pvscsi: msg: device added at scsi%u:%u:%u\n",
936 desc->bus, desc->target, desc->lun[1]);
937
938 if (!scsi_host_get(host))
939 return;
940
941 sdev = scsi_device_lookup(host, desc->bus, desc->target,
942 desc->lun[1]);
943 if (sdev) {
944 printk(KERN_INFO "vmw_pvscsi: device already exists\n");
945 scsi_device_put(sdev);
946 } else
947 scsi_add_device(adapter->host, desc->bus,
948 desc->target, desc->lun[1]);
949
950 scsi_host_put(host);
951 } else if (e->type == PVSCSI_MSG_DEV_REMOVED) {
952 struct PVSCSIMsgDescDevStatusChanged *desc;
953 desc = (struct PVSCSIMsgDescDevStatusChanged *)e;
954
955 printk(KERN_INFO
956 "vmw_pvscsi: msg: device removed at scsi%u:%u:%u\n",
957 desc->bus, desc->target, desc->lun[1]);
958
959 if (!scsi_host_get(host))
960 return;
961
962 sdev = scsi_device_lookup(host, desc->bus, desc->target,
963 desc->lun[1]);
964 if (sdev) {
965 scsi_remove_device(sdev);
966 scsi_device_put(sdev);
967 } else
968 printk(KERN_INFO
969 "vmw_pvscsi: failed to lookup scsi%u:%u:%u\n",
970 desc->bus, desc->target, desc->lun[1]);
971
972 scsi_host_put(host);
973 }
974}
975
976static int pvscsi_msg_pending(const struct pvscsi_adapter *adapter)
977{
978 struct PVSCSIRingsState *s = adapter->rings_state;
979
980 return s->msgProdIdx != s->msgConsIdx;
981}
982
983static void pvscsi_process_msg_ring(const struct pvscsi_adapter *adapter)
984{
985 struct PVSCSIRingsState *s = adapter->rings_state;
986 struct PVSCSIRingMsgDesc *ring = adapter->msg_ring;
987 u32 msg_entries = s->msgNumEntriesLog2;
988
989 while (pvscsi_msg_pending(adapter)) {
990 struct PVSCSIRingMsgDesc *e = ring + (s->msgConsIdx &
991 MASK(msg_entries));
992
993 barrier();
994 pvscsi_process_msg(adapter, e);
995 barrier();
996 s->msgConsIdx++;
997 }
998}
999
1000static void pvscsi_msg_workqueue_handler(struct work_struct *data)
1001{
1002 struct pvscsi_adapter *adapter;
1003
1004 adapter = container_of(data, struct pvscsi_adapter, work);
1005
1006 pvscsi_process_msg_ring(adapter);
1007}
1008
1009static int pvscsi_setup_msg_workqueue(struct pvscsi_adapter *adapter)
1010{
1011 char name[32];
1012
1013 if (!pvscsi_use_msg)
1014 return 0;
1015
1016 pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND,
1017 PVSCSI_CMD_SETUP_MSG_RING);
1018
1019 if (pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_COMMAND_STATUS) == -1)
1020 return 0;
1021
1022 snprintf(name, sizeof(name),
1023 "vmw_pvscsi_wq_%u", adapter->host->host_no);
1024
1025 adapter->workqueue = create_singlethread_workqueue(name);
1026 if (!adapter->workqueue) {
1027 printk(KERN_ERR "vmw_pvscsi: failed to create work queue\n");
1028 return 0;
1029 }
1030 INIT_WORK(&adapter->work, pvscsi_msg_workqueue_handler);
1031
1032 return 1;
1033}
1034
1035static irqreturn_t pvscsi_isr(int irq, void *devp)
1036{
1037 struct pvscsi_adapter *adapter = devp;
1038 int handled;
1039
1040 if (adapter->use_msi || adapter->use_msix)
1041 handled = true;
1042 else {
1043 u32 val = pvscsi_read_intr_status(adapter);
1044 handled = (val & PVSCSI_INTR_ALL_SUPPORTED) != 0;
1045 if (handled)
1046 pvscsi_write_intr_status(devp, val);
1047 }
1048
1049 if (handled) {
1050 unsigned long flags;
1051
1052 spin_lock_irqsave(&adapter->hw_lock, flags);
1053
1054 pvscsi_process_completion_ring(adapter);
1055 if (adapter->use_msg && pvscsi_msg_pending(adapter))
1056 queue_work(adapter->workqueue, &adapter->work);
1057
1058 spin_unlock_irqrestore(&adapter->hw_lock, flags);
1059 }
1060
1061 return IRQ_RETVAL(handled);
1062}
1063
1064static void pvscsi_free_sgls(const struct pvscsi_adapter *adapter)
1065{
1066 struct pvscsi_ctx *ctx = adapter->cmd_map;
1067 unsigned i;
1068
1069 for (i = 0; i < adapter->req_depth; ++i, ++ctx)
1070 free_pages((unsigned long)ctx->sgl, get_order(SGL_SIZE));
1071}
1072
1073static int pvscsi_setup_msix(const struct pvscsi_adapter *adapter,
1074 unsigned int *irq)
1075{
1076 struct msix_entry entry = { 0, PVSCSI_VECTOR_COMPLETION };
1077 int ret;
1078
1079 ret = pci_enable_msix(adapter->dev, &entry, 1);
1080 if (ret)
1081 return ret;
1082
1083 *irq = entry.vector;
1084
1085 return 0;
1086}
1087
1088static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter)
1089{
1090 if (adapter->irq) {
1091 free_irq(adapter->irq, adapter);
1092 adapter->irq = 0;
1093 }
1094 if (adapter->use_msi) {
1095 pci_disable_msi(adapter->dev);
1096 adapter->use_msi = 0;
1097 } else if (adapter->use_msix) {
1098 pci_disable_msix(adapter->dev);
1099 adapter->use_msix = 0;
1100 }
1101}
1102
1103static void pvscsi_release_resources(struct pvscsi_adapter *adapter)
1104{
1105 pvscsi_shutdown_intr(adapter);
1106
1107 if (adapter->workqueue)
1108 destroy_workqueue(adapter->workqueue);
1109
1110 if (adapter->mmioBase)
1111 pci_iounmap(adapter->dev, adapter->mmioBase);
1112
1113 pci_release_regions(adapter->dev);
1114
1115 if (adapter->cmd_map) {
1116 pvscsi_free_sgls(adapter);
1117 kfree(adapter->cmd_map);
1118 }
1119
1120 if (adapter->rings_state)
1121 pci_free_consistent(adapter->dev, PAGE_SIZE,
1122 adapter->rings_state, adapter->ringStatePA);
1123
1124 if (adapter->req_ring)
1125 pci_free_consistent(adapter->dev,
1126 adapter->req_pages * PAGE_SIZE,
1127 adapter->req_ring, adapter->reqRingPA);
1128
1129 if (adapter->cmp_ring)
1130 pci_free_consistent(adapter->dev,
1131 adapter->cmp_pages * PAGE_SIZE,
1132 adapter->cmp_ring, adapter->cmpRingPA);
1133
1134 if (adapter->msg_ring)
1135 pci_free_consistent(adapter->dev,
1136 adapter->msg_pages * PAGE_SIZE,
1137 adapter->msg_ring, adapter->msgRingPA);
1138}
1139
1140/*
1141 * Allocate scatter gather lists.
1142 *
1143 * These are statically allocated. Trying to be clever was not worth it.
1144 *
1145 * Dynamic allocation can fail, and we can't go deep into the memory
1146 * allocator, since we're a SCSI driver, and trying too hard to allocate
1147 * memory might generate disk I/O. We also don't want to fail disk I/O
1148 * in that case because we can't get an allocation - the I/O could be
1149 * trying to swap out data to free memory. Since that is pathological,
1150 * just use a statically allocated scatter list.
1151 *
1152 */
1153static int __devinit pvscsi_allocate_sg(struct pvscsi_adapter *adapter)
1154{
1155 struct pvscsi_ctx *ctx;
1156 int i;
1157
1158 ctx = adapter->cmd_map;
1159 BUILD_BUG_ON(sizeof(struct pvscsi_sg_list) > SGL_SIZE);
1160
1161 for (i = 0; i < adapter->req_depth; ++i, ++ctx) {
1162 ctx->sgl = (void *)__get_free_pages(GFP_KERNEL,
1163 get_order(SGL_SIZE));
1164 ctx->sglPA = 0;
1165 BUG_ON(!IS_ALIGNED(((unsigned long)ctx->sgl), PAGE_SIZE));
1166 if (!ctx->sgl) {
1167 for (; i >= 0; --i, --ctx) {
1168 free_pages((unsigned long)ctx->sgl,
1169 get_order(SGL_SIZE));
1170 ctx->sgl = NULL;
1171 }
1172 return -ENOMEM;
1173 }
1174 }
1175
1176 return 0;
1177}
1178
1179static int __devinit pvscsi_probe(struct pci_dev *pdev,
1180 const struct pci_device_id *id)
1181{
1182 struct pvscsi_adapter *adapter;
1183 struct Scsi_Host *host;
1184 unsigned int i;
1185 unsigned long flags = 0;
1186 int error;
1187
1188 error = -ENODEV;
1189
1190 if (pci_enable_device(pdev))
1191 return error;
1192
1193 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 &&
1194 pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
1195 printk(KERN_INFO "vmw_pvscsi: using 64bit dma\n");
1196 } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0 &&
1197 pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) == 0) {
1198 printk(KERN_INFO "vmw_pvscsi: using 32bit dma\n");
1199 } else {
1200 printk(KERN_ERR "vmw_pvscsi: failed to set DMA mask\n");
1201 goto out_disable_device;
1202 }
1203
1204 pvscsi_template.can_queue =
1205 min(PVSCSI_MAX_NUM_PAGES_REQ_RING, pvscsi_ring_pages) *
1206 PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
1207 pvscsi_template.cmd_per_lun =
1208 min(pvscsi_template.can_queue, pvscsi_cmd_per_lun);
1209 host = scsi_host_alloc(&pvscsi_template, sizeof(struct pvscsi_adapter));
1210 if (!host) {
1211 printk(KERN_ERR "vmw_pvscsi: failed to allocate host\n");
1212 goto out_disable_device;
1213 }
1214
1215 adapter = shost_priv(host);
1216 memset(adapter, 0, sizeof(*adapter));
1217 adapter->dev = pdev;
1218 adapter->host = host;
1219
1220 spin_lock_init(&adapter->hw_lock);
1221
1222 host->max_channel = 0;
1223 host->max_id = 16;
1224 host->max_lun = 1;
1225 host->max_cmd_len = 16;
1226
1227 adapter->rev = pdev->revision;
1228
1229 if (pci_request_regions(pdev, "vmw_pvscsi")) {
1230 printk(KERN_ERR "vmw_pvscsi: pci memory selection failed\n");
1231 goto out_free_host;
1232 }
1233
1234 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1235 if ((pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO))
1236 continue;
1237
1238 if (pci_resource_len(pdev, i) < PVSCSI_MEM_SPACE_SIZE)
1239 continue;
1240
1241 break;
1242 }
1243
1244 if (i == DEVICE_COUNT_RESOURCE) {
1245 printk(KERN_ERR
1246 "vmw_pvscsi: adapter has no suitable MMIO region\n");
1247 goto out_release_resources;
1248 }
1249
1250 adapter->mmioBase = pci_iomap(pdev, i, PVSCSI_MEM_SPACE_SIZE);
1251
1252 if (!adapter->mmioBase) {
1253 printk(KERN_ERR
1254 "vmw_pvscsi: can't iomap for BAR %d memsize %lu\n",
1255 i, PVSCSI_MEM_SPACE_SIZE);
1256 goto out_release_resources;
1257 }
1258
1259 pci_set_master(pdev);
1260 pci_set_drvdata(pdev, host);
1261
1262 ll_adapter_reset(adapter);
1263
1264 adapter->use_msg = pvscsi_setup_msg_workqueue(adapter);
1265
1266 error = pvscsi_allocate_rings(adapter);
1267 if (error) {
1268 printk(KERN_ERR "vmw_pvscsi: unable to allocate ring memory\n");
1269 goto out_release_resources;
1270 }
1271
1272 /*
1273 * From this point on we should reset the adapter if anything goes
1274 * wrong.
1275 */
1276 pvscsi_setup_all_rings(adapter);
1277
1278 adapter->cmd_map = kcalloc(adapter->req_depth,
1279 sizeof(struct pvscsi_ctx), GFP_KERNEL);
1280 if (!adapter->cmd_map) {
1281 printk(KERN_ERR "vmw_pvscsi: failed to allocate memory.\n");
1282 error = -ENOMEM;
1283 goto out_reset_adapter;
1284 }
1285
1286 INIT_LIST_HEAD(&adapter->cmd_pool);
1287 for (i = 0; i < adapter->req_depth; i++) {
1288 struct pvscsi_ctx *ctx = adapter->cmd_map + i;
1289 list_add(&ctx->list, &adapter->cmd_pool);
1290 }
1291
1292 error = pvscsi_allocate_sg(adapter);
1293 if (error) {
1294 printk(KERN_ERR "vmw_pvscsi: unable to allocate s/g table\n");
1295 goto out_reset_adapter;
1296 }
1297
1298 if (!pvscsi_disable_msix &&
1299 pvscsi_setup_msix(adapter, &adapter->irq) == 0) {
1300 printk(KERN_INFO "vmw_pvscsi: using MSI-X\n");
1301 adapter->use_msix = 1;
1302 } else if (!pvscsi_disable_msi && pci_enable_msi(pdev) == 0) {
1303 printk(KERN_INFO "vmw_pvscsi: using MSI\n");
1304 adapter->use_msi = 1;
1305 adapter->irq = pdev->irq;
1306 } else {
1307 printk(KERN_INFO "vmw_pvscsi: using INTx\n");
1308 adapter->irq = pdev->irq;
1309 flags = IRQF_SHARED;
1310 }
1311
1312 error = request_irq(adapter->irq, pvscsi_isr, flags,
1313 "vmw_pvscsi", adapter);
1314 if (error) {
1315 printk(KERN_ERR
1316 "vmw_pvscsi: unable to request IRQ: %d\n", error);
1317 adapter->irq = 0;
1318 goto out_reset_adapter;
1319 }
1320
1321 error = scsi_add_host(host, &pdev->dev);
1322 if (error) {
1323 printk(KERN_ERR
1324 "vmw_pvscsi: scsi_add_host failed: %d\n", error);
1325 goto out_reset_adapter;
1326 }
1327
1328 dev_info(&pdev->dev, "VMware PVSCSI rev %d host #%u\n",
1329 adapter->rev, host->host_no);
1330
1331 pvscsi_unmask_intr(adapter);
1332
1333 scsi_scan_host(host);
1334
1335 return 0;
1336
1337out_reset_adapter:
1338 ll_adapter_reset(adapter);
1339out_release_resources:
1340 pvscsi_release_resources(adapter);
1341out_free_host:
1342 scsi_host_put(host);
1343out_disable_device:
1344 pci_set_drvdata(pdev, NULL);
1345 pci_disable_device(pdev);
1346
1347 return error;
1348}
1349
1350static void __pvscsi_shutdown(struct pvscsi_adapter *adapter)
1351{
1352 pvscsi_mask_intr(adapter);
1353
1354 if (adapter->workqueue)
1355 flush_workqueue(adapter->workqueue);
1356
1357 pvscsi_shutdown_intr(adapter);
1358
1359 pvscsi_process_request_ring(adapter);
1360 pvscsi_process_completion_ring(adapter);
1361 ll_adapter_reset(adapter);
1362}
1363
1364static void pvscsi_shutdown(struct pci_dev *dev)
1365{
1366 struct Scsi_Host *host = pci_get_drvdata(dev);
1367 struct pvscsi_adapter *adapter = shost_priv(host);
1368
1369 __pvscsi_shutdown(adapter);
1370}
1371
1372static void pvscsi_remove(struct pci_dev *pdev)
1373{
1374 struct Scsi_Host *host = pci_get_drvdata(pdev);
1375 struct pvscsi_adapter *adapter = shost_priv(host);
1376
1377 scsi_remove_host(host);
1378
1379 __pvscsi_shutdown(adapter);
1380 pvscsi_release_resources(adapter);
1381
1382 scsi_host_put(host);
1383
1384 pci_set_drvdata(pdev, NULL);
1385 pci_disable_device(pdev);
1386}
1387
1388static struct pci_driver pvscsi_pci_driver = {
1389 .name = "vmw_pvscsi",
1390 .id_table = pvscsi_pci_tbl,
1391 .probe = pvscsi_probe,
1392 .remove = __devexit_p(pvscsi_remove),
1393 .shutdown = pvscsi_shutdown,
1394};
1395
1396static int __init pvscsi_init(void)
1397{
1398 pr_info("%s - version %s\n",
1399 PVSCSI_LINUX_DRIVER_DESC, PVSCSI_DRIVER_VERSION_STRING);
1400 return pci_register_driver(&pvscsi_pci_driver);
1401}
1402
1403static void __exit pvscsi_exit(void)
1404{
1405 pci_unregister_driver(&pvscsi_pci_driver);
1406}
1407
1408module_init(pvscsi_init);
1409module_exit(pvscsi_exit);
diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h
new file mode 100644
index 000000000000..62e36e75715e
--- /dev/null
+++ b/drivers/scsi/vmw_pvscsi.h
@@ -0,0 +1,397 @@
1/*
2 * VMware PVSCSI header file
3 *
4 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; version 2 of the License and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more
14 * details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Maintained by: Alok N Kataria <akataria@vmware.com>
21 *
22 */
23
24#ifndef _VMW_PVSCSI_H_
25#define _VMW_PVSCSI_H_
26
27#include <linux/types.h>
28
29#define PVSCSI_DRIVER_VERSION_STRING "1.0.1.0-k"
30
31#define PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT 128
32
33#define MASK(n) ((1 << (n)) - 1) /* make an n-bit mask */
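A worked example of the mask, as used by the ring index arithmetic in the driver:

	/* Example: MASK(3) == 0x7, so an 8-entry ring addresses slot
	 * (idx & MASK(3)) and free-running indices wrap without a modulo. */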
34
35#define PCI_VENDOR_ID_VMWARE 0x15AD
36#define PCI_DEVICE_ID_VMWARE_PVSCSI 0x07C0
37
38/*
39 * host adapter status/error codes
40 */
41enum HostBusAdapterStatus {
42 BTSTAT_SUCCESS = 0x00, /* CCB complete normally with no errors */
43 BTSTAT_LINKED_COMMAND_COMPLETED = 0x0a,
44 BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG = 0x0b,
45 BTSTAT_DATA_UNDERRUN = 0x0c,
46 BTSTAT_SELTIMEO = 0x11, /* SCSI selection timeout */
47 BTSTAT_DATARUN = 0x12, /* data overrun/underrun */
48 BTSTAT_BUSFREE = 0x13, /* unexpected bus free */
49 BTSTAT_INVPHASE = 0x14, /* invalid bus phase or sequence requested by target */
50 BTSTAT_LUNMISMATCH = 0x17, /* linked CCB has different LUN from first CCB */
51 BTSTAT_SENSFAILED = 0x1b, /* auto request sense failed */
52 BTSTAT_TAGREJECT = 0x1c, /* SCSI II tagged queueing message rejected by target */
53 BTSTAT_BADMSG = 0x1d, /* unsupported message received by the host adapter */
54 BTSTAT_HAHARDWARE = 0x20, /* host adapter hardware failed */
55 BTSTAT_NORESPONSE = 0x21, /* target did not respond to SCSI ATN, sent a SCSI RST */
56 BTSTAT_SENTRST = 0x22, /* host adapter asserted a SCSI RST */
57 BTSTAT_RECVRST = 0x23, /* other SCSI devices asserted a SCSI RST */
58 BTSTAT_DISCONNECT = 0x24, /* target device reconnected improperly (w/o tag) */
59 BTSTAT_BUSRESET = 0x25, /* host adapter issued BUS device reset */
60 BTSTAT_ABORTQUEUE = 0x26, /* abort queue generated */
61 BTSTAT_HASOFTWARE = 0x27, /* host adapter software error */
62 BTSTAT_HATIMEOUT = 0x30, /* host adapter hardware timeout error */
63 BTSTAT_SCSIPARITY = 0x34, /* SCSI parity error detected */
64};
65
66/*
67 * Register offsets.
68 *
 69 * These registers are accessible both via i/o space and memory-mapped i/o.
70 */
71
72enum PVSCSIRegOffset {
73 PVSCSI_REG_OFFSET_COMMAND = 0x0,
74 PVSCSI_REG_OFFSET_COMMAND_DATA = 0x4,
75 PVSCSI_REG_OFFSET_COMMAND_STATUS = 0x8,
76 PVSCSI_REG_OFFSET_LAST_STS_0 = 0x100,
77 PVSCSI_REG_OFFSET_LAST_STS_1 = 0x104,
78 PVSCSI_REG_OFFSET_LAST_STS_2 = 0x108,
79 PVSCSI_REG_OFFSET_LAST_STS_3 = 0x10c,
80 PVSCSI_REG_OFFSET_INTR_STATUS = 0x100c,
81 PVSCSI_REG_OFFSET_INTR_MASK = 0x2010,
82 PVSCSI_REG_OFFSET_KICK_NON_RW_IO = 0x3014,
83 PVSCSI_REG_OFFSET_DEBUG = 0x3018,
84 PVSCSI_REG_OFFSET_KICK_RW_IO = 0x4018,
85};
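
As a quick illustration of how these offsets are used, here is a minimal sketch of MMIO accessors. The helper names and the ioremap()'d 'base' parameter are assumptions for illustration only; they are not part of this interface.

#include <linux/io.h>

/* Hypothetical wrappers; 'base' is assumed to come from ioremap()
 * of the device's memory BAR. */
static inline void pvscsi_reg_write(void __iomem *base, u32 offset, u32 val)
{
	writel(val, base + offset);	/* e.g. offset = PVSCSI_REG_OFFSET_COMMAND */
}

static inline u32 pvscsi_reg_read(void __iomem *base, u32 offset)
{
	return readl(base + offset);
}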
86
87/*
88 * Virtual h/w commands.
89 */
90
91enum PVSCSICommands {
92 PVSCSI_CMD_FIRST = 0, /* has to be first */
93
94 PVSCSI_CMD_ADAPTER_RESET = 1,
95 PVSCSI_CMD_ISSUE_SCSI = 2,
96 PVSCSI_CMD_SETUP_RINGS = 3,
97 PVSCSI_CMD_RESET_BUS = 4,
98 PVSCSI_CMD_RESET_DEVICE = 5,
99 PVSCSI_CMD_ABORT_CMD = 6,
100 PVSCSI_CMD_CONFIG = 7,
101 PVSCSI_CMD_SETUP_MSG_RING = 8,
102 PVSCSI_CMD_DEVICE_UNPLUG = 9,
103
104 PVSCSI_CMD_LAST = 10 /* has to be last */
105};
106
107/*
108 * Command descriptor for PVSCSI_CMD_RESET_DEVICE --
109 */
110
111struct PVSCSICmdDescResetDevice {
112 u32 target;
113 u8 lun[8];
114} __packed;
115
116/*
117 * Command descriptor for PVSCSI_CMD_ABORT_CMD --
118 *
119 * - currently does not support specifying the LUN.
120 * - _pad should be 0.
121 */
122
123struct PVSCSICmdDescAbortCmd {
124 u64 context;
125 u32 target;
126 u32 _pad;
127} __packed;
128
129/*
130 * Command descriptor for PVSCSI_CMD_SETUP_RINGS --
131 *
132 * Notes:
 133 * - reqRingNumPages and cmpRingNumPages need to be a power of two,
 134 * - reqRingNumPages and cmpRingNumPages need to be non-zero,
 135 * - reqRingNumPages and cmpRingNumPages must not exceed
 136 * PVSCSI_SETUP_RINGS_MAX_NUM_PAGES.
137 */
138
139#define PVSCSI_SETUP_RINGS_MAX_NUM_PAGES 32
140struct PVSCSICmdDescSetupRings {
141 u32 reqRingNumPages;
142 u32 cmpRingNumPages;
143 u64 ringsStatePPN;
144 u64 reqRingPPNs[PVSCSI_SETUP_RINGS_MAX_NUM_PAGES];
145 u64 cmpRingPPNs[PVSCSI_SETUP_RINGS_MAX_NUM_PAGES];
146} __packed;
147
148/*
149 * Command descriptor for PVSCSI_CMD_SETUP_MSG_RING --
150 *
151 * Notes:
152 * - this command was not supported in the initial revision of the h/w
 153 * interface. Before using it, check that it is supported by writing
 154 * PVSCSI_CMD_SETUP_MSG_RING to the 'command' register and then
 155 * immediately reading the 'command status' register:
 156 * * a value of -1 means that the cmd is NOT supported,
 157 * * any other value means that the cmd IS supported.
 158 * If it is supported, the 'command status' register returns
 159 * sizeof(PVSCSICmdDescSetupMsgRing) / sizeof(u32).
 160 * - this command should be issued _after_ the usual SETUP_RINGS so that the
 161 * RingsState page is already set up; otherwise the command is a nop.
 162 * - numPages needs to be a power of two,
 163 * - numPages needs to be non-zero,
 164 * - _pad should be zero.
165 */
166
167#define PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES 16
168
169struct PVSCSICmdDescSetupMsgRing {
170 u32 numPages;
171 u32 _pad;
172 u64 ringPPNs[PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES];
173} __packed;
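
The probe sequence described in the notes above can be sketched as follows; pvscsi_reg_write()/pvscsi_reg_read() are the hypothetical MMIO accessors from the earlier sketch.

/* Returns true if SETUP_MSG_RING is supported: write the command,
 * then read the command status; -1 means unsupported, otherwise the
 * register holds sizeof(struct PVSCSICmdDescSetupMsgRing)/sizeof(u32). */
static bool pvscsi_msg_ring_supported(void __iomem *base)
{
	pvscsi_reg_write(base, PVSCSI_REG_OFFSET_COMMAND,
			 PVSCSI_CMD_SETUP_MSG_RING);
	return pvscsi_reg_read(base, PVSCSI_REG_OFFSET_COMMAND_STATUS) != (u32)-1;
}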
174
175enum PVSCSIMsgType {
176 PVSCSI_MSG_DEV_ADDED = 0,
177 PVSCSI_MSG_DEV_REMOVED = 1,
178 PVSCSI_MSG_LAST = 2,
179};
180
181/*
182 * Msg descriptor.
183 *
184 * sizeof(struct PVSCSIRingMsgDesc) == 128.
185 *
186 * - type is of type enum PVSCSIMsgType.
 187 * - the content of args depends on the type of event being delivered.
188 */
189
190struct PVSCSIRingMsgDesc {
191 u32 type;
192 u32 args[31];
193} __packed;
194
195struct PVSCSIMsgDescDevStatusChanged {
196 u32 type; /* PVSCSI_MSG_DEV _ADDED / _REMOVED */
197 u32 bus;
198 u32 target;
199 u8 lun[8];
200 u32 pad[27];
201} __packed;
202
203/*
204 * Rings state.
205 *
206 * - the fields:
207 * . msgProdIdx,
208 * . msgConsIdx,
209 * . msgNumEntriesLog2,
210 * .. are only used once the SETUP_MSG_RING cmd has been issued.
 211 * - '_pad' helps to ensure that the msg-related fields are on their own
 212 * cache line.
213 */
214
215struct PVSCSIRingsState {
216 u32 reqProdIdx;
217 u32 reqConsIdx;
218 u32 reqNumEntriesLog2;
219
220 u32 cmpProdIdx;
221 u32 cmpConsIdx;
222 u32 cmpNumEntriesLog2;
223
224 u8 _pad[104];
225
226 u32 msgProdIdx;
227 u32 msgConsIdx;
228 u32 msgNumEntriesLog2;
229} __packed;
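
The free-running producer/consumer indices combine with the MASK() macro above and the *NumEntriesLog2 fields to address ring slots. A minimal sketch, assuming the ring pages are mapped as one virtually contiguous array:

/* Illustrative only: pick the msg entry the consumer index refers to.
 * Indices increase monotonically without wrapping; MASK() reduces
 * them modulo the ring size, which is a power of two. */
static struct PVSCSIRingMsgDesc *
pvscsi_msg_slot(const struct PVSCSIRingsState *s,
		struct PVSCSIRingMsgDesc *ring)
{
	return &ring[s->msgConsIdx & MASK(s->msgNumEntriesLog2)];
}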
230
231/*
232 * Request descriptor.
233 *
234 * sizeof(RingReqDesc) = 128
235 *
 236 * - context: a unique identifier of a command. It could normally be any
 237 * 64-bit value; however, we currently store it in the serialNumber variable
 238 * of struct SCSI_Command, so we have the following restrictions due to the
 239 * way this field is handled in the vmkernel storage stack:
 240 * * this value can't be 0,
 241 * * the upper 32 bits need to be 0 since serialNumber is a u32.
242 * Currently tracked as PR 292060.
243 * - dataLen: contains the total number of bytes that need to be transferred.
244 * - dataAddr:
245 * * if PVSCSI_FLAG_CMD_WITH_SG_LIST is set: dataAddr is the PA of the first
246 * s/g table segment, each s/g segment is entirely contained on a single
247 * page of physical memory,
248 * * if PVSCSI_FLAG_CMD_WITH_SG_LIST is NOT set, then dataAddr is the PA of
249 * the buffer used for the DMA transfer,
250 * - flags:
251 * * PVSCSI_FLAG_CMD_WITH_SG_LIST: see dataAddr above,
252 * * PVSCSI_FLAG_CMD_DIR_NONE: no DMA involved,
253 * * PVSCSI_FLAG_CMD_DIR_TOHOST: transfer from device to main memory,
254 * * PVSCSI_FLAG_CMD_DIR_TODEVICE: transfer from main memory to device,
 255 * * PVSCSI_FLAG_CMD_OUT_OF_BAND_CDB: reserved to handle CDBs larger than
 256 * 16 bytes. To be specified.
 257 * - vcpuHint: vcpuId of the processor that will most likely be waiting for
 258 * the completion of the i/o. For guest OSes that use lowest priority
 259 * message delivery mode (such as Windows), we use this "hint" to deliver
 260 * the completion action to the proper vcpu. For now, we use the vcpuId of
 261 * the processor that initiated the i/o as a likely candidate for the vcpu
 262 * that will be waiting for the completion.
 263 * - bus should be 0: only bus 0 is supported for now.
 264 * - unused should be zeroed.
265 */
266
267#define PVSCSI_FLAG_CMD_WITH_SG_LIST (1 << 0)
268#define PVSCSI_FLAG_CMD_OUT_OF_BAND_CDB (1 << 1)
269#define PVSCSI_FLAG_CMD_DIR_NONE (1 << 2)
270#define PVSCSI_FLAG_CMD_DIR_TOHOST (1 << 3)
271#define PVSCSI_FLAG_CMD_DIR_TODEVICE (1 << 4)
272
273struct PVSCSIRingReqDesc {
274 u64 context;
275 u64 dataAddr;
276 u64 dataLen;
277 u64 senseAddr;
278 u32 senseLen;
279 u32 flags;
280 u8 cdb[16];
281 u8 cdbLen;
282 u8 lun[8];
283 u8 tag;
284 u8 bus;
285 u8 target;
286 u8 vcpuHint;
287 u8 unused[59];
288} __packed;
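
To make the field rules above concrete, here is a hedged sketch of filling a descriptor for a plain (non-s/g) read. The function and parameter names are illustrative, not part of the interface.

#include <linux/string.h>

/* Illustrative only: single-buffer read from the device. 'ctx' must
 * be non-zero with zero upper 32 bits; 'buf_pa' is the physical
 * address of a DMA-able buffer. */
static void pvscsi_fill_read_req(struct PVSCSIRingReqDesc *e, u64 ctx,
				 u64 buf_pa, u64 len, const u8 *cdb,
				 u8 cdb_len)
{
	memset(e, 0, sizeof(*e));		/* 'unused' must be zeroed */
	e->context = ctx;
	e->dataAddr = buf_pa;			/* no SG_LIST flag: plain buffer */
	e->dataLen = len;
	e->flags = PVSCSI_FLAG_CMD_DIR_TOHOST;	/* device -> main memory */
	memcpy(e->cdb, cdb, cdb_len);
	e->cdbLen = cdb_len;
	e->bus = 0;				/* only bus 0 is supported */
}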
289
290/*
291 * Scatter-gather list management.
292 *
293 * As described above, when PVSCSI_FLAG_CMD_WITH_SG_LIST is set in the
294 * RingReqDesc.flags, then RingReqDesc.dataAddr is the PA of the first s/g
295 * table segment.
296 *
 297 * - each segment of the s/g table contains a succession of struct
 298 * PVSCSISGElement entries.
299 * - each segment is entirely contained on a single physical page of memory.
300 * - a "chain" s/g element has the flag PVSCSI_SGE_FLAG_CHAIN_ELEMENT set in
301 * PVSCSISGElement.flags and in this case:
302 * * addr is the PA of the next s/g segment,
303 * * length is undefined, assumed to be 0.
304 */
305
306struct PVSCSISGElement {
307 u64 addr;
308 u32 length;
309 u32 flags;
310} __packed;
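
A sketch of filling one s/g element per the rules above. Note that PVSCSI_SGE_FLAG_CHAIN_ELEMENT is mentioned by the comment but not defined in this excerpt, so the chain case is only described in the code comment here.

/* Illustrative only: describe one data buffer fragment. A chain
 * element would instead set the CHAIN flag in 'flags', put the PA of
 * the next segment in 'addr', and leave 'length' as 0. */
static void pvscsi_fill_sge(struct PVSCSISGElement *sge, u64 pa, u32 len)
{
	sge->addr = pa;
	sge->length = len;
	sge->flags = 0;
}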
311
312/*
313 * Completion descriptor.
314 *
315 * sizeof(RingCmpDesc) = 32
316 *
317 * - context: identifier of the command. The same thing that was specified
318 * under "context" as part of struct RingReqDesc at initiation time,
319 * - dataLen: number of bytes transferred for the actual i/o operation,
320 * - senseLen: number of bytes written into the sense buffer,
321 * - hostStatus: adapter status,
322 * - scsiStatus: device status,
323 * - _pad should be zero.
324 */
325
326struct PVSCSIRingCmpDesc {
327 u64 context;
328 u64 dataLen;
329 u32 senseLen;
330 u16 hostStatus;
331 u16 scsiStatus;
332 u32 _pad[2];
333} __packed;
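
Putting the rings state and the completion descriptor together, a hedged sketch of draining the completion ring. The handle_cmp callback is hypothetical, the ring pages are assumed to be mapped contiguously, and the memory barriers a real driver needs are omitted.

static void pvscsi_drain_cmp_ring(struct PVSCSIRingsState *s,
				  struct PVSCSIRingCmpDesc *ring,
				  void (*handle_cmp)(const struct PVSCSIRingCmpDesc *))
{
	while (s->cmpConsIdx != s->cmpProdIdx) {
		const struct PVSCSIRingCmpDesc *e =
			&ring[s->cmpConsIdx & MASK(s->cmpNumEntriesLog2)];

		handle_cmp(e);		/* match e->context back to a command */
		s->cmpConsIdx++;	/* free-running index; never masked here */
	}
}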
334
335/*
336 * Interrupt status / IRQ bits.
337 */
338
339#define PVSCSI_INTR_CMPL_0 (1 << 0)
340#define PVSCSI_INTR_CMPL_1 (1 << 1)
341#define PVSCSI_INTR_CMPL_MASK MASK(2)
342
343#define PVSCSI_INTR_MSG_0 (1 << 2)
344#define PVSCSI_INTR_MSG_1 (1 << 3)
345#define PVSCSI_INTR_MSG_MASK (MASK(2) << 2)
346
347#define PVSCSI_INTR_ALL_SUPPORTED MASK(4)
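
A sketch of the read-and-acknowledge pattern for the interrupt status register; the assumption that writing the value back clears the bits matches common practice for this register style, but it is not stated in this header.

static bool pvscsi_cmpl_intr_pending(void __iomem *base)
{
	u32 val = pvscsi_reg_read(base, PVSCSI_REG_OFFSET_INTR_STATUS);

	/* Assumed write-to-clear: acknowledge everything we saw. */
	pvscsi_reg_write(base, PVSCSI_REG_OFFSET_INTR_STATUS, val);

	return (val & PVSCSI_INTR_CMPL_MASK) != 0;
}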
348
349/*
350 * Number of MSI-X vectors supported.
351 */
352#define PVSCSI_MAX_INTRS 24
353
354/*
355 * Enumeration of supported MSI-X vectors
356 */
357#define PVSCSI_VECTOR_COMPLETION 0
358
359/*
360 * Misc constants for the rings.
361 */
362
363#define PVSCSI_MAX_NUM_PAGES_REQ_RING PVSCSI_SETUP_RINGS_MAX_NUM_PAGES
364#define PVSCSI_MAX_NUM_PAGES_CMP_RING PVSCSI_SETUP_RINGS_MAX_NUM_PAGES
365#define PVSCSI_MAX_NUM_PAGES_MSG_RING PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES
366
367#define PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE \
368 (PAGE_SIZE / sizeof(struct PVSCSIRingReqDesc))
369
370#define PVSCSI_MAX_REQ_QUEUE_DEPTH \
371 (PVSCSI_MAX_NUM_PAGES_REQ_RING * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE)
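
With the usual 4 KiB pages this works out to 4096 / 128 = 32 request descriptors per page (sizeof(struct PVSCSIRingReqDesc) is 128 per the comment above), so the maximum queue depth is 32 pages * 32 entries = 1024 outstanding requests.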
372
373#define PVSCSI_MEM_SPACE_COMMAND_NUM_PAGES 1
374#define PVSCSI_MEM_SPACE_INTR_STATUS_NUM_PAGES 1
375#define PVSCSI_MEM_SPACE_MISC_NUM_PAGES 2
376#define PVSCSI_MEM_SPACE_KICK_IO_NUM_PAGES 2
377#define PVSCSI_MEM_SPACE_MSIX_NUM_PAGES 2
378
379enum PVSCSIMemSpace {
380 PVSCSI_MEM_SPACE_COMMAND_PAGE = 0,
381 PVSCSI_MEM_SPACE_INTR_STATUS_PAGE = 1,
382 PVSCSI_MEM_SPACE_MISC_PAGE = 2,
383 PVSCSI_MEM_SPACE_KICK_IO_PAGE = 4,
384 PVSCSI_MEM_SPACE_MSIX_TABLE_PAGE = 6,
385 PVSCSI_MEM_SPACE_MSIX_PBA_PAGE = 7,
386};
387
388#define PVSCSI_MEM_SPACE_NUM_PAGES \
389 (PVSCSI_MEM_SPACE_COMMAND_NUM_PAGES + \
390 PVSCSI_MEM_SPACE_INTR_STATUS_NUM_PAGES + \
391 PVSCSI_MEM_SPACE_MISC_NUM_PAGES + \
392 PVSCSI_MEM_SPACE_KICK_IO_NUM_PAGES + \
393 PVSCSI_MEM_SPACE_MSIX_NUM_PAGES)
394
395#define PVSCSI_MEM_SPACE_SIZE (PVSCSI_MEM_SPACE_NUM_PAGES * PAGE_SIZE)
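
For reference, the per-region page counts line up with the enum above: command occupies page 0, interrupt status page 1, the two misc pages 2-3, the two kick-i/o pages 4-5, and the MSI-X table and PBA pages 6-7, giving PVSCSI_MEM_SPACE_NUM_PAGES = 8 and, with 4 KiB pages, a 32 KiB register area.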
396
397#endif /* _VMW_PVSCSI_H_ */
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c
index 093610bcfcce..333580bf37c5 100644
--- a/drivers/scsi/wd7000.c
+++ b/drivers/scsi/wd7000.c
@@ -161,7 +161,7 @@
  *
  * 2003/02/12 - Christoph Hellwig <hch@infradead.org>
  *
- * Cleaned up host template defintion
+ * Cleaned up host template definition
  * Removed now obsolete wd7000.h
  */
 
@@ -171,7 +171,6 @@
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/string.h>
-#include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/ioport.h>
 #include <linux/proc_fs.h>
@@ -1588,7 +1587,7 @@ static int wd7000_host_reset(struct scsi_cmnd *SCpnt)
 {
 	Adapter *host = (Adapter *) SCpnt->device->host->hostdata;
 
-	spin_unlock_irq(SCpnt->device->host->host_lock);
+	spin_lock_irq(SCpnt->device->host->host_lock);
 
 	if (wd7000_adapter_reset(host) < 0) {
 		spin_unlock_irq(SCpnt->device->host->host_lock);
diff --git a/drivers/scsi/zorro7xx.c b/drivers/scsi/zorro7xx.c
index 64d40a2d4d4d..105449c15fa9 100644
--- a/drivers/scsi/zorro7xx.c
+++ b/drivers/scsi/zorro7xx.c
@@ -13,6 +13,7 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/zorro.h>
+#include <linux/slab.h>
 
 #include <asm/amigahw.h>
 #include <asm/amigaints.h>