aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorTony Lindgren <tony@atomide.com>2011-01-10 17:23:41 -0500
committerTony Lindgren <tony@atomide.com>2011-01-10 17:23:41 -0500
commit274353674dd0337bdeeaee08a9f2047777b07ab0 (patch)
treeb788d77dd7c0e8f27bbcc89fc742c212c28872c0
parent1740d483ba4d79f9fa6984dccd7152b6b208f1bf (diff)
parentd7cd5c73cec2dfa9f259a2adcf802c9f8fcc125f (diff)
Merge branch 'ehci-omap-clock' into omap-fixes
-rw-r--r--Documentation/filesystems/Locking29
-rw-r--r--Documentation/filesystems/dentry-locking.txt174
-rw-r--r--Documentation/filesystems/path-lookup.txt382
-rw-r--r--Documentation/filesystems/porting69
-rw-r--r--Documentation/filesystems/vfs.txt74
-rw-r--r--Documentation/scsi/ChangeLog.megaraid_sas22
-rw-r--r--Documentation/usb/power-management.txt113
-rw-r--r--arch/arm/mach-davinci/usb.c6
-rw-r--r--arch/arm/mach-omap2/Kconfig1
-rw-r--r--arch/arm/mach-omap2/Makefile6
-rw-r--r--arch/arm/mach-omap2/board-4430sdp.c35
-rw-r--r--arch/arm/mach-omap2/board-n8x0.c5
-rw-r--r--arch/arm/mach-omap2/board-omap4panda.c14
-rw-r--r--arch/arm/mach-omap2/clock2420_data.c2
-rw-r--r--arch/arm/mach-omap2/clock2430_data.c2
-rw-r--r--arch/arm/mach-omap2/clock3xxx_data.c13
-rw-r--r--arch/arm/mach-omap2/clock44xx_data.c7
-rw-r--r--arch/arm/mach-omap2/omap_phy_internal.c149
-rw-r--r--arch/arm/mach-omap2/usb-ehci.c144
-rw-r--r--arch/arm/mach-omap2/usb-musb.c104
-rw-r--r--arch/arm/mach-omap2/usb-tusb6010.c2
-rw-r--r--arch/arm/plat-omap/include/plat/omap44xx.h5
-rw-r--r--arch/arm/plat-omap/include/plat/usb.h10
-rw-r--r--arch/blackfin/mach-bf527/boards/ad7160eval.c2
-rw-r--r--arch/blackfin/mach-bf527/boards/cm_bf527.c4
-rw-r--r--arch/blackfin/mach-bf527/boards/ezbrd.c4
-rw-r--r--arch/blackfin/mach-bf527/boards/ezkit.c4
-rw-r--r--arch/blackfin/mach-bf527/boards/tll6527m.c2
-rw-r--r--arch/blackfin/mach-bf548/boards/cm_bf548.c4
-rw-r--r--arch/blackfin/mach-bf548/boards/ezkit.c4
-rw-r--r--arch/ia64/kernel/perfmon.c6
-rw-r--r--arch/powerpc/platforms/cell/spufs/inode.c18
-rw-r--r--arch/sh/Kconfig5
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7786.c35
-rw-r--r--drivers/hid/hid-core.c1
-rw-r--r--drivers/hid/hid-ids.h2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_fs.c8
-rw-r--r--drivers/infiniband/hw/qib/qib_fs.c5
-rw-r--r--drivers/input/keyboard/tc3589x-keypad.c2
-rw-r--r--drivers/media/video/tlg2300/pd-main.c3
-rw-r--r--drivers/mfd/twl-core.c44
-rw-r--r--drivers/mfd/twl6030-irq.c9
-rw-r--r--drivers/mtd/mtdchar.c2
-rw-r--r--drivers/net/wimax/i2400m/usb.c2
-rw-r--r--drivers/s390/scsi/zfcp_aux.c9
-rw-r--r--drivers/s390/scsi/zfcp_ccw.c14
-rw-r--r--drivers/s390/scsi/zfcp_cfdc.c8
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c1177
-rw-r--r--drivers/s390/scsi/zfcp_dbf.h497
-rw-r--r--drivers/s390/scsi/zfcp_erp.c141
-rw-r--r--drivers/s390/scsi/zfcp_ext.h52
-rw-r--r--drivers/s390/scsi/zfcp_fc.c20
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c113
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c10
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c43
-rw-r--r--drivers/s390/scsi/zfcp_sysfs.c9
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c3
-rw-r--r--drivers/scsi/be2iscsi/be_main.c5
-rw-r--r--drivers/scsi/bfa/Makefile4
-rw-r--r--drivers/scsi/bfa/bfa.h36
-rw-r--r--drivers/scsi/bfa/bfa_cb_ioim.h169
-rw-r--r--drivers/scsi/bfa/bfa_core.c373
-rw-r--r--drivers/scsi/bfa/bfa_cs.h96
-rw-r--r--drivers/scsi/bfa/bfa_defs.h6
-rw-r--r--drivers/scsi/bfa/bfa_defs_svc.h8
-rw-r--r--drivers/scsi/bfa/bfa_drv.c107
-rw-r--r--drivers/scsi/bfa/bfa_fc.h633
-rw-r--r--drivers/scsi/bfa/bfa_fcbuild.c109
-rw-r--r--drivers/scsi/bfa/bfa_fcbuild.h30
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.c698
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.h188
-rw-r--r--drivers/scsi/bfa/bfa_fcs.c206
-rw-r--r--drivers/scsi/bfa/bfa_fcs.h124
-rw-r--r--drivers/scsi/bfa/bfa_fcs_fcpim.c30
-rw-r--r--drivers/scsi/bfa/bfa_fcs_lport.c390
-rw-r--r--drivers/scsi/bfa/bfa_fcs_rport.c230
-rw-r--r--drivers/scsi/bfa/bfa_hw_cb.c3
-rw-r--r--drivers/scsi/bfa/bfa_hw_ct.c5
-rw-r--r--drivers/scsi/bfa/bfa_ioc.c749
-rw-r--r--drivers/scsi/bfa/bfa_ioc.h44
-rw-r--r--drivers/scsi/bfa/bfa_ioc_cb.c97
-rw-r--r--drivers/scsi/bfa/bfa_ioc_ct.c120
-rw-r--r--drivers/scsi/bfa/bfa_modules.h3
-rw-r--r--drivers/scsi/bfa/bfa_os_inc.h143
-rw-r--r--drivers/scsi/bfa/bfa_plog.h4
-rw-r--r--drivers/scsi/bfa/bfa_port.c37
-rw-r--r--drivers/scsi/bfa/bfa_port.h1
-rw-r--r--drivers/scsi/bfa/bfa_svc.c746
-rw-r--r--drivers/scsi/bfa/bfa_svc.h63
-rw-r--r--drivers/scsi/bfa/bfad.c38
-rw-r--r--drivers/scsi/bfa/bfad_attr.c18
-rw-r--r--drivers/scsi/bfa/bfad_debugfs.c46
-rw-r--r--drivers/scsi/bfa/bfad_drv.h45
-rw-r--r--drivers/scsi/bfa/bfad_im.c59
-rw-r--r--drivers/scsi/bfa/bfad_im.h16
-rw-r--r--drivers/scsi/bfa/bfi.h8
-rw-r--r--drivers/scsi/bfa/bfi_cbreg.h1
-rw-r--r--drivers/scsi/bfa/bfi_ctreg.h41
-rw-r--r--drivers/scsi/bfa/bfi_ms.h66
-rw-r--r--drivers/scsi/bnx2i/57xx_iscsi_constants.h3
-rw-r--r--drivers/scsi/bnx2i/57xx_iscsi_hsi.h3
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h15
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c96
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c108
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c148
-rw-r--r--drivers/scsi/bnx2i/bnx2i_sysfs.c3
-rw-r--r--drivers/scsi/device_handler/scsi_dh.c11
-rw-r--r--drivers/scsi/fcoe/fcoe.c4
-rw-r--r--drivers/scsi/fcoe/libfcoe.c300
-rw-r--r--drivers/scsi/gdth.c4
-rw-r--r--drivers/scsi/gdth_proc.c16
-rw-r--r--drivers/scsi/hpsa.c11
-rw-r--r--drivers/scsi/hpsa.h1
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c28
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.h2
-rw-r--r--drivers/scsi/ipr.c54
-rw-r--r--drivers/scsi/ipr.h2
-rw-r--r--drivers/scsi/libfc/fc_exch.c48
-rw-r--r--drivers/scsi/libfc/fc_fcp.c147
-rw-r--r--drivers/scsi/libfc/fc_libfc.h16
-rw-r--r--drivers/scsi/libfc/fc_lport.c16
-rw-r--r--drivers/scsi/libfc/fc_rport.c3
-rw-r--r--drivers/scsi/libiscsi.c64
-rw-r--r--drivers/scsi/libsas/sas_port.c18
-rw-r--r--drivers/scsi/lpfc/lpfc.h26
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c104
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c65
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h19
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c48
-rw-r--r--drivers/scsi/lpfc/lpfc_disc.h19
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c327
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c239
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h222
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h292
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c1062
-rw-r--r--drivers/scsi/lpfc/lpfc_logmsg.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c76
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c10
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c16
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c82
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c663
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h5
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h58
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c10
-rw-r--r--drivers/scsi/megaraid/Makefile2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h78
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c (renamed from drivers/scsi/megaraid/megaraid_sas.c)777
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c516
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c2248
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.h695
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2.h9
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h20
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_history.txt3
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_init.h9
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_ioc.h8
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_raid.h6
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_sas.h9
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_tool.h49
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c199
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.h27
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_ctl.c2
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c343
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_transport.c185
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c27
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h8
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c89
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c67
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c217
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c63
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c11
-rw-r--r--drivers/scsi/qla4xxx/ql4_dbg.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_dbg.h2
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h5
-rw-r--r--drivers/scsi/qla4xxx/ql4_fw.h2
-rw-r--r--drivers/scsi/qla4xxx/ql4_glbl.h2
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_inline.h2
-rw-r--r--drivers/scsi/qla4xxx/ql4_iocb.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c9
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c22
-rw-r--r--drivers/scsi/qla4xxx/ql4_nvram.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_nvram.h2
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c62
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.h6
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c30
-rw-r--r--drivers/scsi/qla4xxx/ql4_version.h4
-rw-r--r--drivers/scsi/scsi_debug.c1
-rw-r--r--drivers/scsi/scsi_error.c61
-rw-r--r--drivers/scsi/scsi_lib.c5
-rw-r--r--drivers/scsi/scsi_sysfs.c6
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c1
-rw-r--r--drivers/scsi/sd.c99
-rw-r--r--drivers/scsi/st.c7
-rw-r--r--drivers/staging/autofs/root.c7
-rw-r--r--drivers/staging/bcm/InterfaceInit.c2
-rw-r--r--drivers/staging/pohmelfs/inode.c9
-rw-r--r--drivers/staging/pohmelfs/path_entry.c17
-rw-r--r--drivers/staging/smbfs/cache.c16
-rw-r--r--drivers/staging/smbfs/dir.c50
-rw-r--r--drivers/staging/smbfs/file.c5
-rw-r--r--drivers/staging/smbfs/inode.c9
-rw-r--r--drivers/usb/Kconfig9
-rw-r--r--drivers/usb/core/driver.c150
-rw-r--r--drivers/usb/core/hcd-pci.c1
-rw-r--r--drivers/usb/core/hcd.c1
-rw-r--r--drivers/usb/core/hub.c11
-rw-r--r--drivers/usb/core/inode.c12
-rw-r--r--drivers/usb/core/message.c1
-rw-r--r--drivers/usb/core/quirks.c15
-rw-r--r--drivers/usb/core/sysfs.c84
-rw-r--r--drivers/usb/core/usb.c3
-rw-r--r--drivers/usb/core/usb.h2
-rw-r--r--drivers/usb/gadget/Kconfig77
-rw-r--r--drivers/usb/gadget/Makefile8
-rw-r--r--drivers/usb/gadget/amd5536udc.c1
-rw-r--r--drivers/usb/gadget/atmel_usba_udc.c4
-rw-r--r--drivers/usb/gadget/ci13xxx_msm.c134
-rw-r--r--drivers/usb/gadget/ci13xxx_pci.c176
-rw-r--r--drivers/usb/gadget/ci13xxx_udc.c421
-rw-r--r--drivers/usb/gadget/ci13xxx_udc.h20
-rw-r--r--drivers/usb/gadget/composite.c10
-rw-r--r--drivers/usb/gadget/dummy_hcd.c251
-rw-r--r--drivers/usb/gadget/f_fs.c437
-rw-r--r--drivers/usb/gadget/f_mass_storage.c524
-rw-r--r--drivers/usb/gadget/f_ncm.c1407
-rw-r--r--drivers/usb/gadget/file_storage.c29
-rw-r--r--drivers/usb/gadget/g_ffs.c41
-rw-r--r--drivers/usb/gadget/gadget_chips.h25
-rw-r--r--drivers/usb/gadget/imx_udc.c8
-rw-r--r--drivers/usb/gadget/imx_udc.h3
-rw-r--r--drivers/usb/gadget/langwell_udc.c23
-rw-r--r--drivers/usb/gadget/mass_storage.c2
-rw-r--r--drivers/usb/gadget/mv_udc.h294
-rw-r--r--drivers/usb/gadget/mv_udc_core.c2149
-rw-r--r--drivers/usb/gadget/mv_udc_phy.c214
-rw-r--r--drivers/usb/gadget/ncm.c248
-rw-r--r--drivers/usb/gadget/pch_udc.c2947
-rw-r--r--drivers/usb/gadget/u_audio.c10
-rw-r--r--drivers/usb/gadget/u_ether.c14
-rw-r--r--drivers/usb/gadget/u_ether.h5
-rw-r--r--drivers/usb/host/Kconfig19
-rw-r--r--drivers/usb/host/ehci-atmel.c3
-rw-r--r--drivers/usb/host/ehci-dbg.c2
-rw-r--r--drivers/usb/host/ehci-hcd.c30
-rw-r--r--drivers/usb/host/ehci-msm.c345
-rw-r--r--drivers/usb/host/ehci-mxc.c29
-rw-r--r--drivers/usb/host/ehci-omap.c316
-rw-r--r--drivers/usb/host/ehci-pci.c39
-rw-r--r--drivers/usb/host/ehci-sched.c79
-rw-r--r--drivers/usb/host/ehci-sh.c243
-rw-r--r--drivers/usb/host/ehci-spear.c212
-rw-r--r--drivers/usb/host/ehci-vt8500.c172
-rw-r--r--drivers/usb/host/ehci-w90x900.c3
-rw-r--r--drivers/usb/host/ehci-xilinx-of.c1
-rw-r--r--drivers/usb/host/ehci.h1
-rw-r--r--drivers/usb/host/ohci-hcd.c5
-rw-r--r--drivers/usb/host/ohci-omap3.c2
-rw-r--r--drivers/usb/host/ohci-sh.c2
-rw-r--r--drivers/usb/host/ohci-spear.c240
-rw-r--r--drivers/usb/host/uhci-hcd.c2
-rw-r--r--drivers/usb/host/uhci-q.c12
-rw-r--r--drivers/usb/host/whci/hcd.c2
-rw-r--r--drivers/usb/misc/usbled.c118
-rw-r--r--drivers/usb/mon/mon_bin.c34
-rw-r--r--drivers/usb/musb/Kconfig77
-rw-r--r--drivers/usb/musb/Makefile21
-rw-r--r--drivers/usb/musb/am35x.c410
-rw-r--r--drivers/usb/musb/blackfin.c181
-rw-r--r--drivers/usb/musb/cppi_dma.c2
-rw-r--r--drivers/usb/musb/da8xx.c170
-rw-r--r--drivers/usb/musb/davinci.c174
-rw-r--r--drivers/usb/musb/musb_core.c193
-rw-r--r--drivers/usb/musb/musb_core.h190
-rw-r--r--drivers/usb/musb/musb_gadget.c13
-rw-r--r--drivers/usb/musb/musb_io.h4
-rw-r--r--drivers/usb/musb/musb_regs.h4
-rw-r--r--drivers/usb/musb/musb_virthub.c2
-rw-r--r--drivers/usb/musb/musbhsdma.c2
-rw-r--r--drivers/usb/musb/omap2430.c378
-rw-r--r--drivers/usb/musb/tusb6010.c181
-rw-r--r--drivers/usb/musb/ux500.c216
-rw-r--r--drivers/usb/otg/Kconfig32
-rw-r--r--drivers/usb/otg/Makefile3
-rw-r--r--drivers/usb/otg/ab8500-usb.c585
-rw-r--r--drivers/usb/otg/msm72k_otg.c1125
-rw-r--r--drivers/usb/otg/twl4030-usb.c3
-rw-r--r--drivers/usb/otg/twl6030-usb.c493
-rw-r--r--drivers/usb/serial/ftdi_sio.c27
-rw-r--r--drivers/usb/serial/option.c1
-rw-r--r--drivers/usb/serial/ssu100.c56
-rw-r--r--drivers/usb/serial/usb-wwan.h2
-rw-r--r--drivers/usb/serial/usb_wwan.c79
-rw-r--r--drivers/usb/storage/uas.c82
-rw-r--r--drivers/uwb/i1480/i1480-est.c2
-rw-r--r--drivers/uwb/umc-dev.c7
-rw-r--r--drivers/uwb/whc-rc.c2
-rw-r--r--fs/9p/acl.c5
-rw-r--r--fs/9p/acl.h2
-rw-r--r--fs/9p/vfs_dentry.c4
-rw-r--r--fs/9p/vfs_inode.c39
-rw-r--r--fs/adfs/dir.c13
-rw-r--r--fs/adfs/super.c11
-rw-r--r--fs/affs/amigaffs.c4
-rw-r--r--fs/affs/namei.c68
-rw-r--r--fs/affs/super.c11
-rw-r--r--fs/afs/dir.c10
-rw-r--r--fs/afs/internal.h2
-rw-r--r--fs/afs/security.c7
-rw-r--r--fs/afs/super.c10
-rw-r--r--fs/anon_inodes.c6
-rw-r--r--fs/autofs4/autofs_i.h21
-rw-r--r--fs/autofs4/expire.c141
-rw-r--r--fs/autofs4/inode.c2
-rw-r--r--fs/autofs4/root.c91
-rw-r--r--fs/autofs4/waitq.c23
-rw-r--r--fs/bad_inode.c5
-rw-r--r--fs/befs/linuxvfs.c10
-rw-r--r--fs/bfs/inode.c9
-rw-r--r--fs/block_dev.c9
-rw-r--r--fs/btrfs/acl.c21
-rw-r--r--fs/btrfs/ctree.h2
-rw-r--r--fs/btrfs/export.c4
-rw-r--r--fs/btrfs/inode.c17
-rw-r--r--fs/ceph/dir.c28
-rw-r--r--fs/ceph/inode.c38
-rw-r--r--fs/ceph/mds_client.c2
-rw-r--r--fs/ceph/super.h2
-rw-r--r--fs/cifs/cifsfs.c16
-rw-r--r--fs/cifs/dir.c77
-rw-r--r--fs/cifs/inode.c14
-rw-r--r--fs/cifs/link.c4
-rw-r--r--fs/cifs/readdir.c6
-rw-r--r--fs/coda/cache.c4
-rw-r--r--fs/coda/dir.c20
-rw-r--r--fs/coda/inode.c9
-rw-r--r--fs/coda/pioctl.c6
-rw-r--r--fs/configfs/configfs_internal.h4
-rw-r--r--fs/configfs/dir.c24
-rw-r--r--fs/configfs/inode.c8
-rw-r--r--fs/dcache.c1375
-rw-r--r--fs/ecryptfs/dentry.c9
-rw-r--r--fs/ecryptfs/inode.c12
-rw-r--r--fs/ecryptfs/main.c4
-rw-r--r--fs/ecryptfs/super.c12
-rw-r--r--fs/efs/super.c9
-rw-r--r--fs/exofs/super.c9
-rw-r--r--fs/exportfs/expfs.c14
-rw-r--r--fs/ext2/acl.c11
-rw-r--r--fs/ext2/acl.h2
-rw-r--r--fs/ext2/super.c9
-rw-r--r--fs/ext3/acl.c11
-rw-r--r--fs/ext3/acl.h2
-rw-r--r--fs/ext3/super.c9
-rw-r--r--fs/ext4/acl.c11
-rw-r--r--fs/ext4/acl.h2
-rw-r--r--fs/ext4/super.c9
-rw-r--r--fs/fat/inode.c13
-rw-r--r--fs/fat/namei_msdos.c23
-rw-r--r--fs/fat/namei_vfat.c55
-rw-r--r--fs/filesystems.c3
-rw-r--r--fs/freevxfs/vxfs_inode.c9
-rw-r--r--fs/fs_struct.c36
-rw-r--r--fs/fuse/dir.c18
-rw-r--r--fs/fuse/inode.c13
-rw-r--r--fs/generic_acl.c20
-rw-r--r--fs/gfs2/acl.c5
-rw-r--r--fs/gfs2/acl.h2
-rw-r--r--fs/gfs2/dentry.c22
-rw-r--r--fs/gfs2/export.c4
-rw-r--r--fs/gfs2/file.c2
-rw-r--r--fs/gfs2/inode.c4
-rw-r--r--fs/gfs2/inode.h2
-rw-r--r--fs/gfs2/ops_fstype.c2
-rw-r--r--fs/gfs2/ops_inode.c20
-rw-r--r--fs/gfs2/super.c9
-rw-r--r--fs/hfs/dir.c2
-rw-r--r--fs/hfs/hfs_fs.h8
-rw-r--r--fs/hfs/string.c17
-rw-r--r--fs/hfs/super.c11
-rw-r--r--fs/hfs/sysdep.c7
-rw-r--r--fs/hfsplus/dir.c2
-rw-r--r--fs/hfsplus/hfsplus_fs.h8
-rw-r--r--fs/hfsplus/super.c12
-rw-r--r--fs/hfsplus/unicode.c18
-rw-r--r--fs/hostfs/hostfs_kern.c44
-rw-r--r--fs/hpfs/dentry.c27
-rw-r--r--fs/hpfs/namei.c2
-rw-r--r--fs/hpfs/super.c9
-rw-r--r--fs/hppfs/hppfs.c9
-rw-r--r--fs/hugetlbfs/inode.c9
-rw-r--r--fs/inode.c50
-rw-r--r--fs/internal.h1
-rw-r--r--fs/isofs/inode.c131
-rw-r--r--fs/isofs/namei.c5
-rw-r--r--fs/jffs2/acl.c5
-rw-r--r--fs/jffs2/acl.h2
-rw-r--r--fs/jffs2/super.c9
-rw-r--r--fs/jfs/acl.c8
-rw-r--r--fs/jfs/jfs_acl.h2
-rw-r--r--fs/jfs/namei.c63
-rw-r--r--fs/jfs/super.c12
-rw-r--r--fs/libfs.c63
-rw-r--r--fs/locks.c2
-rw-r--r--fs/logfs/dir.c6
-rw-r--r--fs/logfs/inode.c9
-rw-r--r--fs/minix/inode.c9
-rw-r--r--fs/minix/namei.c2
-rw-r--r--fs/namei.c857
-rw-r--r--fs/namespace.c291
-rw-r--r--fs/ncpfs/dir.c88
-rw-r--r--fs/ncpfs/inode.c12
-rw-r--r--fs/ncpfs/ncplib_kernel.h16
-rw-r--r--fs/nfs/dir.c32
-rw-r--r--fs/nfs/getroot.c10
-rw-r--r--fs/nfs/inode.c9
-rw-r--r--fs/nfs/namespace.c17
-rw-r--r--fs/nfs/unlink.c2
-rw-r--r--fs/nfsd/vfs.c5
-rw-r--r--fs/nilfs2/inode.c10
-rw-r--r--fs/nilfs2/nilfs.h2
-rw-r--r--fs/nilfs2/super.c12
-rw-r--r--fs/notify/fsnotify.c8
-rw-r--r--fs/ntfs/inode.c9
-rw-r--r--fs/ocfs2/acl.c8
-rw-r--r--fs/ocfs2/acl.h2
-rw-r--r--fs/ocfs2/dcache.c20
-rw-r--r--fs/ocfs2/dlmfs/dlmfs.c9
-rw-r--r--fs/ocfs2/export.c4
-rw-r--r--fs/ocfs2/file.c7
-rw-r--r--fs/ocfs2/file.h2
-rw-r--r--fs/ocfs2/namei.c10
-rw-r--r--fs/ocfs2/super.c9
-rw-r--r--fs/openpromfs/inode.c9
-rw-r--r--fs/pipe.c12
-rw-r--r--fs/pnode.c4
-rw-r--r--fs/proc/base.c53
-rw-r--r--fs/proc/generic.c4
-rw-r--r--fs/proc/inode.c9
-rw-r--r--fs/proc/proc_sysctl.c31
-rw-r--r--fs/qnx4/inode.c9
-rw-r--r--fs/reiserfs/super.c9
-rw-r--r--fs/reiserfs/xattr.c18
-rw-r--r--fs/romfs/super.c9
-rw-r--r--fs/squashfs/super.c9
-rw-r--r--fs/super.c5
-rw-r--r--fs/sysfs/dir.c10
-rw-r--r--fs/sysfs/inode.c11
-rw-r--r--fs/sysfs/sysfs.h2
-rw-r--r--fs/sysv/inode.c9
-rw-r--r--fs/sysv/namei.c5
-rw-r--r--fs/sysv/super.c2
-rw-r--r--fs/ubifs/super.c10
-rw-r--r--fs/udf/super.c9
-rw-r--r--fs/ufs/super.c9
-rw-r--r--fs/xfs/linux-2.6/xfs_acl.c11
-rw-r--r--fs/xfs/xfs_acl.h2
-rw-r--r--fs/xfs/xfs_iget.c13
-rw-r--r--include/linux/bit_spinlock.h4
-rw-r--r--include/linux/coda_linux.h2
-rw-r--r--include/linux/dcache.h243
-rw-r--r--include/linux/fs.h63
-rw-r--r--include/linux/fs_struct.h3
-rw-r--r--include/linux/fsnotify.h2
-rw-r--r--include/linux/fsnotify_backend.h11
-rw-r--r--include/linux/generic_acl.h2
-rw-r--r--include/linux/i2c/twl.h7
-rw-r--r--include/linux/list_bl.h144
-rw-r--r--include/linux/mount.h53
-rw-r--r--include/linux/namei.h16
-rw-r--r--include/linux/ncp_fs.h4
-rw-r--r--include/linux/nfs_fs.h2
-rw-r--r--include/linux/path.h2
-rw-r--r--include/linux/posix_acl.h19
-rw-r--r--include/linux/rculist_bl.h127
-rw-r--r--include/linux/reiserfs_xattr.h2
-rw-r--r--include/linux/security.h8
-rw-r--r--include/linux/seqlock.h80
-rw-r--r--include/linux/slab.h2
-rw-r--r--include/linux/usb.h7
-rw-r--r--include/linux/usb/ch11.h47
-rw-r--r--include/linux/usb/ch9.h10
-rw-r--r--include/linux/usb/hcd.h4
-rw-r--r--include/linux/usb/msm_hsusb.h112
-rw-r--r--include/linux/usb/msm_hsusb_hw.h59
-rw-r--r--include/linux/usb/musb.h8
-rw-r--r--include/linux/usb/otg.h2
-rw-r--r--include/scsi/iscsi_if.h1
-rw-r--r--include/scsi/libfc.h10
-rw-r--r--include/scsi/libfcoe.h8
-rw-r--r--include/scsi/libiscsi.h4
-rw-r--r--include/scsi/libsas.h2
-rw-r--r--include/scsi/scsi.h28
-rw-r--r--ipc/mqueue.c9
-rw-r--r--kernel/cgroup.c54
-rw-r--r--mm/filemap.c3
-rw-r--r--mm/shmem.c9
-rw-r--r--mm/slab.c32
-rw-r--r--mm/slob.c5
-rw-r--r--mm/slub.c40
-rw-r--r--mm/util.c21
-rw-r--r--net/socket.c24
-rw-r--r--net/sunrpc/rpc_pipe.c14
-rw-r--r--security/security.c9
-rw-r--r--security/selinux/selinuxfs.c16
-rw-r--r--security/tomoyo/realpath.c1
510 files changed, 31303 insertions, 11326 deletions
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 33fa3e5d38fd..977d8919cc69 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -9,22 +9,25 @@ be able to use diff(1).
9 9
10--------------------------- dentry_operations -------------------------- 10--------------------------- dentry_operations --------------------------
11prototypes: 11prototypes:
12 int (*d_revalidate)(struct dentry *, int); 12 int (*d_revalidate)(struct dentry *, struct nameidata *);
13 int (*d_hash) (struct dentry *, struct qstr *); 13 int (*d_hash)(const struct dentry *, const struct inode *,
14 int (*d_compare) (struct dentry *, struct qstr *, struct qstr *); 14 struct qstr *);
15 int (*d_compare)(const struct dentry *, const struct inode *,
16 const struct dentry *, const struct inode *,
17 unsigned int, const char *, const struct qstr *);
15 int (*d_delete)(struct dentry *); 18 int (*d_delete)(struct dentry *);
16 void (*d_release)(struct dentry *); 19 void (*d_release)(struct dentry *);
17 void (*d_iput)(struct dentry *, struct inode *); 20 void (*d_iput)(struct dentry *, struct inode *);
18 char *(*d_dname)((struct dentry *dentry, char *buffer, int buflen); 21 char *(*d_dname)((struct dentry *dentry, char *buffer, int buflen);
19 22
20locking rules: 23locking rules:
21 dcache_lock rename_lock ->d_lock may block 24 rename_lock ->d_lock may block rcu-walk
22d_revalidate: no no no yes 25d_revalidate: no no yes (ref-walk) maybe
23d_hash no no no yes 26d_hash no no no maybe
24d_compare: no yes no no 27d_compare: yes no no maybe
25d_delete: yes no yes no 28d_delete: no yes no no
26d_release: no no no yes 29d_release: no no yes no
27d_iput: no no no yes 30d_iput: no no yes no
28d_dname: no no no no 31d_dname: no no no no
29 32
30--------------------------- inode_operations --------------------------- 33--------------------------- inode_operations ---------------------------
@@ -44,8 +47,8 @@ ata *);
44 void * (*follow_link) (struct dentry *, struct nameidata *); 47 void * (*follow_link) (struct dentry *, struct nameidata *);
45 void (*put_link) (struct dentry *, struct nameidata *, void *); 48 void (*put_link) (struct dentry *, struct nameidata *, void *);
46 void (*truncate) (struct inode *); 49 void (*truncate) (struct inode *);
47 int (*permission) (struct inode *, int, struct nameidata *); 50 int (*permission) (struct inode *, int, unsigned int);
48 int (*check_acl)(struct inode *, int); 51 int (*check_acl)(struct inode *, int, unsigned int);
49 int (*setattr) (struct dentry *, struct iattr *); 52 int (*setattr) (struct dentry *, struct iattr *);
50 int (*getattr) (struct vfsmount *, struct dentry *, struct kstat *); 53 int (*getattr) (struct vfsmount *, struct dentry *, struct kstat *);
51 int (*setxattr) (struct dentry *, const char *,const void *,size_t,int); 54 int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
@@ -73,7 +76,7 @@ follow_link: no
73put_link: no 76put_link: no
74truncate: yes (see below) 77truncate: yes (see below)
75setattr: yes 78setattr: yes
76permission: no 79permission: no (may not block if called in rcu-walk mode)
77check_acl: no 80check_acl: no
78getattr: no 81getattr: no
79setxattr: yes 82setxattr: yes
diff --git a/Documentation/filesystems/dentry-locking.txt b/Documentation/filesystems/dentry-locking.txt
deleted file mode 100644
index 79334ed5daa7..000000000000
--- a/Documentation/filesystems/dentry-locking.txt
+++ /dev/null
@@ -1,174 +0,0 @@
1RCU-based dcache locking model
2==============================
3
4On many workloads, the most common operation on dcache is to look up a
5dentry, given a parent dentry and the name of the child. Typically,
6for every open(), stat() etc., the dentry corresponding to the
7pathname will be looked up by walking the tree starting with the first
8component of the pathname and using that dentry along with the next
9component to look up the next level and so on. Since it is a frequent
10operation for workloads like multiuser environments and web servers,
11it is important to optimize this path.
12
13Prior to 2.5.10, dcache_lock was acquired in d_lookup and thus in
14every component during path look-up. Since 2.5.10 onwards, fast-walk
15algorithm changed this by holding the dcache_lock at the beginning and
16walking as many cached path component dentries as possible. This
17significantly decreases the number of acquisition of
18dcache_lock. However it also increases the lock hold time
19significantly and affects performance in large SMP machines. Since
202.5.62 kernel, dcache has been using a new locking model that uses RCU
21to make dcache look-up lock-free.
22
23The current dcache locking model is not very different from the
24existing dcache locking model. Prior to 2.5.62 kernel, dcache_lock
25protected the hash chain, d_child, d_alias, d_lru lists as well as
26d_inode and several other things like mount look-up. RCU-based changes
27affect only the way the hash chain is protected. For everything else
28the dcache_lock must be taken for both traversing as well as
29updating. The hash chain updates too take the dcache_lock. The
30significant change is the way d_lookup traverses the hash chain, it
31doesn't acquire the dcache_lock for this and rely on RCU to ensure
32that the dentry has not been *freed*.
33
34
35Dcache locking details
36======================
37
38For many multi-user workloads, open() and stat() on files are very
39frequently occurring operations. Both involve walking of path names to
40find the dentry corresponding to the concerned file. In 2.4 kernel,
41dcache_lock was held during look-up of each path component. Contention
42and cache-line bouncing of this global lock caused significant
43scalability problems. With the introduction of RCU in Linux kernel,
44this was worked around by making the look-up of path components during
45path walking lock-free.
46
47
48Safe lock-free look-up of dcache hash table
49===========================================
50
51Dcache is a complex data structure with the hash table entries also
52linked together in other lists. In 2.4 kernel, dcache_lock protected
53all the lists. We applied RCU only on hash chain walking. The rest of
54the lists are still protected by dcache_lock. Some of the important
55changes are :
56
571. The deletion from hash chain is done using hlist_del_rcu() macro
58 which doesn't initialize next pointer of the deleted dentry and
59 this allows us to walk safely lock-free while a deletion is
60 happening.
61
622. Insertion of a dentry into the hash table is done using
63 hlist_add_head_rcu() which take care of ordering the writes - the
64 writes to the dentry must be visible before the dentry is
65 inserted. This works in conjunction with hlist_for_each_rcu(),
66 which has since been replaced by hlist_for_each_entry_rcu(), while
67 walking the hash chain. The only requirement is that all
68 initialization to the dentry must be done before
69 hlist_add_head_rcu() since we don't have dcache_lock protection
70 while traversing the hash chain. This isn't different from the
71 existing code.
72
733. The dentry looked up without holding dcache_lock by cannot be
74 returned for walking if it is unhashed. It then may have a NULL
75 d_inode or other bogosity since RCU doesn't protect the other
76 fields in the dentry. We therefore use a flag DCACHE_UNHASHED to
77 indicate unhashed dentries and use this in conjunction with a
78 per-dentry lock (d_lock). Once looked up without the dcache_lock,
79 we acquire the per-dentry lock (d_lock) and check if the dentry is
80 unhashed. If so, the look-up is failed. If not, the reference count
81 of the dentry is increased and the dentry is returned.
82
834. Once a dentry is looked up, it must be ensured during the path walk
84 for that component it doesn't go away. In pre-2.5.10 code, this was
85 done holding a reference to the dentry. dcache_rcu does the same.
86 In some sense, dcache_rcu path walking looks like the pre-2.5.10
87 version.
88
895. All dentry hash chain updates must take the dcache_lock as well as
90 the per-dentry lock in that order. dput() does this to ensure that
91 a dentry that has just been looked up in another CPU doesn't get
92 deleted before dget() can be done on it.
93
946. There are several ways to do reference counting of RCU protected
95 objects. One such example is in ipv4 route cache where deferred
96 freeing (using call_rcu()) is done as soon as the reference count
97 goes to zero. This cannot be done in the case of dentries because
98 tearing down of dentries require blocking (dentry_iput()) which
99 isn't supported from RCU callbacks. Instead, tearing down of
100 dentries happen synchronously in dput(), but actual freeing happens
101 later when RCU grace period is over. This allows safe lock-free
102 walking of the hash chains, but a matched dentry may have been
103 partially torn down. The checking of DCACHE_UNHASHED flag with
104 d_lock held detects such dentries and prevents them from being
105 returned from look-up.
106
107
108Maintaining POSIX rename semantics
109==================================
110
111Since look-up of dentries is lock-free, it can race against a
112concurrent rename operation. For example, during rename of file A to
113B, look-up of either A or B must succeed. So, if look-up of B happens
114after A has been removed from the hash chain but not added to the new
115hash chain, it may fail. Also, a comparison while the name is being
116written concurrently by a rename may result in false positive matches
117violating rename semantics. Issues related to race with rename are
118handled as described below :
119
1201. Look-up can be done in two ways - d_lookup() which is safe from
121 simultaneous renames and __d_lookup() which is not. If
122 __d_lookup() fails, it must be followed up by a d_lookup() to
123 correctly determine whether a dentry is in the hash table or
124 not. d_lookup() protects look-ups using a sequence lock
125 (rename_lock).
126
1272. The name associated with a dentry (d_name) may be changed if a
128 rename is allowed to happen simultaneously. To avoid memcmp() in
129 __d_lookup() go out of bounds due to a rename and false positive
130 comparison, the name comparison is done while holding the
131 per-dentry lock. This prevents concurrent renames during this
132 operation.
133
1343. Hash table walking during look-up may move to a different bucket as
135 the current dentry is moved to a different bucket due to rename.
136 But we use hlists in dcache hash table and they are
137 null-terminated. So, even if a dentry moves to a different bucket,
138 hash chain walk will terminate. [with a list_head list, it may not
139 since termination is when the list_head in the original bucket is
140 reached]. Since we redo the d_parent check and compare name while
141 holding d_lock, lock-free look-up will not race against d_move().
142
1434. There can be a theoretical race when a dentry keeps coming back to
144 original bucket due to double moves. Due to this look-up may
145 consider that it has never moved and can end up in a infinite loop.
146 But this is not any worse that theoretical livelocks we already
147 have in the kernel.
148
149
150Important guidelines for filesystem developers related to dcache_rcu
151====================================================================
152
1531. Existing dcache interfaces (pre-2.5.62) exported to filesystem
154 don't change. Only dcache internal implementation changes. However
155 filesystems *must not* delete from the dentry hash chains directly
156 using the list macros like allowed earlier. They must use dcache
157 APIs like d_drop() or __d_drop() depending on the situation.
158
1592. d_flags is now protected by a per-dentry lock (d_lock). All access
160 to d_flags must be protected by it.
161
1623. For a hashed dentry, checking of d_count needs to be protected by
163 d_lock.
164
165
166Papers and other documentation on dcache locking
167================================================
168
1691. Scaling dcache with RCU (http://linuxjournal.com/article.php?sid=7124).
170
1712. http://lse.sourceforge.net/locking/dcache/dcache.html
172
173
174
diff --git a/Documentation/filesystems/path-lookup.txt b/Documentation/filesystems/path-lookup.txt
new file mode 100644
index 000000000000..eb59c8b44be9
--- /dev/null
+++ b/Documentation/filesystems/path-lookup.txt
@@ -0,0 +1,382 @@
1Path walking and name lookup locking
2====================================
3
4Path resolution is finding a dentry corresponding to a path name string, by
5performing a path walk. Typically, for every open(), stat() etc., the path name
6will be resolved. Paths are resolved by walking the namespace tree, starting
7with the first component of the pathname (eg. root or cwd) with a known dentry,
8then finding the child of that dentry, which is named the next component in the
9path string. Then repeating the lookup from the child dentry and finding its
10child with the next element, and so on.
11
12Since it is a frequent operation for workloads like multiuser environments and
13web servers, it is important to optimize this code.
14
15Path walking synchronisation history:
16Prior to 2.5.10, dcache_lock was acquired in d_lookup (dcache hash lookup) and
17thus in every component during path look-up. Since 2.5.10 onwards, fast-walk
18algorithm changed this by holding the dcache_lock at the beginning and walking
19as many cached path component dentries as possible. This significantly
20decreases the number of acquisition of dcache_lock. However it also increases
21the lock hold time significantly and affects performance in large SMP machines.
22Since 2.5.62 kernel, dcache has been using a new locking model that uses RCU to
23make dcache look-up lock-free.
24
25All the above algorithms required taking a lock and reference count on the
26dentry that was looked up, so that may be used as the basis for walking the
27next path element. This is inefficient and unscalable. It is inefficient
28because of the locks and atomic operations required for every dentry element
29slows things down. It is not scalable because many parallel applications that
30are path-walk intensive tend to do path lookups starting from a common dentry
31(usually, the root "/" or current working directory). So contention on these
32common path elements causes lock and cacheline queueing.
33
34Since 2.6.38, RCU is used to make a significant part of the entire path walk
35(including dcache look-up) completely "store-free" (so, no locks, atomics, or
36even stores into cachelines of common dentries). This is known as "rcu-walk"
37path walking.
38
39Path walking overview
40=====================
41
42A name string specifies a start (root directory, cwd, fd-relative) and a
43sequence of elements (directory entry names), which together refer to a path in
44the namespace. A path is represented as a (dentry, vfsmount) tuple. The name
45elements are sub-strings, separated by '/'.
46
47Name lookups will want to find a particular path that a name string refers to
48(usually the final element, or parent of final element). This is done by taking
49the path given by the name's starting point (which we know in advance -- eg.
50current->fs->cwd or current->fs->root) as the first parent of the lookup. Then
51iteratively for each subsequent name element, look up the child of the current
52parent with the given name and if it is not the desired entry, make it the
53parent for the next lookup.
54
55A parent, of course, must be a directory, and we must have appropriate
56permissions on the parent inode to be able to walk into it.
57
58Turning the child into a parent for the next lookup requires more checks and
59procedures. Symlinks essentially substitute the symlink name for the target
60name in the name string, and require some recursive path walking. Mount points
61must be followed into (thus changing the vfsmount that subsequent path elements
62refer to), switching from the mount point path to the root of the particular
63mounted vfsmount. These behaviours are variously modified depending on the
64exact path walking flags.
65
66Path walking then must, broadly, do several particular things:
67- find the start point of the walk;
68- perform permissions and validity checks on inodes;
69- perform dcache hash name lookups on (parent, name element) tuples;
70- traverse mount points;
71- traverse symlinks;
72- lookup and create missing parts of the path on demand.
73
74Safe store-free look-up of dcache hash table
75============================================
76
77Dcache name lookup
78------------------
79In order to lookup a dcache (parent, name) tuple, we take a hash on the tuple
80and use that to select a bucket in the dcache-hash table. The list of entries
81in that bucket is then walked, and we do a full comparison of each entry
82against our (parent, name) tuple.
83
84The hash lists are RCU protected, so list walking is not serialised with
85concurrent updates (insertion, deletion from the hash). This is a standard RCU
86list application with the exception of renames, which will be covered below.
87
88Parent and name members of a dentry, as well as its membership in the dcache
89hash, and its inode are protected by the per-dentry d_lock spinlock. A
90reference is taken on the dentry (while the fields are verified under d_lock),
91and this stabilises its d_inode pointer and actual inode. This gives a stable
92point to perform the next step of our path walk against.
93
94These members are also protected by d_seq seqlock, although this offers
95read-only protection and no durability of results, so care must be taken when
96using d_seq for synchronisation (see seqcount based lookups, below).
97
98Renames
99-------
100Back to the rename case. In usual RCU protected lists, the only operations that
101will happen to an object is insertion, and then eventually removal from the
102list. The object will not be reused until an RCU grace period is complete.
103This ensures the RCU list traversal primitives can run over the object without
104problems (see RCU documentation for how this works).
105
106However when a dentry is renamed, its hash value can change, requiring it to be
107moved to a new hash list. Allocating and inserting a new alias would be
108expensive and also problematic for directory dentries. Latency would be far too
109high to wait for a grace period after removing the dentry and before inserting
110it in the new hash bucket. So what is done is to insert the dentry into the
111new list immediately.
112
113However, when the dentry's list pointers are updated to point to objects in the
114new list before waiting for a grace period, this can result in a concurrent RCU
115lookup of the old list veering off into the new (incorrect) list and missing
116the remaining dentries on the list.
117
118There is no fundamental problem with walking down the wrong list, because the
119dentry comparisons will never match. However it is fatal to miss a matching
120dentry. So a seqlock is used to detect when a rename has occurred, and so the
121lookup can be retried.
122
123 1 2 3
124 +---+ +---+ +---+
125hlist-->| N-+->| N-+->| N-+->
126head <--+-P |<-+-P |<-+-P |
127 +---+ +---+ +---+
128
129Rename of dentry 2 may require it deleted from the above list, and inserted
130into a new list. Deleting 2 gives the following list.
131
132 1 3
133 +---+ +---+ (don't worry, the longer pointers do not
134hlist-->| N-+-------->| N-+-> impose a measurable performance overhead
135head <--+-P |<--------+-P | on modern CPUs)
136 +---+ +---+
137 ^ 2 ^
138 | +---+ |
139 | | N-+----+
140 +----+-P |
141 +---+
142
143This is a standard RCU-list deletion, which leaves the deleted object's
144pointers intact, so a concurrent list walker that is currently looking at
145object 2 will correctly continue to object 3 when it is time to traverse the
146next object.
147
148However, when inserting object 2 onto a new list, we end up with this:
149
150 1 3
151 +---+ +---+
152hlist-->| N-+-------->| N-+->
153head <--+-P |<--------+-P |
154 +---+ +---+
155 2
156 +---+
157 | N-+---->
158 <----+-P |
159 +---+
160
161Because we didn't wait for a grace period, there may be a concurrent lookup
162still at 2. Now when it follows 2's 'next' pointer, it will walk off into
163another list without ever having checked object 3.
164
165A related, but distinctly different, issue is that of rename atomicity versus
166lookup operations. If a file is renamed from 'A' to 'B', a lookup must only
167find either 'A' or 'B'. So if a lookup of 'A' returns NULL, a subsequent lookup
168of 'B' must succeed (note the reverse is not true).
169
170Between deleting the dentry from the old hash list, and inserting it on the new
171hash list, a lookup may find neither 'A' nor 'B' matching the dentry. The same
172rename seqlock is also used to cover this race in much the same way, by
173retrying a negative lookup result if a rename was in progress.
174
175Seqcount based lookups
176----------------------
177In refcount based dcache lookups, d_lock is used to serialise access to
178the dentry, stabilising it while comparing its name and parent and then
179taking a reference count (the reference count then gives a stable place to
180start the next part of the path walk from).
181
182As explained above, we would like to do path walking without taking locks or
183reference counts on intermediate dentries along the path. To do this, a per
184dentry seqlock (d_seq) is used to take a "coherent snapshot" of what the dentry
185looks like (its name, parent, and inode). That snapshot is then used to start
186the next part of the path walk. When loading the coherent snapshot under d_seq,
187care must be taken to load the members up-front, and use those pointers rather
188than reloading from the dentry later on (otherwise we'd have interesting things
189like d_inode going NULL underneath us, if the name was unlinked).
190
191Also important is to avoid performing any destructive operations (pretty much:
192no non-atomic stores to shared data), and to recheck the seqcount when we are
193"done" with the operation. Retry or abort if the seqcount does not match.
194Avoiding destructive or changing operations means we can easily unwind from
195failure.
196
197What this means is that a caller, provided they are holding RCU lock to
198protect the dentry object from disappearing, can perform a seqcount based
199lookup which does not increment the refcount on the dentry or write to
200it in any way. This returned dentry can be used for subsequent operations,
201provided that d_seq is rechecked after that operation is complete.
202
203Inodes are also rcu freed, so the seqcount lookup dentry's inode may also be
204queried for permissions.
205
206With these two parts of the puzzle, we can do path lookups without taking
207locks or refcounts on dentry elements.
208
209RCU-walk path walking design
210============================
211
212Path walking code now has two distinct modes, ref-walk and rcu-walk. ref-walk
213is the traditional[*] way of performing dcache lookups using d_lock to
214serialise concurrent modifications to the dentry and take a reference count on
215it. ref-walk is simple and obvious, and may sleep, take locks, etc while path
216walking is operating on each dentry. rcu-walk uses seqcount based dentry
217lookups, and can perform lookup of intermediate elements without any stores to
218shared data in the dentry or inode. rcu-walk can not be applied to all cases,
219eg. if the filesystem must sleep or perform non trivial operations, rcu-walk
220must be switched to ref-walk mode.
221
222[*] RCU is still used for the dentry hash lookup in ref-walk, but not the full
223 path walk.
224
225Where ref-walk uses a stable, refcounted ``parent'' to walk the remaining
226path string, rcu-walk uses a d_seq protected snapshot. When looking up a
227child of this parent snapshot, we open d_seq critical section on the child
228before closing d_seq critical section on the parent. This gives an interlocking
229ladder of snapshots to walk down.
230
231
232 proc 101
233 /----------------\
234 / comm: "vi" \
235 / fs.root: dentry0 \
236 \ fs.cwd: dentry2 /
237 \ /
238 \----------------/
239
240So when vi wants to open("/home/npiggin/test.c", O_RDWR), then it will
241start from current->fs->root, which is a pinned dentry. Alternatively,
242"./test.c" would start from cwd; both names refer to the same path in
243the context of proc101.
244
245 dentry 0
246 +---------------------+ rcu-walk begins here, we note d_seq, check the
247 | name: "/" | inode's permission, and then look up the next
248 | inode: 10 | path element which is "home"...
249 | children:"home", ...|
250 +---------------------+
251 |
252 dentry 1 V
253 +---------------------+ ... which brings us here. We find dentry1 via
254 | name: "home" | hash lookup, then note d_seq and compare name
255 | inode: 678 | string and parent pointer. When we have a match,
256 | children:"npiggin" | we now recheck the d_seq of dentry0. Then we
257 +---------------------+ check inode and look up the next element.
258 |
259 dentry2 V
260 +---------------------+ Note: if dentry0 is now modified, lookup is
261 | name: "npiggin" | not necessarily invalid, so we need only keep a
262 | inode: 543 | parent for d_seq verification, and grandparents
263 | children:"a.c", ... | can be forgotten.
264 +---------------------+
265 |
266 dentry3 V
267 +---------------------+ At this point we have our destination dentry.
268 | name: "a.c" | We now take its d_lock, verify d_seq of this
269 | inode: 14221 | dentry. If that checks out, we can increment
270 | children:NULL | its refcount because we're holding d_lock.
271 +---------------------+
272
273Taking a refcount on a dentry from rcu-walk mode, by taking its d_lock,
274re-checking its d_seq, and then incrementing its refcount is called
275"dropping rcu" or dropping from rcu-walk into ref-walk mode.
276
277It is, in some sense, a bit of a house of cards. If the seqcount check of the
278parent snapshot fails, the house comes down, because we had closed the d_seq
279section on the grandparent, so we have nothing left to stand on. In that case,
280the path walk must be fully restarted (which we do in ref-walk mode, to avoid
281live locks). It is costly to have a full restart, but fortunately they are
282quite rare.
283
284When we reach a point where sleeping is required, or a filesystem callout
285requires ref-walk, then instead of restarting the walk, we attempt to drop rcu
286at the last known good dentry we have. Avoiding a full restart in ref-walk in
287these cases is fundamental for performance and scalability because blocking
288operations such as creates and unlinks are not uncommon.
289
290The detailed design for rcu-walk is like this:
291* LOOKUP_RCU is set in nd->flags, which distinguishes rcu-walk from ref-walk.
292* Take the RCU lock for the entire path walk, starting with the acquiring
293 of the starting path (eg. root/cwd/fd-path). So now dentry refcounts are
294 not required for dentry persistence.
295* synchronize_rcu is called when unregistering a filesystem, so we can
296 access d_ops and i_ops during rcu-walk.
297* Similarly take the vfsmount lock for the entire path walk. So now mnt
298 refcounts are not required for persistence. Also we are free to perform mount
299 lookups, and to assume dentry mount points and mount roots are stable up and
300 down the path.
301* Have a per-dentry seqlock to protect the dentry name, parent, and inode,
302 so we can load this tuple atomically, and also check whether any of its
303 members have changed.
304* Dentry lookups (based on parent, candidate string tuple) recheck the parent
305 sequence after the child is found in case anything changed in the parent
306 during the path walk.
307* inode is also RCU protected so we can load d_inode and use the inode for
308 limited things.
309* i_mode, i_uid, i_gid can be tested for exec permissions during path walk.
310* i_op can be loaded.
311* When the destination dentry is reached, drop rcu there (ie. take d_lock,
312 verify d_seq, increment refcount).
313* If seqlock verification fails anywhere along the path, do a full restart
314 of the path lookup in ref-walk mode. -ECHILD tends to be used (for want of
315 a better errno) to signal an rcu-walk failure.
316
317The cases where rcu-walk cannot continue are:
318* NULL dentry (ie. any uncached path element)
319* Following links
320
321It may be possible eventually to make following links rcu-walk aware.
322
323Uncached path elements will always require dropping to ref-walk mode, at the
324very least because i_mutex needs to be grabbed, and objects allocated.
325
326Final note:
327"store-free" path walking is not strictly store free. We take vfsmount lock
328and refcounts (both of which can be made per-cpu), and we also store to the
329stack (which is essentially CPU-local), and we also have to take locks and
330refcount on final dentry.
331
332The point is that shared data, where practically possible, is not locked
333or stored into. The result is massive improvements in performance and
334scalability of path resolution.
335
336
337Interesting statistics
338======================
339
340The following table gives rcu lookup statistics for a few simple workloads
341(2s12c24t Westmere, debian non-graphical system). Ungraceful are attempts to
342drop rcu that fail due to d_seq failure and requiring the entire path lookup
343again. Other cases are successful rcu-drops that are required before the final
344element, nodentry for missing dentry, revalidate for filesystem revalidate
345routine requiring rcu drop, permission for permission check requiring drop,
346and link for symlink traversal requiring drop.
347
348 rcu-lookups restart nodentry link revalidate permission
349bootup 47121 0 4624 1010 10283 7852
350dbench 25386793 0 6778659(26.7%) 55 549 1156
351kbuild 2696672 10 64442(2.3%) 108764(4.0%) 1 1590
352git diff 39605 0 28 2 0 106
353vfstest 24185492 4945 708725(2.9%) 1076136(4.4%) 0 2651
354
355What this shows is that failed rcu-walk lookups, ie. ones that are restarted
356entirely with ref-walk, are quite rare. Even the "vfstest" case which
357specifically has concurrent renames/mkdir/rmdir/creat/unlink/etc to exercise
358such races is not showing a huge amount of restarts.
359
360Dropping from rcu-walk to ref-walk means that we have encountered a dentry where
361the reference count needs to be taken for some reason. This is either because
362we have reached the target of the path walk, or because we have encountered a
363condition that can't be resolved in rcu-walk mode. Ideally, we drop rcu-walk
364only when we have reached the target dentry, so the other statistics show where
365this does not happen.
366
367Note that a graceful drop from rcu-walk mode due to something such as the
368dentry not existing (which can be common) is not necessarily a failure of
369rcu-walk scheme, because some elements of the path may have been walked in
370rcu-walk mode. The further we get from common path elements (such as cwd or
371root), the less contended the dentry is likely to be. The closer we are to
372common path elements, the more likely they will exist in dentry cache.
373
374
375Papers and other documentation on dcache locking
376================================================
377
3781. Scaling dcache with RCU (http://linuxjournal.com/article.php?sid=7124).
379
3802. http://lse.sourceforge.net/locking/dcache/dcache.html
381
382
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index b12c89538680..07a32b42cf9c 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -216,7 +216,6 @@ had ->revalidate()) add calls in ->follow_link()/->readlink().
216->d_parent changes are not protected by BKL anymore. Read access is safe 216->d_parent changes are not protected by BKL anymore. Read access is safe
217if at least one of the following is true: 217if at least one of the following is true:
218 * filesystem has no cross-directory rename() 218 * filesystem has no cross-directory rename()
219 * dcache_lock is held
220 * we know that parent had been locked (e.g. we are looking at 219 * we know that parent had been locked (e.g. we are looking at
221->d_parent of ->lookup() argument). 220->d_parent of ->lookup() argument).
222 * we are called from ->rename(). 221 * we are called from ->rename().
@@ -318,3 +317,71 @@ if it's zero is not *and* *never* *had* *been* enough. Final unlink() and iput(
318may happen while the inode is in the middle of ->write_inode(); e.g. if you blindly 317may happen while the inode is in the middle of ->write_inode(); e.g. if you blindly
319free the on-disk inode, you may end up doing that while ->write_inode() is writing 318free the on-disk inode, you may end up doing that while ->write_inode() is writing
320to it. 319to it.
320
321---
322[mandatory]
323
324 .d_delete() now only advises the dcache as to whether or not to cache
325unreferenced dentries, and is now only called when the dentry refcount goes to
3260. Even on 0 refcount transition, it must be able to tolerate being called 0,
3271, or more times (eg. constant, idempotent).
328
329---
330[mandatory]
331
332 .d_compare() calling convention and locking rules are significantly
333changed. Read updated documentation in Documentation/filesystems/vfs.txt (and
334look at examples of other filesystems) for guidance.
335
336---
337[mandatory]
338
339 .d_hash() calling convention and locking rules are significantly
340changed. Read updated documentation in Documentation/filesystems/vfs.txt (and
341look at examples of other filesystems) for guidance.
342
343---
344[mandatory]
345 dcache_lock is gone, replaced by fine grained locks. See fs/dcache.c
346for details of what locks to replace dcache_lock with in order to protect
347particular things. Most of the time, a filesystem only needs ->d_lock, which
348protects *all* the dcache state of a given dentry.
349
350--
351[mandatory]
352
353 Filesystems must RCU-free their inodes, if they can have been accessed
354via rcu-walk path walk (basically, if the file can have had a path name in the
355vfs namespace).
356
357 i_dentry and i_rcu share storage in a union, and the vfs expects
358i_dentry to be reinitialized before it is freed, so an:
359
360 INIT_LIST_HEAD(&inode->i_dentry);
361
362must be done in the RCU callback.
363
364--
365[recommended]
366 vfs now tries to do path walking in "rcu-walk mode", which avoids
367atomic operations and scalability hazards on dentries and inodes (see
368Documentation/filesystems/path-lookup.txt). d_hash and d_compare changes (above)
369are examples of the changes required to support this. For more complex
370filesystem callbacks, the vfs drops out of rcu-walk mode before the fs call, so
371no changes are required to the filesystem. However, this is costly and loses
372the benefits of rcu-walk mode. We will begin to add filesystem callbacks that
373are rcu-walk aware, shown below. Filesystems should take advantage of this
374where possible.
375
376--
377[mandatory]
378 d_revalidate is a callback that is made on every path element (if
379the filesystem provides it), which requires dropping out of rcu-walk mode. This
380may now be called in rcu-walk mode (nd->flags & LOOKUP_RCU). -ECHILD should be
381returned if the filesystem cannot handle rcu-walk. See
382Documentation/filesystems/vfs.txt for more details.
383
384 permission and check_acl are inode permission checks that are called
385on many or all directory inodes on the way down a path walk (to check for
386exec permission). These must now be rcu-walk aware (flags & IPERM_RCU). See
387Documentation/filesystems/vfs.txt for more details.
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 20899e095e7e..fbb324e2bd43 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -325,7 +325,8 @@ struct inode_operations {
325 void * (*follow_link) (struct dentry *, struct nameidata *); 325 void * (*follow_link) (struct dentry *, struct nameidata *);
326 void (*put_link) (struct dentry *, struct nameidata *, void *); 326 void (*put_link) (struct dentry *, struct nameidata *, void *);
327 void (*truncate) (struct inode *); 327 void (*truncate) (struct inode *);
328 int (*permission) (struct inode *, int, struct nameidata *); 328 int (*permission) (struct inode *, int, unsigned int);
329 int (*check_acl)(struct inode *, int, unsigned int);
329 int (*setattr) (struct dentry *, struct iattr *); 330 int (*setattr) (struct dentry *, struct iattr *);
330 int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *); 331 int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *);
331 int (*setxattr) (struct dentry *, const char *,const void *,size_t,int); 332 int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
@@ -414,6 +415,13 @@ otherwise noted.
414 permission: called by the VFS to check for access rights on a POSIX-like 415 permission: called by the VFS to check for access rights on a POSIX-like
415 filesystem. 416 filesystem.
416 417
418 May be called in rcu-walk mode (flags & IPERM_RCU). If in rcu-walk
419 mode, the filesystem must check the permission without blocking or
420 storing to the inode.
421
422 If a situation is encountered that rcu-walk cannot handle, return
423 -ECHILD and it will be called again in ref-walk mode.
424
417 setattr: called by the VFS to set attributes for a file. This method 425 setattr: called by the VFS to set attributes for a file. This method
418 is called by chmod(2) and related system calls. 426 is called by chmod(2) and related system calls.
419 427
@@ -847,9 +855,12 @@ defined:
847 855
848struct dentry_operations { 856struct dentry_operations {
849 int (*d_revalidate)(struct dentry *, struct nameidata *); 857 int (*d_revalidate)(struct dentry *, struct nameidata *);
850 int (*d_hash) (struct dentry *, struct qstr *); 858 int (*d_hash)(const struct dentry *, const struct inode *,
851 int (*d_compare) (struct dentry *, struct qstr *, struct qstr *); 859 struct qstr *);
852 int (*d_delete)(struct dentry *); 860 int (*d_compare)(const struct dentry *, const struct inode *,
861 const struct dentry *, const struct inode *,
862 unsigned int, const char *, const struct qstr *);
863 int (*d_delete)(const struct dentry *);
853 void (*d_release)(struct dentry *); 864 void (*d_release)(struct dentry *);
854 void (*d_iput)(struct dentry *, struct inode *); 865 void (*d_iput)(struct dentry *, struct inode *);
855 char *(*d_dname)(struct dentry *, char *, int); 866 char *(*d_dname)(struct dentry *, char *, int);
@@ -860,13 +871,45 @@ struct dentry_operations {
860 dcache. Most filesystems leave this as NULL, because all their 871 dcache. Most filesystems leave this as NULL, because all their
861 dentries in the dcache are valid 872 dentries in the dcache are valid
862 873
863 d_hash: called when the VFS adds a dentry to the hash table 874 d_revalidate may be called in rcu-walk mode (nd->flags & LOOKUP_RCU).
875 If in rcu-walk mode, the filesystem must revalidate the dentry without
876 blocking or storing to the dentry, d_parent and d_inode should not be
877 used without care (because they can go NULL), instead nd->inode should
878 be used.
879
880 If a situation is encountered that rcu-walk cannot handle, return
881 -ECHILD and it will be called again in ref-walk mode.
882
883 d_hash: called when the VFS adds a dentry to the hash table. The first
884 dentry passed to d_hash is the parent directory that the name is
885 to be hashed into. The inode is the dentry's inode.
886
887 Same locking and synchronisation rules as d_compare regarding
888 what is safe to dereference etc.
889
890 d_compare: called to compare a dentry name with a given name. The first
891 dentry is the parent of the dentry to be compared, the second is
892 the parent's inode, then the dentry and inode (may be NULL) of the
893 child dentry. len and name string are properties of the dentry to be
894 compared. qstr is the name to compare it with.
895
896 Must be constant and idempotent, and should not take locks if
897	possible, and should not store into the dentry or inodes.
898 Should not dereference pointers outside the dentry or inodes without
899 lots of care (eg. d_parent, d_inode, d_name should not be used).
900
901 However, our vfsmount is pinned, and RCU held, so the dentries and
902 inodes won't disappear, neither will our sb or filesystem module.
903 ->i_sb and ->d_sb may be used.
864 904
865 d_compare: called when a dentry should be compared with another 905 It is a tricky calling convention because it needs to be called under
906 "rcu-walk", ie. without any locks or references on things.
866 907
867 d_delete: called when the last reference to a dentry is 908 d_delete: called when the last reference to a dentry is dropped and the
868 deleted. This means no-one is using the dentry, however it is 909 dcache is deciding whether or not to cache it. Return 1 to delete
869 still valid and in the dcache 910 immediately, or 0 to cache the dentry. Default is NULL which means to
911 always cache a reachable dentry. d_delete must be constant and
912 idempotent.
870 913
871 d_release: called when a dentry is really deallocated 914 d_release: called when a dentry is really deallocated
872 915
@@ -910,14 +953,11 @@ manipulate dentries:
910 the usage count) 953 the usage count)
911 954
912 dput: close a handle for a dentry (decrements the usage count). If 955 dput: close a handle for a dentry (decrements the usage count). If
913 the usage count drops to 0, the "d_delete" method is called 956 the usage count drops to 0, and the dentry is still in its
914 and the dentry is placed on the unused list if the dentry is 957 parent's hash, the "d_delete" method is called to check whether
915 still in its parents hash list. Putting the dentry on the 958 it should be cached. If it should not be cached, or if the dentry
916 unused list just means that if the system needs some RAM, it 959 is not hashed, it is deleted. Otherwise cached dentries are put
917 goes through the unused list of dentries and deallocates them. 960 into an LRU list to be reclaimed on memory shortage.
918 If the dentry has already been unhashed and the usage count
919 drops to 0, in this case the dentry is deallocated after the
920 "d_delete" method is called
921 961
922 d_drop: this unhashes a dentry from its parents hash list. A 962 d_drop: this unhashes a dentry from its parents hash list. A
923 subsequent call to dput() will deallocate the dentry if its 963 subsequent call to dput() will deallocate the dentry if its
diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas
index 00301ed9c371..b64d10d221ec 100644
--- a/Documentation/scsi/ChangeLog.megaraid_sas
+++ b/Documentation/scsi/ChangeLog.megaraid_sas
@@ -1,3 +1,25 @@
1Release Date : Tues. Dec 14, 2010 17:00:00 PST 2010 -
2 (emaild-id:megaraidlinux@lsi.com)
3 Adam Radford
4Current Version : 00.00.05.29-rc1
5Old Version : 00.00.04.31-rc1
6 1. Rename megaraid_sas.c to megaraid_sas_base.c.
7 2. Update GPL headers.
8 3. Add MSI-X support and 'msix_disable' module parameter.
9 4. Use lowest memory bar (for SR-IOV VF support).
10 5. Add struct megasas_instance_template changes, and change all code to use
11 new instance entries:
12
13 irqreturn_t (*service_isr )(int irq, void *devp);
14 void (*tasklet)(unsigned long);
15 u32 (*init_adapter)(struct megasas_instance *);
16 u32 (*build_and_issue_cmd) (struct megasas_instance *,
17 struct scsi_cmnd *);
18 void (*issue_dcmd) (struct megasas_instance *instance,
19 struct megasas_cmd *cmd);
20
21 6. Add code to support MegaRAID 9265/9285 controllers device id (0x5b).
22-------------------------------------------------------------------------------
11 Release Date : Thur. May 03, 2010 09:12:45 PST 2009 - 231 Release Date : Thur. May 03, 2010 09:12:45 PST 2009 -
2 (emaild-id:megaraidlinux@lsi.com) 24 (emaild-id:megaraidlinux@lsi.com)
3 Bo Yang 25 Bo Yang
diff --git a/Documentation/usb/power-management.txt b/Documentation/usb/power-management.txt
index b29d8e56cf28..c9ffa9ced7ee 100644
--- a/Documentation/usb/power-management.txt
+++ b/Documentation/usb/power-management.txt
@@ -2,7 +2,7 @@
2 2
3 Alan Stern <stern@rowland.harvard.edu> 3 Alan Stern <stern@rowland.harvard.edu>
4 4
5 December 11, 2009 5 October 28, 2010
6 6
7 7
8 8
@@ -107,9 +107,14 @@ allowed to issue dynamic suspends.
107The user interface for controlling dynamic PM is located in the power/ 107The user interface for controlling dynamic PM is located in the power/
108subdirectory of each USB device's sysfs directory, that is, in 108subdirectory of each USB device's sysfs directory, that is, in
109/sys/bus/usb/devices/.../power/ where "..." is the device's ID. The 109/sys/bus/usb/devices/.../power/ where "..." is the device's ID. The
110relevant attribute files are: wakeup, control, and autosuspend. 110relevant attribute files are: wakeup, control, and
111(There may also be a file named "level"; this file was deprecated 111autosuspend_delay_ms. (There may also be a file named "level"; this
112as of the 2.6.35 kernel and replaced by the "control" file.) 112file was deprecated as of the 2.6.35 kernel and replaced by the
113"control" file. In 2.6.38 the "autosuspend" file will be deprecated
114and replaced by the "autosuspend_delay_ms" file. The only difference
115is that the newer file expresses the delay in milliseconds whereas the
116older file uses seconds. Confusingly, both files are present in 2.6.37
117but only "autosuspend" works.)
113 118
114 power/wakeup 119 power/wakeup
115 120
@@ -140,33 +145,36 @@ as of the 2.6.35 kernel and replaced by the "control" file.)
140 suspended and autoresume was not allowed. This 145 suspended and autoresume was not allowed. This
141 setting is no longer supported.) 146 setting is no longer supported.)
142 147
143 power/autosuspend 148 power/autosuspend_delay_ms
144 149
145 This file contains an integer value, which is the 150 This file contains an integer value, which is the
146 number of seconds the device should remain idle before 151 number of milliseconds the device should remain idle
147 the kernel will autosuspend it (the idle-delay time). 152 before the kernel will autosuspend it (the idle-delay
148 The default is 2. 0 means to autosuspend as soon as 153 time). The default is 2000. 0 means to autosuspend
149 the device becomes idle, and negative values mean 154 as soon as the device becomes idle, and negative
150 never to autosuspend. You can write a number to the 155 values mean never to autosuspend. You can write a
151 file to change the autosuspend idle-delay time. 156 number to the file to change the autosuspend
152 157 idle-delay time.
153Writing "-1" to power/autosuspend and writing "on" to power/control do 158
154essentially the same thing -- they both prevent the device from being 159Writing "-1" to power/autosuspend_delay_ms and writing "on" to
155autosuspended. Yes, this is a redundancy in the API. 160power/control do essentially the same thing -- they both prevent the
161device from being autosuspended. Yes, this is a redundancy in the
162API.
156 163
157(In 2.6.21 writing "0" to power/autosuspend would prevent the device 164(In 2.6.21 writing "0" to power/autosuspend would prevent the device
158from being autosuspended; the behavior was changed in 2.6.22. The 165from being autosuspended; the behavior was changed in 2.6.22. The
159power/autosuspend attribute did not exist prior to 2.6.21, and the 166power/autosuspend attribute did not exist prior to 2.6.21, and the
160power/level attribute did not exist prior to 2.6.22. power/control 167power/level attribute did not exist prior to 2.6.22. power/control
161was added in 2.6.34.) 168was added in 2.6.34, and power/autosuspend_delay_ms was added in
1692.6.37 but did not become functional until 2.6.38.)
162 170
163 171
164 Changing the default idle-delay time 172 Changing the default idle-delay time
165 ------------------------------------ 173 ------------------------------------
166 174
167The default autosuspend idle-delay time is controlled by a module 175The default autosuspend idle-delay time (in seconds) is controlled by
168parameter in usbcore. You can specify the value when usbcore is 176a module parameter in usbcore. You can specify the value when usbcore
169loaded. For example, to set it to 5 seconds instead of 2 you would 177is loaded. For example, to set it to 5 seconds instead of 2 you would
170do: 178do:
171 179
172 modprobe usbcore autosuspend=5 180 modprobe usbcore autosuspend=5
@@ -234,25 +242,23 @@ every device.
234 242
235If a driver knows that its device has proper suspend/resume support, 243If a driver knows that its device has proper suspend/resume support,
236it can enable autosuspend all by itself. For example, the video 244it can enable autosuspend all by itself. For example, the video
237driver for a laptop's webcam might do this, since these devices are 245driver for a laptop's webcam might do this (in recent kernels they
238rarely used and so should normally be autosuspended. 246do), since these devices are rarely used and so should normally be
247autosuspended.
239 248
240Sometimes it turns out that even when a device does work okay with 249Sometimes it turns out that even when a device does work okay with
241autosuspend there are still problems. For example, there are 250autosuspend there are still problems. For example, the usbhid driver,
242experimental patches adding autosuspend support to the usbhid driver, 251which manages keyboards and mice, has autosuspend support. Tests with
243which manages keyboards and mice, among other things. Tests with a 252a number of keyboards show that typing on a suspended keyboard, while
244number of keyboards showed that typing on a suspended keyboard, while 253causing the keyboard to do a remote wakeup all right, will nonetheless
245causing the keyboard to do a remote wakeup all right, would 254frequently result in lost keystrokes. Tests with mice show that some
246nonetheless frequently result in lost keystrokes. Tests with mice 255of them will issue a remote-wakeup request in response to button
247showed that some of them would issue a remote-wakeup request in 256presses but not to motion, and some in response to neither.
248response to button presses but not to motion, and some in response to
249neither.
250 257
251The kernel will not prevent you from enabling autosuspend on devices 258The kernel will not prevent you from enabling autosuspend on devices
252that can't handle it. It is even possible in theory to damage a 259that can't handle it. It is even possible in theory to damage a
253device by suspending it at the wrong time -- for example, suspending a 260device by suspending it at the wrong time. (Highly unlikely, but
254USB hard disk might cause it to spin down without parking the heads. 261possible.) Take care.
255(Highly unlikely, but possible.) Take care.
256 262
257 263
258 The driver interface for Power Management 264 The driver interface for Power Management
@@ -336,10 +342,6 @@ autosuspend the interface's device. When the usage counter is = 0
336then the interface is considered to be idle, and the kernel may 342then the interface is considered to be idle, and the kernel may
337autosuspend the device. 343autosuspend the device.
338 344
339(There is a similar usage counter field in struct usb_device,
340associated with the device itself rather than any of its interfaces.
341This counter is used only by the USB core.)
342
343Drivers need not be concerned about balancing changes to the usage 345Drivers need not be concerned about balancing changes to the usage
344counter; the USB core will undo any remaining "get"s when a driver 346counter; the USB core will undo any remaining "get"s when a driver
345is unbound from its interface. As a corollary, drivers must not call 347is unbound from its interface. As a corollary, drivers must not call
@@ -409,11 +411,11 @@ during autosuspend. For example, there's not much point
409autosuspending a keyboard if the user can't cause the keyboard to do a 411autosuspending a keyboard if the user can't cause the keyboard to do a
410remote wakeup by typing on it. If the driver sets 412remote wakeup by typing on it. If the driver sets
411intf->needs_remote_wakeup to 1, the kernel won't autosuspend the 413intf->needs_remote_wakeup to 1, the kernel won't autosuspend the
412device if remote wakeup isn't available or has been disabled through 414device if remote wakeup isn't available. (If the device is already
413the power/wakeup attribute. (If the device is already autosuspended, 415autosuspended, though, setting this flag won't cause the kernel to
414though, setting this flag won't cause the kernel to autoresume it. 416autoresume it. Normally a driver would set this flag in its probe
415Normally a driver would set this flag in its probe method, at which 417method, at which time the device is guaranteed not to be
416time the device is guaranteed not to be autosuspended.) 418autosuspended.)
417 419
418If a driver does its I/O asynchronously in interrupt context, it 420If a driver does its I/O asynchronously in interrupt context, it
419should call usb_autopm_get_interface_async() before starting output and 421should call usb_autopm_get_interface_async() before starting output and
@@ -422,20 +424,19 @@ it receives an input event, it should call
422 424
423 usb_mark_last_busy(struct usb_device *udev); 425 usb_mark_last_busy(struct usb_device *udev);
424 426
425in the event handler. This sets udev->last_busy to the current time. 427in the event handler. This tells the PM core that the device was just
426udev->last_busy is the field used for idle-delay calculations; 428busy and therefore the next autosuspend idle-delay expiration should
427updating it will cause any pending autosuspend to be moved back. Most 429be pushed back. Many of the usb_autopm_* routines also make this call,
428of the usb_autopm_* routines will also set the last_busy field to the 430so drivers need to worry only when interrupt-driven input arrives.
429current time.
430 431
431Asynchronous operation is always subject to races. For example, a 432Asynchronous operation is always subject to races. For example, a
432driver may call one of the usb_autopm_*_interface_async() routines at 433driver may call the usb_autopm_get_interface_async() routine at a time
433a time when the core has just finished deciding the device has been 434when the core has just finished deciding the device has been idle for
434idle for long enough but not yet gotten around to calling the driver's 435long enough but not yet gotten around to calling the driver's suspend
435suspend method. The suspend method must be responsible for 436method. The suspend method must be responsible for synchronizing with
436synchronizing with the output request routine and the URB completion 437the I/O request routine and the URB completion handler; it should
437handler; it should cause autosuspends to fail with -EBUSY if the 438cause autosuspends to fail with -EBUSY if the driver needs to use the
438driver needs to use the device. 439device.
439 440
440External suspend calls should never be allowed to fail in this way, 441External suspend calls should never be allowed to fail in this way,
441only autosuspend calls. The driver can tell them apart by checking 442only autosuspend calls. The driver can tell them apart by checking
@@ -472,7 +473,9 @@ Firstly, a device may already be autosuspended when a system suspend
472occurs. Since system suspends are supposed to be as transparent as 473occurs. Since system suspends are supposed to be as transparent as
473possible, the device should remain suspended following the system 474possible, the device should remain suspended following the system
474resume. But this theory may not work out well in practice; over time 475resume. But this theory may not work out well in practice; over time
475the kernel's behavior in this regard has changed. 476the kernel's behavior in this regard has changed. As of 2.6.37 the
477policy is to resume all devices during a system resume and let them
478handle their own runtime suspends afterward.
476 479
477Secondly, a dynamic power-management event may occur as a system 480Secondly, a dynamic power-management event may occur as a system
478suspend is underway. The window for this is short, since system 481suspend is underway. The window for this is short, since system
diff --git a/arch/arm/mach-davinci/usb.c b/arch/arm/mach-davinci/usb.c
index 31f0cbea0caa..23d2b6d9fa63 100644
--- a/arch/arm/mach-davinci/usb.c
+++ b/arch/arm/mach-davinci/usb.c
@@ -64,17 +64,19 @@ static struct resource usb_resources[] = {
64 { 64 {
65 .start = IRQ_USBINT, 65 .start = IRQ_USBINT,
66 .flags = IORESOURCE_IRQ, 66 .flags = IORESOURCE_IRQ,
67 .name = "mc"
67 }, 68 },
68 { 69 {
69 /* placeholder for the dedicated CPPI IRQ */ 70 /* placeholder for the dedicated CPPI IRQ */
70 .flags = IORESOURCE_IRQ, 71 .flags = IORESOURCE_IRQ,
72 .name = "dma"
71 }, 73 },
72}; 74};
73 75
74static u64 usb_dmamask = DMA_BIT_MASK(32); 76static u64 usb_dmamask = DMA_BIT_MASK(32);
75 77
76static struct platform_device usb_dev = { 78static struct platform_device usb_dev = {
77 .name = "musb_hdrc", 79 .name = "musb-davinci",
78 .id = -1, 80 .id = -1,
79 .dev = { 81 .dev = {
80 .platform_data = &usb_data, 82 .platform_data = &usb_data,
@@ -110,6 +112,7 @@ static struct resource da8xx_usb20_resources[] = {
110 { 112 {
111 .start = IRQ_DA8XX_USB_INT, 113 .start = IRQ_DA8XX_USB_INT,
112 .flags = IORESOURCE_IRQ, 114 .flags = IORESOURCE_IRQ,
115 .name = "mc",
113 }, 116 },
114}; 117};
115 118
@@ -121,6 +124,7 @@ int __init da8xx_register_usb20(unsigned mA, unsigned potpgt)
121 124
122 usb_dev.resource = da8xx_usb20_resources; 125 usb_dev.resource = da8xx_usb20_resources;
123 usb_dev.num_resources = ARRAY_SIZE(da8xx_usb20_resources); 126 usb_dev.num_resources = ARRAY_SIZE(da8xx_usb20_resources);
127 usb_dev.name = "musb-da8xx";
124 128
125 return platform_device_register(&usb_dev); 129 return platform_device_register(&usb_dev);
126} 130}
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 3e8c9e859f98..1a2cf6226a55 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -48,6 +48,7 @@ config ARCH_OMAP4
48 select ARM_ERRATA_720789 48 select ARM_ERRATA_720789
49 select ARCH_HAS_OPP 49 select ARCH_HAS_OPP
50 select PM_OPP if PM 50 select PM_OPP if PM
51 select USB_ARCH_HAS_EHCI
51 52
52comment "OMAP Core Type" 53comment "OMAP Core Type"
53 depends on ARCH_OMAP2 54 depends on ARCH_OMAP2
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 4ab82f6f15b1..cd7332f50b2d 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -209,9 +209,11 @@ obj-$(CONFIG_MACH_IGEP0030) += board-igep0030.o \
209obj-$(CONFIG_MACH_OMAP3_TOUCHBOOK) += board-omap3touchbook.o \ 209obj-$(CONFIG_MACH_OMAP3_TOUCHBOOK) += board-omap3touchbook.o \
210 hsmmc.o 210 hsmmc.o
211obj-$(CONFIG_MACH_OMAP_4430SDP) += board-4430sdp.o \ 211obj-$(CONFIG_MACH_OMAP_4430SDP) += board-4430sdp.o \
212 hsmmc.o 212 hsmmc.o \
213 omap_phy_internal.o
213obj-$(CONFIG_MACH_OMAP4_PANDA) += board-omap4panda.o \ 214obj-$(CONFIG_MACH_OMAP4_PANDA) += board-omap4panda.o \
214 hsmmc.o 215 hsmmc.o \
216 omap_phy_internal.o
215 217
216obj-$(CONFIG_MACH_OMAP3517EVM) += board-am3517evm.o 218obj-$(CONFIG_MACH_OMAP3517EVM) += board-am3517evm.o
217 219
diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c
index 1cb208b6e626..a70bdf28e2bc 100644
--- a/arch/arm/mach-omap2/board-4430sdp.c
+++ b/arch/arm/mach-omap2/board-4430sdp.c
@@ -44,6 +44,7 @@
44#define ETH_KS8851_IRQ 34 44#define ETH_KS8851_IRQ 34
45#define ETH_KS8851_POWER_ON 48 45#define ETH_KS8851_POWER_ON 48
46#define ETH_KS8851_QUART 138 46#define ETH_KS8851_QUART 138
47#define OMAP4SDP_MDM_PWR_EN_GPIO 157
47#define OMAP4_SFH7741_SENSOR_OUTPUT_GPIO 184 48#define OMAP4_SFH7741_SENSOR_OUTPUT_GPIO 184
48#define OMAP4_SFH7741_ENABLE_GPIO 188 49#define OMAP4_SFH7741_ENABLE_GPIO 188
49 50
@@ -250,12 +251,29 @@ static void __init omap_4430sdp_init_irq(void)
250 gic_init_irq(); 251 gic_init_irq();
251} 252}
252 253
254static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
255 .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY,
256 .port_mode[1] = EHCI_HCD_OMAP_MODE_UNKNOWN,
257 .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN,
258 .phy_reset = false,
259 .reset_gpio_port[0] = -EINVAL,
260 .reset_gpio_port[1] = -EINVAL,
261 .reset_gpio_port[2] = -EINVAL,
262};
263
253static struct omap_musb_board_data musb_board_data = { 264static struct omap_musb_board_data musb_board_data = {
254 .interface_type = MUSB_INTERFACE_UTMI, 265 .interface_type = MUSB_INTERFACE_UTMI,
255 .mode = MUSB_PERIPHERAL, 266 .mode = MUSB_OTG,
256 .power = 100, 267 .power = 100,
257}; 268};
258 269
270static struct twl4030_usb_data omap4_usbphy_data = {
271 .phy_init = omap4430_phy_init,
272 .phy_exit = omap4430_phy_exit,
273 .phy_power = omap4430_phy_power,
274 .phy_set_clock = omap4430_phy_set_clk,
275};
276
259static struct omap2_hsmmc_info mmc[] = { 277static struct omap2_hsmmc_info mmc[] = {
260 { 278 {
261 .mmc = 1, 279 .mmc = 1,
@@ -475,6 +493,7 @@ static struct twl4030_platform_data sdp4430_twldata = {
475 .vaux1 = &sdp4430_vaux1, 493 .vaux1 = &sdp4430_vaux1,
476 .vaux2 = &sdp4430_vaux2, 494 .vaux2 = &sdp4430_vaux2,
477 .vaux3 = &sdp4430_vaux3, 495 .vaux3 = &sdp4430_vaux3,
496 .usb = &omap4_usbphy_data
478}; 497};
479 498
480static struct i2c_board_info __initdata sdp4430_i2c_boardinfo[] = { 499static struct i2c_board_info __initdata sdp4430_i2c_boardinfo[] = {
@@ -555,11 +574,15 @@ static void __init omap_4430sdp_init(void)
555 platform_add_devices(sdp4430_devices, ARRAY_SIZE(sdp4430_devices)); 574 platform_add_devices(sdp4430_devices, ARRAY_SIZE(sdp4430_devices));
556 omap_serial_init(); 575 omap_serial_init();
557 omap4_twl6030_hsmmc_init(mmc); 576 omap4_twl6030_hsmmc_init(mmc);
558 /* OMAP4 SDP uses internal transceiver so register nop transceiver */ 577
559 usb_nop_xceiv_register(); 578 /* Power on the ULPI PHY */
560 /* FIXME: allow multi-omap to boot until musb is updated for omap4 */ 579 if (gpio_is_valid(OMAP4SDP_MDM_PWR_EN_GPIO)) {
561 if (!cpu_is_omap44xx()) 580 /* FIXME: Assumes pad is already muxed for GPIO mode */
562 usb_musb_init(&musb_board_data); 581 gpio_request(OMAP4SDP_MDM_PWR_EN_GPIO, "USBB1 PHY VMDM_3V3");
582 gpio_direction_output(OMAP4SDP_MDM_PWR_EN_GPIO, 1);
583 }
584 usb_ehci_init(&ehci_pdata);
585 usb_musb_init(&musb_board_data);
563 586
564 status = omap_ethernet_init(); 587 status = omap_ethernet_init();
565 if (status) { 588 if (status) {
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
index 147d9005f320..f396756872b7 100644
--- a/arch/arm/mach-omap2/board-n8x0.c
+++ b/arch/arm/mach-omap2/board-n8x0.c
@@ -46,8 +46,7 @@ static struct device *mmc_device;
46#define TUSB6010_GPIO_ENABLE 0 46#define TUSB6010_GPIO_ENABLE 0
47#define TUSB6010_DMACHAN 0x3f 47#define TUSB6010_DMACHAN 0x3f
48 48
49#if defined(CONFIG_USB_TUSB6010) || \ 49#ifdef CONFIG_USB_MUSB_TUSB6010
50 defined(CONFIG_USB_TUSB6010_MODULE)
51/* 50/*
52 * Enable or disable power to TUSB6010. When enabling, turn on 3.3 V and 51 * Enable or disable power to TUSB6010. When enabling, turn on 3.3 V and
53 * 1.5 V voltage regulators of PM companion chip. Companion chip will then 52 * 1.5 V voltage regulators of PM companion chip. Companion chip will then
@@ -134,7 +133,7 @@ err:
134 133
135static void __init n8x0_usb_init(void) {} 134static void __init n8x0_usb_init(void) {}
136 135
137#endif /*CONFIG_USB_TUSB6010 */ 136#endif /*CONFIG_USB_MUSB_TUSB6010 */
138 137
139 138
140static struct omap2_mcspi_device_config p54spi_mcspi_config = { 139static struct omap2_mcspi_device_config p54spi_mcspi_config = {
diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c
index 613bdd89bcfa..e001a048dc0c 100644
--- a/arch/arm/mach-omap2/board-omap4panda.c
+++ b/arch/arm/mach-omap2/board-omap4panda.c
@@ -144,10 +144,17 @@ error1:
144 144
145static struct omap_musb_board_data musb_board_data = { 145static struct omap_musb_board_data musb_board_data = {
146 .interface_type = MUSB_INTERFACE_UTMI, 146 .interface_type = MUSB_INTERFACE_UTMI,
147 .mode = MUSB_PERIPHERAL, 147 .mode = MUSB_OTG,
148 .power = 100, 148 .power = 100,
149}; 149};
150 150
151static struct twl4030_usb_data omap4_usbphy_data = {
152 .phy_init = omap4430_phy_init,
153 .phy_exit = omap4430_phy_exit,
154 .phy_power = omap4430_phy_power,
155 .phy_set_clock = omap4430_phy_set_clk,
156};
157
151static struct omap2_hsmmc_info mmc[] = { 158static struct omap2_hsmmc_info mmc[] = {
152 { 159 {
153 .mmc = 1, 160 .mmc = 1,
@@ -357,6 +364,7 @@ static struct twl4030_platform_data omap4_panda_twldata = {
357 .vaux1 = &omap4_panda_vaux1, 364 .vaux1 = &omap4_panda_vaux1,
358 .vaux2 = &omap4_panda_vaux2, 365 .vaux2 = &omap4_panda_vaux2,
359 .vaux3 = &omap4_panda_vaux3, 366 .vaux3 = &omap4_panda_vaux3,
367 .usb = &omap4_usbphy_data,
360}; 368};
361 369
362static struct i2c_board_info __initdata omap4_panda_i2c_boardinfo[] = { 370static struct i2c_board_info __initdata omap4_panda_i2c_boardinfo[] = {
@@ -404,9 +412,7 @@ static void __init omap4_panda_init(void)
404 /* OMAP4 Panda uses internal transceiver so register nop transceiver */ 412 /* OMAP4 Panda uses internal transceiver so register nop transceiver */
405 usb_nop_xceiv_register(); 413 usb_nop_xceiv_register();
406 omap4_ehci_init(); 414 omap4_ehci_init();
407 /* FIXME: allow multi-omap to boot until musb is updated for omap4 */ 415 usb_musb_init(&musb_board_data);
408 if (!cpu_is_omap44xx())
409 usb_musb_init(&musb_board_data);
410} 416}
411 417
412static void __init omap4_panda_map_io(void) 418static void __init omap4_panda_map_io(void)
diff --git a/arch/arm/mach-omap2/clock2420_data.c b/arch/arm/mach-omap2/clock2420_data.c
index ed1295f5046e..0a992bc8d0d8 100644
--- a/arch/arm/mach-omap2/clock2420_data.c
+++ b/arch/arm/mach-omap2/clock2420_data.c
@@ -1877,7 +1877,7 @@ static struct omap_clk omap2420_clks[] = {
1877 CLK("omap-aes", "ick", &aes_ick, CK_242X), 1877 CLK("omap-aes", "ick", &aes_ick, CK_242X),
1878 CLK(NULL, "pka_ick", &pka_ick, CK_242X), 1878 CLK(NULL, "pka_ick", &pka_ick, CK_242X),
1879 CLK(NULL, "usb_fck", &usb_fck, CK_242X), 1879 CLK(NULL, "usb_fck", &usb_fck, CK_242X),
1880 CLK("musb_hdrc", "fck", &osc_ck, CK_242X), 1880 CLK("musb-hdrc", "fck", &osc_ck, CK_242X),
1881}; 1881};
1882 1882
1883/* 1883/*
diff --git a/arch/arm/mach-omap2/clock2430_data.c b/arch/arm/mach-omap2/clock2430_data.c
index 38341a71c6f8..c047dcd007e5 100644
--- a/arch/arm/mach-omap2/clock2430_data.c
+++ b/arch/arm/mach-omap2/clock2430_data.c
@@ -1983,7 +1983,7 @@ static struct omap_clk omap2430_clks[] = {
1983 CLK("omap-aes", "ick", &aes_ick, CK_243X), 1983 CLK("omap-aes", "ick", &aes_ick, CK_243X),
1984 CLK(NULL, "pka_ick", &pka_ick, CK_243X), 1984 CLK(NULL, "pka_ick", &pka_ick, CK_243X),
1985 CLK(NULL, "usb_fck", &usb_fck, CK_243X), 1985 CLK(NULL, "usb_fck", &usb_fck, CK_243X),
1986 CLK("musb_hdrc", "ick", &usbhs_ick, CK_243X), 1986 CLK("musb-omap2430", "ick", &usbhs_ick, CK_243X),
1987 CLK("mmci-omap-hs.0", "ick", &mmchs1_ick, CK_243X), 1987 CLK("mmci-omap-hs.0", "ick", &mmchs1_ick, CK_243X),
1988 CLK("mmci-omap-hs.0", "fck", &mmchs1_fck, CK_243X), 1988 CLK("mmci-omap-hs.0", "fck", &mmchs1_fck, CK_243X),
1989 CLK("mmci-omap-hs.1", "ick", &mmchs2_ick, CK_243X), 1989 CLK("mmci-omap-hs.1", "ick", &mmchs2_ick, CK_243X),
diff --git a/arch/arm/mach-omap2/clock3xxx_data.c b/arch/arm/mach-omap2/clock3xxx_data.c
index 9ab817e6c300..403a4a1d3f9c 100644
--- a/arch/arm/mach-omap2/clock3xxx_data.c
+++ b/arch/arm/mach-omap2/clock3xxx_data.c
@@ -3286,6 +3286,7 @@ static struct omap_clk omap3xxx_clks[] = {
3286 CLK(NULL, "cpefuse_fck", &cpefuse_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), 3286 CLK(NULL, "cpefuse_fck", &cpefuse_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
3287 CLK(NULL, "ts_fck", &ts_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), 3287 CLK(NULL, "ts_fck", &ts_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
3288 CLK(NULL, "usbtll_fck", &usbtll_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), 3288 CLK(NULL, "usbtll_fck", &usbtll_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
3289 CLK("ehci-omap.0", "usbtll_fck", &usbtll_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
3289 CLK("omap-mcbsp.1", "prcm_fck", &core_96m_fck, CK_3XXX), 3290 CLK("omap-mcbsp.1", "prcm_fck", &core_96m_fck, CK_3XXX),
3290 CLK("omap-mcbsp.5", "prcm_fck", &core_96m_fck, CK_3XXX), 3291 CLK("omap-mcbsp.5", "prcm_fck", &core_96m_fck, CK_3XXX),
3291 CLK(NULL, "core_96m_fck", &core_96m_fck, CK_3XXX), 3292 CLK(NULL, "core_96m_fck", &core_96m_fck, CK_3XXX),
@@ -3313,14 +3314,15 @@ static struct omap_clk omap3xxx_clks[] = {
3313 CLK(NULL, "ssi_sst_fck", &ssi_sst_fck_3430es1, CK_3430ES1), 3314 CLK(NULL, "ssi_sst_fck", &ssi_sst_fck_3430es1, CK_3430ES1),
3314 CLK(NULL, "ssi_sst_fck", &ssi_sst_fck_3430es2, CK_3430ES2PLUS | CK_36XX), 3315 CLK(NULL, "ssi_sst_fck", &ssi_sst_fck_3430es2, CK_3430ES2PLUS | CK_36XX),
3315 CLK(NULL, "core_l3_ick", &core_l3_ick, CK_3XXX), 3316 CLK(NULL, "core_l3_ick", &core_l3_ick, CK_3XXX),
3316 CLK("musb_hdrc", "ick", &hsotgusb_ick_3430es1, CK_3430ES1), 3317 CLK("musb-omap2430", "ick", &hsotgusb_ick_3430es1, CK_3430ES1),
3317 CLK("musb_hdrc", "ick", &hsotgusb_ick_3430es2, CK_3430ES2PLUS | CK_36XX), 3318 CLK("musb-omap2430", "ick", &hsotgusb_ick_3430es2, CK_3430ES2PLUS | CK_36XX),
3318 CLK(NULL, "sdrc_ick", &sdrc_ick, CK_3XXX), 3319 CLK(NULL, "sdrc_ick", &sdrc_ick, CK_3XXX),
3319 CLK(NULL, "gpmc_fck", &gpmc_fck, CK_3XXX), 3320 CLK(NULL, "gpmc_fck", &gpmc_fck, CK_3XXX),
3320 CLK(NULL, "security_l3_ick", &security_l3_ick, CK_34XX | CK_36XX), 3321 CLK(NULL, "security_l3_ick", &security_l3_ick, CK_34XX | CK_36XX),
3321 CLK(NULL, "pka_ick", &pka_ick, CK_34XX | CK_36XX), 3322 CLK(NULL, "pka_ick", &pka_ick, CK_34XX | CK_36XX),
3322 CLK(NULL, "core_l4_ick", &core_l4_ick, CK_3XXX), 3323 CLK(NULL, "core_l4_ick", &core_l4_ick, CK_3XXX),
3323 CLK(NULL, "usbtll_ick", &usbtll_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), 3324 CLK(NULL, "usbtll_ick", &usbtll_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
3325 CLK("ehci-omap.0", "usbtll_ick", &usbtll_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
3324 CLK("mmci-omap-hs.2", "ick", &mmchs3_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), 3326 CLK("mmci-omap-hs.2", "ick", &mmchs3_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
3325 CLK(NULL, "icr_ick", &icr_ick, CK_34XX | CK_36XX), 3327 CLK(NULL, "icr_ick", &icr_ick, CK_34XX | CK_36XX),
3326 CLK("omap-aes", "ick", &aes2_ick, CK_34XX | CK_36XX), 3328 CLK("omap-aes", "ick", &aes2_ick, CK_34XX | CK_36XX),
@@ -3366,8 +3368,11 @@ static struct omap_clk omap3xxx_clks[] = {
3366 CLK(NULL, "cam_ick", &cam_ick, CK_34XX | CK_36XX), 3368 CLK(NULL, "cam_ick", &cam_ick, CK_34XX | CK_36XX),
3367 CLK(NULL, "csi2_96m_fck", &csi2_96m_fck, CK_34XX | CK_36XX), 3369 CLK(NULL, "csi2_96m_fck", &csi2_96m_fck, CK_34XX | CK_36XX),
3368 CLK(NULL, "usbhost_120m_fck", &usbhost_120m_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), 3370 CLK(NULL, "usbhost_120m_fck", &usbhost_120m_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
3371 CLK("ehci-omap.0", "hs_fck", &usbhost_120m_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
3369 CLK(NULL, "usbhost_48m_fck", &usbhost_48m_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), 3372 CLK(NULL, "usbhost_48m_fck", &usbhost_48m_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
3373 CLK("ehci-omap.0", "fs_fck", &usbhost_48m_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
3370 CLK(NULL, "usbhost_ick", &usbhost_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), 3374 CLK(NULL, "usbhost_ick", &usbhost_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
3375 CLK("ehci-omap.0", "usbhost_ick", &usbhost_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
3371 CLK(NULL, "usim_fck", &usim_fck, CK_3430ES2PLUS | CK_36XX), 3376 CLK(NULL, "usim_fck", &usim_fck, CK_3430ES2PLUS | CK_36XX),
3372 CLK(NULL, "gpt1_fck", &gpt1_fck, CK_3XXX), 3377 CLK(NULL, "gpt1_fck", &gpt1_fck, CK_3XXX),
3373 CLK(NULL, "wkup_32k_fck", &wkup_32k_fck, CK_3XXX), 3378 CLK(NULL, "wkup_32k_fck", &wkup_32k_fck, CK_3XXX),
@@ -3445,8 +3450,8 @@ static struct omap_clk omap3xxx_clks[] = {
3445 CLK("davinci_emac", "phy_clk", &emac_fck, CK_AM35XX), 3450 CLK("davinci_emac", "phy_clk", &emac_fck, CK_AM35XX),
3446 CLK("vpfe-capture", "master", &vpfe_ick, CK_AM35XX), 3451 CLK("vpfe-capture", "master", &vpfe_ick, CK_AM35XX),
3447 CLK("vpfe-capture", "slave", &vpfe_fck, CK_AM35XX), 3452 CLK("vpfe-capture", "slave", &vpfe_fck, CK_AM35XX),
3448 CLK("musb_hdrc", "ick", &hsotgusb_ick_am35xx, CK_AM35XX), 3453 CLK("musb-am35x", "ick", &hsotgusb_ick_am35xx, CK_AM35XX),
3449 CLK("musb_hdrc", "fck", &hsotgusb_fck_am35xx, CK_AM35XX), 3454 CLK("musb-am35x", "fck", &hsotgusb_fck_am35xx, CK_AM35XX),
3450 CLK(NULL, "hecc_ck", &hecc_ck, CK_AM35XX), 3455 CLK(NULL, "hecc_ck", &hecc_ck, CK_AM35XX),
3451 CLK(NULL, "uart4_ick", &uart4_ick_am35xx, CK_AM35XX), 3456 CLK(NULL, "uart4_ick", &uart4_ick_am35xx, CK_AM35XX),
3452}; 3457};
diff --git a/arch/arm/mach-omap2/clock44xx_data.c b/arch/arm/mach-omap2/clock44xx_data.c
index c426adccad06..e8cb32fd7f13 100644
--- a/arch/arm/mach-omap2/clock44xx_data.c
+++ b/arch/arm/mach-omap2/clock44xx_data.c
@@ -3198,6 +3198,7 @@ static struct omap_clk omap44xx_clks[] = {
3198 CLK(NULL, "uart3_fck", &uart3_fck, CK_443X), 3198 CLK(NULL, "uart3_fck", &uart3_fck, CK_443X),
3199 CLK(NULL, "uart4_fck", &uart4_fck, CK_443X), 3199 CLK(NULL, "uart4_fck", &uart4_fck, CK_443X),
3200 CLK(NULL, "usb_host_fs_fck", &usb_host_fs_fck, CK_443X), 3200 CLK(NULL, "usb_host_fs_fck", &usb_host_fs_fck, CK_443X),
3201 CLK("ehci-omap.0", "fs_fck", &usb_host_fs_fck, CK_443X),
3201 CLK(NULL, "utmi_p1_gfclk", &utmi_p1_gfclk, CK_443X), 3202 CLK(NULL, "utmi_p1_gfclk", &utmi_p1_gfclk, CK_443X),
3202 CLK(NULL, "usb_host_hs_utmi_p1_clk", &usb_host_hs_utmi_p1_clk, CK_443X), 3203 CLK(NULL, "usb_host_hs_utmi_p1_clk", &usb_host_hs_utmi_p1_clk, CK_443X),
3203 CLK(NULL, "utmi_p2_gfclk", &utmi_p2_gfclk, CK_443X), 3204 CLK(NULL, "utmi_p2_gfclk", &utmi_p2_gfclk, CK_443X),
@@ -3209,14 +3210,18 @@ static struct omap_clk omap44xx_clks[] = {
3209 CLK(NULL, "usb_host_hs_hsic480m_p2_clk", &usb_host_hs_hsic480m_p2_clk, CK_443X), 3210 CLK(NULL, "usb_host_hs_hsic480m_p2_clk", &usb_host_hs_hsic480m_p2_clk, CK_443X),
3210 CLK(NULL, "usb_host_hs_func48mclk", &usb_host_hs_func48mclk, CK_443X), 3211 CLK(NULL, "usb_host_hs_func48mclk", &usb_host_hs_func48mclk, CK_443X),
3211 CLK(NULL, "usb_host_hs_fck", &usb_host_hs_fck, CK_443X), 3212 CLK(NULL, "usb_host_hs_fck", &usb_host_hs_fck, CK_443X),
3213 CLK("ehci-omap.0", "hs_fck", &usb_host_hs_fck, CK_443X),
3214 CLK("ehci-omap.0", "usbhost_ick", &dummy_ck, CK_443X),
3212 CLK(NULL, "otg_60m_gfclk", &otg_60m_gfclk, CK_443X), 3215 CLK(NULL, "otg_60m_gfclk", &otg_60m_gfclk, CK_443X),
3213 CLK(NULL, "usb_otg_hs_xclk", &usb_otg_hs_xclk, CK_443X), 3216 CLK(NULL, "usb_otg_hs_xclk", &usb_otg_hs_xclk, CK_443X),
3214 CLK("musb_hdrc", "ick", &usb_otg_hs_ick, CK_443X), 3217 CLK("musb-omap2430", "ick", &usb_otg_hs_ick, CK_443X),
3215 CLK(NULL, "usb_phy_cm_clk32k", &usb_phy_cm_clk32k, CK_443X), 3218 CLK(NULL, "usb_phy_cm_clk32k", &usb_phy_cm_clk32k, CK_443X),
3216 CLK(NULL, "usb_tll_hs_usb_ch2_clk", &usb_tll_hs_usb_ch2_clk, CK_443X), 3219 CLK(NULL, "usb_tll_hs_usb_ch2_clk", &usb_tll_hs_usb_ch2_clk, CK_443X),
3217 CLK(NULL, "usb_tll_hs_usb_ch0_clk", &usb_tll_hs_usb_ch0_clk, CK_443X), 3220 CLK(NULL, "usb_tll_hs_usb_ch0_clk", &usb_tll_hs_usb_ch0_clk, CK_443X),
3218 CLK(NULL, "usb_tll_hs_usb_ch1_clk", &usb_tll_hs_usb_ch1_clk, CK_443X), 3221 CLK(NULL, "usb_tll_hs_usb_ch1_clk", &usb_tll_hs_usb_ch1_clk, CK_443X),
3219 CLK(NULL, "usb_tll_hs_ick", &usb_tll_hs_ick, CK_443X), 3222 CLK(NULL, "usb_tll_hs_ick", &usb_tll_hs_ick, CK_443X),
3223 CLK("ehci-omap.0", "usbtll_ick", &usb_tll_hs_ick, CK_443X),
3224 CLK("ehci-omap.0", "usbtll_fck", &dummy_ck, CK_443X),
3220 CLK(NULL, "usim_ck", &usim_ck, CK_443X), 3225 CLK(NULL, "usim_ck", &usim_ck, CK_443X),
3221 CLK(NULL, "usim_fclk", &usim_fclk, CK_443X), 3226 CLK(NULL, "usim_fclk", &usim_fclk, CK_443X),
3222 CLK(NULL, "usim_fck", &usim_fck, CK_443X), 3227 CLK(NULL, "usim_fck", &usim_fck, CK_443X),
diff --git a/arch/arm/mach-omap2/omap_phy_internal.c b/arch/arm/mach-omap2/omap_phy_internal.c
new file mode 100644
index 000000000000..745252c60e32
--- /dev/null
+++ b/arch/arm/mach-omap2/omap_phy_internal.c
@@ -0,0 +1,149 @@
1/*
2 * This file configures the internal USB PHY in OMAP4430. Used
3 * with TWL6030 transceiver and MUSB on OMAP4430.
4 *
5 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * Author: Hema HK <hemahk@ti.com>
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 *
22 */
23
24#include <linux/types.h>
25#include <linux/delay.h>
26#include <linux/clk.h>
27#include <linux/io.h>
28#include <linux/err.h>
29#include <linux/usb.h>
30
31#include <plat/usb.h>
32
33/* OMAP control module register for UTMI PHY */
34#define CONTROL_DEV_CONF 0x300
35#define PHY_PD 0x1
36
37#define USBOTGHS_CONTROL 0x33c
38#define AVALID BIT(0)
39#define BVALID BIT(1)
40#define VBUSVALID BIT(2)
41#define SESSEND BIT(3)
42#define IDDIG BIT(4)
43
44static struct clk *phyclk, *clk48m, *clk32k;
45static void __iomem *ctrl_base;
46
47int omap4430_phy_init(struct device *dev)
48{
49 ctrl_base = ioremap(OMAP443X_SCM_BASE, SZ_1K);
50 if (!ctrl_base) {
51 dev_err(dev, "control module ioremap failed\n");
52 return -ENOMEM;
53 }
54 /* Power down the phy */
55 __raw_writel(PHY_PD, ctrl_base + CONTROL_DEV_CONF);
56 phyclk = clk_get(dev, "ocp2scp_usb_phy_ick");
57
58 if (IS_ERR(phyclk)) {
59 dev_err(dev, "cannot clk_get ocp2scp_usb_phy_ick\n");
60 iounmap(ctrl_base);
61 return PTR_ERR(phyclk);
62 }
63
64 clk48m = clk_get(dev, "ocp2scp_usb_phy_phy_48m");
65 if (IS_ERR(clk48m)) {
66 dev_err(dev, "cannot clk_get ocp2scp_usb_phy_phy_48m\n");
67 clk_put(phyclk);
68 iounmap(ctrl_base);
69 return PTR_ERR(clk48m);
70 }
71
72 clk32k = clk_get(dev, "usb_phy_cm_clk32k");
73 if (IS_ERR(clk32k)) {
74 dev_err(dev, "cannot clk_get usb_phy_cm_clk32k\n");
75 clk_put(phyclk);
76 clk_put(clk48m);
77 iounmap(ctrl_base);
78 return PTR_ERR(clk32k);
79 }
80 return 0;
81}
82
83int omap4430_phy_set_clk(struct device *dev, int on)
84{
85 static int state;
86
87 if (on && !state) {
88 /* Enable the phy clocks */
89 clk_enable(phyclk);
90 clk_enable(clk48m);
91 clk_enable(clk32k);
92 state = 1;
93 } else if (state) {
94 /* Disable the phy clocks */
95 clk_disable(phyclk);
96 clk_disable(clk48m);
97 clk_disable(clk32k);
98 state = 0;
99 }
100 return 0;
101}
102
103int omap4430_phy_power(struct device *dev, int ID, int on)
104{
105 if (on) {
106 /* enabled the clocks */
107 omap4430_phy_set_clk(dev, 1);
108 /* power on the phy */
109 if (__raw_readl(ctrl_base + CONTROL_DEV_CONF) & PHY_PD) {
110 __raw_writel(~PHY_PD, ctrl_base + CONTROL_DEV_CONF);
111 mdelay(200);
112 }
113 if (ID)
114 /* enable VBUS valid, IDDIG groung */
115 __raw_writel(AVALID | VBUSVALID, ctrl_base +
116 USBOTGHS_CONTROL);
117 else
118 /*
119 * Enable VBUS Valid, AValid and IDDIG
120 * high impedence
121 */
122 __raw_writel(IDDIG | AVALID | VBUSVALID,
123 ctrl_base + USBOTGHS_CONTROL);
124 } else {
125 /* Enable session END and IDIG to high impedence. */
126 __raw_writel(SESSEND | IDDIG, ctrl_base +
127 USBOTGHS_CONTROL);
128 /* Disable the clocks */
129 omap4430_phy_set_clk(dev, 0);
130 /* Power down the phy */
131 __raw_writel(PHY_PD, ctrl_base + CONTROL_DEV_CONF);
132 }
133
134 return 0;
135}
136
137int omap4430_phy_exit(struct device *dev)
138{
139 if (ctrl_base)
140 iounmap(ctrl_base);
141 if (phyclk)
142 clk_put(phyclk);
143 if (clk48m)
144 clk_put(clk48m);
145 if (clk32k)
146 clk_put(clk32k);
147
148 return 0;
149}
diff --git a/arch/arm/mach-omap2/usb-ehci.c b/arch/arm/mach-omap2/usb-ehci.c
index b11bf385d360..25eeadabc39b 100644
--- a/arch/arm/mach-omap2/usb-ehci.c
+++ b/arch/arm/mach-omap2/usb-ehci.c
@@ -34,22 +34,15 @@
34 34
35static struct resource ehci_resources[] = { 35static struct resource ehci_resources[] = {
36 { 36 {
37 .start = OMAP34XX_EHCI_BASE,
38 .end = OMAP34XX_EHCI_BASE + SZ_1K - 1,
39 .flags = IORESOURCE_MEM, 37 .flags = IORESOURCE_MEM,
40 }, 38 },
41 { 39 {
42 .start = OMAP34XX_UHH_CONFIG_BASE,
43 .end = OMAP34XX_UHH_CONFIG_BASE + SZ_1K - 1,
44 .flags = IORESOURCE_MEM, 40 .flags = IORESOURCE_MEM,
45 }, 41 },
46 { 42 {
47 .start = OMAP34XX_USBTLL_BASE,
48 .end = OMAP34XX_USBTLL_BASE + SZ_4K - 1,
49 .flags = IORESOURCE_MEM, 43 .flags = IORESOURCE_MEM,
50 }, 44 },
51 { /* general IRQ */ 45 { /* general IRQ */
52 .start = INT_34XX_EHCI_IRQ,
53 .flags = IORESOURCE_IRQ, 46 .flags = IORESOURCE_IRQ,
54 } 47 }
55}; 48};
@@ -214,13 +207,148 @@ static void setup_ehci_io_mux(const enum ehci_hcd_omap_mode *port_mode)
214 return; 207 return;
215} 208}
216 209
210static void setup_4430ehci_io_mux(const enum ehci_hcd_omap_mode *port_mode)
211{
212 switch (port_mode[0]) {
213 case EHCI_HCD_OMAP_MODE_PHY:
214 omap_mux_init_signal("usbb1_ulpiphy_stp",
215 OMAP_PIN_OUTPUT);
216 omap_mux_init_signal("usbb1_ulpiphy_clk",
217 OMAP_PIN_INPUT_PULLDOWN);
218 omap_mux_init_signal("usbb1_ulpiphy_dir",
219 OMAP_PIN_INPUT_PULLDOWN);
220 omap_mux_init_signal("usbb1_ulpiphy_nxt",
221 OMAP_PIN_INPUT_PULLDOWN);
222 omap_mux_init_signal("usbb1_ulpiphy_dat0",
223 OMAP_PIN_INPUT_PULLDOWN);
224 omap_mux_init_signal("usbb1_ulpiphy_dat1",
225 OMAP_PIN_INPUT_PULLDOWN);
226 omap_mux_init_signal("usbb1_ulpiphy_dat2",
227 OMAP_PIN_INPUT_PULLDOWN);
228 omap_mux_init_signal("usbb1_ulpiphy_dat3",
229 OMAP_PIN_INPUT_PULLDOWN);
230 omap_mux_init_signal("usbb1_ulpiphy_dat4",
231 OMAP_PIN_INPUT_PULLDOWN);
232 omap_mux_init_signal("usbb1_ulpiphy_dat5",
233 OMAP_PIN_INPUT_PULLDOWN);
234 omap_mux_init_signal("usbb1_ulpiphy_dat6",
235 OMAP_PIN_INPUT_PULLDOWN);
236 omap_mux_init_signal("usbb1_ulpiphy_dat7",
237 OMAP_PIN_INPUT_PULLDOWN);
238 break;
239 case EHCI_HCD_OMAP_MODE_TLL:
240 omap_mux_init_signal("usbb1_ulpitll_stp",
241 OMAP_PIN_INPUT_PULLUP);
242 omap_mux_init_signal("usbb1_ulpitll_clk",
243 OMAP_PIN_INPUT_PULLDOWN);
244 omap_mux_init_signal("usbb1_ulpitll_dir",
245 OMAP_PIN_INPUT_PULLDOWN);
246 omap_mux_init_signal("usbb1_ulpitll_nxt",
247 OMAP_PIN_INPUT_PULLDOWN);
248 omap_mux_init_signal("usbb1_ulpitll_dat0",
249 OMAP_PIN_INPUT_PULLDOWN);
250 omap_mux_init_signal("usbb1_ulpitll_dat1",
251 OMAP_PIN_INPUT_PULLDOWN);
252 omap_mux_init_signal("usbb1_ulpitll_dat2",
253 OMAP_PIN_INPUT_PULLDOWN);
254 omap_mux_init_signal("usbb1_ulpitll_dat3",
255 OMAP_PIN_INPUT_PULLDOWN);
256 omap_mux_init_signal("usbb1_ulpitll_dat4",
257 OMAP_PIN_INPUT_PULLDOWN);
258 omap_mux_init_signal("usbb1_ulpitll_dat5",
259 OMAP_PIN_INPUT_PULLDOWN);
260 omap_mux_init_signal("usbb1_ulpitll_dat6",
261 OMAP_PIN_INPUT_PULLDOWN);
262 omap_mux_init_signal("usbb1_ulpitll_dat7",
263 OMAP_PIN_INPUT_PULLDOWN);
264 break;
265 case EHCI_HCD_OMAP_MODE_UNKNOWN:
266 default:
267 break;
268 }
269 switch (port_mode[1]) {
270 case EHCI_HCD_OMAP_MODE_PHY:
271 omap_mux_init_signal("usbb2_ulpiphy_stp",
272 OMAP_PIN_OUTPUT);
273 omap_mux_init_signal("usbb2_ulpiphy_clk",
274 OMAP_PIN_INPUT_PULLDOWN);
275 omap_mux_init_signal("usbb2_ulpiphy_dir",
276 OMAP_PIN_INPUT_PULLDOWN);
277 omap_mux_init_signal("usbb2_ulpiphy_nxt",
278 OMAP_PIN_INPUT_PULLDOWN);
279 omap_mux_init_signal("usbb2_ulpiphy_dat0",
280 OMAP_PIN_INPUT_PULLDOWN);
281 omap_mux_init_signal("usbb2_ulpiphy_dat1",
282 OMAP_PIN_INPUT_PULLDOWN);
283 omap_mux_init_signal("usbb2_ulpiphy_dat2",
284 OMAP_PIN_INPUT_PULLDOWN);
285 omap_mux_init_signal("usbb2_ulpiphy_dat3",
286 OMAP_PIN_INPUT_PULLDOWN);
287 omap_mux_init_signal("usbb2_ulpiphy_dat4",
288 OMAP_PIN_INPUT_PULLDOWN);
289 omap_mux_init_signal("usbb2_ulpiphy_dat5",
290 OMAP_PIN_INPUT_PULLDOWN);
291 omap_mux_init_signal("usbb2_ulpiphy_dat6",
292 OMAP_PIN_INPUT_PULLDOWN);
293 omap_mux_init_signal("usbb2_ulpiphy_dat7",
294 OMAP_PIN_INPUT_PULLDOWN);
295 break;
296 case EHCI_HCD_OMAP_MODE_TLL:
297 omap_mux_init_signal("usbb2_ulpitll_stp",
298 OMAP_PIN_INPUT_PULLUP);
299 omap_mux_init_signal("usbb2_ulpitll_clk",
300 OMAP_PIN_INPUT_PULLDOWN);
301 omap_mux_init_signal("usbb2_ulpitll_dir",
302 OMAP_PIN_INPUT_PULLDOWN);
303 omap_mux_init_signal("usbb2_ulpitll_nxt",
304 OMAP_PIN_INPUT_PULLDOWN);
305 omap_mux_init_signal("usbb2_ulpitll_dat0",
306 OMAP_PIN_INPUT_PULLDOWN);
307 omap_mux_init_signal("usbb2_ulpitll_dat1",
308 OMAP_PIN_INPUT_PULLDOWN);
309 omap_mux_init_signal("usbb2_ulpitll_dat2",
310 OMAP_PIN_INPUT_PULLDOWN);
311 omap_mux_init_signal("usbb2_ulpitll_dat3",
312 OMAP_PIN_INPUT_PULLDOWN);
313 omap_mux_init_signal("usbb2_ulpitll_dat4",
314 OMAP_PIN_INPUT_PULLDOWN);
315 omap_mux_init_signal("usbb2_ulpitll_dat5",
316 OMAP_PIN_INPUT_PULLDOWN);
317 omap_mux_init_signal("usbb2_ulpitll_dat6",
318 OMAP_PIN_INPUT_PULLDOWN);
319 omap_mux_init_signal("usbb2_ulpitll_dat7",
320 OMAP_PIN_INPUT_PULLDOWN);
321 break;
322 case EHCI_HCD_OMAP_MODE_UNKNOWN:
323 default:
324 break;
325 }
326}
327
217void __init usb_ehci_init(const struct ehci_hcd_omap_platform_data *pdata) 328void __init usb_ehci_init(const struct ehci_hcd_omap_platform_data *pdata)
218{ 329{
219 platform_device_add_data(&ehci_device, pdata, sizeof(*pdata)); 330 platform_device_add_data(&ehci_device, pdata, sizeof(*pdata));
220 331
221 /* Setup Pin IO MUX for EHCI */ 332 /* Setup Pin IO MUX for EHCI */
222 if (cpu_is_omap34xx()) 333 if (cpu_is_omap34xx()) {
334 ehci_resources[0].start = OMAP34XX_EHCI_BASE;
335 ehci_resources[0].end = OMAP34XX_EHCI_BASE + SZ_1K - 1;
336 ehci_resources[1].start = OMAP34XX_UHH_CONFIG_BASE;
337 ehci_resources[1].end = OMAP34XX_UHH_CONFIG_BASE + SZ_1K - 1;
338 ehci_resources[2].start = OMAP34XX_USBTLL_BASE;
339 ehci_resources[2].end = OMAP34XX_USBTLL_BASE + SZ_4K - 1;
340 ehci_resources[3].start = INT_34XX_EHCI_IRQ;
223 setup_ehci_io_mux(pdata->port_mode); 341 setup_ehci_io_mux(pdata->port_mode);
342 } else if (cpu_is_omap44xx()) {
343 ehci_resources[0].start = OMAP44XX_HSUSB_EHCI_BASE;
344 ehci_resources[0].end = OMAP44XX_HSUSB_EHCI_BASE + SZ_1K - 1;
345 ehci_resources[1].start = OMAP44XX_UHH_CONFIG_BASE;
346 ehci_resources[1].end = OMAP44XX_UHH_CONFIG_BASE + SZ_2K - 1;
347 ehci_resources[2].start = OMAP44XX_USBTLL_BASE;
348 ehci_resources[2].end = OMAP44XX_USBTLL_BASE + SZ_4K - 1;
349 ehci_resources[3].start = OMAP44XX_IRQ_EHCI;
350 setup_4430ehci_io_mux(pdata->port_mode);
351 }
224 352
225 if (platform_device_register(&ehci_device) < 0) { 353 if (platform_device_register(&ehci_device) < 0) {
226 printk(KERN_ERR "Unable to register HS-USB (EHCI) device\n"); 354 printk(KERN_ERR "Unable to register HS-USB (EHCI) device\n");
diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c
index 72605584bfff..5298949d4b11 100644
--- a/arch/arm/mach-omap2/usb-musb.c
+++ b/arch/arm/mach-omap2/usb-musb.c
@@ -30,8 +30,101 @@
30#include <mach/irqs.h> 30#include <mach/irqs.h>
31#include <mach/am35xx.h> 31#include <mach/am35xx.h>
32#include <plat/usb.h> 32#include <plat/usb.h>
33#include "control.h"
33 34
34#ifdef CONFIG_USB_MUSB_SOC 35#if defined(CONFIG_USB_MUSB_OMAP2PLUS) || defined (CONFIG_USB_MUSB_AM35X)
36
37static void am35x_musb_reset(void)
38{
39 u32 regval;
40
41 /* Reset the musb interface */
42 regval = omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET);
43
44 regval |= AM35XX_USBOTGSS_SW_RST;
45 omap_ctrl_writel(regval, AM35XX_CONTROL_IP_SW_RESET);
46
47 regval &= ~AM35XX_USBOTGSS_SW_RST;
48 omap_ctrl_writel(regval, AM35XX_CONTROL_IP_SW_RESET);
49
50 regval = omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET);
51}
52
53static void am35x_musb_phy_power(u8 on)
54{
55 unsigned long timeout = jiffies + msecs_to_jiffies(100);
56 u32 devconf2;
57
58 if (on) {
59 /*
60 * Start the on-chip PHY and its PLL.
61 */
62 devconf2 = omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2);
63
64 devconf2 &= ~(CONF2_RESET | CONF2_PHYPWRDN | CONF2_OTGPWRDN);
65 devconf2 |= CONF2_PHY_PLLON;
66
67 omap_ctrl_writel(devconf2, AM35XX_CONTROL_DEVCONF2);
68
69 pr_info(KERN_INFO "Waiting for PHY clock good...\n");
70 while (!(omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2)
71 & CONF2_PHYCLKGD)) {
72 cpu_relax();
73
74 if (time_after(jiffies, timeout)) {
75 pr_err(KERN_ERR "musb PHY clock good timed out\n");
76 break;
77 }
78 }
79 } else {
80 /*
81 * Power down the on-chip PHY.
82 */
83 devconf2 = omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2);
84
85 devconf2 &= ~CONF2_PHY_PLLON;
86 devconf2 |= CONF2_PHYPWRDN | CONF2_OTGPWRDN;
87 omap_ctrl_writel(devconf2, AM35XX_CONTROL_DEVCONF2);
88 }
89}
90
91static void am35x_musb_clear_irq(void)
92{
93 u32 regval;
94
95 regval = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR);
96 regval |= AM35XX_USBOTGSS_INT_CLR;
97 omap_ctrl_writel(regval, AM35XX_CONTROL_LVL_INTR_CLEAR);
98 regval = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR);
99}
100
101static void am35x_musb_set_mode(u8 musb_mode)
102{
103 u32 devconf2 = omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2);
104
105 devconf2 &= ~CONF2_OTGMODE;
106 switch (musb_mode) {
107#ifdef CONFIG_USB_MUSB_HDRC_HCD
108 case MUSB_HOST: /* Force VBUS valid, ID = 0 */
109 devconf2 |= CONF2_FORCE_HOST;
110 break;
111#endif
112#ifdef CONFIG_USB_GADGET_MUSB_HDRC
113 case MUSB_PERIPHERAL: /* Force VBUS valid, ID = 1 */
114 devconf2 |= CONF2_FORCE_DEVICE;
115 break;
116#endif
117#ifdef CONFIG_USB_MUSB_OTG
118 case MUSB_OTG: /* Don't override the VBUS/ID comparators */
119 devconf2 |= CONF2_NO_OVERRIDE;
120 break;
121#endif
122 default:
123 pr_info(KERN_INFO "Unsupported mode %u\n", musb_mode);
124 }
125
126 omap_ctrl_writel(devconf2, AM35XX_CONTROL_DEVCONF2);
127}
35 128
36static struct resource musb_resources[] = { 129static struct resource musb_resources[] = {
37 [0] = { /* start and end set dynamically */ 130 [0] = { /* start and end set dynamically */
@@ -40,10 +133,12 @@ static struct resource musb_resources[] = {
40 [1] = { /* general IRQ */ 133 [1] = { /* general IRQ */
41 .start = INT_243X_HS_USB_MC, 134 .start = INT_243X_HS_USB_MC,
42 .flags = IORESOURCE_IRQ, 135 .flags = IORESOURCE_IRQ,
136 .name = "mc",
43 }, 137 },
44 [2] = { /* DMA IRQ */ 138 [2] = { /* DMA IRQ */
45 .start = INT_243X_HS_USB_DMA, 139 .start = INT_243X_HS_USB_DMA,
46 .flags = IORESOURCE_IRQ, 140 .flags = IORESOURCE_IRQ,
141 .name = "dma",
47 }, 142 },
48}; 143};
49 144
@@ -75,7 +170,7 @@ static struct musb_hdrc_platform_data musb_plat = {
75static u64 musb_dmamask = DMA_BIT_MASK(32); 170static u64 musb_dmamask = DMA_BIT_MASK(32);
76 171
77static struct platform_device musb_device = { 172static struct platform_device musb_device = {
78 .name = "musb_hdrc", 173 .name = "musb-omap2430",
79 .id = -1, 174 .id = -1,
80 .dev = { 175 .dev = {
81 .dma_mask = &musb_dmamask, 176 .dma_mask = &musb_dmamask,
@@ -91,8 +186,13 @@ void __init usb_musb_init(struct omap_musb_board_data *board_data)
91 if (cpu_is_omap243x()) { 186 if (cpu_is_omap243x()) {
92 musb_resources[0].start = OMAP243X_HS_BASE; 187 musb_resources[0].start = OMAP243X_HS_BASE;
93 } else if (cpu_is_omap3517() || cpu_is_omap3505()) { 188 } else if (cpu_is_omap3517() || cpu_is_omap3505()) {
189 musb_device.name = "musb-am35x";
94 musb_resources[0].start = AM35XX_IPSS_USBOTGSS_BASE; 190 musb_resources[0].start = AM35XX_IPSS_USBOTGSS_BASE;
95 musb_resources[1].start = INT_35XX_USBOTG_IRQ; 191 musb_resources[1].start = INT_35XX_USBOTG_IRQ;
192 board_data->set_phy_power = am35x_musb_phy_power;
193 board_data->clear_irq = am35x_musb_clear_irq;
194 board_data->set_mode = am35x_musb_set_mode;
195 board_data->reset = am35x_musb_reset;
96 } else if (cpu_is_omap34xx()) { 196 } else if (cpu_is_omap34xx()) {
97 musb_resources[0].start = OMAP34XX_HSUSB_OTG_BASE; 197 musb_resources[0].start = OMAP34XX_HSUSB_OTG_BASE;
98 } else if (cpu_is_omap44xx()) { 198 } else if (cpu_is_omap44xx()) {
diff --git a/arch/arm/mach-omap2/usb-tusb6010.c b/arch/arm/mach-omap2/usb-tusb6010.c
index 30f112bd3e4d..8a3c05f3c1d6 100644
--- a/arch/arm/mach-omap2/usb-tusb6010.c
+++ b/arch/arm/mach-omap2/usb-tusb6010.c
@@ -224,7 +224,7 @@ static struct resource tusb_resources[] = {
224static u64 tusb_dmamask = ~(u32)0; 224static u64 tusb_dmamask = ~(u32)0;
225 225
226static struct platform_device tusb_device = { 226static struct platform_device tusb_device = {
227 .name = "musb_hdrc", 227 .name = "musb-tusb",
228 .id = -1, 228 .id = -1,
229 .dev = { 229 .dev = {
230 .dma_mask = &tusb_dmamask, 230 .dma_mask = &tusb_dmamask,
diff --git a/arch/arm/plat-omap/include/plat/omap44xx.h b/arch/arm/plat-omap/include/plat/omap44xx.h
index 8b3f12ff5cbc..ea2b8a6306e7 100644
--- a/arch/arm/plat-omap/include/plat/omap44xx.h
+++ b/arch/arm/plat-omap/include/plat/omap44xx.h
@@ -52,5 +52,10 @@
52#define OMAP4_MMU1_BASE 0x55082000 52#define OMAP4_MMU1_BASE 0x55082000
53#define OMAP4_MMU2_BASE 0x4A066000 53#define OMAP4_MMU2_BASE 0x4A066000
54 54
55#define OMAP44XX_USBTLL_BASE (L4_44XX_BASE + 0x62000)
56#define OMAP44XX_UHH_CONFIG_BASE (L4_44XX_BASE + 0x64000)
57#define OMAP44XX_HSUSB_OHCI_BASE (L4_44XX_BASE + 0x64800)
58#define OMAP44XX_HSUSB_EHCI_BASE (L4_44XX_BASE + 0x64C00)
59
55#endif /* __ASM_ARCH_OMAP44XX_H */ 60#endif /* __ASM_ARCH_OMAP44XX_H */
56 61
diff --git a/arch/arm/plat-omap/include/plat/usb.h b/arch/arm/plat-omap/include/plat/usb.h
index 59c7fe731f28..450a332f1009 100644
--- a/arch/arm/plat-omap/include/plat/usb.h
+++ b/arch/arm/plat-omap/include/plat/usb.h
@@ -11,6 +11,7 @@ enum ehci_hcd_omap_mode {
11 EHCI_HCD_OMAP_MODE_UNKNOWN, 11 EHCI_HCD_OMAP_MODE_UNKNOWN,
12 EHCI_HCD_OMAP_MODE_PHY, 12 EHCI_HCD_OMAP_MODE_PHY,
13 EHCI_HCD_OMAP_MODE_TLL, 13 EHCI_HCD_OMAP_MODE_TLL,
14 EHCI_HCD_OMAP_MODE_HSIC,
14}; 15};
15 16
16enum ohci_omap3_port_mode { 17enum ohci_omap3_port_mode {
@@ -69,6 +70,10 @@ struct omap_musb_board_data {
69 u8 mode; 70 u8 mode;
70 u16 power; 71 u16 power;
71 unsigned extvbus:1; 72 unsigned extvbus:1;
73 void (*set_phy_power)(u8 on);
74 void (*clear_irq)(void);
75 void (*set_mode)(u8 mode);
76 void (*reset)(void);
72}; 77};
73 78
74enum musb_interface {MUSB_INTERFACE_ULPI, MUSB_INTERFACE_UTMI}; 79enum musb_interface {MUSB_INTERFACE_ULPI, MUSB_INTERFACE_UTMI};
@@ -79,6 +84,11 @@ extern void usb_ehci_init(const struct ehci_hcd_omap_platform_data *pdata);
79 84
80extern void usb_ohci_init(const struct ohci_hcd_omap_platform_data *pdata); 85extern void usb_ohci_init(const struct ohci_hcd_omap_platform_data *pdata);
81 86
87extern int omap4430_phy_power(struct device *dev, int ID, int on);
88extern int omap4430_phy_set_clk(struct device *dev, int on);
89extern int omap4430_phy_init(struct device *dev);
90extern int omap4430_phy_exit(struct device *dev);
91
82#endif 92#endif
83 93
84 94
diff --git a/arch/blackfin/mach-bf527/boards/ad7160eval.c b/arch/blackfin/mach-bf527/boards/ad7160eval.c
index fc767ac76381..52295fff5577 100644
--- a/arch/blackfin/mach-bf527/boards/ad7160eval.c
+++ b/arch/blackfin/mach-bf527/boards/ad7160eval.c
@@ -83,7 +83,7 @@ static struct musb_hdrc_platform_data musb_plat = {
83static u64 musb_dmamask = ~(u32)0; 83static u64 musb_dmamask = ~(u32)0;
84 84
85static struct platform_device musb_device = { 85static struct platform_device musb_device = {
86 .name = "musb_hdrc", 86 .name = "musb-blackfin",
87 .id = 0, 87 .id = 0,
88 .dev = { 88 .dev = {
89 .dma_mask = &musb_dmamask, 89 .dma_mask = &musb_dmamask,
diff --git a/arch/blackfin/mach-bf527/boards/cm_bf527.c b/arch/blackfin/mach-bf527/boards/cm_bf527.c
index 2c31af7a320a..50533edc3994 100644
--- a/arch/blackfin/mach-bf527/boards/cm_bf527.c
+++ b/arch/blackfin/mach-bf527/boards/cm_bf527.c
@@ -82,11 +82,13 @@ static struct resource musb_resources[] = {
82 .start = IRQ_USB_INT0, 82 .start = IRQ_USB_INT0,
83 .end = IRQ_USB_INT0, 83 .end = IRQ_USB_INT0,
84 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, 84 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
85 .name = "mc"
85 }, 86 },
86 [2] = { /* DMA IRQ */ 87 [2] = { /* DMA IRQ */
87 .start = IRQ_USB_DMA, 88 .start = IRQ_USB_DMA,
88 .end = IRQ_USB_DMA, 89 .end = IRQ_USB_DMA,
89 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, 90 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
91 .name = "dma"
90 }, 92 },
91}; 93};
92 94
@@ -118,7 +120,7 @@ static struct musb_hdrc_platform_data musb_plat = {
118static u64 musb_dmamask = ~(u32)0; 120static u64 musb_dmamask = ~(u32)0;
119 121
120static struct platform_device musb_device = { 122static struct platform_device musb_device = {
121 .name = "musb_hdrc", 123 .name = "musb-blackfin",
122 .id = 0, 124 .id = 0,
123 .dev = { 125 .dev = {
124 .dma_mask = &musb_dmamask, 126 .dma_mask = &musb_dmamask,
diff --git a/arch/blackfin/mach-bf527/boards/ezbrd.c b/arch/blackfin/mach-bf527/boards/ezbrd.c
index 9a736a850c5c..d06177b5fe22 100644
--- a/arch/blackfin/mach-bf527/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf527/boards/ezbrd.c
@@ -46,11 +46,13 @@ static struct resource musb_resources[] = {
46 .start = IRQ_USB_INT0, 46 .start = IRQ_USB_INT0,
47 .end = IRQ_USB_INT0, 47 .end = IRQ_USB_INT0,
48 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, 48 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
49 .name = "mc"
49 }, 50 },
50 [2] = { /* DMA IRQ */ 51 [2] = { /* DMA IRQ */
51 .start = IRQ_USB_DMA, 52 .start = IRQ_USB_DMA,
52 .end = IRQ_USB_DMA, 53 .end = IRQ_USB_DMA,
53 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, 54 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
55 .name = "dma"
54 }, 56 },
55}; 57};
56 58
@@ -82,7 +84,7 @@ static struct musb_hdrc_platform_data musb_plat = {
82static u64 musb_dmamask = ~(u32)0; 84static u64 musb_dmamask = ~(u32)0;
83 85
84static struct platform_device musb_device = { 86static struct platform_device musb_device = {
85 .name = "musb_hdrc", 87 .name = "musb-blackfin",
86 .id = 0, 88 .id = 0,
87 .dev = { 89 .dev = {
88 .dma_mask = &musb_dmamask, 90 .dma_mask = &musb_dmamask,
diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c
index 9222bc00bbd3..35a88a5a5013 100644
--- a/arch/blackfin/mach-bf527/boards/ezkit.c
+++ b/arch/blackfin/mach-bf527/boards/ezkit.c
@@ -86,11 +86,13 @@ static struct resource musb_resources[] = {
86 .start = IRQ_USB_INT0, 86 .start = IRQ_USB_INT0,
87 .end = IRQ_USB_INT0, 87 .end = IRQ_USB_INT0,
88 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, 88 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
89 .name = "mc"
89 }, 90 },
90 [2] = { /* DMA IRQ */ 91 [2] = { /* DMA IRQ */
91 .start = IRQ_USB_DMA, 92 .start = IRQ_USB_DMA,
92 .end = IRQ_USB_DMA, 93 .end = IRQ_USB_DMA,
93 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, 94 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
95 .name = "dma"
94 }, 96 },
95}; 97};
96 98
@@ -122,7 +124,7 @@ static struct musb_hdrc_platform_data musb_plat = {
122static u64 musb_dmamask = ~(u32)0; 124static u64 musb_dmamask = ~(u32)0;
123 125
124static struct platform_device musb_device = { 126static struct platform_device musb_device = {
125 .name = "musb_hdrc", 127 .name = "musb-blackfin",
126 .id = 0, 128 .id = 0,
127 .dev = { 129 .dev = {
128 .dma_mask = &musb_dmamask, 130 .dma_mask = &musb_dmamask,
diff --git a/arch/blackfin/mach-bf527/boards/tll6527m.c b/arch/blackfin/mach-bf527/boards/tll6527m.c
index 9ec575729e2c..130861bd2589 100644
--- a/arch/blackfin/mach-bf527/boards/tll6527m.c
+++ b/arch/blackfin/mach-bf527/boards/tll6527m.c
@@ -91,7 +91,7 @@ static struct musb_hdrc_platform_data musb_plat = {
91static u64 musb_dmamask = ~(u32)0; 91static u64 musb_dmamask = ~(u32)0;
92 92
93static struct platform_device musb_device = { 93static struct platform_device musb_device = {
94 .name = "musb_hdrc", 94 .name = "musb-blackfin",
95 .id = 0, 95 .id = 0,
96 .dev = { 96 .dev = {
97 .dma_mask = &musb_dmamask, 97 .dma_mask = &musb_dmamask,
diff --git a/arch/blackfin/mach-bf548/boards/cm_bf548.c b/arch/blackfin/mach-bf548/boards/cm_bf548.c
index f0c0eef95ba8..4c2ee6789099 100644
--- a/arch/blackfin/mach-bf548/boards/cm_bf548.c
+++ b/arch/blackfin/mach-bf548/boards/cm_bf548.c
@@ -482,11 +482,13 @@ static struct resource musb_resources[] = {
482 .start = IRQ_USB_INT0, 482 .start = IRQ_USB_INT0,
483 .end = IRQ_USB_INT0, 483 .end = IRQ_USB_INT0,
484 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, 484 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
485 .name = "mc"
485 }, 486 },
486 [2] = { /* DMA IRQ */ 487 [2] = { /* DMA IRQ */
487 .start = IRQ_USB_DMA, 488 .start = IRQ_USB_DMA,
488 .end = IRQ_USB_DMA, 489 .end = IRQ_USB_DMA,
489 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, 490 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
491 .name = "dma"
490 }, 492 },
491}; 493};
492 494
@@ -518,7 +520,7 @@ static struct musb_hdrc_platform_data musb_plat = {
518static u64 musb_dmamask = ~(u32)0; 520static u64 musb_dmamask = ~(u32)0;
519 521
520static struct platform_device musb_device = { 522static struct platform_device musb_device = {
521 .name = "musb_hdrc", 523 .name = "musb-blackfin",
522 .id = 0, 524 .id = 0,
523 .dev = { 525 .dev = {
524 .dma_mask = &musb_dmamask, 526 .dma_mask = &musb_dmamask,
diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c
index 216e26999af9..4f03fbc4c9be 100644
--- a/arch/blackfin/mach-bf548/boards/ezkit.c
+++ b/arch/blackfin/mach-bf548/boards/ezkit.c
@@ -587,11 +587,13 @@ static struct resource musb_resources[] = {
587 .start = IRQ_USB_INT0, 587 .start = IRQ_USB_INT0,
588 .end = IRQ_USB_INT0, 588 .end = IRQ_USB_INT0,
589 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, 589 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
590 .name = "mc"
590 }, 591 },
591 [2] = { /* DMA IRQ */ 592 [2] = { /* DMA IRQ */
592 .start = IRQ_USB_DMA, 593 .start = IRQ_USB_DMA,
593 .end = IRQ_USB_DMA, 594 .end = IRQ_USB_DMA,
594 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, 595 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
596 .name = "dma"
595 }, 597 },
596}; 598};
597 599
@@ -623,7 +625,7 @@ static struct musb_hdrc_platform_data musb_plat = {
623static u64 musb_dmamask = ~(u32)0; 625static u64 musb_dmamask = ~(u32)0;
624 626
625static struct platform_device musb_device = { 627static struct platform_device musb_device = {
626 .name = "musb_hdrc", 628 .name = "musb-blackfin",
627 .id = 0, 629 .id = 0,
628 .dev = { 630 .dev = {
629 .dma_mask = &musb_dmamask, 631 .dma_mask = &musb_dmamask,
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 39e534f5a3b0..f099b82703d8 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -1542,7 +1542,7 @@ pfm_exit_smpl_buffer(pfm_buffer_fmt_t *fmt)
1542 * any operations on the root directory. However, we need a non-trivial 1542 * any operations on the root directory. However, we need a non-trivial
1543 * d_name - pfm: will go nicely and kill the special-casing in procfs. 1543 * d_name - pfm: will go nicely and kill the special-casing in procfs.
1544 */ 1544 */
1545static struct vfsmount *pfmfs_mnt; 1545static struct vfsmount *pfmfs_mnt __read_mostly;
1546 1546
1547static int __init 1547static int __init
1548init_pfm_fs(void) 1548init_pfm_fs(void)
@@ -2185,7 +2185,7 @@ static const struct file_operations pfm_file_ops = {
2185}; 2185};
2186 2186
2187static int 2187static int
2188pfmfs_delete_dentry(struct dentry *dentry) 2188pfmfs_delete_dentry(const struct dentry *dentry)
2189{ 2189{
2190 return 1; 2190 return 1;
2191} 2191}
@@ -2233,7 +2233,7 @@ pfm_alloc_file(pfm_context_t *ctx)
2233 } 2233 }
2234 path.mnt = mntget(pfmfs_mnt); 2234 path.mnt = mntget(pfmfs_mnt);
2235 2235
2236 path.dentry->d_op = &pfmfs_dentry_operations; 2236 d_set_d_op(path.dentry, &pfmfs_dentry_operations);
2237 d_add(path.dentry, inode); 2237 d_add(path.dentry, inode);
2238 2238
2239 file = alloc_file(&path, FMODE_READ, &pfm_file_ops); 2239 file = alloc_file(&path, FMODE_READ, &pfm_file_ops);
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 3532b92de983..856e9c398068 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -71,12 +71,18 @@ spufs_alloc_inode(struct super_block *sb)
71 return &ei->vfs_inode; 71 return &ei->vfs_inode;
72} 72}
73 73
74static void 74static void spufs_i_callback(struct rcu_head *head)
75spufs_destroy_inode(struct inode *inode)
76{ 75{
76 struct inode *inode = container_of(head, struct inode, i_rcu);
77 INIT_LIST_HEAD(&inode->i_dentry);
77 kmem_cache_free(spufs_inode_cache, SPUFS_I(inode)); 78 kmem_cache_free(spufs_inode_cache, SPUFS_I(inode));
78} 79}
79 80
81static void spufs_destroy_inode(struct inode *inode)
82{
83 call_rcu(&inode->i_rcu, spufs_i_callback);
84}
85
80static void 86static void
81spufs_init_once(void *p) 87spufs_init_once(void *p)
82{ 88{
@@ -159,18 +165,18 @@ static void spufs_prune_dir(struct dentry *dir)
159 165
160 mutex_lock(&dir->d_inode->i_mutex); 166 mutex_lock(&dir->d_inode->i_mutex);
161 list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) { 167 list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) {
162 spin_lock(&dcache_lock);
163 spin_lock(&dentry->d_lock); 168 spin_lock(&dentry->d_lock);
164 if (!(d_unhashed(dentry)) && dentry->d_inode) { 169 if (!(d_unhashed(dentry)) && dentry->d_inode) {
165 dget_locked(dentry); 170 dget_dlock(dentry);
166 __d_drop(dentry); 171 __d_drop(dentry);
167 spin_unlock(&dentry->d_lock); 172 spin_unlock(&dentry->d_lock);
168 simple_unlink(dir->d_inode, dentry); 173 simple_unlink(dir->d_inode, dentry);
169 spin_unlock(&dcache_lock); 174 /* XXX: what was dcache_lock protecting here? Other
175 * filesystems (IB, configfs) release dcache_lock
176 * before unlink */
170 dput(dentry); 177 dput(dentry);
171 } else { 178 } else {
172 spin_unlock(&dentry->d_lock); 179 spin_unlock(&dentry->d_lock);
173 spin_unlock(&dcache_lock);
174 } 180 }
175 } 181 }
176 shrink_dcache_parent(dir); 182 shrink_dcache_parent(dir);
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index e9e71120040c..fff252209f63 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -349,6 +349,7 @@ config CPU_SUBTYPE_SH7720
349 select CPU_HAS_DSP 349 select CPU_HAS_DSP
350 select SYS_SUPPORTS_CMT 350 select SYS_SUPPORTS_CMT
351 select ARCH_WANT_OPTIONAL_GPIOLIB 351 select ARCH_WANT_OPTIONAL_GPIOLIB
352 select USB_ARCH_HAS_OHCI
352 help 353 help
353 Select SH7720 if you have a SH3-DSP SH7720 CPU. 354 Select SH7720 if you have a SH3-DSP SH7720 CPU.
354 355
@@ -357,6 +358,7 @@ config CPU_SUBTYPE_SH7721
357 select CPU_SH3 358 select CPU_SH3
358 select CPU_HAS_DSP 359 select CPU_HAS_DSP
359 select SYS_SUPPORTS_CMT 360 select SYS_SUPPORTS_CMT
361 select USB_ARCH_HAS_OHCI
360 help 362 help
361 Select SH7721 if you have a SH3-DSP SH7721 CPU. 363 Select SH7721 if you have a SH3-DSP SH7721 CPU.
362 364
@@ -437,6 +439,7 @@ config CPU_SUBTYPE_SH7757
437config CPU_SUBTYPE_SH7763 439config CPU_SUBTYPE_SH7763
438 bool "Support SH7763 processor" 440 bool "Support SH7763 processor"
439 select CPU_SH4A 441 select CPU_SH4A
442 select USB_ARCH_HAS_OHCI
440 help 443 help
441 Select SH7763 if you have a SH4A SH7763(R5S77631) CPU. 444 Select SH7763 if you have a SH4A SH7763(R5S77631) CPU.
442 445
@@ -463,6 +466,8 @@ config CPU_SUBTYPE_SH7786
463 select CPU_HAS_PTEAEX 466 select CPU_HAS_PTEAEX
464 select GENERIC_CLOCKEVENTS_BROADCAST if SMP 467 select GENERIC_CLOCKEVENTS_BROADCAST if SMP
465 select ARCH_WANT_OPTIONAL_GPIOLIB 468 select ARCH_WANT_OPTIONAL_GPIOLIB
469 select USB_ARCH_HAS_OHCI
470 select USB_ARCH_HAS_EHCI
466 471
467config CPU_SUBTYPE_SHX3 472config CPU_SUBTYPE_SHX3
468 bool "Support SH-X3 processor" 473 bool "Support SH-X3 processor"
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
index c016c0004714..0170dbda1d00 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
@@ -522,10 +522,37 @@ static struct platform_device dma0_device = {
522 }, 522 },
523}; 523};
524 524
525#define USB_EHCI_START 0xffe70000
526#define USB_OHCI_START 0xffe70400
527
528static struct resource usb_ehci_resources[] = {
529 [0] = {
530 .start = USB_EHCI_START,
531 .end = USB_EHCI_START + 0x3ff,
532 .flags = IORESOURCE_MEM,
533 },
534 [1] = {
535 .start = 77,
536 .end = 77,
537 .flags = IORESOURCE_IRQ,
538 },
539};
540
541static struct platform_device usb_ehci_device = {
542 .name = "sh_ehci",
543 .id = -1,
544 .dev = {
545 .dma_mask = &usb_ehci_device.dev.coherent_dma_mask,
546 .coherent_dma_mask = DMA_BIT_MASK(32),
547 },
548 .num_resources = ARRAY_SIZE(usb_ehci_resources),
549 .resource = usb_ehci_resources,
550};
551
525static struct resource usb_ohci_resources[] = { 552static struct resource usb_ohci_resources[] = {
526 [0] = { 553 [0] = {
527 .start = 0xffe70400, 554 .start = USB_OHCI_START,
528 .end = 0xffe704ff, 555 .end = USB_OHCI_START + 0x3ff,
529 .flags = IORESOURCE_MEM, 556 .flags = IORESOURCE_MEM,
530 }, 557 },
531 [1] = { 558 [1] = {
@@ -535,12 +562,11 @@ static struct resource usb_ohci_resources[] = {
535 }, 562 },
536}; 563};
537 564
538static u64 usb_ohci_dma_mask = DMA_BIT_MASK(32);
539static struct platform_device usb_ohci_device = { 565static struct platform_device usb_ohci_device = {
540 .name = "sh_ohci", 566 .name = "sh_ohci",
541 .id = -1, 567 .id = -1,
542 .dev = { 568 .dev = {
543 .dma_mask = &usb_ohci_dma_mask, 569 .dma_mask = &usb_ohci_device.dev.coherent_dma_mask,
544 .coherent_dma_mask = DMA_BIT_MASK(32), 570 .coherent_dma_mask = DMA_BIT_MASK(32),
545 }, 571 },
546 .num_resources = ARRAY_SIZE(usb_ohci_resources), 572 .num_resources = ARRAY_SIZE(usb_ohci_resources),
@@ -570,6 +596,7 @@ static struct platform_device *sh7786_early_devices[] __initdata = {
570 596
571static struct platform_device *sh7786_devices[] __initdata = { 597static struct platform_device *sh7786_devices[] __initdata = {
572 &dma0_device, 598 &dma0_device,
599 &usb_ehci_device,
573 &usb_ohci_device, 600 &usb_ohci_device,
574}; 601};
575 602
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 88cb04e7962b..ed1adc20c8eb 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1604,6 +1604,7 @@ static const struct hid_device_id hid_ignore_list[] = {
1604 { HID_USB_DEVICE(USB_VENDOR_ID_DEALEXTREAME, USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701) }, 1604 { HID_USB_DEVICE(USB_VENDOR_ID_DEALEXTREAME, USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701) },
1605 { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) }, 1605 { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) },
1606 { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) }, 1606 { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) },
1607 { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x0004) },
1607 { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) }, 1608 { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
1608 { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) }, 1609 { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
1609 { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) }, 1610 { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 3341baa86a30..5a559de22282 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -200,6 +200,8 @@
200#define USB_VENDOR_ID_ELECOM 0x056e 200#define USB_VENDOR_ID_ELECOM 0x056e
201#define USB_DEVICE_ID_ELECOM_BM084 0x0061 201#define USB_DEVICE_ID_ELECOM_BM084 0x0061
202 202
203#define USB_VENDOR_ID_DREAM_CHEEKY 0x1d34
204
203#define USB_VENDOR_ID_ELO 0x04E7 205#define USB_VENDOR_ID_ELO 0x04E7
204#define USB_DEVICE_ID_ELO_TS2700 0x0020 206#define USB_DEVICE_ID_ELO_TS2700 0x0020
205 207
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
index 8c8afc716b98..31ae1b108aea 100644
--- a/drivers/infiniband/hw/ipath/ipath_fs.c
+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
@@ -277,18 +277,14 @@ static int remove_file(struct dentry *parent, char *name)
277 goto bail; 277 goto bail;
278 } 278 }
279 279
280 spin_lock(&dcache_lock);
281 spin_lock(&tmp->d_lock); 280 spin_lock(&tmp->d_lock);
282 if (!(d_unhashed(tmp) && tmp->d_inode)) { 281 if (!(d_unhashed(tmp) && tmp->d_inode)) {
283 dget_locked(tmp); 282 dget_dlock(tmp);
284 __d_drop(tmp); 283 __d_drop(tmp);
285 spin_unlock(&tmp->d_lock); 284 spin_unlock(&tmp->d_lock);
286 spin_unlock(&dcache_lock);
287 simple_unlink(parent->d_inode, tmp); 285 simple_unlink(parent->d_inode, tmp);
288 } else { 286 } else
289 spin_unlock(&tmp->d_lock); 287 spin_unlock(&tmp->d_lock);
290 spin_unlock(&dcache_lock);
291 }
292 288
293 ret = 0; 289 ret = 0;
294bail: 290bail:
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index f99bddc01716..df7fa251dcdc 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -453,17 +453,14 @@ static int remove_file(struct dentry *parent, char *name)
453 goto bail; 453 goto bail;
454 } 454 }
455 455
456 spin_lock(&dcache_lock);
457 spin_lock(&tmp->d_lock); 456 spin_lock(&tmp->d_lock);
458 if (!(d_unhashed(tmp) && tmp->d_inode)) { 457 if (!(d_unhashed(tmp) && tmp->d_inode)) {
459 dget_locked(tmp); 458 dget_dlock(tmp);
460 __d_drop(tmp); 459 __d_drop(tmp);
461 spin_unlock(&tmp->d_lock); 460 spin_unlock(&tmp->d_lock);
462 spin_unlock(&dcache_lock);
463 simple_unlink(parent->d_inode, tmp); 461 simple_unlink(parent->d_inode, tmp);
464 } else { 462 } else {
465 spin_unlock(&tmp->d_lock); 463 spin_unlock(&tmp->d_lock);
466 spin_unlock(&dcache_lock);
467 } 464 }
468 465
469 ret = 0; 466 ret = 0;
diff --git a/drivers/input/keyboard/tc3589x-keypad.c b/drivers/input/keyboard/tc3589x-keypad.c
index 69dc0cb20a00..dbbe761778d2 100644
--- a/drivers/input/keyboard/tc3589x-keypad.c
+++ b/drivers/input/keyboard/tc3589x-keypad.c
@@ -469,4 +469,4 @@ module_exit(tc3589x_keypad_exit);
469MODULE_LICENSE("GPL v2"); 469MODULE_LICENSE("GPL v2");
470MODULE_AUTHOR("Jayeeta Banerjee/Sundar Iyer"); 470MODULE_AUTHOR("Jayeeta Banerjee/Sundar Iyer");
471MODULE_DESCRIPTION("TC35893 Keypad Driver"); 471MODULE_DESCRIPTION("TC35893 Keypad Driver");
472MODULE_ALIAS("platform:tc3589x-keypad") 472MODULE_ALIAS("platform:tc3589x-keypad");
diff --git a/drivers/media/video/tlg2300/pd-main.c b/drivers/media/video/tlg2300/pd-main.c
index c91424c0c135..99c81a9a4f46 100644
--- a/drivers/media/video/tlg2300/pd-main.c
+++ b/drivers/media/video/tlg2300/pd-main.c
@@ -452,7 +452,8 @@ static int poseidon_probe(struct usb_interface *interface,
452 452
453 device_init_wakeup(&udev->dev, 1); 453 device_init_wakeup(&udev->dev, 1);
454#ifdef CONFIG_PM 454#ifdef CONFIG_PM
455 pd->udev->autosuspend_delay = HZ * PM_SUSPEND_DELAY; 455 pm_runtime_set_autosuspend_delay(&pd->udev->dev,
456 1000 * PM_SUSPEND_DELAY);
456 usb_enable_autosuspend(pd->udev); 457 usb_enable_autosuspend(pd->udev);
457 458
458 if (in_hibernation(pd)) { 459 if (in_hibernation(pd)) {
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 35275ba7096f..12abd5b924b3 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -95,7 +95,8 @@
95#define twl_has_rtc() false 95#define twl_has_rtc() false
96#endif 96#endif
97 97
98#if defined(CONFIG_TWL4030_USB) || defined(CONFIG_TWL4030_USB_MODULE) 98#if defined(CONFIG_TWL4030_USB) || defined(CONFIG_TWL4030_USB_MODULE) ||\
99 defined(CONFIG_TWL6030_USB) || defined(CONFIG_TWL6030_USB_MODULE)
99#define twl_has_usb() true 100#define twl_has_usb() true
100#else 101#else
101#define twl_has_usb() false 102#define twl_has_usb() false
@@ -682,6 +683,43 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
682 usb3v1.dev = child; 683 usb3v1.dev = child;
683 } 684 }
684 } 685 }
686 if (twl_has_usb() && pdata->usb && twl_class_is_6030()) {
687
688 static struct regulator_consumer_supply usb3v3 = {
689 .supply = "vusb",
690 };
691
692 if (twl_has_regulator()) {
693 /* this is a template that gets copied */
694 struct regulator_init_data usb_fixed = {
695 .constraints.valid_modes_mask =
696 REGULATOR_MODE_NORMAL
697 | REGULATOR_MODE_STANDBY,
698 .constraints.valid_ops_mask =
699 REGULATOR_CHANGE_MODE
700 | REGULATOR_CHANGE_STATUS,
701 };
702
703 child = add_regulator_linked(TWL6030_REG_VUSB,
704 &usb_fixed, &usb3v3, 1);
705 if (IS_ERR(child))
706 return PTR_ERR(child);
707 }
708
709 child = add_child(0, "twl6030_usb",
710 pdata->usb, sizeof(*pdata->usb),
711 true,
712 /* irq1 = VBUS_PRES, irq0 = USB ID */
713 pdata->irq_base + USBOTG_INTR_OFFSET,
714 pdata->irq_base + USB_PRES_INTR_OFFSET);
715
716 if (IS_ERR(child))
717 return PTR_ERR(child);
718 /* we need to connect regulators to this transceiver */
719 if (twl_has_regulator() && child)
720 usb3v3.dev = child;
721
722 }
685 723
686 if (twl_has_watchdog()) { 724 if (twl_has_watchdog()) {
687 child = add_child(0, "twl4030_wdt", NULL, 0, false, 0, 0); 725 child = add_child(0, "twl4030_wdt", NULL, 0, false, 0, 0);
@@ -815,10 +853,6 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
815 if (IS_ERR(child)) 853 if (IS_ERR(child))
816 return PTR_ERR(child); 854 return PTR_ERR(child);
817 855
818 child = add_regulator(TWL6030_REG_VUSB, pdata->vusb);
819 if (IS_ERR(child))
820 return PTR_ERR(child);
821
822 child = add_regulator(TWL6030_REG_VAUX1_6030, pdata->vaux1); 856 child = add_regulator(TWL6030_REG_VAUX1_6030, pdata->vaux1);
823 if (IS_ERR(child)) 857 if (IS_ERR(child))
824 return PTR_ERR(child); 858 return PTR_ERR(child);
diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
index aaedb11d9d2c..06c8955907e9 100644
--- a/drivers/mfd/twl6030-irq.c
+++ b/drivers/mfd/twl6030-irq.c
@@ -74,7 +74,7 @@ static int twl6030_interrupt_mapping[24] = {
74 USBOTG_INTR_OFFSET, /* Bit 16 ID_WKUP */ 74 USBOTG_INTR_OFFSET, /* Bit 16 ID_WKUP */
75 USBOTG_INTR_OFFSET, /* Bit 17 VBUS_WKUP */ 75 USBOTG_INTR_OFFSET, /* Bit 17 VBUS_WKUP */
76 USBOTG_INTR_OFFSET, /* Bit 18 ID */ 76 USBOTG_INTR_OFFSET, /* Bit 18 ID */
77 USBOTG_INTR_OFFSET, /* Bit 19 VBUS */ 77 USB_PRES_INTR_OFFSET, /* Bit 19 VBUS */
78 CHARGER_INTR_OFFSET, /* Bit 20 CHRG_CTRL */ 78 CHARGER_INTR_OFFSET, /* Bit 20 CHRG_CTRL */
79 CHARGER_INTR_OFFSET, /* Bit 21 EXT_CHRG */ 79 CHARGER_INTR_OFFSET, /* Bit 21 EXT_CHRG */
80 CHARGER_INTR_OFFSET, /* Bit 22 INT_CHRG */ 80 CHARGER_INTR_OFFSET, /* Bit 22 INT_CHRG */
@@ -128,6 +128,13 @@ static int twl6030_irq_thread(void *data)
128 128
129 sts.bytes[3] = 0; /* Only 24 bits are valid*/ 129 sts.bytes[3] = 0; /* Only 24 bits are valid*/
130 130
131 /*
132 * Since VBUS status bit is not reliable for VBUS disconnect
133 * use CHARGER VBUS detection status bit instead.
134 */
135 if (sts.bytes[2] & 0x10)
136 sts.bytes[2] |= 0x08;
137
131 for (i = 0; sts.int_sts; sts.int_sts >>= 1, i++) { 138 for (i = 0; sts.int_sts; sts.int_sts >>= 1, i++) {
132 local_irq_disable(); 139 local_irq_disable();
133 if (sts.int_sts & 0x1) { 140 if (sts.int_sts & 0x1) {
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 4759d827e8c7..f511dd15fd31 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -1201,7 +1201,7 @@ err_unregister_chdev:
1201static void __exit cleanup_mtdchar(void) 1201static void __exit cleanup_mtdchar(void)
1202{ 1202{
1203 unregister_mtd_user(&mtdchar_notifier); 1203 unregister_mtd_user(&mtdchar_notifier);
1204 mntput(mtd_inode_mnt); 1204 mntput_long(mtd_inode_mnt);
1205 unregister_filesystem(&mtd_inodefs_type); 1205 unregister_filesystem(&mtd_inodefs_type);
1206 __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd"); 1206 __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
1207} 1207}
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index 10e3ab352175..298f2b0b6311 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -514,7 +514,7 @@ int i2400mu_probe(struct usb_interface *iface,
514#ifdef CONFIG_PM 514#ifdef CONFIG_PM
515 iface->needs_remote_wakeup = 1; /* autosuspend (15s delay) */ 515 iface->needs_remote_wakeup = 1; /* autosuspend (15s delay) */
516 device_init_wakeup(dev, 1); 516 device_init_wakeup(dev, 1);
517 usb_dev->autosuspend_delay = 15 * HZ; 517 pm_runtime_set_autosuspend_delay(&usb_dev->dev, 15000);
518 usb_enable_autosuspend(usb_dev); 518 usb_enable_autosuspend(usb_dev);
519#endif 519#endif
520 520
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 044fb22718d2..51c666fb67a4 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -45,8 +45,8 @@ static char *init_device;
45module_param_named(device, init_device, charp, 0400); 45module_param_named(device, init_device, charp, 0400);
46MODULE_PARM_DESC(device, "specify initial device"); 46MODULE_PARM_DESC(device, "specify initial device");
47 47
48static struct kmem_cache *zfcp_cache_hw_align(const char *name, 48static struct kmem_cache * __init zfcp_cache_hw_align(const char *name,
49 unsigned long size) 49 unsigned long size)
50{ 50{
51 return kmem_cache_create(name, size, roundup_pow_of_two(size), 0, NULL); 51 return kmem_cache_create(name, size, roundup_pow_of_two(size), 0, NULL);
52} 52}
@@ -311,8 +311,7 @@ int zfcp_status_read_refill(struct zfcp_adapter *adapter)
311 if (zfcp_fsf_status_read(adapter->qdio)) { 311 if (zfcp_fsf_status_read(adapter->qdio)) {
312 if (atomic_read(&adapter->stat_miss) >= 312 if (atomic_read(&adapter->stat_miss) >=
313 adapter->stat_read_buf_num) { 313 adapter->stat_read_buf_num) {
314 zfcp_erp_adapter_reopen(adapter, 0, "axsref1", 314 zfcp_erp_adapter_reopen(adapter, 0, "axsref1");
315 NULL);
316 return 1; 315 return 1;
317 } 316 }
318 break; 317 break;
@@ -459,7 +458,7 @@ void zfcp_adapter_unregister(struct zfcp_adapter *adapter)
459 sysfs_remove_group(&cdev->dev.kobj, &zfcp_sysfs_adapter_attrs); 458 sysfs_remove_group(&cdev->dev.kobj, &zfcp_sysfs_adapter_attrs);
460 459
461 zfcp_erp_thread_kill(adapter); 460 zfcp_erp_thread_kill(adapter);
462 zfcp_dbf_adapter_unregister(adapter->dbf); 461 zfcp_dbf_adapter_unregister(adapter);
463 zfcp_qdio_destroy(adapter->qdio); 462 zfcp_qdio_destroy(adapter->qdio);
464 463
465 zfcp_ccw_adapter_put(adapter); /* final put to release */ 464 zfcp_ccw_adapter_put(adapter); /* final put to release */
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index 0833c2b51e39..4f7852dd30c7 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -48,7 +48,7 @@ static int zfcp_ccw_activate(struct ccw_device *cdev)
48 48
49 zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING); 49 zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
50 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 50 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
51 "ccresu2", NULL); 51 "ccresu2");
52 zfcp_erp_wait(adapter); 52 zfcp_erp_wait(adapter);
53 flush_work(&adapter->scan_work); 53 flush_work(&adapter->scan_work);
54 54
@@ -182,7 +182,7 @@ static int zfcp_ccw_set_offline(struct ccw_device *cdev)
182 if (!adapter) 182 if (!adapter)
183 return 0; 183 return 0;
184 184
185 zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1", NULL); 185 zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1");
186 zfcp_erp_wait(adapter); 186 zfcp_erp_wait(adapter);
187 187
188 zfcp_ccw_adapter_put(adapter); 188 zfcp_ccw_adapter_put(adapter);
@@ -207,24 +207,24 @@ static int zfcp_ccw_notify(struct ccw_device *cdev, int event)
207 switch (event) { 207 switch (event) {
208 case CIO_GONE: 208 case CIO_GONE:
209 dev_warn(&cdev->dev, "The FCP device has been detached\n"); 209 dev_warn(&cdev->dev, "The FCP device has been detached\n");
210 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1", NULL); 210 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1");
211 break; 211 break;
212 case CIO_NO_PATH: 212 case CIO_NO_PATH:
213 dev_warn(&cdev->dev, 213 dev_warn(&cdev->dev,
214 "The CHPID for the FCP device is offline\n"); 214 "The CHPID for the FCP device is offline\n");
215 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2", NULL); 215 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2");
216 break; 216 break;
217 case CIO_OPER: 217 case CIO_OPER:
218 dev_info(&cdev->dev, "The FCP device is operational again\n"); 218 dev_info(&cdev->dev, "The FCP device is operational again\n");
219 zfcp_erp_set_adapter_status(adapter, 219 zfcp_erp_set_adapter_status(adapter,
220 ZFCP_STATUS_COMMON_RUNNING); 220 ZFCP_STATUS_COMMON_RUNNING);
221 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 221 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
222 "ccnoti4", NULL); 222 "ccnoti4");
223 break; 223 break;
224 case CIO_BOXED: 224 case CIO_BOXED:
225 dev_warn(&cdev->dev, "The FCP device did not respond within " 225 dev_warn(&cdev->dev, "The FCP device did not respond within "
226 "the specified time\n"); 226 "the specified time\n");
227 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti5", NULL); 227 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti5");
228 break; 228 break;
229 } 229 }
230 230
@@ -243,7 +243,7 @@ static void zfcp_ccw_shutdown(struct ccw_device *cdev)
243 if (!adapter) 243 if (!adapter)
244 return; 244 return;
245 245
246 zfcp_erp_adapter_shutdown(adapter, 0, "ccshut1", NULL); 246 zfcp_erp_adapter_shutdown(adapter, 0, "ccshut1");
247 zfcp_erp_wait(adapter); 247 zfcp_erp_wait(adapter);
248 zfcp_erp_thread_kill(adapter); 248 zfcp_erp_thread_kill(adapter);
249 249
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
index d692e229ecba..46342fee394d 100644
--- a/drivers/s390/scsi/zfcp_cfdc.c
+++ b/drivers/s390/scsi/zfcp_cfdc.c
@@ -288,7 +288,7 @@ void zfcp_cfdc_adapter_access_changed(struct zfcp_adapter *adapter)
288 (status & ZFCP_STATUS_COMMON_ACCESS_BOXED)) 288 (status & ZFCP_STATUS_COMMON_ACCESS_BOXED))
289 zfcp_erp_port_reopen(port, 289 zfcp_erp_port_reopen(port,
290 ZFCP_STATUS_COMMON_ERP_FAILED, 290 ZFCP_STATUS_COMMON_ERP_FAILED,
291 "cfaac_1", NULL); 291 "cfaac_1");
292 } 292 }
293 read_unlock_irqrestore(&adapter->port_list_lock, flags); 293 read_unlock_irqrestore(&adapter->port_list_lock, flags);
294 294
@@ -299,7 +299,7 @@ void zfcp_cfdc_adapter_access_changed(struct zfcp_adapter *adapter)
299 (status & ZFCP_STATUS_COMMON_ACCESS_BOXED)) 299 (status & ZFCP_STATUS_COMMON_ACCESS_BOXED))
300 zfcp_erp_lun_reopen(sdev, 300 zfcp_erp_lun_reopen(sdev,
301 ZFCP_STATUS_COMMON_ERP_FAILED, 301 ZFCP_STATUS_COMMON_ERP_FAILED,
302 "cfaac_2", NULL); 302 "cfaac_2");
303 } 303 }
304} 304}
305 305
@@ -426,7 +426,7 @@ int zfcp_cfdc_open_lun_eval(struct scsi_device *sdev,
426 zfcp_scsi_dev_lun(sdev), 426 zfcp_scsi_dev_lun(sdev),
427 (unsigned long long)zfcp_sdev->port->wwpn); 427 (unsigned long long)zfcp_sdev->port->wwpn);
428 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED); 428 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
429 zfcp_erp_lun_shutdown(sdev, 0, "fsouh_6", NULL); 429 zfcp_erp_lun_shutdown(sdev, 0, "fsouh_6");
430 return -EACCES; 430 return -EACCES;
431 } 431 }
432 432
@@ -437,7 +437,7 @@ int zfcp_cfdc_open_lun_eval(struct scsi_device *sdev,
437 zfcp_scsi_dev_lun(sdev), 437 zfcp_scsi_dev_lun(sdev),
438 (unsigned long long)zfcp_sdev->port->wwpn); 438 (unsigned long long)zfcp_sdev->port->wwpn);
439 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED); 439 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
440 zfcp_erp_lun_shutdown(sdev, 0, "fsosh_8", NULL); 440 zfcp_erp_lun_shutdown(sdev, 0, "fsosh_8");
441 return -EACCES; 441 return -EACCES;
442 } 442 }
443 443
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 2cdd6b28ff7f..96d1462e0bf5 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Debug traces for zfcp. 4 * Debug traces for zfcp.
5 * 5 *
6 * Copyright IBM Corporation 2002, 2009 6 * Copyright IBM Corporation 2002, 2010
7 */ 7 */
8 8
9#define KMSG_COMPONENT "zfcp" 9#define KMSG_COMPONENT "zfcp"
@@ -22,980 +22,392 @@ module_param(dbfsize, uint, 0400);
22MODULE_PARM_DESC(dbfsize, 22MODULE_PARM_DESC(dbfsize,
23 "number of pages for each debug feature area (default 4)"); 23 "number of pages for each debug feature area (default 4)");
24 24
25static void zfcp_dbf_hexdump(debug_info_t *dbf, void *to, int to_len, 25static inline unsigned int zfcp_dbf_plen(unsigned int offset)
26 int level, char *from, int from_len)
27{ 26{
28 int offset; 27 return sizeof(struct zfcp_dbf_pay) + offset - ZFCP_DBF_PAY_MAX_REC;
29 struct zfcp_dbf_dump *dump = to;
30 int room = to_len - sizeof(*dump);
31
32 for (offset = 0; offset < from_len; offset += dump->size) {
33 memset(to, 0, to_len);
34 strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE);
35 dump->total_size = from_len;
36 dump->offset = offset;
37 dump->size = min(from_len - offset, room);
38 memcpy(dump->data, from + offset, dump->size);
39 debug_event(dbf, level, dump, dump->size + sizeof(*dump));
40 }
41} 28}
42 29
43static void zfcp_dbf_tag(char **p, const char *label, const char *tag) 30static inline
31void zfcp_dbf_pl_write(struct zfcp_dbf *dbf, void *data, u16 length, char *area,
32 u64 req_id)
44{ 33{
45 int i; 34 struct zfcp_dbf_pay *pl = &dbf->pay_buf;
46 35 u16 offset = 0, rec_length;
47 *p += sprintf(*p, "%-24s", label);
48 for (i = 0; i < ZFCP_DBF_TAG_SIZE; i++)
49 *p += sprintf(*p, "%c", tag[i]);
50 *p += sprintf(*p, "\n");
51}
52 36
53static void zfcp_dbf_outs(char **buf, const char *s1, const char *s2) 37 spin_lock(&dbf->pay_lock);
54{ 38 memset(pl, 0, sizeof(*pl));
55 *buf += sprintf(*buf, "%-24s%s\n", s1, s2); 39 pl->fsf_req_id = req_id;
56} 40 memcpy(pl->area, area, ZFCP_DBF_TAG_LEN);
57 41
58static void zfcp_dbf_out(char **buf, const char *s, const char *format, ...) 42 while (offset < length) {
59{ 43 rec_length = min((u16) ZFCP_DBF_PAY_MAX_REC,
60 va_list arg; 44 (u16) (length - offset));
45 memcpy(pl->data, data + offset, rec_length);
46 debug_event(dbf->pay, 1, pl, zfcp_dbf_plen(rec_length));
61 47
62 *buf += sprintf(*buf, "%-24s", s); 48 offset += rec_length;
63 va_start(arg, format); 49 pl->counter++;
64 *buf += vsprintf(*buf, format, arg);
65 va_end(arg);
66 *buf += sprintf(*buf, "\n");
67}
68
69static void zfcp_dbf_outd(char **p, const char *label, char *buffer,
70 int buflen, int offset, int total_size)
71{
72 if (!offset)
73 *p += sprintf(*p, "%-24s ", label);
74 while (buflen--) {
75 if (offset > 0) {
76 if ((offset % 32) == 0)
77 *p += sprintf(*p, "\n%-24c ", ' ');
78 else if ((offset % 4) == 0)
79 *p += sprintf(*p, " ");
80 }
81 *p += sprintf(*p, "%02x", *buffer++);
82 if (++offset == total_size) {
83 *p += sprintf(*p, "\n");
84 break;
85 }
86 } 50 }
87 if (!total_size)
88 *p += sprintf(*p, "\n");
89}
90 51
91static int zfcp_dbf_view_header(debug_info_t *id, struct debug_view *view, 52 spin_unlock(&dbf->pay_lock);
92 int area, debug_entry_t *entry, char *out_buf)
93{
94 struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)DEBUG_DATA(entry);
95 struct timespec t;
96 char *p = out_buf;
97
98 if (strncmp(dump->tag, "dump", ZFCP_DBF_TAG_SIZE) != 0) {
99 stck_to_timespec(entry->id.stck, &t);
100 zfcp_dbf_out(&p, "timestamp", "%011lu:%06lu",
101 t.tv_sec, t.tv_nsec);
102 zfcp_dbf_out(&p, "cpu", "%02i", entry->id.fields.cpuid);
103 } else {
104 zfcp_dbf_outd(&p, "", dump->data, dump->size, dump->offset,
105 dump->total_size);
106 if ((dump->offset + dump->size) == dump->total_size)
107 p += sprintf(p, "\n");
108 }
109 return p - out_buf;
110} 53}
111 54
112void _zfcp_dbf_hba_fsf_response(const char *tag2, int level, 55/**
113 struct zfcp_fsf_req *fsf_req, 56 * zfcp_dbf_hba_fsf_res - trace event for fsf responses
114 struct zfcp_dbf *dbf) 57 * @tag: tag indicating which kind of unsolicited status has been received
58 * @req: request for which a response was received
59 */
60void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req)
115{ 61{
116 struct fsf_qtcb *qtcb = fsf_req->qtcb; 62 struct zfcp_dbf *dbf = req->adapter->dbf;
117 union fsf_prot_status_qual *prot_status_qual = 63 struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix;
118 &qtcb->prefix.prot_status_qual; 64 struct fsf_qtcb_header *q_head = &req->qtcb->header;
119 union fsf_status_qual *fsf_status_qual = &qtcb->header.fsf_status_qual; 65 struct zfcp_dbf_hba *rec = &dbf->hba_buf;
120 struct scsi_cmnd *scsi_cmnd;
121 struct zfcp_port *port;
122 struct zfcp_unit *unit;
123 struct zfcp_send_els *send_els;
124 struct zfcp_dbf_hba_record *rec = &dbf->hba_buf;
125 struct zfcp_dbf_hba_record_response *response = &rec->u.response;
126 unsigned long flags; 66 unsigned long flags;
127 67
128 spin_lock_irqsave(&dbf->hba_lock, flags); 68 spin_lock_irqsave(&dbf->hba_lock, flags);
129 memset(rec, 0, sizeof(*rec)); 69 memset(rec, 0, sizeof(*rec));
130 strncpy(rec->tag, "resp", ZFCP_DBF_TAG_SIZE);
131 strncpy(rec->tag2, tag2, ZFCP_DBF_TAG_SIZE);
132
133 response->fsf_command = fsf_req->fsf_command;
134 response->fsf_reqid = fsf_req->req_id;
135 response->fsf_seqno = fsf_req->seq_no;
136 response->fsf_issued = fsf_req->issued;
137 response->fsf_prot_status = qtcb->prefix.prot_status;
138 response->fsf_status = qtcb->header.fsf_status;
139 memcpy(response->fsf_prot_status_qual,
140 prot_status_qual, FSF_PROT_STATUS_QUAL_SIZE);
141 memcpy(response->fsf_status_qual,
142 fsf_status_qual, FSF_STATUS_QUALIFIER_SIZE);
143 response->fsf_req_status = fsf_req->status;
144 response->sbal_first = fsf_req->qdio_req.sbal_first;
145 response->sbal_last = fsf_req->qdio_req.sbal_last;
146 response->sbal_response = fsf_req->qdio_req.sbal_response;
147 response->pool = fsf_req->pool != NULL;
148 response->erp_action = (unsigned long)fsf_req->erp_action;
149
150 switch (fsf_req->fsf_command) {
151 case FSF_QTCB_FCP_CMND:
152 if (fsf_req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)
153 break;
154 scsi_cmnd = (struct scsi_cmnd *)fsf_req->data;
155 if (scsi_cmnd) {
156 response->u.fcp.cmnd = (unsigned long)scsi_cmnd;
157 response->u.fcp.data_dir =
158 qtcb->bottom.io.data_direction;
159 }
160 break;
161
162 case FSF_QTCB_OPEN_PORT_WITH_DID:
163 case FSF_QTCB_CLOSE_PORT:
164 case FSF_QTCB_CLOSE_PHYSICAL_PORT:
165 port = (struct zfcp_port *)fsf_req->data;
166 response->u.port.wwpn = port->wwpn;
167 response->u.port.d_id = port->d_id;
168 response->u.port.port_handle = qtcb->header.port_handle;
169 break;
170
171 case FSF_QTCB_OPEN_LUN:
172 case FSF_QTCB_CLOSE_LUN:
173 unit = (struct zfcp_unit *)fsf_req->data;
174 port = unit->port;
175 response->u.unit.wwpn = port->wwpn;
176 response->u.unit.fcp_lun = unit->fcp_lun;
177 response->u.unit.port_handle = qtcb->header.port_handle;
178 response->u.unit.lun_handle = qtcb->header.lun_handle;
179 break;
180
181 case FSF_QTCB_SEND_ELS:
182 send_els = (struct zfcp_send_els *)fsf_req->data;
183 response->u.els.d_id = ntoh24(qtcb->bottom.support.d_id);
184 break;
185
186 case FSF_QTCB_ABORT_FCP_CMND:
187 case FSF_QTCB_SEND_GENERIC:
188 case FSF_QTCB_EXCHANGE_CONFIG_DATA:
189 case FSF_QTCB_EXCHANGE_PORT_DATA:
190 case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
191 case FSF_QTCB_UPLOAD_CONTROL_FILE:
192 break;
193 }
194
195 debug_event(dbf->hba, level, rec, sizeof(*rec));
196 70
197 /* have fcp channel microcode fixed to use as little as possible */ 71 memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
198 if (fsf_req->fsf_command != FSF_QTCB_FCP_CMND) { 72 rec->id = ZFCP_DBF_HBA_RES;
199 /* adjust length skipping trailing zeros */ 73 rec->fsf_req_id = req->req_id;
200 char *buf = (char *)qtcb + qtcb->header.log_start; 74 rec->fsf_req_status = req->status;
201 int len = qtcb->header.log_length; 75 rec->fsf_cmd = req->fsf_command;
202 for (; len && !buf[len - 1]; len--); 76 rec->fsf_seq_no = req->seq_no;
203 zfcp_dbf_hexdump(dbf->hba, rec, sizeof(*rec), level, buf, 77 rec->u.res.req_issued = req->issued;
204 len); 78 rec->u.res.prot_status = q_pref->prot_status;
79 rec->u.res.fsf_status = q_head->fsf_status;
80
81 memcpy(rec->u.res.prot_status_qual, &q_pref->prot_status_qual,
82 FSF_PROT_STATUS_QUAL_SIZE);
83 memcpy(rec->u.res.fsf_status_qual, &q_head->fsf_status_qual,
84 FSF_STATUS_QUALIFIER_SIZE);
85
86 if (req->fsf_command != FSF_QTCB_FCP_CMND) {
87 rec->pl_len = q_head->log_length;
88 zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start,
89 rec->pl_len, "fsf_res", req->req_id);
205 } 90 }
206 91
207 spin_unlock_irqrestore(&dbf->hba_lock, flags); 92 debug_event(dbf->hba, 1, rec, sizeof(*rec));
208}
209
210void _zfcp_dbf_hba_fsf_unsol(const char *tag, int level, struct zfcp_dbf *dbf,
211 struct fsf_status_read_buffer *status_buffer)
212{
213 struct zfcp_dbf_hba_record *rec = &dbf->hba_buf;
214 unsigned long flags;
215
216 spin_lock_irqsave(&dbf->hba_lock, flags);
217 memset(rec, 0, sizeof(*rec));
218 strncpy(rec->tag, "stat", ZFCP_DBF_TAG_SIZE);
219 strncpy(rec->tag2, tag, ZFCP_DBF_TAG_SIZE);
220
221 rec->u.status.failed = atomic_read(&dbf->adapter->stat_miss);
222 if (status_buffer != NULL) {
223 rec->u.status.status_type = status_buffer->status_type;
224 rec->u.status.status_subtype = status_buffer->status_subtype;
225 memcpy(&rec->u.status.queue_designator,
226 &status_buffer->queue_designator,
227 sizeof(struct fsf_queue_designator));
228
229 switch (status_buffer->status_type) {
230 case FSF_STATUS_READ_SENSE_DATA_AVAIL:
231 rec->u.status.payload_size =
232 ZFCP_DBF_UNSOL_PAYLOAD_SENSE_DATA_AVAIL;
233 break;
234
235 case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
236 rec->u.status.payload_size =
237 ZFCP_DBF_UNSOL_PAYLOAD_BIT_ERROR_THRESHOLD;
238 break;
239
240 case FSF_STATUS_READ_LINK_DOWN:
241 switch (status_buffer->status_subtype) {
242 case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
243 case FSF_STATUS_READ_SUB_FDISC_FAILED:
244 rec->u.status.payload_size =
245 sizeof(struct fsf_link_down_info);
246 }
247 break;
248
249 case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
250 rec->u.status.payload_size =
251 ZFCP_DBF_UNSOL_PAYLOAD_FEATURE_UPDATE_ALERT;
252 break;
253 }
254 memcpy(&rec->u.status.payload,
255 &status_buffer->payload, rec->u.status.payload_size);
256 }
257
258 debug_event(dbf->hba, level, rec, sizeof(*rec));
259 spin_unlock_irqrestore(&dbf->hba_lock, flags); 93 spin_unlock_irqrestore(&dbf->hba_lock, flags);
260} 94}
261 95
262/** 96/**
263 * zfcp_dbf_hba_qdio - trace event for QDIO related failure 97 * zfcp_dbf_hba_fsf_uss - trace event for an unsolicited status buffer
264 * @qdio: qdio structure affected by this QDIO related event 98 * @tag: tag indicating which kind of unsolicited status has been received
265 * @qdio_error: as passed by qdio module 99 * @req: request providing the unsolicited status
266 * @sbal_index: first buffer with error condition, as passed by qdio module
267 * @sbal_count: number of buffers affected, as passed by qdio module
268 */ 100 */
269void zfcp_dbf_hba_qdio(struct zfcp_dbf *dbf, unsigned int qdio_error, 101void zfcp_dbf_hba_fsf_uss(char *tag, struct zfcp_fsf_req *req)
270 int sbal_index, int sbal_count)
271{ 102{
272 struct zfcp_dbf_hba_record *r = &dbf->hba_buf; 103 struct zfcp_dbf *dbf = req->adapter->dbf;
104 struct fsf_status_read_buffer *srb = req->data;
105 struct zfcp_dbf_hba *rec = &dbf->hba_buf;
273 unsigned long flags; 106 unsigned long flags;
274 107
275 spin_lock_irqsave(&dbf->hba_lock, flags); 108 spin_lock_irqsave(&dbf->hba_lock, flags);
276 memset(r, 0, sizeof(*r)); 109 memset(rec, 0, sizeof(*rec));
277 strncpy(r->tag, "qdio", ZFCP_DBF_TAG_SIZE); 110
278 r->u.qdio.qdio_error = qdio_error; 111 memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
279 r->u.qdio.sbal_index = sbal_index; 112 rec->id = ZFCP_DBF_HBA_USS;
280 r->u.qdio.sbal_count = sbal_count; 113 rec->fsf_req_id = req->req_id;
281 debug_event(dbf->hba, 0, r, sizeof(*r)); 114 rec->fsf_req_status = req->status;
115 rec->fsf_cmd = req->fsf_command;
116
117 if (!srb)
118 goto log;
119
120 rec->u.uss.status_type = srb->status_type;
121 rec->u.uss.status_subtype = srb->status_subtype;
122 rec->u.uss.d_id = ntoh24(srb->d_id);
123 rec->u.uss.lun = srb->fcp_lun;
124 memcpy(&rec->u.uss.queue_designator, &srb->queue_designator,
125 sizeof(rec->u.uss.queue_designator));
126
127 /* status read buffer payload length */
128 rec->pl_len = (!srb->length) ? 0 : srb->length -
129 offsetof(struct fsf_status_read_buffer, payload);
130
131 if (rec->pl_len)
132 zfcp_dbf_pl_write(dbf, srb->payload.data, rec->pl_len,
133 "fsf_uss", req->req_id);
134log:
135 debug_event(dbf->hba, 2, rec, sizeof(*rec));
282 spin_unlock_irqrestore(&dbf->hba_lock, flags); 136 spin_unlock_irqrestore(&dbf->hba_lock, flags);
283} 137}
284 138
285/** 139/**
286 * zfcp_dbf_hba_berr - trace event for bit error threshold 140 * zfcp_dbf_hba_bit_err - trace event for bit error conditions
287 * @dbf: dbf structure affected by this QDIO related event 141 * @tag: tag indicating which kind of unsolicited status has been received
288 * @req: fsf request 142 * @req: request which caused the bit_error condition
289 */ 143 */
290void zfcp_dbf_hba_berr(struct zfcp_dbf *dbf, struct zfcp_fsf_req *req) 144void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req)
291{ 145{
292 struct zfcp_dbf_hba_record *r = &dbf->hba_buf; 146 struct zfcp_dbf *dbf = req->adapter->dbf;
147 struct zfcp_dbf_hba *rec = &dbf->hba_buf;
293 struct fsf_status_read_buffer *sr_buf = req->data; 148 struct fsf_status_read_buffer *sr_buf = req->data;
294 struct fsf_bit_error_payload *err = &sr_buf->payload.bit_error;
295 unsigned long flags; 149 unsigned long flags;
296 150
297 spin_lock_irqsave(&dbf->hba_lock, flags); 151 spin_lock_irqsave(&dbf->hba_lock, flags);
298 memset(r, 0, sizeof(*r)); 152 memset(rec, 0, sizeof(*rec));
299 strncpy(r->tag, "berr", ZFCP_DBF_TAG_SIZE);
300 memcpy(&r->u.berr, err, sizeof(struct fsf_bit_error_payload));
301 debug_event(dbf->hba, 0, r, sizeof(*r));
302 spin_unlock_irqrestore(&dbf->hba_lock, flags);
303}
304static void zfcp_dbf_hba_view_response(char **p,
305 struct zfcp_dbf_hba_record_response *r)
306{
307 struct timespec t;
308
309 zfcp_dbf_out(p, "fsf_command", "0x%08x", r->fsf_command);
310 zfcp_dbf_out(p, "fsf_reqid", "0x%0Lx", r->fsf_reqid);
311 zfcp_dbf_out(p, "fsf_seqno", "0x%08x", r->fsf_seqno);
312 stck_to_timespec(r->fsf_issued, &t);
313 zfcp_dbf_out(p, "fsf_issued", "%011lu:%06lu", t.tv_sec, t.tv_nsec);
314 zfcp_dbf_out(p, "fsf_prot_status", "0x%08x", r->fsf_prot_status);
315 zfcp_dbf_out(p, "fsf_status", "0x%08x", r->fsf_status);
316 zfcp_dbf_outd(p, "fsf_prot_status_qual", r->fsf_prot_status_qual,
317 FSF_PROT_STATUS_QUAL_SIZE, 0, FSF_PROT_STATUS_QUAL_SIZE);
318 zfcp_dbf_outd(p, "fsf_status_qual", r->fsf_status_qual,
319 FSF_STATUS_QUALIFIER_SIZE, 0, FSF_STATUS_QUALIFIER_SIZE);
320 zfcp_dbf_out(p, "fsf_req_status", "0x%08x", r->fsf_req_status);
321 zfcp_dbf_out(p, "sbal_first", "0x%02x", r->sbal_first);
322 zfcp_dbf_out(p, "sbal_last", "0x%02x", r->sbal_last);
323 zfcp_dbf_out(p, "sbal_response", "0x%02x", r->sbal_response);
324 zfcp_dbf_out(p, "pool", "0x%02x", r->pool);
325
326 switch (r->fsf_command) {
327 case FSF_QTCB_FCP_CMND:
328 if (r->fsf_req_status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)
329 break;
330 zfcp_dbf_out(p, "data_direction", "0x%04x", r->u.fcp.data_dir);
331 zfcp_dbf_out(p, "scsi_cmnd", "0x%0Lx", r->u.fcp.cmnd);
332 *p += sprintf(*p, "\n");
333 break;
334
335 case FSF_QTCB_OPEN_PORT_WITH_DID:
336 case FSF_QTCB_CLOSE_PORT:
337 case FSF_QTCB_CLOSE_PHYSICAL_PORT:
338 zfcp_dbf_out(p, "wwpn", "0x%016Lx", r->u.port.wwpn);
339 zfcp_dbf_out(p, "d_id", "0x%06x", r->u.port.d_id);
340 zfcp_dbf_out(p, "port_handle", "0x%08x", r->u.port.port_handle);
341 break;
342
343 case FSF_QTCB_OPEN_LUN:
344 case FSF_QTCB_CLOSE_LUN:
345 zfcp_dbf_out(p, "wwpn", "0x%016Lx", r->u.unit.wwpn);
346 zfcp_dbf_out(p, "fcp_lun", "0x%016Lx", r->u.unit.fcp_lun);
347 zfcp_dbf_out(p, "port_handle", "0x%08x", r->u.unit.port_handle);
348 zfcp_dbf_out(p, "lun_handle", "0x%08x", r->u.unit.lun_handle);
349 break;
350
351 case FSF_QTCB_SEND_ELS:
352 zfcp_dbf_out(p, "d_id", "0x%06x", r->u.els.d_id);
353 break;
354
355 case FSF_QTCB_ABORT_FCP_CMND:
356 case FSF_QTCB_SEND_GENERIC:
357 case FSF_QTCB_EXCHANGE_CONFIG_DATA:
358 case FSF_QTCB_EXCHANGE_PORT_DATA:
359 case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
360 case FSF_QTCB_UPLOAD_CONTROL_FILE:
361 break;
362 }
363}
364
365static void zfcp_dbf_hba_view_status(char **p,
366 struct zfcp_dbf_hba_record_status *r)
367{
368 zfcp_dbf_out(p, "failed", "0x%02x", r->failed);
369 zfcp_dbf_out(p, "status_type", "0x%08x", r->status_type);
370 zfcp_dbf_out(p, "status_subtype", "0x%08x", r->status_subtype);
371 zfcp_dbf_outd(p, "queue_designator", (char *)&r->queue_designator,
372 sizeof(struct fsf_queue_designator), 0,
373 sizeof(struct fsf_queue_designator));
374 zfcp_dbf_outd(p, "payload", (char *)&r->payload, r->payload_size, 0,
375 r->payload_size);
376}
377
378static void zfcp_dbf_hba_view_qdio(char **p, struct zfcp_dbf_hba_record_qdio *r)
379{
380 zfcp_dbf_out(p, "qdio_error", "0x%08x", r->qdio_error);
381 zfcp_dbf_out(p, "sbal_index", "0x%02x", r->sbal_index);
382 zfcp_dbf_out(p, "sbal_count", "0x%02x", r->sbal_count);
383}
384 153
385static void zfcp_dbf_hba_view_berr(char **p, struct fsf_bit_error_payload *r) 154 memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
386{ 155 rec->id = ZFCP_DBF_HBA_BIT;
387 zfcp_dbf_out(p, "link_failures", "%d", r->link_failure_error_count); 156 rec->fsf_req_id = req->req_id;
388 zfcp_dbf_out(p, "loss_of_sync_err", "%d", r->loss_of_sync_error_count); 157 rec->fsf_req_status = req->status;
389 zfcp_dbf_out(p, "loss_of_sig_err", "%d", r->loss_of_signal_error_count); 158 rec->fsf_cmd = req->fsf_command;
390 zfcp_dbf_out(p, "prim_seq_err", "%d", 159 memcpy(&rec->u.be, &sr_buf->payload.bit_error,
391 r->primitive_sequence_error_count); 160 sizeof(struct fsf_bit_error_payload));
392 zfcp_dbf_out(p, "inval_trans_word_err", "%d",
393 r->invalid_transmission_word_error_count);
394 zfcp_dbf_out(p, "CRC_errors", "%d", r->crc_error_count);
395 zfcp_dbf_out(p, "prim_seq_event_to", "%d",
396 r->primitive_sequence_event_timeout_count);
397 zfcp_dbf_out(p, "elast_buf_overrun_err", "%d",
398 r->elastic_buffer_overrun_error_count);
399 zfcp_dbf_out(p, "adv_rec_buf2buf_cred", "%d",
400 r->advertised_receive_b2b_credit);
401 zfcp_dbf_out(p, "curr_rec_buf2buf_cred", "%d",
402 r->current_receive_b2b_credit);
403 zfcp_dbf_out(p, "adv_trans_buf2buf_cred", "%d",
404 r->advertised_transmit_b2b_credit);
405 zfcp_dbf_out(p, "curr_trans_buf2buf_cred", "%d",
406 r->current_transmit_b2b_credit);
407}
408 161
409static int zfcp_dbf_hba_view_format(debug_info_t *id, struct debug_view *view, 162 debug_event(dbf->hba, 1, rec, sizeof(*rec));
410 char *out_buf, const char *in_buf) 163 spin_unlock_irqrestore(&dbf->hba_lock, flags);
411{
412 struct zfcp_dbf_hba_record *r = (struct zfcp_dbf_hba_record *)in_buf;
413 char *p = out_buf;
414
415 if (strncmp(r->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0)
416 return 0;
417
418 zfcp_dbf_tag(&p, "tag", r->tag);
419 if (isalpha(r->tag2[0]))
420 zfcp_dbf_tag(&p, "tag2", r->tag2);
421
422 if (strncmp(r->tag, "resp", ZFCP_DBF_TAG_SIZE) == 0)
423 zfcp_dbf_hba_view_response(&p, &r->u.response);
424 else if (strncmp(r->tag, "stat", ZFCP_DBF_TAG_SIZE) == 0)
425 zfcp_dbf_hba_view_status(&p, &r->u.status);
426 else if (strncmp(r->tag, "qdio", ZFCP_DBF_TAG_SIZE) == 0)
427 zfcp_dbf_hba_view_qdio(&p, &r->u.qdio);
428 else if (strncmp(r->tag, "berr", ZFCP_DBF_TAG_SIZE) == 0)
429 zfcp_dbf_hba_view_berr(&p, &r->u.berr);
430
431 if (strncmp(r->tag, "resp", ZFCP_DBF_TAG_SIZE) != 0)
432 p += sprintf(p, "\n");
433 return p - out_buf;
434} 164}
435 165
436static struct debug_view zfcp_dbf_hba_view = { 166static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec,
437 .name = "structured", 167 struct zfcp_adapter *adapter,
438 .header_proc = zfcp_dbf_view_header, 168 struct zfcp_port *port,
439 .format_proc = zfcp_dbf_hba_view_format, 169 struct scsi_device *sdev)
440};
441
442static const char *zfcp_dbf_rec_tags[] = {
443 [ZFCP_REC_DBF_ID_THREAD] = "thread",
444 [ZFCP_REC_DBF_ID_TARGET] = "target",
445 [ZFCP_REC_DBF_ID_TRIGGER] = "trigger",
446 [ZFCP_REC_DBF_ID_ACTION] = "action",
447};
448
449static int zfcp_dbf_rec_view_format(debug_info_t *id, struct debug_view *view,
450 char *buf, const char *_rec)
451{ 170{
452 struct zfcp_dbf_rec_record *r = (struct zfcp_dbf_rec_record *)_rec; 171 rec->adapter_status = atomic_read(&adapter->status);
453 char *p = buf; 172 if (port) {
454 char hint[ZFCP_DBF_ID_SIZE + 1]; 173 rec->port_status = atomic_read(&port->status);
455 174 rec->wwpn = port->wwpn;
456 memcpy(hint, r->id2, ZFCP_DBF_ID_SIZE); 175 rec->d_id = port->d_id;
457 hint[ZFCP_DBF_ID_SIZE] = 0; 176 }
458 zfcp_dbf_outs(&p, "tag", zfcp_dbf_rec_tags[r->id]); 177 if (sdev) {
459 zfcp_dbf_outs(&p, "hint", hint); 178 rec->lun_status = atomic_read(&sdev_to_zfcp(sdev)->status);
460 switch (r->id) { 179 rec->lun = zfcp_scsi_dev_lun(sdev);
461 case ZFCP_REC_DBF_ID_THREAD:
462 zfcp_dbf_out(&p, "total", "%d", r->u.thread.total);
463 zfcp_dbf_out(&p, "ready", "%d", r->u.thread.ready);
464 zfcp_dbf_out(&p, "running", "%d", r->u.thread.running);
465 break;
466 case ZFCP_REC_DBF_ID_TARGET:
467 zfcp_dbf_out(&p, "reference", "0x%016Lx", r->u.target.ref);
468 zfcp_dbf_out(&p, "status", "0x%08x", r->u.target.status);
469 zfcp_dbf_out(&p, "erp_count", "%d", r->u.target.erp_count);
470 zfcp_dbf_out(&p, "d_id", "0x%06x", r->u.target.d_id);
471 zfcp_dbf_out(&p, "wwpn", "0x%016Lx", r->u.target.wwpn);
472 zfcp_dbf_out(&p, "fcp_lun", "0x%016Lx", r->u.target.fcp_lun);
473 break;
474 case ZFCP_REC_DBF_ID_TRIGGER:
475 zfcp_dbf_out(&p, "reference", "0x%016Lx", r->u.trigger.ref);
476 zfcp_dbf_out(&p, "erp_action", "0x%016Lx", r->u.trigger.action);
477 zfcp_dbf_out(&p, "requested", "%d", r->u.trigger.want);
478 zfcp_dbf_out(&p, "executed", "%d", r->u.trigger.need);
479 zfcp_dbf_out(&p, "wwpn", "0x%016Lx", r->u.trigger.wwpn);
480 zfcp_dbf_out(&p, "fcp_lun", "0x%016Lx", r->u.trigger.fcp_lun);
481 zfcp_dbf_out(&p, "adapter_status", "0x%08x", r->u.trigger.as);
482 zfcp_dbf_out(&p, "port_status", "0x%08x", r->u.trigger.ps);
483 zfcp_dbf_out(&p, "lun_status", "0x%08x", r->u.trigger.ls);
484 break;
485 case ZFCP_REC_DBF_ID_ACTION:
486 zfcp_dbf_out(&p, "erp_action", "0x%016Lx", r->u.action.action);
487 zfcp_dbf_out(&p, "fsf_req", "0x%016Lx", r->u.action.fsf_req);
488 zfcp_dbf_out(&p, "status", "0x%08Lx", r->u.action.status);
489 zfcp_dbf_out(&p, "step", "0x%08Lx", r->u.action.step);
490 break;
491 } 180 }
492 p += sprintf(p, "\n");
493 return p - buf;
494} 181}
495 182
496static struct debug_view zfcp_dbf_rec_view = {
497 .name = "structured",
498 .header_proc = zfcp_dbf_view_header,
499 .format_proc = zfcp_dbf_rec_view_format,
500};
501
502/** 183/**
503 * zfcp_dbf_rec_thread - trace event related to recovery thread operation 184 * zfcp_dbf_rec_trig - trace event related to triggered recovery
504 * @id2: identifier for event 185 * @tag: identifier for event
505 * @dbf: reference to dbf structure 186 * @adapter: adapter on which the erp_action should run
506 * This function assumes that the caller is holding erp_lock. 187 * @port: remote port involved in the erp_action
188 * @sdev: scsi device involved in the erp_action
189 * @want: wanted erp_action
190 * @need: required erp_action
191 *
192 * The adapter->erp_lock has to be held.
507 */ 193 */
508void zfcp_dbf_rec_thread(char *id2, struct zfcp_dbf *dbf) 194void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
195 struct zfcp_port *port, struct scsi_device *sdev,
196 u8 want, u8 need)
509{ 197{
510 struct zfcp_adapter *adapter = dbf->adapter; 198 struct zfcp_dbf *dbf = adapter->dbf;
511 struct zfcp_dbf_rec_record *r = &dbf->rec_buf; 199 struct zfcp_dbf_rec *rec = &dbf->rec_buf;
512 unsigned long flags = 0;
513 struct list_head *entry; 200 struct list_head *entry;
514 unsigned ready = 0, running = 0, total;
515
516 list_for_each(entry, &adapter->erp_ready_head)
517 ready++;
518 list_for_each(entry, &adapter->erp_running_head)
519 running++;
520 total = adapter->erp_total_count;
521
522 spin_lock_irqsave(&dbf->rec_lock, flags);
523 memset(r, 0, sizeof(*r));
524 r->id = ZFCP_REC_DBF_ID_THREAD;
525 memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE);
526 r->u.thread.total = total;
527 r->u.thread.ready = ready;
528 r->u.thread.running = running;
529 debug_event(dbf->rec, 6, r, sizeof(*r));
530 spin_unlock_irqrestore(&dbf->rec_lock, flags);
531}
532
533/**
534 * zfcp_dbf_rec_thread - trace event related to recovery thread operation
535 * @id2: identifier for event
536 * @adapter: adapter
537 * This function assumes that the caller does not hold erp_lock.
538 */
539void zfcp_dbf_rec_thread_lock(char *id2, struct zfcp_dbf *dbf)
540{
541 struct zfcp_adapter *adapter = dbf->adapter;
542 unsigned long flags;
543
544 read_lock_irqsave(&adapter->erp_lock, flags);
545 zfcp_dbf_rec_thread(id2, dbf);
546 read_unlock_irqrestore(&adapter->erp_lock, flags);
547}
548
549static void zfcp_dbf_rec_target(char *id2, void *ref, struct zfcp_dbf *dbf,
550 atomic_t *status, atomic_t *erp_count, u64 wwpn,
551 u32 d_id, u64 fcp_lun)
552{
553 struct zfcp_dbf_rec_record *r = &dbf->rec_buf;
554 unsigned long flags; 201 unsigned long flags;
555 202
556 spin_lock_irqsave(&dbf->rec_lock, flags); 203 spin_lock_irqsave(&dbf->rec_lock, flags);
557 memset(r, 0, sizeof(*r)); 204 memset(rec, 0, sizeof(*rec));
558 r->id = ZFCP_REC_DBF_ID_TARGET;
559 memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE);
560 r->u.target.ref = (unsigned long)ref;
561 r->u.target.status = atomic_read(status);
562 r->u.target.wwpn = wwpn;
563 r->u.target.d_id = d_id;
564 r->u.target.fcp_lun = fcp_lun;
565 r->u.target.erp_count = atomic_read(erp_count);
566 debug_event(dbf->rec, 3, r, sizeof(*r));
567 spin_unlock_irqrestore(&dbf->rec_lock, flags);
568}
569
570/**
571 * zfcp_dbf_rec_adapter - trace event for adapter state change
572 * @id: identifier for trigger of state change
573 * @ref: additional reference (e.g. request)
574 * @dbf: reference to dbf structure
575 */
576void zfcp_dbf_rec_adapter(char *id, void *ref, struct zfcp_dbf *dbf)
577{
578 struct zfcp_adapter *adapter = dbf->adapter;
579
580 zfcp_dbf_rec_target(id, ref, dbf, &adapter->status,
581 &adapter->erp_counter, 0, 0,
582 ZFCP_DBF_INVALID_LUN);
583}
584
585/**
586 * zfcp_dbf_rec_port - trace event for port state change
587 * @id: identifier for trigger of state change
588 * @ref: additional reference (e.g. request)
589 * @port: port
590 */
591void zfcp_dbf_rec_port(char *id, void *ref, struct zfcp_port *port)
592{
593 struct zfcp_dbf *dbf = port->adapter->dbf;
594 205
595 zfcp_dbf_rec_target(id, ref, dbf, &port->status, 206 rec->id = ZFCP_DBF_REC_TRIG;
596 &port->erp_counter, port->wwpn, port->d_id, 207 memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
597 ZFCP_DBF_INVALID_LUN); 208 zfcp_dbf_set_common(rec, adapter, port, sdev);
598}
599 209
600/** 210 list_for_each(entry, &adapter->erp_ready_head)
601 * zfcp_dbf_rec_lun - trace event for LUN state change 211 rec->u.trig.ready++;
602 * @id: identifier for trigger of state change
603 * @ref: additional reference (e.g. request)
604 * @sdev: SCSI device
605 */
606void zfcp_dbf_rec_lun(char *id, void *ref, struct scsi_device *sdev)
607{
608 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
609 struct zfcp_port *port = zfcp_sdev->port;
610 struct zfcp_dbf *dbf = port->adapter->dbf;
611 212
612 zfcp_dbf_rec_target(id, ref, dbf, &zfcp_sdev->status, 213 list_for_each(entry, &adapter->erp_running_head)
613 &zfcp_sdev->erp_counter, port->wwpn, port->d_id, 214 rec->u.trig.running++;
614 zfcp_scsi_dev_lun(sdev));
615}
616 215
617/** 216 rec->u.trig.want = want;
618 * zfcp_dbf_rec_trigger - trace event for triggered error recovery 217 rec->u.trig.need = need;
619 * @id2: identifier for error recovery trigger
620 * @ref: additional reference (e.g. request)
621 * @want: originally requested error recovery action
622 * @need: error recovery action actually initiated
623 * @action: address of error recovery action struct
624 * @adapter: adapter
625 * @port: port
626 * @sdev: SCSI device
627 */
628void zfcp_dbf_rec_trigger(char *id2, void *ref, u8 want, u8 need, void *action,
629 struct zfcp_adapter *adapter, struct zfcp_port *port,
630 struct scsi_device *sdev)
631{
632 struct zfcp_dbf *dbf = adapter->dbf;
633 struct zfcp_dbf_rec_record *r = &dbf->rec_buf;
634 unsigned long flags;
635 218
636 spin_lock_irqsave(&dbf->rec_lock, flags); 219 debug_event(dbf->rec, 1, rec, sizeof(*rec));
637 memset(r, 0, sizeof(*r));
638 r->id = ZFCP_REC_DBF_ID_TRIGGER;
639 memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE);
640 r->u.trigger.ref = (unsigned long)ref;
641 r->u.trigger.want = want;
642 r->u.trigger.need = need;
643 r->u.trigger.action = (unsigned long)action;
644 r->u.trigger.as = atomic_read(&adapter->status);
645 if (port) {
646 r->u.trigger.ps = atomic_read(&port->status);
647 r->u.trigger.wwpn = port->wwpn;
648 }
649 if (sdev)
650 r->u.trigger.ls = atomic_read(&sdev_to_zfcp(sdev)->status);
651 r->u.trigger.fcp_lun = sdev ? zfcp_scsi_dev_lun(sdev) :
652 ZFCP_DBF_INVALID_LUN;
653 debug_event(dbf->rec, action ? 1 : 4, r, sizeof(*r));
654 spin_unlock_irqrestore(&dbf->rec_lock, flags); 220 spin_unlock_irqrestore(&dbf->rec_lock, flags);
655} 221}
656 222
223
657/** 224/**
658 * zfcp_dbf_rec_action - trace event showing progress of recovery action 225 * zfcp_dbf_rec_run - trace event related to running recovery
659 * @id2: identifier 226 * @tag: identifier for event
660 * @erp_action: error recovery action struct pointer 227 * @erp: erp_action running
661 */ 228 */
662void zfcp_dbf_rec_action(char *id2, struct zfcp_erp_action *erp_action) 229void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
663{ 230{
664 struct zfcp_dbf *dbf = erp_action->adapter->dbf; 231 struct zfcp_dbf *dbf = erp->adapter->dbf;
665 struct zfcp_dbf_rec_record *r = &dbf->rec_buf; 232 struct zfcp_dbf_rec *rec = &dbf->rec_buf;
666 unsigned long flags; 233 unsigned long flags;
667 234
668 spin_lock_irqsave(&dbf->rec_lock, flags); 235 spin_lock_irqsave(&dbf->rec_lock, flags);
669 memset(r, 0, sizeof(*r)); 236 memset(rec, 0, sizeof(*rec));
670 r->id = ZFCP_REC_DBF_ID_ACTION;
671 memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE);
672 r->u.action.action = (unsigned long)erp_action;
673 r->u.action.status = erp_action->status;
674 r->u.action.step = erp_action->step;
675 r->u.action.fsf_req = erp_action->fsf_req_id;
676 debug_event(dbf->rec, 5, r, sizeof(*r));
677 spin_unlock_irqrestore(&dbf->rec_lock, flags);
678}
679 237
680/** 238 rec->id = ZFCP_DBF_REC_RUN;
681 * zfcp_dbf_san_ct_request - trace event for issued CT request 239 memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
682 * @fsf_req: request containing issued CT data 240 zfcp_dbf_set_common(rec, erp->adapter, erp->port, erp->sdev);
683 * @d_id: destination id where ct request is sent to
684 */
685void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *fsf_req, u32 d_id)
686{
687 struct zfcp_fsf_ct_els *ct = (struct zfcp_fsf_ct_els *)fsf_req->data;
688 struct zfcp_adapter *adapter = fsf_req->adapter;
689 struct zfcp_dbf *dbf = adapter->dbf;
690 struct fc_ct_hdr *hdr = sg_virt(ct->req);
691 struct zfcp_dbf_san_record *r = &dbf->san_buf;
692 struct zfcp_dbf_san_record_ct_request *oct = &r->u.ct_req;
693 int level = 3;
694 unsigned long flags;
695 241
696 spin_lock_irqsave(&dbf->san_lock, flags); 242 rec->u.run.fsf_req_id = erp->fsf_req_id;
697 memset(r, 0, sizeof(*r)); 243 rec->u.run.rec_status = erp->status;
698 strncpy(r->tag, "octc", ZFCP_DBF_TAG_SIZE); 244 rec->u.run.rec_step = erp->step;
699 r->fsf_reqid = fsf_req->req_id; 245 rec->u.run.rec_action = erp->action;
700 r->fsf_seqno = fsf_req->seq_no;
701 oct->d_id = d_id;
702 oct->cmd_req_code = hdr->ct_cmd;
703 oct->revision = hdr->ct_rev;
704 oct->gs_type = hdr->ct_fs_type;
705 oct->gs_subtype = hdr->ct_fs_subtype;
706 oct->options = hdr->ct_options;
707 oct->max_res_size = hdr->ct_mr_size;
708 oct->len = min((int)ct->req->length - (int)sizeof(struct fc_ct_hdr),
709 ZFCP_DBF_SAN_MAX_PAYLOAD);
710 debug_event(dbf->san, level, r, sizeof(*r));
711 zfcp_dbf_hexdump(dbf->san, r, sizeof(*r), level,
712 (void *)hdr + sizeof(struct fc_ct_hdr), oct->len);
713 spin_unlock_irqrestore(&dbf->san_lock, flags);
714}
715 246
716/** 247 if (erp->sdev)
717 * zfcp_dbf_san_ct_response - trace event for completion of CT request 248 rec->u.run.rec_count =
718 * @fsf_req: request containing CT response 249 atomic_read(&sdev_to_zfcp(erp->sdev)->erp_counter);
719 */ 250 else if (erp->port)
720void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *fsf_req) 251 rec->u.run.rec_count = atomic_read(&erp->port->erp_counter);
721{ 252 else
722 struct zfcp_fsf_ct_els *ct = (struct zfcp_fsf_ct_els *)fsf_req->data; 253 rec->u.run.rec_count = atomic_read(&erp->adapter->erp_counter);
723 struct zfcp_adapter *adapter = fsf_req->adapter;
724 struct fc_ct_hdr *hdr = sg_virt(ct->resp);
725 struct zfcp_dbf *dbf = adapter->dbf;
726 struct zfcp_dbf_san_record *r = &dbf->san_buf;
727 struct zfcp_dbf_san_record_ct_response *rct = &r->u.ct_resp;
728 int level = 3;
729 unsigned long flags;
730 254
731 spin_lock_irqsave(&dbf->san_lock, flags); 255 debug_event(dbf->rec, 1, rec, sizeof(*rec));
732 memset(r, 0, sizeof(*r)); 256 spin_unlock_irqrestore(&dbf->rec_lock, flags);
733 strncpy(r->tag, "rctc", ZFCP_DBF_TAG_SIZE);
734 r->fsf_reqid = fsf_req->req_id;
735 r->fsf_seqno = fsf_req->seq_no;
736 rct->cmd_rsp_code = hdr->ct_cmd;
737 rct->revision = hdr->ct_rev;
738 rct->reason_code = hdr->ct_reason;
739 rct->expl = hdr->ct_explan;
740 rct->vendor_unique = hdr->ct_vendor;
741 rct->max_res_size = hdr->ct_mr_size;
742 rct->len = min((int)ct->resp->length - (int)sizeof(struct fc_ct_hdr),
743 ZFCP_DBF_SAN_MAX_PAYLOAD);
744 debug_event(dbf->san, level, r, sizeof(*r));
745 zfcp_dbf_hexdump(dbf->san, r, sizeof(*r), level,
746 (void *)hdr + sizeof(struct fc_ct_hdr), rct->len);
747 spin_unlock_irqrestore(&dbf->san_lock, flags);
748} 257}
749 258
750static void zfcp_dbf_san_els(const char *tag, int level, 259static inline
751 struct zfcp_fsf_req *fsf_req, u32 d_id, 260void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, void *data, u8 id, u16 len,
752 void *buffer, int buflen) 261 u64 req_id, u32 d_id)
753{ 262{
754 struct zfcp_adapter *adapter = fsf_req->adapter; 263 struct zfcp_dbf_san *rec = &dbf->san_buf;
755 struct zfcp_dbf *dbf = adapter->dbf; 264 u16 rec_len;
756 struct zfcp_dbf_san_record *rec = &dbf->san_buf;
757 unsigned long flags; 265 unsigned long flags;
758 266
759 spin_lock_irqsave(&dbf->san_lock, flags); 267 spin_lock_irqsave(&dbf->san_lock, flags);
760 memset(rec, 0, sizeof(*rec)); 268 memset(rec, 0, sizeof(*rec));
761 strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE); 269
762 rec->fsf_reqid = fsf_req->req_id; 270 rec->id = id;
763 rec->fsf_seqno = fsf_req->seq_no; 271 rec->fsf_req_id = req_id;
764 rec->u.els.d_id = d_id; 272 rec->d_id = d_id;
765 debug_event(dbf->san, level, rec, sizeof(*rec)); 273 rec_len = min(len, (u16)ZFCP_DBF_SAN_MAX_PAYLOAD);
766 zfcp_dbf_hexdump(dbf->san, rec, sizeof(*rec), level, 274 memcpy(rec->payload, data, rec_len);
767 buffer, min(buflen, ZFCP_DBF_SAN_MAX_PAYLOAD)); 275 memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
276
277 debug_event(dbf->san, 1, rec, sizeof(*rec));
768 spin_unlock_irqrestore(&dbf->san_lock, flags); 278 spin_unlock_irqrestore(&dbf->san_lock, flags);
769} 279}
770 280
771/** 281/**
772 * zfcp_dbf_san_els_request - trace event for issued ELS 282 * zfcp_dbf_san_req - trace event for issued SAN request
773 * @fsf_req: request containing issued ELS 283 * @tag: indentifier for event
284 * @fsf_req: request containing issued CT data
285 * d_id: destination ID
774 */ 286 */
775void zfcp_dbf_san_els_request(struct zfcp_fsf_req *fsf_req) 287void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id)
776{ 288{
777 struct zfcp_fsf_ct_els *els = (struct zfcp_fsf_ct_els *)fsf_req->data; 289 struct zfcp_dbf *dbf = fsf->adapter->dbf;
778 u32 d_id = ntoh24(fsf_req->qtcb->bottom.support.d_id); 290 struct zfcp_fsf_ct_els *ct_els = fsf->data;
291 u16 length;
779 292
780 zfcp_dbf_san_els("oels", 2, fsf_req, d_id, 293 length = (u16)(ct_els->req->length + FC_CT_HDR_LEN);
781 sg_virt(els->req), els->req->length); 294 zfcp_dbf_san(tag, dbf, sg_virt(ct_els->req), ZFCP_DBF_SAN_REQ, length,
295 fsf->req_id, d_id);
782} 296}
783 297
784/** 298/**
785 * zfcp_dbf_san_els_response - trace event for completed ELS 299 * zfcp_dbf_san_res - trace event for received SAN request
786 * @fsf_req: request containing ELS response 300 * @tag: indentifier for event
301 * @fsf_req: request containing issued CT data
787 */ 302 */
788void zfcp_dbf_san_els_response(struct zfcp_fsf_req *fsf_req) 303void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
789{ 304{
790 struct zfcp_fsf_ct_els *els = (struct zfcp_fsf_ct_els *)fsf_req->data; 305 struct zfcp_dbf *dbf = fsf->adapter->dbf;
791 u32 d_id = ntoh24(fsf_req->qtcb->bottom.support.d_id); 306 struct zfcp_fsf_ct_els *ct_els = fsf->data;
307 u16 length;
792 308
793 zfcp_dbf_san_els("rels", 2, fsf_req, d_id, 309 length = (u16)(ct_els->resp->length + FC_CT_HDR_LEN);
794 sg_virt(els->resp), els->resp->length); 310 zfcp_dbf_san(tag, dbf, sg_virt(ct_els->resp), ZFCP_DBF_SAN_RES, length,
311 fsf->req_id, 0);
795} 312}
796 313
797/** 314/**
798 * zfcp_dbf_san_incoming_els - trace event for incomig ELS 315 * zfcp_dbf_san_in_els - trace event for incoming ELS
799 * @fsf_req: request containing unsolicited status buffer with incoming ELS 316 * @tag: indentifier for event
317 * @fsf_req: request containing issued CT data
800 */ 318 */
801void zfcp_dbf_san_incoming_els(struct zfcp_fsf_req *fsf_req) 319void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
802{ 320{
803 struct fsf_status_read_buffer *buf = 321 struct zfcp_dbf *dbf = fsf->adapter->dbf;
804 (struct fsf_status_read_buffer *)fsf_req->data; 322 struct fsf_status_read_buffer *srb =
805 int length = (int)buf->length - 323 (struct fsf_status_read_buffer *) fsf->data;
806 (int)((void *)&buf->payload - (void *)buf); 324 u16 length;
807 325
808 zfcp_dbf_san_els("iels", 1, fsf_req, ntoh24(buf->d_id), 326 length = (u16)(srb->length -
809 (void *)buf->payload.data, length); 327 offsetof(struct fsf_status_read_buffer, payload));
810} 328 zfcp_dbf_san(tag, dbf, srb->payload.data, ZFCP_DBF_SAN_ELS, length,
811 329 fsf->req_id, ntoh24(srb->d_id));
812static int zfcp_dbf_san_view_format(debug_info_t *id, struct debug_view *view,
813 char *out_buf, const char *in_buf)
814{
815 struct zfcp_dbf_san_record *r = (struct zfcp_dbf_san_record *)in_buf;
816 char *p = out_buf;
817
818 if (strncmp(r->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0)
819 return 0;
820
821 zfcp_dbf_tag(&p, "tag", r->tag);
822 zfcp_dbf_out(&p, "fsf_reqid", "0x%0Lx", r->fsf_reqid);
823 zfcp_dbf_out(&p, "fsf_seqno", "0x%08x", r->fsf_seqno);
824
825 if (strncmp(r->tag, "octc", ZFCP_DBF_TAG_SIZE) == 0) {
826 struct zfcp_dbf_san_record_ct_request *ct = &r->u.ct_req;
827 zfcp_dbf_out(&p, "d_id", "0x%06x", ct->d_id);
828 zfcp_dbf_out(&p, "cmd_req_code", "0x%04x", ct->cmd_req_code);
829 zfcp_dbf_out(&p, "revision", "0x%02x", ct->revision);
830 zfcp_dbf_out(&p, "gs_type", "0x%02x", ct->gs_type);
831 zfcp_dbf_out(&p, "gs_subtype", "0x%02x", ct->gs_subtype);
832 zfcp_dbf_out(&p, "options", "0x%02x", ct->options);
833 zfcp_dbf_out(&p, "max_res_size", "0x%04x", ct->max_res_size);
834 } else if (strncmp(r->tag, "rctc", ZFCP_DBF_TAG_SIZE) == 0) {
835 struct zfcp_dbf_san_record_ct_response *ct = &r->u.ct_resp;
836 zfcp_dbf_out(&p, "cmd_rsp_code", "0x%04x", ct->cmd_rsp_code);
837 zfcp_dbf_out(&p, "revision", "0x%02x", ct->revision);
838 zfcp_dbf_out(&p, "reason_code", "0x%02x", ct->reason_code);
839 zfcp_dbf_out(&p, "reason_code_expl", "0x%02x", ct->expl);
840 zfcp_dbf_out(&p, "vendor_unique", "0x%02x", ct->vendor_unique);
841 zfcp_dbf_out(&p, "max_res_size", "0x%04x", ct->max_res_size);
842 } else if (strncmp(r->tag, "oels", ZFCP_DBF_TAG_SIZE) == 0 ||
843 strncmp(r->tag, "rels", ZFCP_DBF_TAG_SIZE) == 0 ||
844 strncmp(r->tag, "iels", ZFCP_DBF_TAG_SIZE) == 0) {
845 struct zfcp_dbf_san_record_els *els = &r->u.els;
846 zfcp_dbf_out(&p, "d_id", "0x%06x", els->d_id);
847 }
848 return p - out_buf;
849} 330}
850 331
851static struct debug_view zfcp_dbf_san_view = { 332/**
852 .name = "structured", 333 * zfcp_dbf_scsi - trace event for scsi commands
853 .header_proc = zfcp_dbf_view_header, 334 * @tag: identifier for event
854 .format_proc = zfcp_dbf_san_view_format, 335 * @sc: pointer to struct scsi_cmnd
855}; 336 * @fsf: pointer to struct zfcp_fsf_req
856 337 */
857void _zfcp_dbf_scsi(const char *tag, const char *tag2, int level, 338void zfcp_dbf_scsi(char *tag, struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf)
858 struct zfcp_dbf *dbf, struct scsi_cmnd *scsi_cmnd,
859 struct zfcp_fsf_req *fsf_req, unsigned long old_req_id)
860{ 339{
861 struct zfcp_dbf_scsi_record *rec = &dbf->scsi_buf; 340 struct zfcp_adapter *adapter =
862 struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec; 341 (struct zfcp_adapter *) sc->device->host->hostdata[0];
863 unsigned long flags; 342 struct zfcp_dbf *dbf = adapter->dbf;
343 struct zfcp_dbf_scsi *rec = &dbf->scsi_buf;
864 struct fcp_resp_with_ext *fcp_rsp; 344 struct fcp_resp_with_ext *fcp_rsp;
865 struct fcp_resp_rsp_info *fcp_rsp_info = NULL; 345 struct fcp_resp_rsp_info *fcp_rsp_info;
866 char *fcp_sns_info = NULL; 346 unsigned long flags;
867 int offset = 0, buflen = 0;
868 347
869 spin_lock_irqsave(&dbf->scsi_lock, flags); 348 spin_lock_irqsave(&dbf->scsi_lock, flags);
870 do { 349 memset(rec, 0, sizeof(*rec));
871 memset(rec, 0, sizeof(*rec));
872 if (offset == 0) {
873 strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE);
874 strncpy(rec->tag2, tag2, ZFCP_DBF_TAG_SIZE);
875 if (scsi_cmnd != NULL) {
876 if (scsi_cmnd->device) {
877 rec->scsi_id = scsi_cmnd->device->id;
878 rec->scsi_lun = scsi_cmnd->device->lun;
879 }
880 rec->scsi_result = scsi_cmnd->result;
881 rec->scsi_cmnd = (unsigned long)scsi_cmnd;
882 memcpy(rec->scsi_opcode, scsi_cmnd->cmnd,
883 min((int)scsi_cmnd->cmd_len,
884 ZFCP_DBF_SCSI_OPCODE));
885 rec->scsi_retries = scsi_cmnd->retries;
886 rec->scsi_allowed = scsi_cmnd->allowed;
887 }
888 if (fsf_req != NULL) {
889 fcp_rsp = (struct fcp_resp_with_ext *)
890 &(fsf_req->qtcb->bottom.io.fcp_rsp);
891 fcp_rsp_info = (struct fcp_resp_rsp_info *)
892 &fcp_rsp[1];
893 fcp_sns_info = (char *) &fcp_rsp[1];
894 if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL)
895 fcp_sns_info += fcp_rsp->ext.fr_sns_len;
896
897 rec->rsp_validity = fcp_rsp->resp.fr_flags;
898 rec->rsp_scsi_status = fcp_rsp->resp.fr_status;
899 rec->rsp_resid = fcp_rsp->ext.fr_resid;
900 if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL)
901 rec->rsp_code = fcp_rsp_info->rsp_code;
902 if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) {
903 buflen = min(fcp_rsp->ext.fr_sns_len,
904 (u32)ZFCP_DBF_SCSI_MAX_FCP_SNS_INFO);
905 rec->sns_info_len = buflen;
906 memcpy(rec->sns_info, fcp_sns_info,
907 min(buflen,
908 ZFCP_DBF_SCSI_FCP_SNS_INFO));
909 offset += min(buflen,
910 ZFCP_DBF_SCSI_FCP_SNS_INFO);
911 }
912
913 rec->fsf_reqid = fsf_req->req_id;
914 rec->fsf_seqno = fsf_req->seq_no;
915 rec->fsf_issued = fsf_req->issued;
916 }
917 rec->old_fsf_reqid = old_req_id;
918 } else {
919 strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE);
920 dump->total_size = buflen;
921 dump->offset = offset;
922 dump->size = min(buflen - offset,
923 (int)sizeof(struct
924 zfcp_dbf_scsi_record) -
925 (int)sizeof(struct zfcp_dbf_dump));
926 memcpy(dump->data, fcp_sns_info + offset, dump->size);
927 offset += dump->size;
928 }
929 debug_event(dbf->scsi, level, rec, sizeof(*rec));
930 } while (offset < buflen);
931 spin_unlock_irqrestore(&dbf->scsi_lock, flags);
932}
933 350
934static int zfcp_dbf_scsi_view_format(debug_info_t *id, struct debug_view *view, 351 memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
935 char *out_buf, const char *in_buf) 352 rec->id = ZFCP_DBF_SCSI_CMND;
936{ 353 rec->scsi_result = sc->result;
937 struct zfcp_dbf_scsi_record *r = (struct zfcp_dbf_scsi_record *)in_buf; 354 rec->scsi_retries = sc->retries;
938 struct timespec t; 355 rec->scsi_allowed = sc->allowed;
939 char *p = out_buf; 356 rec->scsi_id = sc->device->id;
940 357 rec->scsi_lun = sc->device->lun;
941 if (strncmp(r->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0) 358 rec->host_scribble = (unsigned long)sc->host_scribble;
942 return 0; 359
943 360 memcpy(rec->scsi_opcode, sc->cmnd,
944 zfcp_dbf_tag(&p, "tag", r->tag); 361 min((int)sc->cmd_len, ZFCP_DBF_SCSI_OPCODE));
945 zfcp_dbf_tag(&p, "tag2", r->tag2); 362
946 zfcp_dbf_out(&p, "scsi_id", "0x%08x", r->scsi_id); 363 if (fsf) {
947 zfcp_dbf_out(&p, "scsi_lun", "0x%08x", r->scsi_lun); 364 rec->fsf_req_id = fsf->req_id;
948 zfcp_dbf_out(&p, "scsi_result", "0x%08x", r->scsi_result); 365 fcp_rsp = (struct fcp_resp_with_ext *)
949 zfcp_dbf_out(&p, "scsi_cmnd", "0x%0Lx", r->scsi_cmnd); 366 &(fsf->qtcb->bottom.io.fcp_rsp);
950 zfcp_dbf_outd(&p, "scsi_opcode", r->scsi_opcode, ZFCP_DBF_SCSI_OPCODE, 367 memcpy(&rec->fcp_rsp, fcp_rsp, FCP_RESP_WITH_EXT);
951 0, ZFCP_DBF_SCSI_OPCODE); 368 if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) {
952 zfcp_dbf_out(&p, "scsi_retries", "0x%02x", r->scsi_retries); 369 fcp_rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
953 zfcp_dbf_out(&p, "scsi_allowed", "0x%02x", r->scsi_allowed); 370 rec->fcp_rsp_info = fcp_rsp_info->rsp_code;
954 if (strncmp(r->tag, "abrt", ZFCP_DBF_TAG_SIZE) == 0) 371 }
955 zfcp_dbf_out(&p, "old_fsf_reqid", "0x%0Lx", r->old_fsf_reqid); 372 if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) {
956 zfcp_dbf_out(&p, "fsf_reqid", "0x%0Lx", r->fsf_reqid); 373 rec->pl_len = min((u16)SCSI_SENSE_BUFFERSIZE,
957 zfcp_dbf_out(&p, "fsf_seqno", "0x%08x", r->fsf_seqno); 374 (u16)ZFCP_DBF_PAY_MAX_REC);
958 stck_to_timespec(r->fsf_issued, &t); 375 zfcp_dbf_pl_write(dbf, sc->sense_buffer, rec->pl_len,
959 zfcp_dbf_out(&p, "fsf_issued", "%011lu:%06lu", t.tv_sec, t.tv_nsec); 376 "fcp_sns", fsf->req_id);
960 377 }
961 if (strncmp(r->tag, "rslt", ZFCP_DBF_TAG_SIZE) == 0) {
962 zfcp_dbf_out(&p, "fcp_rsp_validity", "0x%02x", r->rsp_validity);
963 zfcp_dbf_out(&p, "fcp_rsp_scsi_status", "0x%02x",
964 r->rsp_scsi_status);
965 zfcp_dbf_out(&p, "fcp_rsp_resid", "0x%08x", r->rsp_resid);
966 zfcp_dbf_out(&p, "fcp_rsp_code", "0x%08x", r->rsp_code);
967 zfcp_dbf_out(&p, "fcp_sns_info_len", "0x%08x", r->sns_info_len);
968 zfcp_dbf_outd(&p, "fcp_sns_info", r->sns_info,
969 min((int)r->sns_info_len,
970 ZFCP_DBF_SCSI_FCP_SNS_INFO), 0,
971 r->sns_info_len);
972 } 378 }
973 p += sprintf(p, "\n");
974 return p - out_buf;
975}
976 379
977static struct debug_view zfcp_dbf_scsi_view = { 380 debug_event(dbf->scsi, 1, rec, sizeof(*rec));
978 .name = "structured", 381 spin_unlock_irqrestore(&dbf->scsi_lock, flags);
979 .header_proc = zfcp_dbf_view_header, 382}
980 .format_proc = zfcp_dbf_scsi_view_format,
981};
982 383
983static debug_info_t *zfcp_dbf_reg(const char *name, int level, 384static debug_info_t *zfcp_dbf_reg(const char *name, int size, int rec_size)
984 struct debug_view *view, int size)
985{ 385{
986 struct debug_info *d; 386 struct debug_info *d;
987 387
988 d = debug_register(name, dbfsize, level, size); 388 d = debug_register(name, size, 1, rec_size);
989 if (!d) 389 if (!d)
990 return NULL; 390 return NULL;
991 391
992 debug_register_view(d, &debug_hex_ascii_view); 392 debug_register_view(d, &debug_hex_ascii_view);
993 debug_register_view(d, view); 393 debug_set_level(d, 3);
994 debug_set_level(d, level);
995 394
996 return d; 395 return d;
997} 396}
998 397
398static void zfcp_dbf_unregister(struct zfcp_dbf *dbf)
399{
400 if (!dbf)
401 return;
402
403 debug_unregister(dbf->scsi);
404 debug_unregister(dbf->san);
405 debug_unregister(dbf->hba);
406 debug_unregister(dbf->pay);
407 debug_unregister(dbf->rec);
408 kfree(dbf);
409}
410
999/** 411/**
1000 * zfcp_adapter_debug_register - registers debug feature for an adapter 412 * zfcp_adapter_debug_register - registers debug feature for an adapter
1001 * @adapter: pointer to adapter for which debug features should be registered 413 * @adapter: pointer to adapter for which debug features should be registered
@@ -1003,69 +415,66 @@ static debug_info_t *zfcp_dbf_reg(const char *name, int level,
1003 */ 415 */
1004int zfcp_dbf_adapter_register(struct zfcp_adapter *adapter) 416int zfcp_dbf_adapter_register(struct zfcp_adapter *adapter)
1005{ 417{
1006 char dbf_name[DEBUG_MAX_NAME_LEN]; 418 char name[DEBUG_MAX_NAME_LEN];
1007 struct zfcp_dbf *dbf; 419 struct zfcp_dbf *dbf;
1008 420
1009 dbf = kzalloc(sizeof(struct zfcp_dbf), GFP_KERNEL); 421 dbf = kzalloc(sizeof(struct zfcp_dbf), GFP_KERNEL);
1010 if (!dbf) 422 if (!dbf)
1011 return -ENOMEM; 423 return -ENOMEM;
1012 424
1013 dbf->adapter = adapter; 425 spin_lock_init(&dbf->pay_lock);
1014
1015 spin_lock_init(&dbf->hba_lock); 426 spin_lock_init(&dbf->hba_lock);
1016 spin_lock_init(&dbf->san_lock); 427 spin_lock_init(&dbf->san_lock);
1017 spin_lock_init(&dbf->scsi_lock); 428 spin_lock_init(&dbf->scsi_lock);
1018 spin_lock_init(&dbf->rec_lock); 429 spin_lock_init(&dbf->rec_lock);
1019 430
1020 /* debug feature area which records recovery activity */ 431 /* debug feature area which records recovery activity */
1021 sprintf(dbf_name, "zfcp_%s_rec", dev_name(&adapter->ccw_device->dev)); 432 sprintf(name, "zfcp_%s_rec", dev_name(&adapter->ccw_device->dev));
1022 dbf->rec = zfcp_dbf_reg(dbf_name, 3, &zfcp_dbf_rec_view, 433 dbf->rec = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_rec));
1023 sizeof(struct zfcp_dbf_rec_record));
1024 if (!dbf->rec) 434 if (!dbf->rec)
1025 goto err_out; 435 goto err_out;
1026 436
1027 /* debug feature area which records HBA (FSF and QDIO) conditions */ 437 /* debug feature area which records HBA (FSF and QDIO) conditions */
1028 sprintf(dbf_name, "zfcp_%s_hba", dev_name(&adapter->ccw_device->dev)); 438 sprintf(name, "zfcp_%s_hba", dev_name(&adapter->ccw_device->dev));
1029 dbf->hba = zfcp_dbf_reg(dbf_name, 3, &zfcp_dbf_hba_view, 439 dbf->hba = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_hba));
1030 sizeof(struct zfcp_dbf_hba_record));
1031 if (!dbf->hba) 440 if (!dbf->hba)
1032 goto err_out; 441 goto err_out;
1033 442
443 /* debug feature area which records payload info */
444 sprintf(name, "zfcp_%s_pay", dev_name(&adapter->ccw_device->dev));
445 dbf->pay = zfcp_dbf_reg(name, dbfsize * 2, sizeof(struct zfcp_dbf_pay));
446 if (!dbf->pay)
447 goto err_out;
448
1034 /* debug feature area which records SAN command failures and recovery */ 449 /* debug feature area which records SAN command failures and recovery */
1035 sprintf(dbf_name, "zfcp_%s_san", dev_name(&adapter->ccw_device->dev)); 450 sprintf(name, "zfcp_%s_san", dev_name(&adapter->ccw_device->dev));
1036 dbf->san = zfcp_dbf_reg(dbf_name, 6, &zfcp_dbf_san_view, 451 dbf->san = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_san));
1037 sizeof(struct zfcp_dbf_san_record));
1038 if (!dbf->san) 452 if (!dbf->san)
1039 goto err_out; 453 goto err_out;
1040 454
1041 /* debug feature area which records SCSI command failures and recovery */ 455 /* debug feature area which records SCSI command failures and recovery */
1042 sprintf(dbf_name, "zfcp_%s_scsi", dev_name(&adapter->ccw_device->dev)); 456 sprintf(name, "zfcp_%s_scsi", dev_name(&adapter->ccw_device->dev));
1043 dbf->scsi = zfcp_dbf_reg(dbf_name, 3, &zfcp_dbf_scsi_view, 457 dbf->scsi = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_scsi));
1044 sizeof(struct zfcp_dbf_scsi_record));
1045 if (!dbf->scsi) 458 if (!dbf->scsi)
1046 goto err_out; 459 goto err_out;
1047 460
1048 adapter->dbf = dbf; 461 adapter->dbf = dbf;
1049 return 0;
1050 462
463 return 0;
1051err_out: 464err_out:
1052 zfcp_dbf_adapter_unregister(dbf); 465 zfcp_dbf_unregister(dbf);
1053 return -ENOMEM; 466 return -ENOMEM;
1054} 467}
1055 468
1056/** 469/**
1057 * zfcp_adapter_debug_unregister - unregisters debug feature for an adapter 470 * zfcp_adapter_debug_unregister - unregisters debug feature for an adapter
1058 * @dbf: pointer to dbf for which debug features should be unregistered 471 * @adapter: pointer to adapter for which debug features should be unregistered
1059 */ 472 */
1060void zfcp_dbf_adapter_unregister(struct zfcp_dbf *dbf) 473void zfcp_dbf_adapter_unregister(struct zfcp_adapter *adapter)
1061{ 474{
1062 if (!dbf) 475 struct zfcp_dbf *dbf = adapter->dbf;
1063 return; 476
1064 debug_unregister(dbf->scsi); 477 adapter->dbf = NULL;
1065 debug_unregister(dbf->san); 478 zfcp_dbf_unregister(dbf);
1066 debug_unregister(dbf->hba);
1067 debug_unregister(dbf->rec);
1068 dbf->adapter->dbf = NULL;
1069 kfree(dbf);
1070} 479}
1071 480
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index 04081b1b62b4..714f087eb7a9 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -1,22 +1,8 @@
1/* 1/*
2 * This file is part of the zfcp device driver for 2 * zfcp device driver
3 * FCP adapters for IBM System z9 and zSeries. 3 * debug feature declarations
4 * 4 *
5 * Copyright IBM Corp. 2008, 2009 5 * Copyright IBM Corp. 2008, 2010
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2, or (at your option)
10 * any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */ 6 */
21 7
22#ifndef ZFCP_DBF_H 8#ifndef ZFCP_DBF_H
@@ -27,322 +13,350 @@
27#include "zfcp_fsf.h" 13#include "zfcp_fsf.h"
28#include "zfcp_def.h" 14#include "zfcp_def.h"
29 15
30#define ZFCP_DBF_TAG_SIZE 4 16#define ZFCP_DBF_TAG_LEN 7
31#define ZFCP_DBF_ID_SIZE 7
32 17
33#define ZFCP_DBF_INVALID_LUN 0xFFFFFFFFFFFFFFFFull 18#define ZFCP_DBF_INVALID_LUN 0xFFFFFFFFFFFFFFFFull
34 19
35struct zfcp_dbf_dump { 20/**
36 u8 tag[ZFCP_DBF_TAG_SIZE]; 21 * struct zfcp_dbf_rec_trigger - trace record for triggered recovery action
37 u32 total_size; /* size of total dump data */ 22 * @ready: number of ready recovery actions
38 u32 offset; /* how much data has being already dumped */ 23 * @running: number of running recovery actions
39 u32 size; /* how much data comes with this record */ 24 * @want: wanted recovery action
40 u8 data[]; /* dump data */ 25 * @need: needed recovery action
41} __attribute__ ((packed)); 26 */
42 27struct zfcp_dbf_rec_trigger {
43struct zfcp_dbf_rec_record_thread {
44 u32 total;
45 u32 ready; 28 u32 ready;
46 u32 running; 29 u32 running;
47};
48
49struct zfcp_dbf_rec_record_target {
50 u64 ref;
51 u32 status;
52 u32 d_id;
53 u64 wwpn;
54 u64 fcp_lun;
55 u32 erp_count;
56};
57
58struct zfcp_dbf_rec_record_trigger {
59 u8 want; 30 u8 want;
60 u8 need; 31 u8 need;
61 u32 as; 32} __packed;
62 u32 ps;
63 u32 ls;
64 u64 ref;
65 u64 action;
66 u64 wwpn;
67 u64 fcp_lun;
68};
69 33
70struct zfcp_dbf_rec_record_action { 34/**
71 u32 status; 35 * struct zfcp_dbf_rec_running - trace record for running recovery
72 u32 step; 36 * @fsf_req_id: request id for fsf requests
73 u64 action; 37 * @rec_status: status of the fsf request
74 u64 fsf_req; 38 * @rec_step: current step of the recovery action
39 * rec_count: recovery counter
40 */
41struct zfcp_dbf_rec_running {
42 u64 fsf_req_id;
43 u32 rec_status;
44 u16 rec_step;
45 u8 rec_action;
46 u8 rec_count;
47} __packed;
48
49/**
50 * enum zfcp_dbf_rec_id - recovery trace record id
51 * @ZFCP_DBF_REC_TRIG: triggered recovery identifier
52 * @ZFCP_DBF_REC_RUN: running recovery identifier
53 */
54enum zfcp_dbf_rec_id {
55 ZFCP_DBF_REC_TRIG = 1,
56 ZFCP_DBF_REC_RUN = 2,
75}; 57};
76 58
77struct zfcp_dbf_rec_record { 59/**
60 * struct zfcp_dbf_rec - trace record for error recovery actions
61 * @id: unique number of recovery record type
62 * @tag: identifier string specifying the location of initiation
63 * @lun: logical unit number
64 * @wwpn: word wide port number
65 * @d_id: destination ID
66 * @adapter_status: current status of the adapter
67 * @port_status: current status of the port
68 * @lun_status: current status of the lun
69 * @u.trig: structure zfcp_dbf_rec_trigger
70 * @u.run: structure zfcp_dbf_rec_running
71 */
72struct zfcp_dbf_rec {
78 u8 id; 73 u8 id;
79 char id2[7]; 74 char tag[ZFCP_DBF_TAG_LEN];
75 u64 lun;
76 u64 wwpn;
77 u32 d_id;
78 u32 adapter_status;
79 u32 port_status;
80 u32 lun_status;
80 union { 81 union {
81 struct zfcp_dbf_rec_record_action action; 82 struct zfcp_dbf_rec_trigger trig;
82 struct zfcp_dbf_rec_record_thread thread; 83 struct zfcp_dbf_rec_running run;
83 struct zfcp_dbf_rec_record_target target;
84 struct zfcp_dbf_rec_record_trigger trigger;
85 } u; 84 } u;
86}; 85} __packed;
87 86
88enum { 87/**
89 ZFCP_REC_DBF_ID_ACTION, 88 * enum zfcp_dbf_san_id - SAN trace record identifier
90 ZFCP_REC_DBF_ID_THREAD, 89 * @ZFCP_DBF_SAN_REQ: request trace record id
91 ZFCP_REC_DBF_ID_TARGET, 90 * @ZFCP_DBF_SAN_RES: response trace record id
92 ZFCP_REC_DBF_ID_TRIGGER, 91 * @ZFCP_DBF_SAN_ELS: extended link service record id
92 */
93enum zfcp_dbf_san_id {
94 ZFCP_DBF_SAN_REQ = 1,
95 ZFCP_DBF_SAN_RES = 2,
96 ZFCP_DBF_SAN_ELS = 3,
93}; 97};
94 98
95struct zfcp_dbf_hba_record_response { 99/** struct zfcp_dbf_san - trace record for SAN requests and responses
96 u32 fsf_command; 100 * @id: unique number of recovery record type
97 u64 fsf_reqid; 101 * @tag: identifier string specifying the location of initiation
98 u32 fsf_seqno; 102 * @fsf_req_id: request id for fsf requests
99 u64 fsf_issued; 103 * @payload: unformatted information related to request/response
100 u32 fsf_prot_status; 104 * @d_id: destination id
105 */
106struct zfcp_dbf_san {
107 u8 id;
108 char tag[ZFCP_DBF_TAG_LEN];
109 u64 fsf_req_id;
110 u32 d_id;
111#define ZFCP_DBF_SAN_MAX_PAYLOAD (FC_CT_HDR_LEN + 32)
112 char payload[ZFCP_DBF_SAN_MAX_PAYLOAD];
113} __packed;
114
115/**
116 * struct zfcp_dbf_hba_res - trace record for hba responses
117 * @req_issued: timestamp when request was issued
118 * @prot_status: protocol status
119 * @prot_status_qual: protocol status qualifier
120 * @fsf_status: fsf status
121 * @fsf_status_qual: fsf status qualifier
122 */
123struct zfcp_dbf_hba_res {
124 u64 req_issued;
125 u32 prot_status;
126 u8 prot_status_qual[FSF_PROT_STATUS_QUAL_SIZE];
101 u32 fsf_status; 127 u32 fsf_status;
102 u8 fsf_prot_status_qual[FSF_PROT_STATUS_QUAL_SIZE]; 128 u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
103 u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE]; 129} __packed;
104 u32 fsf_req_status;
105 u8 sbal_first;
106 u8 sbal_last;
107 u8 sbal_response;
108 u8 pool;
109 u64 erp_action;
110 union {
111 struct {
112 u64 cmnd;
113 u32 data_dir;
114 } fcp;
115 struct {
116 u64 wwpn;
117 u32 d_id;
118 u32 port_handle;
119 } port;
120 struct {
121 u64 wwpn;
122 u64 fcp_lun;
123 u32 port_handle;
124 u32 lun_handle;
125 } unit;
126 struct {
127 u32 d_id;
128 } els;
129 } u;
130} __attribute__ ((packed));
131 130
132struct zfcp_dbf_hba_record_status { 131/**
133 u8 failed; 132 * struct zfcp_dbf_hba_uss - trace record for unsolicited status
133 * @status_type: type of unsolicited status
134 * @status_subtype: subtype of unsolicited status
135 * @d_id: destination ID
136 * @lun: logical unit number
137 * @queue_designator: queue designator
138 */
139struct zfcp_dbf_hba_uss {
134 u32 status_type; 140 u32 status_type;
135 u32 status_subtype; 141 u32 status_subtype;
136 struct fsf_queue_designator
137 queue_designator;
138 u32 payload_size;
139#define ZFCP_DBF_UNSOL_PAYLOAD 80
140#define ZFCP_DBF_UNSOL_PAYLOAD_SENSE_DATA_AVAIL 32
141#define ZFCP_DBF_UNSOL_PAYLOAD_BIT_ERROR_THRESHOLD 56
142#define ZFCP_DBF_UNSOL_PAYLOAD_FEATURE_UPDATE_ALERT 2 * sizeof(u32)
143 u8 payload[ZFCP_DBF_UNSOL_PAYLOAD];
144} __attribute__ ((packed));
145
146struct zfcp_dbf_hba_record_qdio {
147 u32 qdio_error;
148 u8 sbal_index;
149 u8 sbal_count;
150} __attribute__ ((packed));
151
152struct zfcp_dbf_hba_record {
153 u8 tag[ZFCP_DBF_TAG_SIZE];
154 u8 tag2[ZFCP_DBF_TAG_SIZE];
155 union {
156 struct zfcp_dbf_hba_record_response response;
157 struct zfcp_dbf_hba_record_status status;
158 struct zfcp_dbf_hba_record_qdio qdio;
159 struct fsf_bit_error_payload berr;
160 } u;
161} __attribute__ ((packed));
162
163struct zfcp_dbf_san_record_ct_request {
164 u16 cmd_req_code;
165 u8 revision;
166 u8 gs_type;
167 u8 gs_subtype;
168 u8 options;
169 u16 max_res_size;
170 u32 len;
171 u32 d_id;
172} __attribute__ ((packed));
173
174struct zfcp_dbf_san_record_ct_response {
175 u16 cmd_rsp_code;
176 u8 revision;
177 u8 reason_code;
178 u8 expl;
179 u8 vendor_unique;
180 u16 max_res_size;
181 u32 len;
182} __attribute__ ((packed));
183
184struct zfcp_dbf_san_record_els {
185 u32 d_id; 142 u32 d_id;
186} __attribute__ ((packed)); 143 u64 lun;
144 u64 queue_designator;
145} __packed;
187 146
188struct zfcp_dbf_san_record { 147/**
189 u8 tag[ZFCP_DBF_TAG_SIZE]; 148 * enum zfcp_dbf_hba_id - HBA trace record identifier
190 u64 fsf_reqid; 149 * @ZFCP_DBF_HBA_RES: response trace record
191 u32 fsf_seqno; 150 * @ZFCP_DBF_HBA_USS: unsolicited status trace record
151 * @ZFCP_DBF_HBA_BIT: bit error trace record
152 */
153enum zfcp_dbf_hba_id {
154 ZFCP_DBF_HBA_RES = 1,
155 ZFCP_DBF_HBA_USS = 2,
156 ZFCP_DBF_HBA_BIT = 3,
157};
158
159/**
160 * struct zfcp_dbf_hba - common trace record for HBA records
161 * @id: unique number of recovery record type
162 * @tag: identifier string specifying the location of initiation
163 * @fsf_req_id: request id for fsf requests
164 * @fsf_req_status: status of fsf request
165 * @fsf_cmd: fsf command
166 * @fsf_seq_no: fsf sequence number
167 * @pl_len: length of payload stored as zfcp_dbf_pay
168 * @u: record type specific data
169 */
170struct zfcp_dbf_hba {
171 u8 id;
172 char tag[ZFCP_DBF_TAG_LEN];
173 u64 fsf_req_id;
174 u32 fsf_req_status;
175 u32 fsf_cmd;
176 u32 fsf_seq_no;
177 u16 pl_len;
192 union { 178 union {
193 struct zfcp_dbf_san_record_ct_request ct_req; 179 struct zfcp_dbf_hba_res res;
194 struct zfcp_dbf_san_record_ct_response ct_resp; 180 struct zfcp_dbf_hba_uss uss;
195 struct zfcp_dbf_san_record_els els; 181 struct fsf_bit_error_payload be;
196 } u; 182 } u;
197} __attribute__ ((packed)); 183} __packed;
198 184
199#define ZFCP_DBF_SAN_MAX_PAYLOAD 1024 185/**
186 * enum zfcp_dbf_scsi_id - scsi trace record identifier
187 * @ZFCP_DBF_SCSI_CMND: scsi command trace record
188 */
189enum zfcp_dbf_scsi_id {
190 ZFCP_DBF_SCSI_CMND = 1,
191};
200 192
201struct zfcp_dbf_scsi_record { 193/**
202 u8 tag[ZFCP_DBF_TAG_SIZE]; 194 * struct zfcp_dbf_scsi - common trace record for SCSI records
203 u8 tag2[ZFCP_DBF_TAG_SIZE]; 195 * @id: unique number of recovery record type
196 * @tag: identifier string specifying the location of initiation
197 * @scsi_id: scsi device id
198 * @scsi_lun: scsi device logical unit number
199 * @scsi_result: scsi result
200 * @scsi_retries: current retry number of scsi request
201 * @scsi_allowed: allowed retries
202 * @fcp_rsp_info: FCP response info
203 * @scsi_opcode: scsi opcode
204 * @fsf_req_id: request id of fsf request
205 * @host_scribble: LLD specific data attached to SCSI request
206 * @pl_len: length of paload stored as zfcp_dbf_pay
207 * @fsf_rsp: response for fsf request
208 */
209struct zfcp_dbf_scsi {
210 u8 id;
211 char tag[ZFCP_DBF_TAG_LEN];
204 u32 scsi_id; 212 u32 scsi_id;
205 u32 scsi_lun; 213 u32 scsi_lun;
206 u32 scsi_result; 214 u32 scsi_result;
207 u64 scsi_cmnd;
208#define ZFCP_DBF_SCSI_OPCODE 16
209 u8 scsi_opcode[ZFCP_DBF_SCSI_OPCODE];
210 u8 scsi_retries; 215 u8 scsi_retries;
211 u8 scsi_allowed; 216 u8 scsi_allowed;
212 u64 fsf_reqid; 217 u8 fcp_rsp_info;
213 u32 fsf_seqno; 218#define ZFCP_DBF_SCSI_OPCODE 16
214 u64 fsf_issued; 219 u8 scsi_opcode[ZFCP_DBF_SCSI_OPCODE];
215 u64 old_fsf_reqid; 220 u64 fsf_req_id;
216 u8 rsp_validity; 221 u64 host_scribble;
217 u8 rsp_scsi_status; 222 u16 pl_len;
218 u32 rsp_resid; 223 struct fcp_resp_with_ext fcp_rsp;
219 u8 rsp_code; 224} __packed;
220#define ZFCP_DBF_SCSI_FCP_SNS_INFO 16
221#define ZFCP_DBF_SCSI_MAX_FCP_SNS_INFO 256
222 u32 sns_info_len;
223 u8 sns_info[ZFCP_DBF_SCSI_FCP_SNS_INFO];
224} __attribute__ ((packed));
225 225
226/**
227 * struct zfcp_dbf_pay - trace record for unformatted payload information
228 * @area: area this record is originated from
229 * @counter: ascending record number
230 * @fsf_req_id: request id of fsf request
231 * @data: unformatted data
232 */
233struct zfcp_dbf_pay {
234 u8 counter;
235 char area[ZFCP_DBF_TAG_LEN];
236 u64 fsf_req_id;
237#define ZFCP_DBF_PAY_MAX_REC 0x100
238 char data[ZFCP_DBF_PAY_MAX_REC];
239} __packed;
240
241/**
242 * struct zfcp_dbf - main dbf trace structure
243 * @pay: reference to payload trace area
244 * @rec: reference to recovery trace area
245 * @hba: reference to hba trace area
246 * @san: reference to san trace area
247 * @scsi: reference to scsi trace area
248 * @pay_lock: lock protecting payload trace buffer
249 * @rec_lock: lock protecting recovery trace buffer
250 * @hba_lock: lock protecting hba trace buffer
251 * @san_lock: lock protecting san trace buffer
252 * @scsi_lock: lock protecting scsi trace buffer
253 * @pay_buf: pre-allocated buffer for payload
254 * @rec_buf: pre-allocated buffer for recovery
255 * @hba_buf: pre-allocated buffer for hba
256 * @san_buf: pre-allocated buffer for san
257 * @scsi_buf: pre-allocated buffer for scsi
258 */
226struct zfcp_dbf { 259struct zfcp_dbf {
260 debug_info_t *pay;
227 debug_info_t *rec; 261 debug_info_t *rec;
228 debug_info_t *hba; 262 debug_info_t *hba;
229 debug_info_t *san; 263 debug_info_t *san;
230 debug_info_t *scsi; 264 debug_info_t *scsi;
265 spinlock_t pay_lock;
231 spinlock_t rec_lock; 266 spinlock_t rec_lock;
232 spinlock_t hba_lock; 267 spinlock_t hba_lock;
233 spinlock_t san_lock; 268 spinlock_t san_lock;
234 spinlock_t scsi_lock; 269 spinlock_t scsi_lock;
235 struct zfcp_dbf_rec_record rec_buf; 270 struct zfcp_dbf_pay pay_buf;
236 struct zfcp_dbf_hba_record hba_buf; 271 struct zfcp_dbf_rec rec_buf;
237 struct zfcp_dbf_san_record san_buf; 272 struct zfcp_dbf_hba hba_buf;
238 struct zfcp_dbf_scsi_record scsi_buf; 273 struct zfcp_dbf_san san_buf;
239 struct zfcp_adapter *adapter; 274 struct zfcp_dbf_scsi scsi_buf;
240}; 275};
241 276
242static inline 277static inline
243void zfcp_dbf_hba_fsf_resp(const char *tag2, int level, 278void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req)
244 struct zfcp_fsf_req *req, struct zfcp_dbf *dbf)
245{ 279{
246 if (level <= dbf->hba->level) 280 if (level <= req->adapter->dbf->hba->level)
247 _zfcp_dbf_hba_fsf_response(tag2, level, req, dbf); 281 zfcp_dbf_hba_fsf_res(tag, req);
248} 282}
249 283
250/** 284/**
251 * zfcp_dbf_hba_fsf_response - trace event for request completion 285 * zfcp_dbf_hba_fsf_response - trace event for request completion
252 * @fsf_req: request that has been completed 286 * @req: request that has been completed
253 */ 287 */
254static inline void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req) 288static inline
289void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req)
255{ 290{
256 struct zfcp_dbf *dbf = req->adapter->dbf;
257 struct fsf_qtcb *qtcb = req->qtcb; 291 struct fsf_qtcb *qtcb = req->qtcb;
258 292
259 if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) && 293 if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
260 (qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) { 294 (qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) {
261 zfcp_dbf_hba_fsf_resp("perr", 1, req, dbf); 295 zfcp_dbf_hba_fsf_resp("fs_perr", 1, req);
262 296
263 } else if (qtcb->header.fsf_status != FSF_GOOD) { 297 } else if (qtcb->header.fsf_status != FSF_GOOD) {
264 zfcp_dbf_hba_fsf_resp("ferr", 1, req, dbf); 298 zfcp_dbf_hba_fsf_resp("fs_ferr", 1, req);
265 299
266 } else if ((req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) || 300 } else if ((req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) ||
267 (req->fsf_command == FSF_QTCB_OPEN_LUN)) { 301 (req->fsf_command == FSF_QTCB_OPEN_LUN)) {
268 zfcp_dbf_hba_fsf_resp("open", 4, req, dbf); 302 zfcp_dbf_hba_fsf_resp("fs_open", 4, req);
269 303
270 } else if (qtcb->header.log_length) { 304 } else if (qtcb->header.log_length) {
271 zfcp_dbf_hba_fsf_resp("qtcb", 5, req, dbf); 305 zfcp_dbf_hba_fsf_resp("fs_qtcb", 5, req);
272 306
273 } else { 307 } else {
274 zfcp_dbf_hba_fsf_resp("norm", 6, req, dbf); 308 zfcp_dbf_hba_fsf_resp("fs_norm", 6, req);
275 } 309 }
276 }
277
278/**
279 * zfcp_dbf_hba_fsf_unsol - trace event for an unsolicited status buffer
280 * @tag: tag indicating which kind of unsolicited status has been received
281 * @dbf: reference to dbf structure
282 * @status_buffer: buffer containing payload of unsolicited status
283 */
284static inline
285void zfcp_dbf_hba_fsf_unsol(const char *tag, struct zfcp_dbf *dbf,
286 struct fsf_status_read_buffer *buf)
287{
288 int level = 2;
289
290 if (level <= dbf->hba->level)
291 _zfcp_dbf_hba_fsf_unsol(tag, level, dbf, buf);
292} 310}
293 311
294static inline 312static inline
295void zfcp_dbf_scsi(const char *tag, const char *tag2, int level, 313void _zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *scmd,
296 struct zfcp_dbf *dbf, struct scsi_cmnd *scmd, 314 struct zfcp_fsf_req *req)
297 struct zfcp_fsf_req *req, unsigned long old_id)
298{ 315{
299 if (level <= dbf->scsi->level) 316 struct zfcp_adapter *adapter = (struct zfcp_adapter *)
300 _zfcp_dbf_scsi(tag, tag2, level, dbf, scmd, req, old_id); 317 scmd->device->host->hostdata[0];
318
319 if (level <= adapter->dbf->scsi->level)
320 zfcp_dbf_scsi(tag, scmd, req);
301} 321}
302 322
303/** 323/**
304 * zfcp_dbf_scsi_result - trace event for SCSI command completion 324 * zfcp_dbf_scsi_result - trace event for SCSI command completion
305 * @dbf: adapter dbf trace
306 * @scmd: SCSI command pointer 325 * @scmd: SCSI command pointer
307 * @req: FSF request used to issue SCSI command 326 * @req: FSF request used to issue SCSI command
308 */ 327 */
309static inline 328static inline
310void zfcp_dbf_scsi_result(struct zfcp_dbf *dbf, struct scsi_cmnd *scmd, 329void zfcp_dbf_scsi_result(struct scsi_cmnd *scmd, struct zfcp_fsf_req *req)
311 struct zfcp_fsf_req *req)
312{ 330{
313 if (scmd->result != 0) 331 if (scmd->result != 0)
314 zfcp_dbf_scsi("rslt", "erro", 3, dbf, scmd, req, 0); 332 _zfcp_dbf_scsi("rsl_err", 3, scmd, req);
315 else if (scmd->retries > 0) 333 else if (scmd->retries > 0)
316 zfcp_dbf_scsi("rslt", "retr", 4, dbf, scmd, req, 0); 334 _zfcp_dbf_scsi("rsl_ret", 4, scmd, req);
317 else 335 else
318 zfcp_dbf_scsi("rslt", "norm", 6, dbf, scmd, req, 0); 336 _zfcp_dbf_scsi("rsl_nor", 6, scmd, req);
319} 337}
320 338
321/** 339/**
322 * zfcp_dbf_scsi_fail_send - trace event for failure to send SCSI command 340 * zfcp_dbf_scsi_fail_send - trace event for failure to send SCSI command
323 * @dbf: adapter dbf trace
324 * @scmd: SCSI command pointer 341 * @scmd: SCSI command pointer
325 */ 342 */
326static inline 343static inline
327void zfcp_dbf_scsi_fail_send(struct zfcp_dbf *dbf, struct scsi_cmnd *scmd) 344void zfcp_dbf_scsi_fail_send(struct scsi_cmnd *scmd)
328{ 345{
329 zfcp_dbf_scsi("rslt", "fail", 4, dbf, scmd, NULL, 0); 346 _zfcp_dbf_scsi("rsl_fai", 4, scmd, NULL);
330} 347}
331 348
332/** 349/**
333 * zfcp_dbf_scsi_abort - trace event for SCSI command abort 350 * zfcp_dbf_scsi_abort - trace event for SCSI command abort
334 * @tag: tag indicating success or failure of abort operation 351 * @tag: tag indicating success or failure of abort operation
335 * @adapter: adapter thas has been used to issue SCSI command to be aborted
336 * @scmd: SCSI command to be aborted 352 * @scmd: SCSI command to be aborted
337 * @new_req: request containing abort (might be NULL) 353 * @fsf_req: request containing abort (might be NULL)
338 * @old_id: identifier of request containg SCSI command to be aborted
339 */ 354 */
340static inline 355static inline
341void zfcp_dbf_scsi_abort(const char *tag, struct zfcp_dbf *dbf, 356void zfcp_dbf_scsi_abort(char *tag, struct scsi_cmnd *scmd,
342 struct scsi_cmnd *scmd, struct zfcp_fsf_req *new_req, 357 struct zfcp_fsf_req *fsf_req)
343 unsigned long old_id)
344{ 358{
345 zfcp_dbf_scsi("abrt", tag, 1, dbf, scmd, new_req, old_id); 359 _zfcp_dbf_scsi(tag, 1, scmd, fsf_req);
346} 360}
347 361
348/** 362/**
@@ -352,12 +366,17 @@ void zfcp_dbf_scsi_abort(const char *tag, struct zfcp_dbf *dbf,
352 * @flag: indicates type of reset (Target Reset, Logical Unit Reset) 366 * @flag: indicates type of reset (Target Reset, Logical Unit Reset)
353 */ 367 */
354static inline 368static inline
355void zfcp_dbf_scsi_devreset(const char *tag, struct scsi_cmnd *scmnd, u8 flag) 369void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag)
356{ 370{
357 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scmnd->device); 371 char tmp_tag[ZFCP_DBF_TAG_LEN];
372
373 if (flag == FCP_TMF_TGT_RESET)
374 memcpy(tmp_tag, "tr_", 3);
375 else
376 memcpy(tmp_tag, "lr_", 3);
358 377
359 zfcp_dbf_scsi(flag == FCP_TMF_TGT_RESET ? "trst" : "lrst", tag, 1, 378 memcpy(&tmp_tag[3], tag, 4);
360 zfcp_sdev->port->adapter->dbf, scmnd, NULL, 0); 379 _zfcp_dbf_scsi(tmp_tag, 1, scmnd, NULL);
361} 380}
362 381
363#endif /* ZFCP_DBF_H */ 382#endif /* ZFCP_DBF_H */
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 0bcd5806bd9a..e003e306f870 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -76,9 +76,9 @@ static void zfcp_erp_action_ready(struct zfcp_erp_action *act)
76 struct zfcp_adapter *adapter = act->adapter; 76 struct zfcp_adapter *adapter = act->adapter;
77 77
78 list_move(&act->list, &act->adapter->erp_ready_head); 78 list_move(&act->list, &act->adapter->erp_ready_head);
79 zfcp_dbf_rec_action("erardy1", act); 79 zfcp_dbf_rec_run("erardy1", act);
80 wake_up(&adapter->erp_ready_wq); 80 wake_up(&adapter->erp_ready_wq);
81 zfcp_dbf_rec_thread("erardy2", adapter->dbf); 81 zfcp_dbf_rec_run("erardy2", act);
82} 82}
83 83
84static void zfcp_erp_action_dismiss(struct zfcp_erp_action *act) 84static void zfcp_erp_action_dismiss(struct zfcp_erp_action *act)
@@ -236,10 +236,10 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
236static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter, 236static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
237 struct zfcp_port *port, 237 struct zfcp_port *port,
238 struct scsi_device *sdev, 238 struct scsi_device *sdev,
239 char *id, void *ref, u32 act_status) 239 char *id, u32 act_status)
240{ 240{
241 int retval = 1, need; 241 int retval = 1, need;
242 struct zfcp_erp_action *act = NULL; 242 struct zfcp_erp_action *act;
243 243
244 if (!adapter->erp_thread) 244 if (!adapter->erp_thread)
245 return -EIO; 245 return -EIO;
@@ -255,15 +255,14 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
255 ++adapter->erp_total_count; 255 ++adapter->erp_total_count;
256 list_add_tail(&act->list, &adapter->erp_ready_head); 256 list_add_tail(&act->list, &adapter->erp_ready_head);
257 wake_up(&adapter->erp_ready_wq); 257 wake_up(&adapter->erp_ready_wq);
258 zfcp_dbf_rec_thread("eracte1", adapter->dbf);
259 retval = 0; 258 retval = 0;
260 out: 259 out:
261 zfcp_dbf_rec_trigger(id, ref, want, need, act, adapter, port, sdev); 260 zfcp_dbf_rec_trig(id, adapter, port, sdev, want, need);
262 return retval; 261 return retval;
263} 262}
264 263
265static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, 264static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter,
266 int clear_mask, char *id, void *ref) 265 int clear_mask, char *id)
267{ 266{
268 zfcp_erp_adapter_block(adapter, clear_mask); 267 zfcp_erp_adapter_block(adapter, clear_mask);
269 zfcp_scsi_schedule_rports_block(adapter); 268 zfcp_scsi_schedule_rports_block(adapter);
@@ -275,7 +274,7 @@ static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter,
275 return -EIO; 274 return -EIO;
276 } 275 }
277 return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, 276 return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER,
278 adapter, NULL, NULL, id, ref, 0); 277 adapter, NULL, NULL, id, 0);
279} 278}
280 279
281/** 280/**
@@ -283,10 +282,8 @@ static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter,
283 * @adapter: Adapter to reopen. 282 * @adapter: Adapter to reopen.
284 * @clear: Status flags to clear. 283 * @clear: Status flags to clear.
285 * @id: Id for debug trace event. 284 * @id: Id for debug trace event.
286 * @ref: Reference for debug trace event.
287 */ 285 */
288void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, 286void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, char *id)
289 char *id, void *ref)
290{ 287{
291 unsigned long flags; 288 unsigned long flags;
292 289
@@ -299,7 +296,7 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear,
299 ZFCP_STATUS_COMMON_ERP_FAILED); 296 ZFCP_STATUS_COMMON_ERP_FAILED);
300 else 297 else
301 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter, 298 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter,
302 NULL, NULL, id, ref, 0); 299 NULL, NULL, id, 0);
303 write_unlock_irqrestore(&adapter->erp_lock, flags); 300 write_unlock_irqrestore(&adapter->erp_lock, flags);
304} 301}
305 302
@@ -308,13 +305,12 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear,
308 * @adapter: Adapter to shut down. 305 * @adapter: Adapter to shut down.
309 * @clear: Status flags to clear. 306 * @clear: Status flags to clear.
310 * @id: Id for debug trace event. 307 * @id: Id for debug trace event.
311 * @ref: Reference for debug trace event.
312 */ 308 */
313void zfcp_erp_adapter_shutdown(struct zfcp_adapter *adapter, int clear, 309void zfcp_erp_adapter_shutdown(struct zfcp_adapter *adapter, int clear,
314 char *id, void *ref) 310 char *id)
315{ 311{
316 int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED; 312 int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
317 zfcp_erp_adapter_reopen(adapter, clear | flags, id, ref); 313 zfcp_erp_adapter_reopen(adapter, clear | flags, id);
318} 314}
319 315
320/** 316/**
@@ -322,13 +318,11 @@ void zfcp_erp_adapter_shutdown(struct zfcp_adapter *adapter, int clear,
322 * @port: Port to shut down. 318 * @port: Port to shut down.
323 * @clear: Status flags to clear. 319 * @clear: Status flags to clear.
324 * @id: Id for debug trace event. 320 * @id: Id for debug trace event.
325 * @ref: Reference for debug trace event.
326 */ 321 */
327void zfcp_erp_port_shutdown(struct zfcp_port *port, int clear, char *id, 322void zfcp_erp_port_shutdown(struct zfcp_port *port, int clear, char *id)
328 void *ref)
329{ 323{
330 int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED; 324 int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
331 zfcp_erp_port_reopen(port, clear | flags, id, ref); 325 zfcp_erp_port_reopen(port, clear | flags, id);
332} 326}
333 327
334static void zfcp_erp_port_block(struct zfcp_port *port, int clear) 328static void zfcp_erp_port_block(struct zfcp_port *port, int clear)
@@ -337,8 +331,8 @@ static void zfcp_erp_port_block(struct zfcp_port *port, int clear)
337 ZFCP_STATUS_COMMON_UNBLOCKED | clear); 331 ZFCP_STATUS_COMMON_UNBLOCKED | clear);
338} 332}
339 333
340static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port, 334static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear,
341 int clear, char *id, void *ref) 335 char *id)
342{ 336{
343 zfcp_erp_port_block(port, clear); 337 zfcp_erp_port_block(port, clear);
344 zfcp_scsi_schedule_rport_block(port); 338 zfcp_scsi_schedule_rport_block(port);
@@ -347,28 +341,26 @@ static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port,
347 return; 341 return;
348 342
349 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED, 343 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
350 port->adapter, port, NULL, id, ref, 0); 344 port->adapter, port, NULL, id, 0);
351} 345}
352 346
353/** 347/**
354 * zfcp_erp_port_forced_reopen - Forced close of port and open again 348 * zfcp_erp_port_forced_reopen - Forced close of port and open again
355 * @port: Port to force close and to reopen. 349 * @port: Port to force close and to reopen.
350 * @clear: Status flags to clear.
356 * @id: Id for debug trace event. 351 * @id: Id for debug trace event.
357 * @ref: Reference for debug trace event.
358 */ 352 */
359void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, char *id, 353void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, char *id)
360 void *ref)
361{ 354{
362 unsigned long flags; 355 unsigned long flags;
363 struct zfcp_adapter *adapter = port->adapter; 356 struct zfcp_adapter *adapter = port->adapter;
364 357
365 write_lock_irqsave(&adapter->erp_lock, flags); 358 write_lock_irqsave(&adapter->erp_lock, flags);
366 _zfcp_erp_port_forced_reopen(port, clear, id, ref); 359 _zfcp_erp_port_forced_reopen(port, clear, id);
367 write_unlock_irqrestore(&adapter->erp_lock, flags); 360 write_unlock_irqrestore(&adapter->erp_lock, flags);
368} 361}
369 362
370static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id, 363static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
371 void *ref)
372{ 364{
373 zfcp_erp_port_block(port, clear); 365 zfcp_erp_port_block(port, clear);
374 zfcp_scsi_schedule_rport_block(port); 366 zfcp_scsi_schedule_rport_block(port);
@@ -380,24 +372,25 @@ static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id,
380 } 372 }
381 373
382 return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT, 374 return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT,
383 port->adapter, port, NULL, id, ref, 0); 375 port->adapter, port, NULL, id, 0);
384} 376}
385 377
386/** 378/**
387 * zfcp_erp_port_reopen - trigger remote port recovery 379 * zfcp_erp_port_reopen - trigger remote port recovery
388 * @port: port to recover 380 * @port: port to recover
389 * @clear_mask: flags in port status to be cleared 381 * @clear_mask: flags in port status to be cleared
382 * @id: Id for debug trace event.
390 * 383 *
391 * Returns 0 if recovery has been triggered, < 0 if not. 384 * Returns 0 if recovery has been triggered, < 0 if not.
392 */ 385 */
393int zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id, void *ref) 386int zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
394{ 387{
395 int retval; 388 int retval;
396 unsigned long flags; 389 unsigned long flags;
397 struct zfcp_adapter *adapter = port->adapter; 390 struct zfcp_adapter *adapter = port->adapter;
398 391
399 write_lock_irqsave(&adapter->erp_lock, flags); 392 write_lock_irqsave(&adapter->erp_lock, flags);
400 retval = _zfcp_erp_port_reopen(port, clear, id, ref); 393 retval = _zfcp_erp_port_reopen(port, clear, id);
401 write_unlock_irqrestore(&adapter->erp_lock, flags); 394 write_unlock_irqrestore(&adapter->erp_lock, flags);
402 395
403 return retval; 396 return retval;
@@ -410,7 +403,7 @@ static void zfcp_erp_lun_block(struct scsi_device *sdev, int clear_mask)
410} 403}
411 404
412static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id, 405static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id,
413 void *ref, u32 act_status) 406 u32 act_status)
414{ 407{
415 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); 408 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
416 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; 409 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
@@ -421,17 +414,18 @@ static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id,
421 return; 414 return;
422 415
423 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_LUN, adapter, 416 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_LUN, adapter,
424 zfcp_sdev->port, sdev, id, ref, act_status); 417 zfcp_sdev->port, sdev, id, act_status);
425} 418}
426 419
427/** 420/**
428 * zfcp_erp_lun_reopen - initiate reopen of a LUN 421 * zfcp_erp_lun_reopen - initiate reopen of a LUN
429 * @sdev: SCSI device / LUN to be reopened 422 * @sdev: SCSI device / LUN to be reopened
430 * @clear_mask: specifies flags in LUN status to be cleared 423 * @clear_mask: specifies flags in LUN status to be cleared
424 * @id: Id for debug trace event.
425 *
431 * Return: 0 on success, < 0 on error 426 * Return: 0 on success, < 0 on error
432 */ 427 */
433void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id, 428void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id)
434 void *ref)
435{ 429{
436 unsigned long flags; 430 unsigned long flags;
437 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); 431 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
@@ -439,7 +433,7 @@ void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id,
439 struct zfcp_adapter *adapter = port->adapter; 433 struct zfcp_adapter *adapter = port->adapter;
440 434
441 write_lock_irqsave(&adapter->erp_lock, flags); 435 write_lock_irqsave(&adapter->erp_lock, flags);
442 _zfcp_erp_lun_reopen(sdev, clear, id, ref, 0); 436 _zfcp_erp_lun_reopen(sdev, clear, id, 0);
443 write_unlock_irqrestore(&adapter->erp_lock, flags); 437 write_unlock_irqrestore(&adapter->erp_lock, flags);
444} 438}
445 439
@@ -448,13 +442,11 @@ void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id,
448 * @sdev: SCSI device / LUN to shut down. 442 * @sdev: SCSI device / LUN to shut down.
449 * @clear: Status flags to clear. 443 * @clear: Status flags to clear.
450 * @id: Id for debug trace event. 444 * @id: Id for debug trace event.
451 * @ref: Reference for debug trace event.
452 */ 445 */
453void zfcp_erp_lun_shutdown(struct scsi_device *sdev, int clear, char *id, 446void zfcp_erp_lun_shutdown(struct scsi_device *sdev, int clear, char *id)
454 void *ref)
455{ 447{
456 int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED; 448 int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
457 zfcp_erp_lun_reopen(sdev, clear | flags, id, ref); 449 zfcp_erp_lun_reopen(sdev, clear | flags, id);
458} 450}
459 451
460/** 452/**
@@ -476,7 +468,7 @@ void zfcp_erp_lun_shutdown_wait(struct scsi_device *sdev, char *id)
476 int clear = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED; 468 int clear = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
477 469
478 write_lock_irqsave(&adapter->erp_lock, flags); 470 write_lock_irqsave(&adapter->erp_lock, flags);
479 _zfcp_erp_lun_reopen(sdev, clear, id, NULL, ZFCP_STATUS_ERP_NO_REF); 471 _zfcp_erp_lun_reopen(sdev, clear, id, ZFCP_STATUS_ERP_NO_REF);
480 write_unlock_irqrestore(&adapter->erp_lock, flags); 472 write_unlock_irqrestore(&adapter->erp_lock, flags);
481 473
482 zfcp_erp_wait(adapter); 474 zfcp_erp_wait(adapter);
@@ -490,14 +482,14 @@ static int status_change_set(unsigned long mask, atomic_t *status)
490static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter) 482static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter)
491{ 483{
492 if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status)) 484 if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status))
493 zfcp_dbf_rec_adapter("eraubl1", NULL, adapter->dbf); 485 zfcp_dbf_rec_run("eraubl1", &adapter->erp_action);
494 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status); 486 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status);
495} 487}
496 488
497static void zfcp_erp_port_unblock(struct zfcp_port *port) 489static void zfcp_erp_port_unblock(struct zfcp_port *port)
498{ 490{
499 if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status)) 491 if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status))
500 zfcp_dbf_rec_port("erpubl1", NULL, port); 492 zfcp_dbf_rec_run("erpubl1", &port->erp_action);
501 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status); 493 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status);
502} 494}
503 495
@@ -506,14 +498,14 @@ static void zfcp_erp_lun_unblock(struct scsi_device *sdev)
506 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); 498 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
507 499
508 if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status)) 500 if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status))
509 zfcp_dbf_rec_lun("erlubl1", NULL, sdev); 501 zfcp_dbf_rec_run("erlubl1", &sdev_to_zfcp(sdev)->erp_action);
510 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status); 502 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status);
511} 503}
512 504
513static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action) 505static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
514{ 506{
515 list_move(&erp_action->list, &erp_action->adapter->erp_running_head); 507 list_move(&erp_action->list, &erp_action->adapter->erp_running_head);
516 zfcp_dbf_rec_action("erator1", erp_action); 508 zfcp_dbf_rec_run("erator1", erp_action);
517} 509}
518 510
519static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act) 511static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act)
@@ -530,11 +522,11 @@ static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act)
530 if (act->status & (ZFCP_STATUS_ERP_DISMISSED | 522 if (act->status & (ZFCP_STATUS_ERP_DISMISSED |
531 ZFCP_STATUS_ERP_TIMEDOUT)) { 523 ZFCP_STATUS_ERP_TIMEDOUT)) {
532 req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; 524 req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
533 zfcp_dbf_rec_action("erscf_1", act); 525 zfcp_dbf_rec_run("erscf_1", act);
534 req->erp_action = NULL; 526 req->erp_action = NULL;
535 } 527 }
536 if (act->status & ZFCP_STATUS_ERP_TIMEDOUT) 528 if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
537 zfcp_dbf_rec_action("erscf_2", act); 529 zfcp_dbf_rec_run("erscf_2", act);
538 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) 530 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED)
539 act->fsf_req_id = 0; 531 act->fsf_req_id = 0;
540 } else 532 } else
@@ -585,40 +577,40 @@ static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action)
585} 577}
586 578
587static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter, 579static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
588 int clear, char *id, void *ref) 580 int clear, char *id)
589{ 581{
590 struct zfcp_port *port; 582 struct zfcp_port *port;
591 583
592 read_lock(&adapter->port_list_lock); 584 read_lock(&adapter->port_list_lock);
593 list_for_each_entry(port, &adapter->port_list, list) 585 list_for_each_entry(port, &adapter->port_list, list)
594 _zfcp_erp_port_reopen(port, clear, id, ref); 586 _zfcp_erp_port_reopen(port, clear, id);
595 read_unlock(&adapter->port_list_lock); 587 read_unlock(&adapter->port_list_lock);
596} 588}
597 589
598static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear, 590static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear,
599 char *id, void *ref) 591 char *id)
600{ 592{
601 struct scsi_device *sdev; 593 struct scsi_device *sdev;
602 594
603 shost_for_each_device(sdev, port->adapter->scsi_host) 595 shost_for_each_device(sdev, port->adapter->scsi_host)
604 if (sdev_to_zfcp(sdev)->port == port) 596 if (sdev_to_zfcp(sdev)->port == port)
605 _zfcp_erp_lun_reopen(sdev, clear, id, ref, 0); 597 _zfcp_erp_lun_reopen(sdev, clear, id, 0);
606} 598}
607 599
608static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act) 600static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
609{ 601{
610 switch (act->action) { 602 switch (act->action) {
611 case ZFCP_ERP_ACTION_REOPEN_ADAPTER: 603 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
612 _zfcp_erp_adapter_reopen(act->adapter, 0, "ersff_1", NULL); 604 _zfcp_erp_adapter_reopen(act->adapter, 0, "ersff_1");
613 break; 605 break;
614 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: 606 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
615 _zfcp_erp_port_forced_reopen(act->port, 0, "ersff_2", NULL); 607 _zfcp_erp_port_forced_reopen(act->port, 0, "ersff_2");
616 break; 608 break;
617 case ZFCP_ERP_ACTION_REOPEN_PORT: 609 case ZFCP_ERP_ACTION_REOPEN_PORT:
618 _zfcp_erp_port_reopen(act->port, 0, "ersff_3", NULL); 610 _zfcp_erp_port_reopen(act->port, 0, "ersff_3");
619 break; 611 break;
620 case ZFCP_ERP_ACTION_REOPEN_LUN: 612 case ZFCP_ERP_ACTION_REOPEN_LUN:
621 _zfcp_erp_lun_reopen(act->sdev, 0, "ersff_4", NULL, 0); 613 _zfcp_erp_lun_reopen(act->sdev, 0, "ersff_4", 0);
622 break; 614 break;
623 } 615 }
624} 616}
@@ -627,13 +619,13 @@ static void zfcp_erp_strategy_followup_success(struct zfcp_erp_action *act)
627{ 619{
628 switch (act->action) { 620 switch (act->action) {
629 case ZFCP_ERP_ACTION_REOPEN_ADAPTER: 621 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
630 _zfcp_erp_port_reopen_all(act->adapter, 0, "ersfs_1", NULL); 622 _zfcp_erp_port_reopen_all(act->adapter, 0, "ersfs_1");
631 break; 623 break;
632 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: 624 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
633 _zfcp_erp_port_reopen(act->port, 0, "ersfs_2", NULL); 625 _zfcp_erp_port_reopen(act->port, 0, "ersfs_2");
634 break; 626 break;
635 case ZFCP_ERP_ACTION_REOPEN_PORT: 627 case ZFCP_ERP_ACTION_REOPEN_PORT:
636 _zfcp_erp_lun_reopen_all(act->port, 0, "ersfs_3", NULL); 628 _zfcp_erp_lun_reopen_all(act->port, 0, "ersfs_3");
637 break; 629 break;
638 } 630 }
639} 631}
@@ -652,17 +644,6 @@ static void zfcp_erp_wakeup(struct zfcp_adapter *adapter)
652 read_unlock_irqrestore(&adapter->erp_lock, flags); 644 read_unlock_irqrestore(&adapter->erp_lock, flags);
653} 645}
654 646
655static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *act)
656{
657 struct zfcp_qdio *qdio = act->adapter->qdio;
658
659 if (zfcp_qdio_open(qdio))
660 return ZFCP_ERP_FAILED;
661 init_waitqueue_head(&qdio->req_q_wq);
662 atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &act->adapter->status);
663 return ZFCP_ERP_SUCCEEDED;
664}
665
666static void zfcp_erp_enqueue_ptp_port(struct zfcp_adapter *adapter) 647static void zfcp_erp_enqueue_ptp_port(struct zfcp_adapter *adapter)
667{ 648{
668 struct zfcp_port *port; 649 struct zfcp_port *port;
@@ -670,7 +651,7 @@ static void zfcp_erp_enqueue_ptp_port(struct zfcp_adapter *adapter)
670 adapter->peer_d_id); 651 adapter->peer_d_id);
671 if (IS_ERR(port)) /* error or port already attached */ 652 if (IS_ERR(port)) /* error or port already attached */
672 return; 653 return;
673 _zfcp_erp_port_reopen(port, 0, "ereptp1", NULL); 654 _zfcp_erp_port_reopen(port, 0, "ereptp1");
674} 655}
675 656
676static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action) 657static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action)
@@ -693,10 +674,8 @@ static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action)
693 return ZFCP_ERP_FAILED; 674 return ZFCP_ERP_FAILED;
694 } 675 }
695 676
696 zfcp_dbf_rec_thread_lock("erasfx1", adapter->dbf);
697 wait_event(adapter->erp_ready_wq, 677 wait_event(adapter->erp_ready_wq,
698 !list_empty(&adapter->erp_ready_head)); 678 !list_empty(&adapter->erp_ready_head));
699 zfcp_dbf_rec_thread_lock("erasfx2", adapter->dbf);
700 if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) 679 if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT)
701 break; 680 break;
702 681
@@ -735,10 +714,10 @@ static int zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *act)
735 if (ret) 714 if (ret)
736 return ZFCP_ERP_FAILED; 715 return ZFCP_ERP_FAILED;
737 716
738 zfcp_dbf_rec_thread_lock("erasox1", adapter->dbf); 717 zfcp_dbf_rec_run("erasox1", act);
739 wait_event(adapter->erp_ready_wq, 718 wait_event(adapter->erp_ready_wq,
740 !list_empty(&adapter->erp_ready_head)); 719 !list_empty(&adapter->erp_ready_head));
741 zfcp_dbf_rec_thread_lock("erasox2", adapter->dbf); 720 zfcp_dbf_rec_run("erasox2", act);
742 if (act->status & ZFCP_STATUS_ERP_TIMEDOUT) 721 if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
743 return ZFCP_ERP_FAILED; 722 return ZFCP_ERP_FAILED;
744 723
@@ -788,7 +767,7 @@ static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *act)
788{ 767{
789 struct zfcp_adapter *adapter = act->adapter; 768 struct zfcp_adapter *adapter = act->adapter;
790 769
791 if (zfcp_erp_adapter_strategy_open_qdio(act)) { 770 if (zfcp_qdio_open(adapter->qdio)) {
792 atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK | 771 atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
793 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, 772 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
794 &adapter->status); 773 &adapter->status);
@@ -1166,7 +1145,7 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret)
1166 if (zfcp_erp_strat_change_det(&adapter->status, erp_status)) { 1145 if (zfcp_erp_strat_change_det(&adapter->status, erp_status)) {
1167 _zfcp_erp_adapter_reopen(adapter, 1146 _zfcp_erp_adapter_reopen(adapter,
1168 ZFCP_STATUS_COMMON_ERP_FAILED, 1147 ZFCP_STATUS_COMMON_ERP_FAILED,
1169 "ersscg1", NULL); 1148 "ersscg1");
1170 return ZFCP_ERP_EXIT; 1149 return ZFCP_ERP_EXIT;
1171 } 1150 }
1172 break; 1151 break;
@@ -1176,7 +1155,7 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret)
1176 if (zfcp_erp_strat_change_det(&port->status, erp_status)) { 1155 if (zfcp_erp_strat_change_det(&port->status, erp_status)) {
1177 _zfcp_erp_port_reopen(port, 1156 _zfcp_erp_port_reopen(port,
1178 ZFCP_STATUS_COMMON_ERP_FAILED, 1157 ZFCP_STATUS_COMMON_ERP_FAILED,
1179 "ersscg2", NULL); 1158 "ersscg2");
1180 return ZFCP_ERP_EXIT; 1159 return ZFCP_ERP_EXIT;
1181 } 1160 }
1182 break; 1161 break;
@@ -1186,7 +1165,7 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret)
1186 if (zfcp_erp_strat_change_det(&zfcp_sdev->status, erp_status)) { 1165 if (zfcp_erp_strat_change_det(&zfcp_sdev->status, erp_status)) {
1187 _zfcp_erp_lun_reopen(sdev, 1166 _zfcp_erp_lun_reopen(sdev,
1188 ZFCP_STATUS_COMMON_ERP_FAILED, 1167 ZFCP_STATUS_COMMON_ERP_FAILED,
1189 "ersscg3", NULL, 0); 1168 "ersscg3", 0);
1190 return ZFCP_ERP_EXIT; 1169 return ZFCP_ERP_EXIT;
1191 } 1170 }
1192 break; 1171 break;
@@ -1206,7 +1185,7 @@ static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
1206 } 1185 }
1207 1186
1208 list_del(&erp_action->list); 1187 list_del(&erp_action->list);
1209 zfcp_dbf_rec_action("eractd1", erp_action); 1188 zfcp_dbf_rec_run("eractd1", erp_action);
1210 1189
1211 switch (erp_action->action) { 1190 switch (erp_action->action) {
1212 case ZFCP_ERP_ACTION_REOPEN_LUN: 1191 case ZFCP_ERP_ACTION_REOPEN_LUN:
@@ -1313,7 +1292,7 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
1313 erp_action->status |= ZFCP_STATUS_ERP_LOWMEM; 1292 erp_action->status |= ZFCP_STATUS_ERP_LOWMEM;
1314 } 1293 }
1315 if (adapter->erp_total_count == adapter->erp_low_mem_count) 1294 if (adapter->erp_total_count == adapter->erp_low_mem_count)
1316 _zfcp_erp_adapter_reopen(adapter, 0, "erstgy1", NULL); 1295 _zfcp_erp_adapter_reopen(adapter, 0, "erstgy1");
1317 else { 1296 else {
1318 zfcp_erp_strategy_memwait(erp_action); 1297 zfcp_erp_strategy_memwait(erp_action);
1319 retval = ZFCP_ERP_CONTINUES; 1298 retval = ZFCP_ERP_CONTINUES;
@@ -1357,11 +1336,9 @@ static int zfcp_erp_thread(void *data)
1357 unsigned long flags; 1336 unsigned long flags;
1358 1337
1359 for (;;) { 1338 for (;;) {
1360 zfcp_dbf_rec_thread_lock("erthrd1", adapter->dbf);
1361 wait_event_interruptible(adapter->erp_ready_wq, 1339 wait_event_interruptible(adapter->erp_ready_wq,
1362 !list_empty(&adapter->erp_ready_head) || 1340 !list_empty(&adapter->erp_ready_head) ||
1363 kthread_should_stop()); 1341 kthread_should_stop());
1364 zfcp_dbf_rec_thread_lock("erthrd2", adapter->dbf);
1365 1342
1366 if (kthread_should_stop()) 1343 if (kthread_should_stop())
1367 break; 1344 break;
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index bf8f3e514839..6e325284fbe7 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -45,47 +45,33 @@ extern void zfcp_cfdc_adapter_access_changed(struct zfcp_adapter *);
45 45
46/* zfcp_dbf.c */ 46/* zfcp_dbf.c */
47extern int zfcp_dbf_adapter_register(struct zfcp_adapter *); 47extern int zfcp_dbf_adapter_register(struct zfcp_adapter *);
48extern void zfcp_dbf_adapter_unregister(struct zfcp_dbf *); 48extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *);
49extern void zfcp_dbf_rec_thread(char *, struct zfcp_dbf *); 49extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *,
50extern void zfcp_dbf_rec_thread_lock(char *, struct zfcp_dbf *); 50 struct zfcp_port *, struct scsi_device *, u8, u8);
51extern void zfcp_dbf_rec_adapter(char *, void *, struct zfcp_dbf *); 51extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *);
52extern void zfcp_dbf_rec_port(char *, void *, struct zfcp_port *); 52extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *);
53extern void zfcp_dbf_rec_lun(char *, void *, struct scsi_device *); 53extern void zfcp_dbf_hba_fsf_res(char *, struct zfcp_fsf_req *);
54extern void zfcp_dbf_rec_trigger(char *, void *, u8, u8, void *, 54extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *);
55 struct zfcp_adapter *, struct zfcp_port *,
56 struct scsi_device *);
57extern void zfcp_dbf_rec_action(char *, struct zfcp_erp_action *);
58extern void _zfcp_dbf_hba_fsf_response(const char *, int, struct zfcp_fsf_req *,
59 struct zfcp_dbf *);
60extern void _zfcp_dbf_hba_fsf_unsol(const char *, int level, struct zfcp_dbf *,
61 struct fsf_status_read_buffer *);
62extern void zfcp_dbf_hba_qdio(struct zfcp_dbf *, unsigned int, int, int);
63extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *); 55extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *);
64extern void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *, u32); 56extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32);
65extern void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *); 57extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *);
66extern void zfcp_dbf_san_els_request(struct zfcp_fsf_req *); 58extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *);
67extern void zfcp_dbf_san_els_response(struct zfcp_fsf_req *); 59extern void zfcp_dbf_scsi(char *, struct scsi_cmnd *, struct zfcp_fsf_req *);
68extern void zfcp_dbf_san_incoming_els(struct zfcp_fsf_req *);
69extern void _zfcp_dbf_scsi(const char *, const char *, int, struct zfcp_dbf *,
70 struct scsi_cmnd *, struct zfcp_fsf_req *,
71 unsigned long);
72 60
73/* zfcp_erp.c */ 61/* zfcp_erp.c */
74extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32); 62extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32);
75extern void zfcp_erp_clear_adapter_status(struct zfcp_adapter *, u32); 63extern void zfcp_erp_clear_adapter_status(struct zfcp_adapter *, u32);
76extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *, void *); 64extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *);
77extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *, 65extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *);
78 void *);
79extern void zfcp_erp_set_port_status(struct zfcp_port *, u32); 66extern void zfcp_erp_set_port_status(struct zfcp_port *, u32);
80extern void zfcp_erp_clear_port_status(struct zfcp_port *, u32); 67extern void zfcp_erp_clear_port_status(struct zfcp_port *, u32);
81extern int zfcp_erp_port_reopen(struct zfcp_port *, int, char *, void *); 68extern int zfcp_erp_port_reopen(struct zfcp_port *, int, char *);
82extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *, void *); 69extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *);
83extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *, 70extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *);
84 void *);
85extern void zfcp_erp_set_lun_status(struct scsi_device *, u32); 71extern void zfcp_erp_set_lun_status(struct scsi_device *, u32);
86extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32); 72extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32);
87extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *, void *); 73extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *);
88extern void zfcp_erp_lun_shutdown(struct scsi_device *, int, char *, void *); 74extern void zfcp_erp_lun_shutdown(struct scsi_device *, int, char *);
89extern void zfcp_erp_lun_shutdown_wait(struct scsi_device *, char *); 75extern void zfcp_erp_lun_shutdown_wait(struct scsi_device *, char *);
90extern int zfcp_erp_thread_setup(struct zfcp_adapter *); 76extern int zfcp_erp_thread_setup(struct zfcp_adapter *);
91extern void zfcp_erp_thread_kill(struct zfcp_adapter *); 77extern void zfcp_erp_thread_kill(struct zfcp_adapter *);
@@ -149,6 +135,8 @@ extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *, struct zfcp_qdio_req *,
149extern int zfcp_qdio_open(struct zfcp_qdio *); 135extern int zfcp_qdio_open(struct zfcp_qdio *);
150extern void zfcp_qdio_close(struct zfcp_qdio *); 136extern void zfcp_qdio_close(struct zfcp_qdio *);
151extern void zfcp_qdio_siosl(struct zfcp_adapter *); 137extern void zfcp_qdio_siosl(struct zfcp_adapter *);
138extern struct zfcp_fsf_req *zfcp_fsf_get_req(struct zfcp_qdio *,
139 struct qdio_buffer *);
152 140
153/* zfcp_scsi.c */ 141/* zfcp_scsi.c */
154extern struct zfcp_data zfcp_data; 142extern struct zfcp_data zfcp_data;
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 86fd905df48b..30cf91a787a3 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -174,7 +174,7 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
174 if (!port->d_id) 174 if (!port->d_id)
175 zfcp_erp_port_reopen(port, 175 zfcp_erp_port_reopen(port,
176 ZFCP_STATUS_COMMON_ERP_FAILED, 176 ZFCP_STATUS_COMMON_ERP_FAILED,
177 "fcrscn1", NULL); 177 "fcrscn1");
178 } 178 }
179 read_unlock_irqrestore(&adapter->port_list_lock, flags); 179 read_unlock_irqrestore(&adapter->port_list_lock, flags);
180} 180}
@@ -215,7 +215,7 @@ static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, u64 wwpn)
215 read_lock_irqsave(&adapter->port_list_lock, flags); 215 read_lock_irqsave(&adapter->port_list_lock, flags);
216 list_for_each_entry(port, &adapter->port_list, list) 216 list_for_each_entry(port, &adapter->port_list, list)
217 if (port->wwpn == wwpn) { 217 if (port->wwpn == wwpn) {
218 zfcp_erp_port_forced_reopen(port, 0, "fciwwp1", req); 218 zfcp_erp_port_forced_reopen(port, 0, "fciwwp1");
219 break; 219 break;
220 } 220 }
221 read_unlock_irqrestore(&adapter->port_list_lock, flags); 221 read_unlock_irqrestore(&adapter->port_list_lock, flags);
@@ -251,7 +251,7 @@ void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req)
251 (struct fsf_status_read_buffer *) fsf_req->data; 251 (struct fsf_status_read_buffer *) fsf_req->data;
252 unsigned int els_type = status_buffer->payload.data[0]; 252 unsigned int els_type = status_buffer->payload.data[0];
253 253
254 zfcp_dbf_san_incoming_els(fsf_req); 254 zfcp_dbf_san_in_els("fciels1", fsf_req);
255 if (els_type == ELS_PLOGI) 255 if (els_type == ELS_PLOGI)
256 zfcp_fc_incoming_plogi(fsf_req); 256 zfcp_fc_incoming_plogi(fsf_req);
257 else if (els_type == ELS_LOGO) 257 else if (els_type == ELS_LOGO)
@@ -360,7 +360,7 @@ void zfcp_fc_port_did_lookup(struct work_struct *work)
360 ret = zfcp_fc_ns_gid_pn(port); 360 ret = zfcp_fc_ns_gid_pn(port);
361 if (ret) { 361 if (ret) {
362 /* could not issue gid_pn for some reason */ 362 /* could not issue gid_pn for some reason */
363 zfcp_erp_adapter_reopen(port->adapter, 0, "fcgpn_1", NULL); 363 zfcp_erp_adapter_reopen(port->adapter, 0, "fcgpn_1");
364 goto out; 364 goto out;
365 } 365 }
366 366
@@ -369,7 +369,7 @@ void zfcp_fc_port_did_lookup(struct work_struct *work)
369 goto out; 369 goto out;
370 } 370 }
371 371
372 zfcp_erp_port_reopen(port, 0, "fcgpn_3", NULL); 372 zfcp_erp_port_reopen(port, 0, "fcgpn_3");
373out: 373out:
374 put_device(&port->dev); 374 put_device(&port->dev);
375} 375}
@@ -426,7 +426,7 @@ static void zfcp_fc_adisc_handler(void *data)
426 if (adisc->els.status) { 426 if (adisc->els.status) {
427 /* request rejected or timed out */ 427 /* request rejected or timed out */
428 zfcp_erp_port_forced_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, 428 zfcp_erp_port_forced_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
429 "fcadh_1", NULL); 429 "fcadh_1");
430 goto out; 430 goto out;
431 } 431 }
432 432
@@ -436,7 +436,7 @@ static void zfcp_fc_adisc_handler(void *data)
436 if ((port->wwpn != adisc_resp->adisc_wwpn) || 436 if ((port->wwpn != adisc_resp->adisc_wwpn) ||
437 !(atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN)) { 437 !(atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN)) {
438 zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, 438 zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
439 "fcadh_2", NULL); 439 "fcadh_2");
440 goto out; 440 goto out;
441 } 441 }
442 442
@@ -507,7 +507,7 @@ void zfcp_fc_link_test_work(struct work_struct *work)
507 507
508 /* send of ADISC was not possible */ 508 /* send of ADISC was not possible */
509 atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status); 509 atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
510 zfcp_erp_port_forced_reopen(port, 0, "fcltwk1", NULL); 510 zfcp_erp_port_forced_reopen(port, 0, "fcltwk1");
511 511
512out: 512out:
513 put_device(&port->dev); 513 put_device(&port->dev);
@@ -659,7 +659,7 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
659 port = zfcp_port_enqueue(adapter, acc->fp_wwpn, 659 port = zfcp_port_enqueue(adapter, acc->fp_wwpn,
660 ZFCP_STATUS_COMMON_NOESC, d_id); 660 ZFCP_STATUS_COMMON_NOESC, d_id);
661 if (!IS_ERR(port)) 661 if (!IS_ERR(port))
662 zfcp_erp_port_reopen(port, 0, "fcegpf1", NULL); 662 zfcp_erp_port_reopen(port, 0, "fcegpf1");
663 else if (PTR_ERR(port) != -EEXIST) 663 else if (PTR_ERR(port) != -EEXIST)
664 ret = PTR_ERR(port); 664 ret = PTR_ERR(port);
665 } 665 }
@@ -671,7 +671,7 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
671 write_unlock_irqrestore(&adapter->port_list_lock, flags); 671 write_unlock_irqrestore(&adapter->port_list_lock, flags);
672 672
673 list_for_each_entry_safe(port, tmp, &remove_lh, list) { 673 list_for_each_entry_safe(port, tmp, &remove_lh, list) {
674 zfcp_erp_port_shutdown(port, 0, "fcegpf2", NULL); 674 zfcp_erp_port_shutdown(port, 0, "fcegpf2");
675 zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs); 675 zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs);
676 } 676 }
677 677
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 2eb7dd56ab80..60ff9d172c79 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -23,7 +23,7 @@ static void zfcp_fsf_request_timeout_handler(unsigned long data)
23 struct zfcp_adapter *adapter = (struct zfcp_adapter *) data; 23 struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
24 zfcp_qdio_siosl(adapter); 24 zfcp_qdio_siosl(adapter);
25 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 25 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
26 "fsrth_1", NULL); 26 "fsrth_1");
27} 27}
28 28
29static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req, 29static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
@@ -65,7 +65,7 @@ static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
65{ 65{
66 dev_err(&req->adapter->ccw_device->dev, "FCP device not " 66 dev_err(&req->adapter->ccw_device->dev, "FCP device not "
67 "operational because of an unsupported FC class\n"); 67 "operational because of an unsupported FC class\n");
68 zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1", req); 68 zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1");
69 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 69 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
70} 70}
71 71
@@ -98,7 +98,7 @@ static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
98 read_lock_irqsave(&adapter->port_list_lock, flags); 98 read_lock_irqsave(&adapter->port_list_lock, flags);
99 list_for_each_entry(port, &adapter->port_list, list) 99 list_for_each_entry(port, &adapter->port_list, list)
100 if (port->d_id == d_id) { 100 if (port->d_id == d_id) {
101 zfcp_erp_port_reopen(port, 0, "fssrpc1", req); 101 zfcp_erp_port_reopen(port, 0, "fssrpc1");
102 break; 102 break;
103 } 103 }
104 read_unlock_irqrestore(&adapter->port_list_lock, flags); 104 read_unlock_irqrestore(&adapter->port_list_lock, flags);
@@ -211,13 +211,13 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
211 struct fsf_status_read_buffer *sr_buf = req->data; 211 struct fsf_status_read_buffer *sr_buf = req->data;
212 212
213 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) { 213 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
214 zfcp_dbf_hba_fsf_unsol("dism", adapter->dbf, sr_buf); 214 zfcp_dbf_hba_fsf_uss("fssrh_1", req);
215 mempool_free(sr_buf, adapter->pool.status_read_data); 215 mempool_free(sr_buf, adapter->pool.status_read_data);
216 zfcp_fsf_req_free(req); 216 zfcp_fsf_req_free(req);
217 return; 217 return;
218 } 218 }
219 219
220 zfcp_dbf_hba_fsf_unsol("read", adapter->dbf, sr_buf); 220 zfcp_dbf_hba_fsf_uss("fssrh_2", req);
221 221
222 switch (sr_buf->status_type) { 222 switch (sr_buf->status_type) {
223 case FSF_STATUS_READ_PORT_CLOSED: 223 case FSF_STATUS_READ_PORT_CLOSED:
@@ -232,7 +232,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
232 dev_warn(&adapter->ccw_device->dev, 232 dev_warn(&adapter->ccw_device->dev,
233 "The error threshold for checksum statistics " 233 "The error threshold for checksum statistics "
234 "has been exceeded\n"); 234 "has been exceeded\n");
235 zfcp_dbf_hba_berr(adapter->dbf, req); 235 zfcp_dbf_hba_bit_err("fssrh_3", req);
236 break; 236 break;
237 case FSF_STATUS_READ_LINK_DOWN: 237 case FSF_STATUS_READ_LINK_DOWN:
238 zfcp_fsf_status_read_link_down(req); 238 zfcp_fsf_status_read_link_down(req);
@@ -247,7 +247,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
247 zfcp_erp_adapter_reopen(adapter, 247 zfcp_erp_adapter_reopen(adapter,
248 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | 248 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
249 ZFCP_STATUS_COMMON_ERP_FAILED, 249 ZFCP_STATUS_COMMON_ERP_FAILED,
250 "fssrh_2", req); 250 "fssrh_2");
251 zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKUP, 0); 251 zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKUP, 0);
252 252
253 break; 253 break;
@@ -287,7 +287,7 @@ static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
287 "The FCP adapter reported a problem " 287 "The FCP adapter reported a problem "
288 "that cannot be recovered\n"); 288 "that cannot be recovered\n");
289 zfcp_qdio_siosl(req->adapter); 289 zfcp_qdio_siosl(req->adapter);
290 zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1", req); 290 zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1");
291 break; 291 break;
292 } 292 }
293 /* all non-return stats set FSFREQ_ERROR*/ 293 /* all non-return stats set FSFREQ_ERROR*/
@@ -304,7 +304,7 @@ static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req)
304 dev_err(&req->adapter->ccw_device->dev, 304 dev_err(&req->adapter->ccw_device->dev,
305 "The FCP adapter does not recognize the command 0x%x\n", 305 "The FCP adapter does not recognize the command 0x%x\n",
306 req->qtcb->header.fsf_command); 306 req->qtcb->header.fsf_command);
307 zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1", req); 307 zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1");
308 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 308 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
309 break; 309 break;
310 case FSF_ADAPTER_STATUS_AVAILABLE: 310 case FSF_ADAPTER_STATUS_AVAILABLE:
@@ -335,17 +335,17 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
335 "QTCB version 0x%x not supported by FCP adapter " 335 "QTCB version 0x%x not supported by FCP adapter "
336 "(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION, 336 "(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION,
337 psq->word[0], psq->word[1]); 337 psq->word[0], psq->word[1]);
338 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1", req); 338 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1");
339 break; 339 break;
340 case FSF_PROT_ERROR_STATE: 340 case FSF_PROT_ERROR_STATE:
341 case FSF_PROT_SEQ_NUMB_ERROR: 341 case FSF_PROT_SEQ_NUMB_ERROR:
342 zfcp_erp_adapter_reopen(adapter, 0, "fspse_2", req); 342 zfcp_erp_adapter_reopen(adapter, 0, "fspse_2");
343 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 343 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
344 break; 344 break;
345 case FSF_PROT_UNSUPP_QTCB_TYPE: 345 case FSF_PROT_UNSUPP_QTCB_TYPE:
346 dev_err(&adapter->ccw_device->dev, 346 dev_err(&adapter->ccw_device->dev,
347 "The QTCB type is not supported by the FCP adapter\n"); 347 "The QTCB type is not supported by the FCP adapter\n");
348 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3", req); 348 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3");
349 break; 349 break;
350 case FSF_PROT_HOST_CONNECTION_INITIALIZING: 350 case FSF_PROT_HOST_CONNECTION_INITIALIZING:
351 atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, 351 atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
@@ -355,12 +355,12 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
355 dev_err(&adapter->ccw_device->dev, 355 dev_err(&adapter->ccw_device->dev,
356 "0x%Lx is an ambiguous request identifier\n", 356 "0x%Lx is an ambiguous request identifier\n",
357 (unsigned long long)qtcb->bottom.support.req_handle); 357 (unsigned long long)qtcb->bottom.support.req_handle);
358 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4", req); 358 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4");
359 break; 359 break;
360 case FSF_PROT_LINK_DOWN: 360 case FSF_PROT_LINK_DOWN:
361 zfcp_fsf_link_down_info_eval(req, &psq->link_down_info); 361 zfcp_fsf_link_down_info_eval(req, &psq->link_down_info);
362 /* go through reopen to flush pending requests */ 362 /* go through reopen to flush pending requests */
363 zfcp_erp_adapter_reopen(adapter, 0, "fspse_6", req); 363 zfcp_erp_adapter_reopen(adapter, 0, "fspse_6");
364 break; 364 break;
365 case FSF_PROT_REEST_QUEUE: 365 case FSF_PROT_REEST_QUEUE:
366 /* All ports should be marked as ready to run again */ 366 /* All ports should be marked as ready to run again */
@@ -369,14 +369,14 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
369 zfcp_erp_adapter_reopen(adapter, 369 zfcp_erp_adapter_reopen(adapter,
370 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | 370 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
371 ZFCP_STATUS_COMMON_ERP_FAILED, 371 ZFCP_STATUS_COMMON_ERP_FAILED,
372 "fspse_8", req); 372 "fspse_8");
373 break; 373 break;
374 default: 374 default:
375 dev_err(&adapter->ccw_device->dev, 375 dev_err(&adapter->ccw_device->dev,
376 "0x%x is not a valid transfer protocol status\n", 376 "0x%x is not a valid transfer protocol status\n",
377 qtcb->prefix.prot_status); 377 qtcb->prefix.prot_status);
378 zfcp_qdio_siosl(adapter); 378 zfcp_qdio_siosl(adapter);
379 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9", req); 379 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9");
380 } 380 }
381 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 381 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
382} 382}
@@ -482,7 +482,7 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
482 dev_err(&adapter->ccw_device->dev, 482 dev_err(&adapter->ccw_device->dev,
483 "Unknown or unsupported arbitrated loop " 483 "Unknown or unsupported arbitrated loop "
484 "fibre channel topology detected\n"); 484 "fibre channel topology detected\n");
485 zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1", req); 485 zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1");
486 return -EIO; 486 return -EIO;
487 } 487 }
488 488
@@ -518,7 +518,7 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
518 "FCP adapter maximum QTCB size (%d bytes) " 518 "FCP adapter maximum QTCB size (%d bytes) "
519 "is too small\n", 519 "is too small\n",
520 bottom->max_qtcb_size); 520 bottom->max_qtcb_size);
521 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1", req); 521 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1");
522 return; 522 return;
523 } 523 }
524 atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, 524 atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
@@ -536,7 +536,7 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
536 &qtcb->header.fsf_status_qual.link_down_info); 536 &qtcb->header.fsf_status_qual.link_down_info);
537 break; 537 break;
538 default: 538 default:
539 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3", req); 539 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3");
540 return; 540 return;
541 } 541 }
542 542
@@ -552,14 +552,14 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
552 dev_err(&adapter->ccw_device->dev, 552 dev_err(&adapter->ccw_device->dev,
553 "The FCP adapter only supports newer " 553 "The FCP adapter only supports newer "
554 "control block versions\n"); 554 "control block versions\n");
555 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4", req); 555 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4");
556 return; 556 return;
557 } 557 }
558 if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) { 558 if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) {
559 dev_err(&adapter->ccw_device->dev, 559 dev_err(&adapter->ccw_device->dev,
560 "The FCP adapter only supports older " 560 "The FCP adapter only supports older "
561 "control block versions\n"); 561 "control block versions\n");
562 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5", req); 562 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5");
563 } 563 }
564} 564}
565 565
@@ -700,7 +700,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
700 del_timer(&req->timer); 700 del_timer(&req->timer);
701 /* lookup request again, list might have changed */ 701 /* lookup request again, list might have changed */
702 zfcp_reqlist_find_rm(adapter->req_list, req_id); 702 zfcp_reqlist_find_rm(adapter->req_list, req_id);
703 zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1", req); 703 zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1");
704 return -EIO; 704 return -EIO;
705 } 705 }
706 706
@@ -754,10 +754,11 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
754 goto out; 754 goto out;
755 755
756failed_req_send: 756failed_req_send:
757 req->data = NULL;
757 mempool_free(sr_buf, adapter->pool.status_read_data); 758 mempool_free(sr_buf, adapter->pool.status_read_data);
758failed_buf: 759failed_buf:
760 zfcp_dbf_hba_fsf_uss("fssr__1", req);
759 zfcp_fsf_req_free(req); 761 zfcp_fsf_req_free(req);
760 zfcp_dbf_hba_fsf_unsol("fail", adapter->dbf, NULL);
761out: 762out:
762 spin_unlock_irq(&qdio->req_q_lock); 763 spin_unlock_irq(&qdio->req_q_lock);
763 return retval; 764 return retval;
@@ -776,14 +777,13 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
776 case FSF_PORT_HANDLE_NOT_VALID: 777 case FSF_PORT_HANDLE_NOT_VALID:
777 if (fsq->word[0] == fsq->word[1]) { 778 if (fsq->word[0] == fsq->word[1]) {
778 zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, 779 zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0,
779 "fsafch1", req); 780 "fsafch1");
780 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 781 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
781 } 782 }
782 break; 783 break;
783 case FSF_LUN_HANDLE_NOT_VALID: 784 case FSF_LUN_HANDLE_NOT_VALID:
784 if (fsq->word[0] == fsq->word[1]) { 785 if (fsq->word[0] == fsq->word[1]) {
785 zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fsafch2", 786 zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fsafch2");
786 req);
787 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 787 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
788 } 788 }
789 break; 789 break;
@@ -794,14 +794,13 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
794 zfcp_erp_set_port_status(zfcp_sdev->port, 794 zfcp_erp_set_port_status(zfcp_sdev->port,
795 ZFCP_STATUS_COMMON_ACCESS_BOXED); 795 ZFCP_STATUS_COMMON_ACCESS_BOXED);
796 zfcp_erp_port_reopen(zfcp_sdev->port, 796 zfcp_erp_port_reopen(zfcp_sdev->port,
797 ZFCP_STATUS_COMMON_ERP_FAILED, "fsafch3", 797 ZFCP_STATUS_COMMON_ERP_FAILED, "fsafch3");
798 req);
799 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 798 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
800 break; 799 break;
801 case FSF_LUN_BOXED: 800 case FSF_LUN_BOXED:
802 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED); 801 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
803 zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED, 802 zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
804 "fsafch4", req); 803 "fsafch4");
805 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 804 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
806 break; 805 break;
807 case FSF_ADAPTER_STATUS_AVAILABLE: 806 case FSF_ADAPTER_STATUS_AVAILABLE:
@@ -882,7 +881,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
882 881
883 switch (header->fsf_status) { 882 switch (header->fsf_status) {
884 case FSF_GOOD: 883 case FSF_GOOD:
885 zfcp_dbf_san_ct_response(req); 884 zfcp_dbf_san_res("fsscth1", req);
886 ct->status = 0; 885 ct->status = 0;
887 break; 886 break;
888 case FSF_SERVICE_CLASS_NOT_SUPPORTED: 887 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
@@ -902,7 +901,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
902 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 901 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
903 break; 902 break;
904 case FSF_PORT_HANDLE_NOT_VALID: 903 case FSF_PORT_HANDLE_NOT_VALID:
905 zfcp_erp_adapter_reopen(adapter, 0, "fsscth1", req); 904 zfcp_erp_adapter_reopen(adapter, 0, "fsscth1");
906 /* fall through */ 905 /* fall through */
907 case FSF_GENERIC_COMMAND_REJECTED: 906 case FSF_GENERIC_COMMAND_REJECTED:
908 case FSF_PAYLOAD_SIZE_MISMATCH: 907 case FSF_PAYLOAD_SIZE_MISMATCH:
@@ -1025,7 +1024,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
1025 req->qtcb->header.port_handle = wka_port->handle; 1024 req->qtcb->header.port_handle = wka_port->handle;
1026 req->data = ct; 1025 req->data = ct;
1027 1026
1028 zfcp_dbf_san_ct_request(req, wka_port->d_id); 1027 zfcp_dbf_san_req("fssct_1", req, wka_port->d_id);
1029 1028
1030 ret = zfcp_fsf_req_send(req); 1029 ret = zfcp_fsf_req_send(req);
1031 if (ret) 1030 if (ret)
@@ -1053,7 +1052,7 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
1053 1052
1054 switch (header->fsf_status) { 1053 switch (header->fsf_status) {
1055 case FSF_GOOD: 1054 case FSF_GOOD:
1056 zfcp_dbf_san_els_response(req); 1055 zfcp_dbf_san_res("fsselh1", req);
1057 send_els->status = 0; 1056 send_els->status = 0;
1058 break; 1057 break;
1059 case FSF_SERVICE_CLASS_NOT_SUPPORTED: 1058 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
@@ -1127,7 +1126,7 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
1127 req->handler = zfcp_fsf_send_els_handler; 1126 req->handler = zfcp_fsf_send_els_handler;
1128 req->data = els; 1127 req->data = els;
1129 1128
1130 zfcp_dbf_san_els_request(req); 1129 zfcp_dbf_san_req("fssels1", req, d_id);
1131 1130
1132 ret = zfcp_fsf_req_send(req); 1131 ret = zfcp_fsf_req_send(req);
1133 if (ret) 1132 if (ret)
@@ -1448,7 +1447,7 @@ static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
1448 1447
1449 switch (req->qtcb->header.fsf_status) { 1448 switch (req->qtcb->header.fsf_status) {
1450 case FSF_PORT_HANDLE_NOT_VALID: 1449 case FSF_PORT_HANDLE_NOT_VALID:
1451 zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1", req); 1450 zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1");
1452 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1451 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1453 break; 1452 break;
1454 case FSF_ADAPTER_STATUS_AVAILABLE: 1453 case FSF_ADAPTER_STATUS_AVAILABLE:
@@ -1580,7 +1579,7 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
1580 1579
1581 if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) { 1580 if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
1582 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1581 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1583 zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1", req); 1582 zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1");
1584 } 1583 }
1585 1584
1586 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE; 1585 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
@@ -1638,7 +1637,7 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
1638 1637
1639 switch (header->fsf_status) { 1638 switch (header->fsf_status) {
1640 case FSF_PORT_HANDLE_NOT_VALID: 1639 case FSF_PORT_HANDLE_NOT_VALID:
1641 zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1", req); 1640 zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1");
1642 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1641 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1643 break; 1642 break;
1644 case FSF_ACCESS_DENIED: 1643 case FSF_ACCESS_DENIED:
@@ -1654,7 +1653,7 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
1654 &sdev_to_zfcp(sdev)->status); 1653 &sdev_to_zfcp(sdev)->status);
1655 zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED); 1654 zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED);
1656 zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, 1655 zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
1657 "fscpph2", req); 1656 "fscpph2");
1658 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1657 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1659 break; 1658 break;
1660 case FSF_ADAPTER_STATUS_AVAILABLE: 1659 case FSF_ADAPTER_STATUS_AVAILABLE:
@@ -1743,7 +1742,7 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
1743 switch (header->fsf_status) { 1742 switch (header->fsf_status) {
1744 1743
1745 case FSF_PORT_HANDLE_NOT_VALID: 1744 case FSF_PORT_HANDLE_NOT_VALID:
1746 zfcp_erp_adapter_reopen(adapter, 0, "fsouh_1", req); 1745 zfcp_erp_adapter_reopen(adapter, 0, "fsouh_1");
1747 /* fall through */ 1746 /* fall through */
1748 case FSF_LUN_ALREADY_OPEN: 1747 case FSF_LUN_ALREADY_OPEN:
1749 break; 1748 break;
@@ -1755,8 +1754,7 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
1755 zfcp_erp_set_port_status(zfcp_sdev->port, 1754 zfcp_erp_set_port_status(zfcp_sdev->port,
1756 ZFCP_STATUS_COMMON_ACCESS_BOXED); 1755 ZFCP_STATUS_COMMON_ACCESS_BOXED);
1757 zfcp_erp_port_reopen(zfcp_sdev->port, 1756 zfcp_erp_port_reopen(zfcp_sdev->port,
1758 ZFCP_STATUS_COMMON_ERP_FAILED, "fsouh_2", 1757 ZFCP_STATUS_COMMON_ERP_FAILED, "fsouh_2");
1759 req);
1760 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1758 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1761 break; 1759 break;
1762 case FSF_LUN_SHARING_VIOLATION: 1760 case FSF_LUN_SHARING_VIOLATION:
@@ -1852,20 +1850,18 @@ static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
1852 1850
1853 switch (req->qtcb->header.fsf_status) { 1851 switch (req->qtcb->header.fsf_status) {
1854 case FSF_PORT_HANDLE_NOT_VALID: 1852 case FSF_PORT_HANDLE_NOT_VALID:
1855 zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1", 1853 zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1");
1856 req);
1857 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1854 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1858 break; 1855 break;
1859 case FSF_LUN_HANDLE_NOT_VALID: 1856 case FSF_LUN_HANDLE_NOT_VALID:
1860 zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fscuh_2", req); 1857 zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fscuh_2");
1861 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1858 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1862 break; 1859 break;
1863 case FSF_PORT_BOXED: 1860 case FSF_PORT_BOXED:
1864 zfcp_erp_set_port_status(zfcp_sdev->port, 1861 zfcp_erp_set_port_status(zfcp_sdev->port,
1865 ZFCP_STATUS_COMMON_ACCESS_BOXED); 1862 ZFCP_STATUS_COMMON_ACCESS_BOXED);
1866 zfcp_erp_port_reopen(zfcp_sdev->port, 1863 zfcp_erp_port_reopen(zfcp_sdev->port,
1867 ZFCP_STATUS_COMMON_ERP_FAILED, "fscuh_3", 1864 ZFCP_STATUS_COMMON_ERP_FAILED, "fscuh_3");
1868 req);
1869 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1865 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1870 break; 1866 break;
1871 case FSF_ADAPTER_STATUS_AVAILABLE: 1867 case FSF_ADAPTER_STATUS_AVAILABLE:
@@ -2002,13 +1998,12 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
2002 switch (header->fsf_status) { 1998 switch (header->fsf_status) {
2003 case FSF_HANDLE_MISMATCH: 1999 case FSF_HANDLE_MISMATCH:
2004 case FSF_PORT_HANDLE_NOT_VALID: 2000 case FSF_PORT_HANDLE_NOT_VALID:
2005 zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fssfch1", 2001 zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fssfch1");
2006 req);
2007 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2002 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2008 break; 2003 break;
2009 case FSF_FCPLUN_NOT_VALID: 2004 case FSF_FCPLUN_NOT_VALID:
2010 case FSF_LUN_HANDLE_NOT_VALID: 2005 case FSF_LUN_HANDLE_NOT_VALID:
2011 zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fssfch2", req); 2006 zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fssfch2");
2012 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2007 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2013 break; 2008 break;
2014 case FSF_SERVICE_CLASS_NOT_SUPPORTED: 2009 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
@@ -2026,7 +2021,7 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
2026 (unsigned long long)zfcp_scsi_dev_lun(sdev), 2021 (unsigned long long)zfcp_scsi_dev_lun(sdev),
2027 (unsigned long long)zfcp_sdev->port->wwpn); 2022 (unsigned long long)zfcp_sdev->port->wwpn);
2028 zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0, 2023 zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0,
2029 "fssfch3", req); 2024 "fssfch3");
2030 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2025 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2031 break; 2026 break;
2032 case FSF_CMND_LENGTH_NOT_VALID: 2027 case FSF_CMND_LENGTH_NOT_VALID:
@@ -2037,21 +2032,20 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
2037 (unsigned long long)zfcp_scsi_dev_lun(sdev), 2032 (unsigned long long)zfcp_scsi_dev_lun(sdev),
2038 (unsigned long long)zfcp_sdev->port->wwpn); 2033 (unsigned long long)zfcp_sdev->port->wwpn);
2039 zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0, 2034 zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0,
2040 "fssfch4", req); 2035 "fssfch4");
2041 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2036 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2042 break; 2037 break;
2043 case FSF_PORT_BOXED: 2038 case FSF_PORT_BOXED:
2044 zfcp_erp_set_port_status(zfcp_sdev->port, 2039 zfcp_erp_set_port_status(zfcp_sdev->port,
2045 ZFCP_STATUS_COMMON_ACCESS_BOXED); 2040 ZFCP_STATUS_COMMON_ACCESS_BOXED);
2046 zfcp_erp_port_reopen(zfcp_sdev->port, 2041 zfcp_erp_port_reopen(zfcp_sdev->port,
2047 ZFCP_STATUS_COMMON_ERP_FAILED, "fssfch5", 2042 ZFCP_STATUS_COMMON_ERP_FAILED, "fssfch5");
2048 req);
2049 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2043 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2050 break; 2044 break;
2051 case FSF_LUN_BOXED: 2045 case FSF_LUN_BOXED:
2052 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED); 2046 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
2053 zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED, 2047 zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
2054 "fssfch6", req); 2048 "fssfch6");
2055 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2049 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2056 break; 2050 break;
2057 case FSF_ADAPTER_STATUS_AVAILABLE: 2051 case FSF_ADAPTER_STATUS_AVAILABLE:
@@ -2104,7 +2098,7 @@ static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req)
2104 2098
2105skip_fsfstatus: 2099skip_fsfstatus:
2106 zfcp_fsf_req_trace(req, scpnt); 2100 zfcp_fsf_req_trace(req, scpnt);
2107 zfcp_dbf_scsi_result(req->adapter->dbf, scpnt, req); 2101 zfcp_dbf_scsi_result(scpnt, req);
2108 2102
2109 scpnt->host_scribble = NULL; 2103 scpnt->host_scribble = NULL;
2110 (scpnt->scsi_done) (scpnt); 2104 (scpnt->scsi_done) (scpnt);
@@ -2420,3 +2414,12 @@ void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
2420 break; 2414 break;
2421 } 2415 }
2422} 2416}
2417
2418struct zfcp_fsf_req *zfcp_fsf_get_req(struct zfcp_qdio *qdio,
2419 struct qdio_buffer *sbal)
2420{
2421 struct qdio_buffer_element *sbale = &sbal->element[0];
2422 u64 req_id = (unsigned long) sbale->addr;
2423
2424 return zfcp_reqlist_find(qdio->adapter->req_list, req_id);
2425}
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index a0554beb4179..2511f92302dd 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -41,7 +41,7 @@ static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id,
41 zfcp_qdio_siosl(adapter); 41 zfcp_qdio_siosl(adapter);
42 zfcp_erp_adapter_reopen(adapter, 42 zfcp_erp_adapter_reopen(adapter,
43 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | 43 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
44 ZFCP_STATUS_COMMON_ERP_FAILED, id, NULL); 44 ZFCP_STATUS_COMMON_ERP_FAILED, id);
45} 45}
46 46
47static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt) 47static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
@@ -74,7 +74,6 @@ static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
74 struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm; 74 struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
75 75
76 if (unlikely(qdio_err)) { 76 if (unlikely(qdio_err)) {
77 zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, idx, count);
78 zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err); 77 zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err);
79 return; 78 return;
80 } 79 }
@@ -97,7 +96,6 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
97 int sbal_idx, sbal_no; 96 int sbal_idx, sbal_no;
98 97
99 if (unlikely(qdio_err)) { 98 if (unlikely(qdio_err)) {
100 zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, idx, count);
101 zfcp_qdio_handler_error(qdio, "qdires1", qdio_err); 99 zfcp_qdio_handler_error(qdio, "qdires1", qdio_err);
102 return; 100 return;
103 } 101 }
@@ -116,7 +114,7 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
116 * put SBALs back to response queue 114 * put SBALs back to response queue
117 */ 115 */
118 if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count)) 116 if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count))
119 zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2", NULL); 117 zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2");
120} 118}
121 119
122static struct qdio_buffer_element * 120static struct qdio_buffer_element *
@@ -236,7 +234,7 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
236 if (!ret) { 234 if (!ret) {
237 atomic_inc(&qdio->req_q_full); 235 atomic_inc(&qdio->req_q_full);
238 /* assume hanging outbound queue, try queue recovery */ 236 /* assume hanging outbound queue, try queue recovery */
239 zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1", NULL); 237 zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
240 } 238 }
241 239
242 spin_lock_irq(&qdio->req_q_lock); 240 spin_lock_irq(&qdio->req_q_lock);
@@ -309,6 +307,7 @@ static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
309 return -ENOMEM; 307 return -ENOMEM;
310 308
311 zfcp_qdio_setup_init_data(&init_data, qdio); 309 zfcp_qdio_setup_init_data(&init_data, qdio);
310 init_waitqueue_head(&qdio->req_q_wq);
312 311
313 return qdio_allocate(&init_data); 312 return qdio_allocate(&init_data);
314} 313}
@@ -393,6 +392,7 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
393 /* set index of first avalable SBALS / number of available SBALS */ 392 /* set index of first avalable SBALS / number of available SBALS */
394 qdio->req_q_idx = 0; 393 qdio->req_q_idx = 0;
395 atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q); 394 atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
395 atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
396 396
397 return 0; 397 return 0;
398 398
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 63529ed801eb..ddb5800823a9 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -30,6 +30,10 @@ module_param_named(dif, enable_dif, bool, 0600);
30MODULE_PARM_DESC(dif, "Enable DIF/DIX data integrity support"); 30MODULE_PARM_DESC(dif, "Enable DIF/DIX data integrity support");
31#endif 31#endif
32 32
33static bool allow_lun_scan = 1;
34module_param(allow_lun_scan, bool, 0600);
35MODULE_PARM_DESC(allow_lun_scan, "For NPIV, scan and attach all storage LUNs");
36
33static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth, 37static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth,
34 int reason) 38 int reason)
35{ 39{
@@ -68,11 +72,8 @@ static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
68 72
69static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result) 73static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
70{ 74{
71 struct zfcp_adapter *adapter =
72 (struct zfcp_adapter *) scpnt->device->host->hostdata[0];
73
74 set_host_byte(scpnt, result); 75 set_host_byte(scpnt, result);
75 zfcp_dbf_scsi_fail_send(adapter->dbf, scpnt); 76 zfcp_dbf_scsi_fail_send(scpnt);
76 scpnt->scsi_done(scpnt); 77 scpnt->scsi_done(scpnt);
77} 78}
78 79
@@ -80,7 +81,6 @@ static
80int zfcp_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scpnt) 81int zfcp_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scpnt)
81{ 82{
82 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device); 83 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
83 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
84 struct fc_rport *rport = starget_to_rport(scsi_target(scpnt->device)); 84 struct fc_rport *rport = starget_to_rport(scsi_target(scpnt->device));
85 int status, scsi_result, ret; 85 int status, scsi_result, ret;
86 86
@@ -91,7 +91,7 @@ int zfcp_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scpnt)
91 scsi_result = fc_remote_port_chkready(rport); 91 scsi_result = fc_remote_port_chkready(rport);
92 if (unlikely(scsi_result)) { 92 if (unlikely(scsi_result)) {
93 scpnt->result = scsi_result; 93 scpnt->result = scsi_result;
94 zfcp_dbf_scsi_fail_send(adapter->dbf, scpnt); 94 zfcp_dbf_scsi_fail_send(scpnt);
95 scpnt->scsi_done(scpnt); 95 scpnt->scsi_done(scpnt);
96 return 0; 96 return 0;
97 } 97 }
@@ -134,6 +134,7 @@ static int zfcp_scsi_slave_alloc(struct scsi_device *sdev)
134 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); 134 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
135 struct zfcp_port *port; 135 struct zfcp_port *port;
136 struct zfcp_unit *unit; 136 struct zfcp_unit *unit;
137 int npiv = adapter->connection_features & FSF_FEATURE_NPIV_MODE;
137 138
138 port = zfcp_get_port_by_wwpn(adapter, rport->port_name); 139 port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
139 if (!port) 140 if (!port)
@@ -143,7 +144,7 @@ static int zfcp_scsi_slave_alloc(struct scsi_device *sdev)
143 if (unit) 144 if (unit)
144 put_device(&unit->dev); 145 put_device(&unit->dev);
145 146
146 if (!unit && !(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) { 147 if (!unit && !(allow_lun_scan && npiv)) {
147 put_device(&port->dev); 148 put_device(&port->dev);
148 return -ENXIO; 149 return -ENXIO;
149 } 150 }
@@ -158,7 +159,7 @@ static int zfcp_scsi_slave_alloc(struct scsi_device *sdev)
158 spin_lock_init(&zfcp_sdev->latencies.lock); 159 spin_lock_init(&zfcp_sdev->latencies.lock);
159 160
160 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING); 161 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING);
161 zfcp_erp_lun_reopen(sdev, 0, "scsla_1", NULL); 162 zfcp_erp_lun_reopen(sdev, 0, "scsla_1");
162 zfcp_erp_wait(port->adapter); 163 zfcp_erp_wait(port->adapter);
163 164
164 return 0; 165 return 0;
@@ -182,8 +183,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
182 old_req = zfcp_reqlist_find(adapter->req_list, old_reqid); 183 old_req = zfcp_reqlist_find(adapter->req_list, old_reqid);
183 if (!old_req) { 184 if (!old_req) {
184 write_unlock_irqrestore(&adapter->abort_lock, flags); 185 write_unlock_irqrestore(&adapter->abort_lock, flags);
185 zfcp_dbf_scsi_abort("lte1", adapter->dbf, scpnt, NULL, 186 zfcp_dbf_scsi_abort("abrt_or", scpnt, NULL);
186 old_reqid);
187 return FAILED; /* completion could be in progress */ 187 return FAILED; /* completion could be in progress */
188 } 188 }
189 old_req->data = NULL; 189 old_req->data = NULL;
@@ -198,29 +198,32 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
198 198
199 zfcp_erp_wait(adapter); 199 zfcp_erp_wait(adapter);
200 ret = fc_block_scsi_eh(scpnt); 200 ret = fc_block_scsi_eh(scpnt);
201 if (ret) 201 if (ret) {
202 zfcp_dbf_scsi_abort("abrt_bl", scpnt, NULL);
202 return ret; 203 return ret;
204 }
203 if (!(atomic_read(&adapter->status) & 205 if (!(atomic_read(&adapter->status) &
204 ZFCP_STATUS_COMMON_RUNNING)) { 206 ZFCP_STATUS_COMMON_RUNNING)) {
205 zfcp_dbf_scsi_abort("nres", adapter->dbf, scpnt, NULL, 207 zfcp_dbf_scsi_abort("abrt_ru", scpnt, NULL);
206 old_reqid);
207 return SUCCESS; 208 return SUCCESS;
208 } 209 }
209 } 210 }
210 if (!abrt_req) 211 if (!abrt_req) {
212 zfcp_dbf_scsi_abort("abrt_ar", scpnt, NULL);
211 return FAILED; 213 return FAILED;
214 }
212 215
213 wait_for_completion(&abrt_req->completion); 216 wait_for_completion(&abrt_req->completion);
214 217
215 if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) 218 if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED)
216 dbf_tag = "okay"; 219 dbf_tag = "abrt_ok";
217 else if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) 220 else if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED)
218 dbf_tag = "lte2"; 221 dbf_tag = "abrt_nn";
219 else { 222 else {
220 dbf_tag = "fail"; 223 dbf_tag = "abrt_fa";
221 retval = FAILED; 224 retval = FAILED;
222 } 225 }
223 zfcp_dbf_scsi_abort(dbf_tag, adapter->dbf, scpnt, abrt_req, old_reqid); 226 zfcp_dbf_scsi_abort(dbf_tag, scpnt, abrt_req);
224 zfcp_fsf_req_free(abrt_req); 227 zfcp_fsf_req_free(abrt_req);
225 return retval; 228 return retval;
226} 229}
@@ -280,7 +283,7 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
280 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; 283 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
281 int ret; 284 int ret;
282 285
283 zfcp_erp_adapter_reopen(adapter, 0, "schrh_1", scpnt); 286 zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
284 zfcp_erp_wait(adapter); 287 zfcp_erp_wait(adapter);
285 ret = fc_block_scsi_eh(scpnt); 288 ret = fc_block_scsi_eh(scpnt);
286 if (ret) 289 if (ret)
@@ -518,7 +521,7 @@ static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
518 port = zfcp_get_port_by_wwpn(adapter, rport->port_name); 521 port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
519 522
520 if (port) { 523 if (port) {
521 zfcp_erp_port_forced_reopen(port, 0, "sctrpi1", NULL); 524 zfcp_erp_port_forced_reopen(port, 0, "sctrpi1");
522 put_device(&port->dev); 525 put_device(&port->dev);
523 } 526 }
524} 527}
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index 2f2c54f4718f..cdc4ff78a7ba 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -105,8 +105,7 @@ static ssize_t zfcp_sysfs_port_failed_store(struct device *dev,
105 return -EINVAL; 105 return -EINVAL;
106 106
107 zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_RUNNING); 107 zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_RUNNING);
108 zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, "sypfai2", 108 zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, "sypfai2");
109 NULL);
110 zfcp_erp_wait(port->adapter); 109 zfcp_erp_wait(port->adapter);
111 110
112 return count; 111 return count;
@@ -148,7 +147,7 @@ static ssize_t zfcp_sysfs_unit_failed_store(struct device *dev,
148 if (sdev) { 147 if (sdev) {
149 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING); 148 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING);
150 zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED, 149 zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
151 "syufai2", NULL); 150 "syufai2");
152 zfcp_erp_wait(unit->port->adapter); 151 zfcp_erp_wait(unit->port->adapter);
153 } else 152 } else
154 zfcp_unit_scsi_scan(unit); 153 zfcp_unit_scsi_scan(unit);
@@ -198,7 +197,7 @@ static ssize_t zfcp_sysfs_adapter_failed_store(struct device *dev,
198 197
199 zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING); 198 zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
200 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 199 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
201 "syafai2", NULL); 200 "syafai2");
202 zfcp_erp_wait(adapter); 201 zfcp_erp_wait(adapter);
203out: 202out:
204 zfcp_ccw_adapter_put(adapter); 203 zfcp_ccw_adapter_put(adapter);
@@ -256,7 +255,7 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
256 255
257 put_device(&port->dev); 256 put_device(&port->dev);
258 257
259 zfcp_erp_port_shutdown(port, 0, "syprs_1", NULL); 258 zfcp_erp_port_shutdown(port, 0, "syprs_1");
260 zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs); 259 zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs);
261 out: 260 out:
262 zfcp_ccw_adapter_put(adapter); 261 zfcp_ccw_adapter_put(adapter);
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 17e3df4f016f..1cadcd6b7da6 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -1171,9 +1171,8 @@ static int arcmsr_build_ccb(struct AdapterControlBlock *acb,
1171 arcmsr_cdb->msgPages = arccdbsize/0x100 + (arccdbsize % 0x100 ? 1 : 0); 1171 arcmsr_cdb->msgPages = arccdbsize/0x100 + (arccdbsize % 0x100 ? 1 : 0);
1172 if ( arccdbsize > 256) 1172 if ( arccdbsize > 256)
1173 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE; 1173 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
1174 if (pcmd->cmnd[0]|WRITE_6 || pcmd->cmnd[0]|WRITE_10 || pcmd->cmnd[0]|WRITE_12 ){ 1174 if (pcmd->sc_data_direction == DMA_TO_DEVICE)
1175 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE; 1175 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
1176 }
1177 ccb->arc_cdb_size = arccdbsize; 1176 ccb->arc_cdb_size = arccdbsize;
1178 return SUCCESS; 1177 return SUCCESS;
1179} 1178}
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 75a85aa9e882..79cefbe31367 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -3785,7 +3785,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
3785 dma_addr_t paddr; 3785 dma_addr_t paddr;
3786 3786
3787 io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool, 3787 io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
3788 GFP_KERNEL, &paddr); 3788 GFP_ATOMIC, &paddr);
3789 if (!io_task->cmd_bhs) 3789 if (!io_task->cmd_bhs)
3790 return -ENOMEM; 3790 return -ENOMEM;
3791 io_task->bhs_pa.u.a64.address = paddr; 3791 io_task->bhs_pa.u.a64.address = paddr;
@@ -3914,7 +3914,8 @@ static void beiscsi_cleanup_task(struct iscsi_task *task)
3914 io_task->psgl_handle = NULL; 3914 io_task->psgl_handle = NULL;
3915 } 3915 }
3916 } else { 3916 } else {
3917 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) 3917 if (task->hdr &&
3918 ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN))
3918 return; 3919 return;
3919 if (io_task->psgl_handle) { 3920 if (io_task->psgl_handle) {
3920 spin_lock(&phba->mgmt_sgl_lock); 3921 spin_lock(&phba->mgmt_sgl_lock);
diff --git a/drivers/scsi/bfa/Makefile b/drivers/scsi/bfa/Makefile
index d2eefd3e3bd5..4ce6f4942327 100644
--- a/drivers/scsi/bfa/Makefile
+++ b/drivers/scsi/bfa/Makefile
@@ -3,6 +3,4 @@ obj-$(CONFIG_SCSI_BFA_FC) := bfa.o
3bfa-y := bfad.o bfad_im.o bfad_attr.o bfad_debugfs.o 3bfa-y := bfad.o bfad_im.o bfad_attr.o bfad_debugfs.o
4bfa-y += bfa_ioc.o bfa_ioc_cb.o bfa_ioc_ct.o bfa_hw_cb.o bfa_hw_ct.o 4bfa-y += bfa_ioc.o bfa_ioc_cb.o bfa_ioc_ct.o bfa_hw_cb.o bfa_hw_ct.o
5bfa-y += bfa_fcs.o bfa_fcs_lport.o bfa_fcs_rport.o bfa_fcs_fcpim.o bfa_fcbuild.o 5bfa-y += bfa_fcs.o bfa_fcs_lport.o bfa_fcs_rport.o bfa_fcs_fcpim.o bfa_fcbuild.o
6bfa-y += bfa_port.o bfa_fcpim.o bfa_core.o bfa_drv.o bfa_svc.o 6bfa-y += bfa_port.o bfa_fcpim.o bfa_core.o bfa_svc.o
7
8ccflags-y := -DBFA_PERF_BUILD
diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
index ff2bd07161f7..7be6b5a8114b 100644
--- a/drivers/scsi/bfa/bfa.h
+++ b/drivers/scsi/bfa/bfa.h
@@ -17,7 +17,7 @@
17#ifndef __BFA_H__ 17#ifndef __BFA_H__
18#define __BFA_H__ 18#define __BFA_H__
19 19
20#include "bfa_os_inc.h" 20#include "bfad_drv.h"
21#include "bfa_cs.h" 21#include "bfa_cs.h"
22#include "bfa_plog.h" 22#include "bfa_plog.h"
23#include "bfa_defs_svc.h" 23#include "bfa_defs_svc.h"
@@ -33,7 +33,6 @@ typedef void (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete);
33 * Interrupt message handlers 33 * Interrupt message handlers
34 */ 34 */
35void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m); 35void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m);
36void bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func);
37 36
38/* 37/*
39 * Request and response queue related defines 38 * Request and response queue related defines
@@ -121,8 +120,8 @@ bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg),
121 \ 120 \
122 struct list_head *waitq = bfa_reqq(__bfa, __reqq); \ 121 struct list_head *waitq = bfa_reqq(__bfa, __reqq); \
123 \ 122 \
124 bfa_assert(((__reqq) < BFI_IOC_MAX_CQS)); \ 123 WARN_ON(((__reqq) >= BFI_IOC_MAX_CQS)); \
125 bfa_assert((__wqe)->qresume && (__wqe)->cbarg); \ 124 WARN_ON(!((__wqe)->qresume && (__wqe)->cbarg)); \
126 \ 125 \
127 list_add_tail(&(__wqe)->qe, waitq); \ 126 list_add_tail(&(__wqe)->qe, waitq); \
128 } while (0) 127 } while (0)
@@ -297,7 +296,6 @@ void bfa_iocfc_attach(struct bfa_s *bfa, void *bfad,
297 struct bfa_iocfc_cfg_s *cfg, 296 struct bfa_iocfc_cfg_s *cfg,
298 struct bfa_meminfo_s *meminfo, 297 struct bfa_meminfo_s *meminfo,
299 struct bfa_pcidev_s *pcidev); 298 struct bfa_pcidev_s *pcidev);
300void bfa_iocfc_detach(struct bfa_s *bfa);
301void bfa_iocfc_init(struct bfa_s *bfa); 299void bfa_iocfc_init(struct bfa_s *bfa);
302void bfa_iocfc_start(struct bfa_s *bfa); 300void bfa_iocfc_start(struct bfa_s *bfa);
303void bfa_iocfc_stop(struct bfa_s *bfa); 301void bfa_iocfc_stop(struct bfa_s *bfa);
@@ -333,12 +331,9 @@ void bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs,
333 u32 *maxvec); 331 u32 *maxvec);
334void bfa_hwct_msix_get_rme_range(struct bfa_s *bfa, u32 *start, 332void bfa_hwct_msix_get_rme_range(struct bfa_s *bfa, u32 *start,
335 u32 *end); 333 u32 *end);
336void bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi);
337void bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns); 334void bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns);
338wwn_t bfa_iocfc_get_pwwn(struct bfa_s *bfa); 335wwn_t bfa_iocfc_get_pwwn(struct bfa_s *bfa);
339wwn_t bfa_iocfc_get_nwwn(struct bfa_s *bfa); 336wwn_t bfa_iocfc_get_nwwn(struct bfa_s *bfa);
340void bfa_iocfc_get_pbc_boot_cfg(struct bfa_s *bfa,
341 struct bfa_boot_pbc_s *pbcfg);
342int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, 337int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa,
343 struct bfi_pbc_vport_s *pbc_vport); 338 struct bfi_pbc_vport_s *pbc_vport);
344 339
@@ -386,19 +381,11 @@ void bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg,
386void bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, 381void bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
387 struct bfa_meminfo_s *meminfo, 382 struct bfa_meminfo_s *meminfo,
388 struct bfa_pcidev_s *pcidev); 383 struct bfa_pcidev_s *pcidev);
389void bfa_init_trc(struct bfa_s *bfa, struct bfa_trc_mod_s *trcmod);
390void bfa_init_plog(struct bfa_s *bfa, struct bfa_plog_s *plog);
391void bfa_detach(struct bfa_s *bfa); 384void bfa_detach(struct bfa_s *bfa);
392void bfa_init(struct bfa_s *bfa);
393void bfa_start(struct bfa_s *bfa);
394void bfa_stop(struct bfa_s *bfa);
395void bfa_attach_fcs(struct bfa_s *bfa);
396void bfa_cb_init(void *bfad, bfa_status_t status); 385void bfa_cb_init(void *bfad, bfa_status_t status);
397void bfa_cb_updateq(void *bfad, bfa_status_t status); 386void bfa_cb_updateq(void *bfad, bfa_status_t status);
398 387
399bfa_boolean_t bfa_intx(struct bfa_s *bfa); 388bfa_boolean_t bfa_intx(struct bfa_s *bfa);
400void bfa_intx_disable(struct bfa_s *bfa);
401void bfa_intx_enable(struct bfa_s *bfa);
402void bfa_isr_enable(struct bfa_s *bfa); 389void bfa_isr_enable(struct bfa_s *bfa);
403void bfa_isr_disable(struct bfa_s *bfa); 390void bfa_isr_disable(struct bfa_s *bfa);
404 391
@@ -408,31 +395,14 @@ void bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q);
408 395
409typedef void (*bfa_cb_ioc_t) (void *cbarg, enum bfa_status status); 396typedef void (*bfa_cb_ioc_t) (void *cbarg, enum bfa_status status);
410void bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr); 397void bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr);
411void bfa_get_attr(struct bfa_s *bfa, struct bfa_ioc_attr_s *ioc_attr);
412 398
413void bfa_adapter_get_attr(struct bfa_s *bfa,
414 struct bfa_adapter_attr_s *ad_attr);
415u64 bfa_adapter_get_id(struct bfa_s *bfa);
416 399
417bfa_status_t bfa_iocfc_israttr_set(struct bfa_s *bfa, 400bfa_status_t bfa_iocfc_israttr_set(struct bfa_s *bfa,
418 struct bfa_iocfc_intr_attr_s *attr); 401 struct bfa_iocfc_intr_attr_s *attr);
419 402
420void bfa_iocfc_enable(struct bfa_s *bfa); 403void bfa_iocfc_enable(struct bfa_s *bfa);
421void bfa_iocfc_disable(struct bfa_s *bfa); 404void bfa_iocfc_disable(struct bfa_s *bfa);
422void bfa_chip_reset(struct bfa_s *bfa);
423void bfa_timer_tick(struct bfa_s *bfa);
424#define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout) \ 405#define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout) \
425 bfa_timer_begin(&(_bfa)->timer_mod, _timer, _timercb, _arg, _timeout) 406 bfa_timer_begin(&(_bfa)->timer_mod, _timer, _timercb, _arg, _timeout)
426 407
427/*
428 * BFA debug API functions
429 */
430bfa_status_t bfa_debug_fwtrc(struct bfa_s *bfa, void *trcdata, int *trclen);
431bfa_status_t bfa_debug_fwsave(struct bfa_s *bfa, void *trcdata, int *trclen);
432bfa_status_t bfa_debug_fwcore(struct bfa_s *bfa, void *buf,
433 u32 *offset, int *buflen);
434void bfa_debug_fwsave_clear(struct bfa_s *bfa);
435bfa_status_t bfa_fw_stats_get(struct bfa_s *bfa, void *data);
436bfa_status_t bfa_fw_stats_clear(struct bfa_s *bfa);
437
438#endif /* __BFA_H__ */ 408#endif /* __BFA_H__ */
diff --git a/drivers/scsi/bfa/bfa_cb_ioim.h b/drivers/scsi/bfa/bfa_cb_ioim.h
deleted file mode 100644
index 6f021015f1f6..000000000000
--- a/drivers/scsi/bfa/bfa_cb_ioim.h
+++ /dev/null
@@ -1,169 +0,0 @@
1/*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_HCB_IOIM_H__
19#define __BFA_HCB_IOIM_H__
20
21#include "bfa_os_inc.h"
22/*
23 * task attribute values in FCP-2 FCP_CMND IU
24 */
25#define SIMPLE_Q 0
26#define HEAD_OF_Q 1
27#define ORDERED_Q 2
28#define ACA_Q 4
29#define UNTAGGED 5
30
31static inline lun_t
32bfad_int_to_lun(u32 luno)
33{
34 union {
35 u16 scsi_lun[4];
36 lun_t bfa_lun;
37 } lun;
38
39 lun.bfa_lun = 0;
40 lun.scsi_lun[0] = cpu_to_be16(luno);
41
42 return lun.bfa_lun;
43}
44
45/*
46 * Get LUN for the I/O request
47 */
48#define bfa_cb_ioim_get_lun(__dio) \
49 bfad_int_to_lun(((struct scsi_cmnd *)__dio)->device->lun)
50
51/*
52 * Get CDB for the I/O request
53 */
54static inline u8 *
55bfa_cb_ioim_get_cdb(struct bfad_ioim_s *dio)
56{
57 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
58
59 return (u8 *) cmnd->cmnd;
60}
61
62/*
63 * Get I/O direction (read/write) for the I/O request
64 */
65static inline enum fcp_iodir
66bfa_cb_ioim_get_iodir(struct bfad_ioim_s *dio)
67{
68 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
69 enum dma_data_direction dmadir;
70
71 dmadir = cmnd->sc_data_direction;
72 if (dmadir == DMA_TO_DEVICE)
73 return FCP_IODIR_WRITE;
74 else if (dmadir == DMA_FROM_DEVICE)
75 return FCP_IODIR_READ;
76 else
77 return FCP_IODIR_NONE;
78}
79
80/*
81 * Get IO size in bytes for the I/O request
82 */
83static inline u32
84bfa_cb_ioim_get_size(struct bfad_ioim_s *dio)
85{
86 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
87
88 return scsi_bufflen(cmnd);
89}
90
91/*
92 * Get timeout for the I/O request
93 */
94static inline u8
95bfa_cb_ioim_get_timeout(struct bfad_ioim_s *dio)
96{
97 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
98 /*
99 * TBD: need a timeout for scsi passthru
100 */
101 if (cmnd->device->host == NULL)
102 return 4;
103
104 return 0;
105}
106
107/*
108 * Get Command Reference Number for the I/O request. 0 if none.
109 */
110static inline u8
111bfa_cb_ioim_get_crn(struct bfad_ioim_s *dio)
112{
113 return 0;
114}
115
116/*
117 * Get SAM-3 priority for the I/O request. 0 is default.
118 */
119static inline u8
120bfa_cb_ioim_get_priority(struct bfad_ioim_s *dio)
121{
122 return 0;
123}
124
125/*
126 * Get task attributes for the I/O request. Default is FCP_TASK_ATTR_SIMPLE(0).
127 */
128static inline u8
129bfa_cb_ioim_get_taskattr(struct bfad_ioim_s *dio)
130{
131 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
132 u8 task_attr = UNTAGGED;
133
134 if (cmnd->device->tagged_supported) {
135 switch (cmnd->tag) {
136 case HEAD_OF_QUEUE_TAG:
137 task_attr = HEAD_OF_Q;
138 break;
139 case ORDERED_QUEUE_TAG:
140 task_attr = ORDERED_Q;
141 break;
142 default:
143 task_attr = SIMPLE_Q;
144 break;
145 }
146 }
147
148 return task_attr;
149}
150
151/*
152 * Get CDB length in bytes for the I/O request. Default is FCP_CMND_CDB_LEN(16).
153 */
154static inline u8
155bfa_cb_ioim_get_cdblen(struct bfad_ioim_s *dio)
156{
157 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
158
159 return cmnd->cmd_len;
160}
161
162/*
163 * Assign queue to be used for the I/O request. This value depends on whether
164 * the driver wants to use the queues via any specific algorithm. Currently,
165 * this is not supported.
166 */
167#define bfa_cb_ioim_get_reqq(__dio) BFA_FALSE
168
169#endif /* __BFA_HCB_IOIM_H__ */
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index 2345f48dc57f..1cd5c8b0618d 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -15,13 +15,100 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18#include "bfad_drv.h"
18#include "bfa_modules.h" 19#include "bfa_modules.h"
19#include "bfi_ctreg.h" 20#include "bfi_ctreg.h"
20#include "bfad_drv.h"
21 21
22BFA_TRC_FILE(HAL, CORE); 22BFA_TRC_FILE(HAL, CORE);
23 23
24/* 24/*
25 * BFA module list terminated by NULL
26 */
27static struct bfa_module_s *hal_mods[] = {
28 &hal_mod_sgpg,
29 &hal_mod_fcport,
30 &hal_mod_fcxp,
31 &hal_mod_lps,
32 &hal_mod_uf,
33 &hal_mod_rport,
34 &hal_mod_fcpim,
35 NULL
36};
37
38/*
39 * Message handlers for various modules.
40 */
41static bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = {
42 bfa_isr_unhandled, /* NONE */
43 bfa_isr_unhandled, /* BFI_MC_IOC */
44 bfa_isr_unhandled, /* BFI_MC_DIAG */
45 bfa_isr_unhandled, /* BFI_MC_FLASH */
46 bfa_isr_unhandled, /* BFI_MC_CEE */
47 bfa_fcport_isr, /* BFI_MC_FCPORT */
48 bfa_isr_unhandled, /* BFI_MC_IOCFC */
49 bfa_isr_unhandled, /* BFI_MC_LL */
50 bfa_uf_isr, /* BFI_MC_UF */
51 bfa_fcxp_isr, /* BFI_MC_FCXP */
52 bfa_lps_isr, /* BFI_MC_LPS */
53 bfa_rport_isr, /* BFI_MC_RPORT */
54 bfa_itnim_isr, /* BFI_MC_ITNIM */
55 bfa_isr_unhandled, /* BFI_MC_IOIM_READ */
56 bfa_isr_unhandled, /* BFI_MC_IOIM_WRITE */
57 bfa_isr_unhandled, /* BFI_MC_IOIM_IO */
58 bfa_ioim_isr, /* BFI_MC_IOIM */
59 bfa_ioim_good_comp_isr, /* BFI_MC_IOIM_IOCOM */
60 bfa_tskim_isr, /* BFI_MC_TSKIM */
61 bfa_isr_unhandled, /* BFI_MC_SBOOT */
62 bfa_isr_unhandled, /* BFI_MC_IPFC */
63 bfa_isr_unhandled, /* BFI_MC_PORT */
64 bfa_isr_unhandled, /* --------- */
65 bfa_isr_unhandled, /* --------- */
66 bfa_isr_unhandled, /* --------- */
67 bfa_isr_unhandled, /* --------- */
68 bfa_isr_unhandled, /* --------- */
69 bfa_isr_unhandled, /* --------- */
70 bfa_isr_unhandled, /* --------- */
71 bfa_isr_unhandled, /* --------- */
72 bfa_isr_unhandled, /* --------- */
73 bfa_isr_unhandled, /* --------- */
74};
75/*
76 * Message handlers for mailbox command classes
77 */
78static bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = {
79 NULL,
80 NULL, /* BFI_MC_IOC */
81 NULL, /* BFI_MC_DIAG */
82 NULL, /* BFI_MC_FLASH */
83 NULL, /* BFI_MC_CEE */
84 NULL, /* BFI_MC_PORT */
85 bfa_iocfc_isr, /* BFI_MC_IOCFC */
86 NULL,
87};
88
89
90
91static void
92bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi)
93{
94 struct bfa_port_s *port = &bfa->modules.port;
95 u32 dm_len;
96 u8 *dm_kva;
97 u64 dm_pa;
98
99 dm_len = bfa_port_meminfo();
100 dm_kva = bfa_meminfo_dma_virt(mi);
101 dm_pa = bfa_meminfo_dma_phys(mi);
102
103 memset(port, 0, sizeof(struct bfa_port_s));
104 bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod);
105 bfa_port_mem_claim(port, dm_kva, dm_pa);
106
107 bfa_meminfo_dma_virt(mi) = dm_kva + dm_len;
108 bfa_meminfo_dma_phys(mi) = dm_pa + dm_len;
109}
110
111/*
25 * BFA IOC FC related definitions 112 * BFA IOC FC related definitions
26 */ 113 */
27 114
@@ -67,18 +154,6 @@ static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
67 * BFA Interrupt handling functions 154 * BFA Interrupt handling functions
68 */ 155 */
69static void 156static void
70bfa_msix_errint(struct bfa_s *bfa, u32 intr)
71{
72 bfa_ioc_error_isr(&bfa->ioc);
73}
74
75static void
76bfa_msix_lpu(struct bfa_s *bfa)
77{
78 bfa_ioc_mbox_isr(&bfa->ioc);
79}
80
81static void
82bfa_reqq_resume(struct bfa_s *bfa, int qid) 157bfa_reqq_resume(struct bfa_s *bfa, int qid)
83{ 158{
84 struct list_head *waitq, *qe, *qen; 159 struct list_head *waitq, *qe, *qen;
@@ -104,9 +179,6 @@ bfa_msix_all(struct bfa_s *bfa, int vec)
104 bfa_intx(bfa); 179 bfa_intx(bfa);
105} 180}
106 181
107/*
108 * hal_intr_api
109 */
110bfa_boolean_t 182bfa_boolean_t
111bfa_intx(struct bfa_s *bfa) 183bfa_intx(struct bfa_s *bfa)
112{ 184{
@@ -151,18 +223,6 @@ bfa_intx(struct bfa_s *bfa)
151} 223}
152 224
153void 225void
154bfa_intx_enable(struct bfa_s *bfa)
155{
156 writel(bfa->iocfc.intr_mask, bfa->iocfc.bfa_regs.intr_mask);
157}
158
159void
160bfa_intx_disable(struct bfa_s *bfa)
161{
162 writel(-1L, bfa->iocfc.bfa_regs.intr_mask);
163}
164
165void
166bfa_isr_enable(struct bfa_s *bfa) 226bfa_isr_enable(struct bfa_s *bfa)
167{ 227{
168 u32 intr_unmask; 228 u32 intr_unmask;
@@ -225,7 +285,7 @@ bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
225 bfa_trc(bfa, m->mhdr.msg_class); 285 bfa_trc(bfa, m->mhdr.msg_class);
226 bfa_trc(bfa, m->mhdr.msg_id); 286 bfa_trc(bfa, m->mhdr.msg_id);
227 bfa_trc(bfa, m->mhdr.mtag.i2htok); 287 bfa_trc(bfa, m->mhdr.mtag.i2htok);
228 bfa_assert(0); 288 WARN_ON(1);
229 bfa_trc_stop(bfa->trcmod); 289 bfa_trc_stop(bfa->trcmod);
230} 290}
231 291
@@ -236,8 +296,6 @@ bfa_msix_rspq(struct bfa_s *bfa, int qid)
236 u32 pi, ci; 296 u32 pi, ci;
237 struct list_head *waitq; 297 struct list_head *waitq;
238 298
239 bfa_trc_fp(bfa, qid);
240
241 qid &= (BFI_IOC_MAX_CQS - 1); 299 qid &= (BFI_IOC_MAX_CQS - 1);
242 300
243 bfa->iocfc.hwif.hw_rspq_ack(bfa, qid); 301 bfa->iocfc.hwif.hw_rspq_ack(bfa, qid);
@@ -245,16 +303,10 @@ bfa_msix_rspq(struct bfa_s *bfa, int qid)
245 ci = bfa_rspq_ci(bfa, qid); 303 ci = bfa_rspq_ci(bfa, qid);
246 pi = bfa_rspq_pi(bfa, qid); 304 pi = bfa_rspq_pi(bfa, qid);
247 305
248 bfa_trc_fp(bfa, ci);
249 bfa_trc_fp(bfa, pi);
250
251 if (bfa->rme_process) { 306 if (bfa->rme_process) {
252 while (ci != pi) { 307 while (ci != pi) {
253 m = bfa_rspq_elem(bfa, qid, ci); 308 m = bfa_rspq_elem(bfa, qid, ci);
254 bfa_assert_fp(m->mhdr.msg_class < BFI_MC_MAX);
255
256 bfa_isrs[m->mhdr.msg_class] (bfa, m); 309 bfa_isrs[m->mhdr.msg_class] (bfa, m);
257
258 CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems); 310 CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
259 } 311 }
260 } 312 }
@@ -282,7 +334,7 @@ bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
282 intr = readl(bfa->iocfc.bfa_regs.intr_status); 334 intr = readl(bfa->iocfc.bfa_regs.intr_status);
283 335
284 if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1)) 336 if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1))
285 bfa_msix_lpu(bfa); 337 bfa_ioc_mbox_isr(&bfa->ioc);
286 338
287 intr &= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 | 339 intr &= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
288 __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT); 340 __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT);
@@ -313,22 +365,16 @@ bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
313 } 365 }
314 366
315 writel(intr, bfa->iocfc.bfa_regs.intr_status); 367 writel(intr, bfa->iocfc.bfa_regs.intr_status);
316 bfa_msix_errint(bfa, intr); 368 bfa_ioc_error_isr(&bfa->ioc);
317 } 369 }
318} 370}
319 371
320void
321bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func)
322{
323 bfa_isrs[mc] = isr_func;
324}
325
326/* 372/*
327 * BFA IOC FC related functions 373 * BFA IOC FC related functions
328 */ 374 */
329 375
330/* 376/*
331 * hal_ioc_pvt BFA IOC private functions 377 * BFA IOC private functions
332 */ 378 */
333 379
334static void 380static void
@@ -379,7 +425,7 @@ bfa_iocfc_send_cfg(void *bfa_arg)
379 struct bfa_iocfc_cfg_s *cfg = &iocfc->cfg; 425 struct bfa_iocfc_cfg_s *cfg = &iocfc->cfg;
380 int i; 426 int i;
381 427
382 bfa_assert(cfg->fwcfg.num_cqs <= BFI_IOC_MAX_CQS); 428 WARN_ON(cfg->fwcfg.num_cqs > BFI_IOC_MAX_CQS);
383 bfa_trc(bfa, cfg->fwcfg.num_cqs); 429 bfa_trc(bfa, cfg->fwcfg.num_cqs);
384 430
385 bfa_iocfc_reset_queues(bfa); 431 bfa_iocfc_reset_queues(bfa);
@@ -488,8 +534,8 @@ bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
488 * First allocate dma memory for IOC. 534 * First allocate dma memory for IOC.
489 */ 535 */
490 bfa_ioc_mem_claim(&bfa->ioc, dm_kva, dm_pa); 536 bfa_ioc_mem_claim(&bfa->ioc, dm_kva, dm_pa);
491 dm_kva += bfa_ioc_meminfo(); 537 dm_kva += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
492 dm_pa += bfa_ioc_meminfo(); 538 dm_pa += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
493 539
494 /* 540 /*
495 * Claim DMA-able memory for the request/response queues and for shadow 541 * Claim DMA-able memory for the request/response queues and for shadow
@@ -552,7 +598,7 @@ bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
552 bfa_meminfo_dma_virt(meminfo) = dm_kva; 598 bfa_meminfo_dma_virt(meminfo) = dm_kva;
553 bfa_meminfo_dma_phys(meminfo) = dm_pa; 599 bfa_meminfo_dma_phys(meminfo) = dm_pa;
554 600
555 dbgsz = bfa_ioc_debug_trcsz(bfa_auto_recover); 601 dbgsz = (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
556 if (dbgsz > 0) { 602 if (dbgsz > 0) {
557 bfa_ioc_debug_memclaim(&bfa->ioc, bfa_meminfo_kva(meminfo)); 603 bfa_ioc_debug_memclaim(&bfa->ioc, bfa_meminfo_kva(meminfo));
558 bfa_meminfo_kva(meminfo) += dbgsz; 604 bfa_meminfo_kva(meminfo) += dbgsz;
@@ -699,7 +745,7 @@ bfa_iocfc_disable_cbfn(void *bfa_arg)
699 bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb, 745 bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb,
700 bfa); 746 bfa);
701 else { 747 else {
702 bfa_assert(bfa->iocfc.action == BFA_IOCFC_ACT_DISABLE); 748 WARN_ON(bfa->iocfc.action != BFA_IOCFC_ACT_DISABLE);
703 bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb, 749 bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb,
704 bfa); 750 bfa);
705 } 751 }
@@ -735,9 +781,6 @@ bfa_iocfc_reset_cbfn(void *bfa_arg)
735 bfa_isr_enable(bfa); 781 bfa_isr_enable(bfa);
736} 782}
737 783
738/*
739 * hal_ioc_public
740 */
741 784
742/* 785/*
743 * Query IOC memory requirement information. 786 * Query IOC memory requirement information.
@@ -747,11 +790,11 @@ bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
747 u32 *dm_len) 790 u32 *dm_len)
748{ 791{
749 /* dma memory for IOC */ 792 /* dma memory for IOC */
750 *dm_len += bfa_ioc_meminfo(); 793 *dm_len += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
751 794
752 bfa_iocfc_fw_cfg_sz(cfg, dm_len); 795 bfa_iocfc_fw_cfg_sz(cfg, dm_len);
753 bfa_iocfc_cqs_sz(cfg, dm_len); 796 bfa_iocfc_cqs_sz(cfg, dm_len);
754 *km_len += bfa_ioc_debug_trcsz(bfa_auto_recover); 797 *km_len += (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
755} 798}
756 799
757/* 800/*
@@ -783,7 +826,7 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
783 826
784 bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev); 827 bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
785 bfa_iocfc_mem_claim(bfa, cfg, meminfo); 828 bfa_iocfc_mem_claim(bfa, cfg, meminfo);
786 bfa_timer_init(&bfa->timer_mod); 829 INIT_LIST_HEAD(&bfa->timer_mod.timer_q);
787 830
788 INIT_LIST_HEAD(&bfa->comp_q); 831 INIT_LIST_HEAD(&bfa->comp_q);
789 for (i = 0; i < BFI_IOC_MAX_CQS; i++) 832 for (i = 0; i < BFI_IOC_MAX_CQS; i++)
@@ -794,15 +837,6 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
794 * Query IOC memory requirement information. 837 * Query IOC memory requirement information.
795 */ 838 */
796void 839void
797bfa_iocfc_detach(struct bfa_s *bfa)
798{
799 bfa_ioc_detach(&bfa->ioc);
800}
801
802/*
803 * Query IOC memory requirement information.
804 */
805void
806bfa_iocfc_init(struct bfa_s *bfa) 840bfa_iocfc_init(struct bfa_s *bfa)
807{ 841{
808 bfa->iocfc.action = BFA_IOCFC_ACT_INIT; 842 bfa->iocfc.action = BFA_IOCFC_ACT_INIT;
@@ -852,23 +886,11 @@ bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
852 iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK); 886 iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
853 break; 887 break;
854 default: 888 default:
855 bfa_assert(0); 889 WARN_ON(1);
856 } 890 }
857} 891}
858 892
859void 893void
860bfa_adapter_get_attr(struct bfa_s *bfa, struct bfa_adapter_attr_s *ad_attr)
861{
862 bfa_ioc_get_adapter_attr(&bfa->ioc, ad_attr);
863}
864
865u64
866bfa_adapter_get_id(struct bfa_s *bfa)
867{
868 return bfa_ioc_get_adid(&bfa->ioc);
869}
870
871void
872bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr) 894bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
873{ 895{
874 struct bfa_iocfc_s *iocfc = &bfa->iocfc; 896 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
@@ -976,18 +998,6 @@ bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns)
976 memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn)); 998 memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn));
977} 999}
978 1000
979void
980bfa_iocfc_get_pbc_boot_cfg(struct bfa_s *bfa, struct bfa_boot_pbc_s *pbcfg)
981{
982 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
983 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
984
985 pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled;
986 pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns;
987 pbcfg->speed = cfgrsp->pbc_cfg.port_speed;
988 memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun));
989}
990
991int 1001int
992bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport) 1002bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
993{ 1003{
@@ -998,9 +1008,6 @@ bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
998 return cfgrsp->pbc_cfg.nvports; 1008 return cfgrsp->pbc_cfg.nvports;
999} 1009}
1000 1010
1001/*
1002 * hal_api
1003 */
1004 1011
1005/* 1012/*
1006 * Use this function query the memory requirement of the BFA library. 1013 * Use this function query the memory requirement of the BFA library.
@@ -1036,7 +1043,7 @@ bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo)
1036 int i; 1043 int i;
1037 u32 km_len = 0, dm_len = 0; 1044 u32 km_len = 0, dm_len = 0;
1038 1045
1039 bfa_assert((cfg != NULL) && (meminfo != NULL)); 1046 WARN_ON((cfg == NULL) || (meminfo == NULL));
1040 1047
1041 memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s)); 1048 memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));
1042 meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_type = 1049 meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_type =
@@ -1090,7 +1097,7 @@ bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
1090 1097
1091 bfa->fcs = BFA_FALSE; 1098 bfa->fcs = BFA_FALSE;
1092 1099
1093 bfa_assert((cfg != NULL) && (meminfo != NULL)); 1100 WARN_ON((cfg == NULL) || (meminfo == NULL));
1094 1101
1095 /* 1102 /*
1096 * initialize all memory pointers for iterative allocation 1103 * initialize all memory pointers for iterative allocation
@@ -1129,79 +1136,7 @@ bfa_detach(struct bfa_s *bfa)
1129 1136
1130 for (i = 0; hal_mods[i]; i++) 1137 for (i = 0; hal_mods[i]; i++)
1131 hal_mods[i]->detach(bfa); 1138 hal_mods[i]->detach(bfa);
1132 1139 bfa_ioc_detach(&bfa->ioc);
1133 bfa_iocfc_detach(bfa);
1134}
1135
1136
1137void
1138bfa_init_trc(struct bfa_s *bfa, struct bfa_trc_mod_s *trcmod)
1139{
1140 bfa->trcmod = trcmod;
1141}
1142
1143void
1144bfa_init_plog(struct bfa_s *bfa, struct bfa_plog_s *plog)
1145{
1146 bfa->plog = plog;
1147}
1148
1149/*
1150 * Initialize IOC.
1151 *
1152 * This function will return immediately, when the IOC initialization is
1153 * completed, the bfa_cb_init() will be called.
1154 *
1155 * @param[in] bfa instance
1156 *
1157 * @return void
1158 *
1159 * Special Considerations:
1160 *
1161 * @note
1162 * When this function returns, the driver should register the interrupt service
1163 * routine(s) and enable the device interrupts. If this is not done,
1164 * bfa_cb_init() will never get called
1165 */
1166void
1167bfa_init(struct bfa_s *bfa)
1168{
1169 bfa_iocfc_init(bfa);
1170}
1171
1172/*
1173 * Use this function initiate the IOC configuration setup. This function
1174 * will return immediately.
1175 *
1176 * @param[in] bfa instance
1177 *
1178 * @return None
1179 */
1180void
1181bfa_start(struct bfa_s *bfa)
1182{
1183 bfa_iocfc_start(bfa);
1184}
1185
1186/*
1187 * Use this function quiese the IOC. This function will return immediately,
1188 * when the IOC is actually stopped, the bfad->comp will be set.
1189 *
1190 * @param[in]bfa - pointer to bfa_t.
1191 *
1192 * @return None
1193 *
1194 * Special Considerations:
1195 * bfad->comp can be set before or after bfa_stop() returns.
1196 *
1197 * @note
1198 * In case of any failure, we could handle it automatically by doing a
1199 * reset and then succeed the bfa_stop() call.
1200 */
1201void
1202bfa_stop(struct bfa_s *bfa)
1203{
1204 bfa_iocfc_stop(bfa);
1205} 1140}
1206 1141
1207void 1142void
@@ -1237,20 +1172,6 @@ bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
1237 } 1172 }
1238} 1173}
1239 1174
1240void
1241bfa_attach_fcs(struct bfa_s *bfa)
1242{
1243 bfa->fcs = BFA_TRUE;
1244}
1245
1246/*
1247 * Periodic timer heart beat from driver
1248 */
1249void
1250bfa_timer_tick(struct bfa_s *bfa)
1251{
1252 bfa_timer_beat(&bfa->timer_mod);
1253}
1254 1175
1255/* 1176/*
1256 * Return the list of PCI vendor/device id lists supported by this 1177 * Return the list of PCI vendor/device id lists supported by this
@@ -1321,89 +1242,3 @@ bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg)
1321 cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN; 1242 cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN;
1322 cfg->drvcfg.min_cfg = BFA_TRUE; 1243 cfg->drvcfg.min_cfg = BFA_TRUE;
1323} 1244}
1324
1325void
1326bfa_get_attr(struct bfa_s *bfa, struct bfa_ioc_attr_s *ioc_attr)
1327{
1328 bfa_ioc_get_attr(&bfa->ioc, ioc_attr);
1329}
1330
1331/*
1332 * Retrieve firmware trace information on IOC failure.
1333 */
1334bfa_status_t
1335bfa_debug_fwsave(struct bfa_s *bfa, void *trcdata, int *trclen)
1336{
1337 return bfa_ioc_debug_fwsave(&bfa->ioc, trcdata, trclen);
1338}
1339
1340/*
1341 * Clear the saved firmware trace information of an IOC.
1342 */
1343void
1344bfa_debug_fwsave_clear(struct bfa_s *bfa)
1345{
1346 bfa_ioc_debug_fwsave_clear(&bfa->ioc);
1347}
1348
1349/*
1350 * Fetch firmware trace data.
1351 *
1352 * @param[in] bfa BFA instance
1353 * @param[out] trcdata Firmware trace buffer
1354 * @param[in,out] trclen Firmware trace buffer len
1355 *
1356 * @retval BFA_STATUS_OK Firmware trace is fetched.
1357 * @retval BFA_STATUS_INPROGRESS Firmware trace fetch is in progress.
1358 */
1359bfa_status_t
1360bfa_debug_fwtrc(struct bfa_s *bfa, void *trcdata, int *trclen)
1361{
1362 return bfa_ioc_debug_fwtrc(&bfa->ioc, trcdata, trclen);
1363}
1364
1365/*
1366 * Dump firmware memory.
1367 *
1368 * @param[in] bfa BFA instance
1369 * @param[out] buf buffer for dump
1370 * @param[in,out] offset smem offset to start read
1371 * @param[in,out] buflen length of buffer
1372 *
1373 * @retval BFA_STATUS_OK Firmware memory is dumped.
1374 * @retval BFA_STATUS_INPROGRESS Firmware memory dump is in progress.
1375 */
1376bfa_status_t
1377bfa_debug_fwcore(struct bfa_s *bfa, void *buf, u32 *offset, int *buflen)
1378{
1379 return bfa_ioc_debug_fwcore(&bfa->ioc, buf, offset, buflen);
1380}
1381/*
1382 * Reset hw semaphore & usage cnt regs and initialize.
1383 */
1384void
1385bfa_chip_reset(struct bfa_s *bfa)
1386{
1387 bfa_ioc_ownership_reset(&bfa->ioc);
1388 bfa_ioc_pll_init(&bfa->ioc);
1389}
1390
1391/*
1392 * Fetch firmware statistics data.
1393 *
1394 * @param[in] bfa BFA instance
1395 * @param[out] data Firmware stats buffer
1396 *
1397 * @retval BFA_STATUS_OK Firmware trace is fetched.
1398 */
1399bfa_status_t
1400bfa_fw_stats_get(struct bfa_s *bfa, void *data)
1401{
1402 return bfa_ioc_fw_stats_get(&bfa->ioc, data);
1403}
1404
1405bfa_status_t
1406bfa_fw_stats_clear(struct bfa_s *bfa)
1407{
1408 return bfa_ioc_fw_stats_clear(&bfa->ioc);
1409}
diff --git a/drivers/scsi/bfa/bfa_cs.h b/drivers/scsi/bfa/bfa_cs.h
index 99f242b9aa31..12bfeed268eb 100644
--- a/drivers/scsi/bfa/bfa_cs.h
+++ b/drivers/scsi/bfa/bfa_cs.h
@@ -22,7 +22,7 @@
22#ifndef __BFA_CS_H__ 22#ifndef __BFA_CS_H__
23#define __BFA_CS_H__ 23#define __BFA_CS_H__
24 24
25#include "bfa_os_inc.h" 25#include "bfad_drv.h"
26 26
27/* 27/*
28 * BFA TRC 28 * BFA TRC
@@ -32,12 +32,20 @@
32#define BFA_TRC_MAX (4 * 1024) 32#define BFA_TRC_MAX (4 * 1024)
33#endif 33#endif
34 34
35#define BFA_TRC_TS(_trcm) \
36 ({ \
37 struct timeval tv; \
38 \
39 do_gettimeofday(&tv); \
40 (tv.tv_sec*1000000+tv.tv_usec); \
41 })
42
35#ifndef BFA_TRC_TS 43#ifndef BFA_TRC_TS
36#define BFA_TRC_TS(_trcm) ((_trcm)->ticks++) 44#define BFA_TRC_TS(_trcm) ((_trcm)->ticks++)
37#endif 45#endif
38 46
39struct bfa_trc_s { 47struct bfa_trc_s {
40#ifdef __BIGENDIAN 48#ifdef __BIG_ENDIAN
41 u16 fileno; 49 u16 fileno;
42 u16 line; 50 u16 line;
43#else 51#else
@@ -99,13 +107,6 @@ bfa_trc_stop(struct bfa_trc_mod_s *trcm)
99 trcm->stopped = 1; 107 trcm->stopped = 1;
100} 108}
101 109
102#ifdef FWTRC
103extern void dc_flush(void *data);
104#else
105#define dc_flush(data)
106#endif
107
108
109static inline void 110static inline void
110__bfa_trc(struct bfa_trc_mod_s *trcm, int fileno, int line, u64 data) 111__bfa_trc(struct bfa_trc_mod_s *trcm, int fileno, int line, u64 data)
111{ 112{
@@ -119,12 +120,10 @@ __bfa_trc(struct bfa_trc_mod_s *trcm, int fileno, int line, u64 data)
119 trc->line = (u16) line; 120 trc->line = (u16) line;
120 trc->data.u64 = data; 121 trc->data.u64 = data;
121 trc->timestamp = BFA_TRC_TS(trcm); 122 trc->timestamp = BFA_TRC_TS(trcm);
122 dc_flush(trc);
123 123
124 trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1); 124 trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1);
125 if (trcm->tail == trcm->head) 125 if (trcm->tail == trcm->head)
126 trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1); 126 trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1);
127 dc_flush(trcm);
128} 127}
129 128
130 129
@@ -141,42 +140,18 @@ __bfa_trc32(struct bfa_trc_mod_s *trcm, int fileno, int line, u32 data)
141 trc->line = (u16) line; 140 trc->line = (u16) line;
142 trc->data.u32.u32 = data; 141 trc->data.u32.u32 = data;
143 trc->timestamp = BFA_TRC_TS(trcm); 142 trc->timestamp = BFA_TRC_TS(trcm);
144 dc_flush(trc);
145 143
146 trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1); 144 trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1);
147 if (trcm->tail == trcm->head) 145 if (trcm->tail == trcm->head)
148 trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1); 146 trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1);
149 dc_flush(trcm);
150} 147}
151 148
152#ifndef BFA_PERF_BUILD
153#define bfa_trc_fp(_trcp, _data) bfa_trc(_trcp, _data)
154#else
155#define bfa_trc_fp(_trcp, _data)
156#endif
157
158/*
159 * @ BFA LOG interfaces
160 */
161#define bfa_assert(__cond) do { \
162 if (!(__cond)) { \
163 printk(KERN_ERR "assert(%s) failed at %s:%d\\n", \
164 #__cond, __FILE__, __LINE__); \
165 } \
166} while (0)
167
168#define bfa_sm_fault(__mod, __event) do { \ 149#define bfa_sm_fault(__mod, __event) do { \
169 bfa_trc(__mod, (((u32)0xDEAD << 16) | __event)); \ 150 bfa_trc(__mod, (((u32)0xDEAD << 16) | __event)); \
170 printk(KERN_ERR "Assertion failure: %s:%d: %d", \ 151 printk(KERN_ERR "Assertion failure: %s:%d: %d", \
171 __FILE__, __LINE__, (__event)); \ 152 __FILE__, __LINE__, (__event)); \
172} while (0) 153} while (0)
173 154
174#ifndef BFA_PERF_BUILD
175#define bfa_assert_fp(__cond) bfa_assert(__cond)
176#else
177#define bfa_assert_fp(__cond)
178#endif
179
180/* BFA queue definitions */ 155/* BFA queue definitions */
181#define bfa_q_first(_q) ((void *)(((struct list_head *) (_q))->next)) 156#define bfa_q_first(_q) ((void *)(((struct list_head *) (_q))->next))
182#define bfa_q_next(_qe) (((struct list_head *) (_qe))->next) 157#define bfa_q_next(_qe) (((struct list_head *) (_qe))->next)
@@ -199,7 +174,6 @@ __bfa_trc32(struct bfa_trc_mod_s *trcm, int fileno, int line, u32 data)
199 bfa_q_prev(bfa_q_next(*((struct list_head **) _qe))) = \ 174 bfa_q_prev(bfa_q_next(*((struct list_head **) _qe))) = \
200 (struct list_head *) (_q); \ 175 (struct list_head *) (_q); \
201 bfa_q_next(_q) = bfa_q_next(*((struct list_head **) _qe));\ 176 bfa_q_next(_q) = bfa_q_next(*((struct list_head **) _qe));\
202 BFA_Q_DBG_INIT(*((struct list_head **) _qe)); \
203 } else { \ 177 } else { \
204 *((struct list_head **) (_qe)) = (struct list_head *) NULL;\ 178 *((struct list_head **) (_qe)) = (struct list_head *) NULL;\
205 } \ 179 } \
@@ -214,7 +188,6 @@ __bfa_trc32(struct bfa_trc_mod_s *trcm, int fileno, int line, u32 data)
214 bfa_q_next(bfa_q_prev(*((struct list_head **) _qe))) = \ 188 bfa_q_next(bfa_q_prev(*((struct list_head **) _qe))) = \
215 (struct list_head *) (_q); \ 189 (struct list_head *) (_q); \
216 bfa_q_prev(_q) = bfa_q_prev(*(struct list_head **) _qe);\ 190 bfa_q_prev(_q) = bfa_q_prev(*(struct list_head **) _qe);\
217 BFA_Q_DBG_INIT(*((struct list_head **) _qe)); \
218 } else { \ 191 } else { \
219 *((struct list_head **) (_qe)) = (struct list_head *) NULL;\ 192 *((struct list_head **) (_qe)) = (struct list_head *) NULL;\
220 } \ 193 } \
@@ -236,16 +209,6 @@ bfa_q_is_on_q_func(struct list_head *q, struct list_head *qe)
236 return 0; 209 return 0;
237} 210}
238 211
239/*
240 * #ifdef BFA_DEBUG (Using bfa_assert to check for debug_build is not
241 * consistent across modules)
242 */
243#ifndef BFA_PERF_BUILD
244#define BFA_Q_DBG_INIT(_qe) bfa_q_qe_init(_qe)
245#else
246#define BFA_Q_DBG_INIT(_qe)
247#endif
248
249#define bfa_q_is_on_q(_q, _qe) \ 212#define bfa_q_is_on_q(_q, _qe) \
250 bfa_q_is_on_q_func(_q, (struct list_head *)(_qe)) 213 bfa_q_is_on_q_func(_q, (struct list_head *)(_qe))
251 214
@@ -361,4 +324,43 @@ bfa_wc_wait(struct bfa_wc_s *wc)
361 bfa_wc_down(wc); 324 bfa_wc_down(wc);
362} 325}
363 326
327static inline void
328wwn2str(char *wwn_str, u64 wwn)
329{
330 union {
331 u64 wwn;
332 u8 byte[8];
333 } w;
334
335 w.wwn = wwn;
336 sprintf(wwn_str, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x", w.byte[0],
337 w.byte[1], w.byte[2], w.byte[3], w.byte[4], w.byte[5],
338 w.byte[6], w.byte[7]);
339}
340
341static inline void
342fcid2str(char *fcid_str, u32 fcid)
343{
344 union {
345 u32 fcid;
346 u8 byte[4];
347 } f;
348
349 f.fcid = fcid;
350 sprintf(fcid_str, "%02x:%02x:%02x", f.byte[1], f.byte[2], f.byte[3]);
351}
352
353#define bfa_swap_3b(_x) \
354 ((((_x) & 0xff) << 16) | \
355 ((_x) & 0x00ff00) | \
356 (((_x) & 0xff0000) >> 16))
357
358#ifndef __BIG_ENDIAN
359#define bfa_hton3b(_x) bfa_swap_3b(_x)
360#else
361#define bfa_hton3b(_x) (_x)
362#endif
363
364#define bfa_ntoh3b(_x) bfa_hton3b(_x)
365
364#endif /* __BFA_CS_H__ */ 366#endif /* __BFA_CS_H__ */
diff --git a/drivers/scsi/bfa/bfa_defs.h b/drivers/scsi/bfa/bfa_defs.h
index 4b5b9e35abb9..d85f93aea465 100644
--- a/drivers/scsi/bfa/bfa_defs.h
+++ b/drivers/scsi/bfa/bfa_defs.h
@@ -19,7 +19,7 @@
19#define __BFA_DEFS_H__ 19#define __BFA_DEFS_H__
20 20
21#include "bfa_fc.h" 21#include "bfa_fc.h"
22#include "bfa_os_inc.h" 22#include "bfad_drv.h"
23 23
24#define BFA_MFG_SERIALNUM_SIZE 11 24#define BFA_MFG_SERIALNUM_SIZE 11
25#define STRSZ(_n) (((_n) + 4) & ~3) 25#define STRSZ(_n) (((_n) + 4) & ~3)
@@ -446,8 +446,8 @@ enum bfa_boot_bootopt {
446 * Boot lun information. 446 * Boot lun information.
447 */ 447 */
448struct bfa_boot_bootlun_s { 448struct bfa_boot_bootlun_s {
449 wwn_t pwwn; /* port wwn of target */ 449 wwn_t pwwn; /* port wwn of target */
450 lun_t lun; /* 64-bit lun */ 450 struct scsi_lun lun; /* 64-bit lun */
451}; 451};
452#pragma pack() 452#pragma pack()
453 453
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
index e24e9f7ca81f..648c84176722 100644
--- a/drivers/scsi/bfa/bfa_defs_svc.h
+++ b/drivers/scsi/bfa/bfa_defs_svc.h
@@ -34,8 +34,8 @@
34struct bfa_iocfc_intr_attr_s { 34struct bfa_iocfc_intr_attr_s {
35 u8 coalesce; /* enable/disable coalescing */ 35 u8 coalesce; /* enable/disable coalescing */
36 u8 rsvd[3]; 36 u8 rsvd[3];
37 u16 latency; /* latency in microseconds */ 37 __be16 latency; /* latency in microseconds */
38 u16 delay; /* delay in microseconds */ 38 __be16 delay; /* delay in microseconds */
39}; 39};
40 40
41/* 41/*
@@ -743,7 +743,7 @@ struct bfa_port_cfg_s {
743 u8 qos_enabled; /* qos enabled or not */ 743 u8 qos_enabled; /* qos enabled or not */
744 u8 cfg_hardalpa; /* is hard alpa configured */ 744 u8 cfg_hardalpa; /* is hard alpa configured */
745 u8 hardalpa; /* configured hard alpa */ 745 u8 hardalpa; /* configured hard alpa */
746 u16 maxfrsize; /* maximum frame size */ 746 __be16 maxfrsize; /* maximum frame size */
747 u8 rx_bbcredit; /* receive buffer credits */ 747 u8 rx_bbcredit; /* receive buffer credits */
748 u8 tx_bbcredit; /* transmit buffer credits */ 748 u8 tx_bbcredit; /* transmit buffer credits */
749 u8 ratelimit; /* ratelimit enabled or not */ 749 u8 ratelimit; /* ratelimit enabled or not */
@@ -843,7 +843,7 @@ struct bfa_fcport_fcf_s {
843 u8 fka_disabled; /* FKA is disabled */ 843 u8 fka_disabled; /* FKA is disabled */
844 u8 maxsz_verified; /* FCoE max size verified */ 844 u8 maxsz_verified; /* FCoE max size verified */
845 u8 fc_map[3]; /* FC map */ 845 u8 fc_map[3]; /* FC map */
846 u16 vlan; /* FCoE vlan tag/priority */ 846 __be16 vlan; /* FCoE vlan tag/priority */
847 u32 fka_adv_per; /* FIP ka advert. period */ 847 u32 fka_adv_per; /* FIP ka advert. period */
848 mac_t mac; /* FCF mac */ 848 mac_t mac; /* FCF mac */
849}; 849};
diff --git a/drivers/scsi/bfa/bfa_drv.c b/drivers/scsi/bfa/bfa_drv.c
deleted file mode 100644
index 0222d7c88a9a..000000000000
--- a/drivers/scsi/bfa/bfa_drv.c
+++ /dev/null
@@ -1,107 +0,0 @@
1/*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include "bfa_modules.h"
19
20/*
21 * BFA module list terminated by NULL
22 */
23struct bfa_module_s *hal_mods[] = {
24 &hal_mod_sgpg,
25 &hal_mod_fcport,
26 &hal_mod_fcxp,
27 &hal_mod_lps,
28 &hal_mod_uf,
29 &hal_mod_rport,
30 &hal_mod_fcpim,
31 NULL
32};
33
34/*
35 * Message handlers for various modules.
36 */
37bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = {
38 bfa_isr_unhandled, /* NONE */
39 bfa_isr_unhandled, /* BFI_MC_IOC */
40 bfa_isr_unhandled, /* BFI_MC_DIAG */
41 bfa_isr_unhandled, /* BFI_MC_FLASH */
42 bfa_isr_unhandled, /* BFI_MC_CEE */
43 bfa_fcport_isr, /* BFI_MC_FCPORT */
44 bfa_isr_unhandled, /* BFI_MC_IOCFC */
45 bfa_isr_unhandled, /* BFI_MC_LL */
46 bfa_uf_isr, /* BFI_MC_UF */
47 bfa_fcxp_isr, /* BFI_MC_FCXP */
48 bfa_lps_isr, /* BFI_MC_LPS */
49 bfa_rport_isr, /* BFI_MC_RPORT */
50 bfa_itnim_isr, /* BFI_MC_ITNIM */
51 bfa_isr_unhandled, /* BFI_MC_IOIM_READ */
52 bfa_isr_unhandled, /* BFI_MC_IOIM_WRITE */
53 bfa_isr_unhandled, /* BFI_MC_IOIM_IO */
54 bfa_ioim_isr, /* BFI_MC_IOIM */
55 bfa_ioim_good_comp_isr, /* BFI_MC_IOIM_IOCOM */
56 bfa_tskim_isr, /* BFI_MC_TSKIM */
57 bfa_isr_unhandled, /* BFI_MC_SBOOT */
58 bfa_isr_unhandled, /* BFI_MC_IPFC */
59 bfa_isr_unhandled, /* BFI_MC_PORT */
60 bfa_isr_unhandled, /* --------- */
61 bfa_isr_unhandled, /* --------- */
62 bfa_isr_unhandled, /* --------- */
63 bfa_isr_unhandled, /* --------- */
64 bfa_isr_unhandled, /* --------- */
65 bfa_isr_unhandled, /* --------- */
66 bfa_isr_unhandled, /* --------- */
67 bfa_isr_unhandled, /* --------- */
68 bfa_isr_unhandled, /* --------- */
69 bfa_isr_unhandled, /* --------- */
70};
71
72
73/*
74 * Message handlers for mailbox command classes
75 */
76bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = {
77 NULL,
78 NULL, /* BFI_MC_IOC */
79 NULL, /* BFI_MC_DIAG */
80 NULL, /* BFI_MC_FLASH */
81 NULL, /* BFI_MC_CEE */
82 NULL, /* BFI_MC_PORT */
83 bfa_iocfc_isr, /* BFI_MC_IOCFC */
84 NULL,
85};
86
87
88
89void
90bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi)
91{
92 struct bfa_port_s *port = &bfa->modules.port;
93 u32 dm_len;
94 u8 *dm_kva;
95 u64 dm_pa;
96
97 dm_len = bfa_port_meminfo();
98 dm_kva = bfa_meminfo_dma_virt(mi);
99 dm_pa = bfa_meminfo_dma_phys(mi);
100
101 memset(port, 0, sizeof(struct bfa_port_s));
102 bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod);
103 bfa_port_mem_claim(port, dm_kva, dm_pa);
104
105 bfa_meminfo_dma_virt(mi) = dm_kva + dm_len;
106 bfa_meminfo_dma_phys(mi) = dm_pa + dm_len;
107}
diff --git a/drivers/scsi/bfa/bfa_fc.h b/drivers/scsi/bfa/bfa_fc.h
index e929d25b09e3..8e764fae8dc9 100644
--- a/drivers/scsi/bfa/bfa_fc.h
+++ b/drivers/scsi/bfa/bfa_fc.h
@@ -18,14 +18,12 @@
18#ifndef __BFA_FC_H__ 18#ifndef __BFA_FC_H__
19#define __BFA_FC_H__ 19#define __BFA_FC_H__
20 20
21#include "bfa_os_inc.h" 21#include "bfad_drv.h"
22 22
23typedef u64 wwn_t; 23typedef u64 wwn_t;
24typedef u64 lun_t;
25 24
26#define WWN_NULL (0) 25#define WWN_NULL (0)
27#define FC_SYMNAME_MAX 256 /* max name server symbolic name size */ 26#define FC_SYMNAME_MAX 256 /* max name server symbolic name size */
28#define FC_ALPA_MAX 128
29 27
30#pragma pack(1) 28#pragma pack(1)
31 29
@@ -40,7 +38,6 @@ struct mac_s { u8 mac[MAC_ADDRLEN]; };
40struct scsi_cdb_s { 38struct scsi_cdb_s {
41 u8 scsi_cdb[SCSI_MAX_CDBLEN]; 39 u8 scsi_cdb[SCSI_MAX_CDBLEN];
42}; 40};
43#define scsi_cdb_t struct scsi_cdb_s
44 41
45/* ------------------------------------------------------------ 42/* ------------------------------------------------------------
46 * SCSI status byte values 43 * SCSI status byte values
@@ -63,7 +60,7 @@ struct scsi_cdb_s {
63 * Fibre Channel Header Structure (FCHS) definition 60 * Fibre Channel Header Structure (FCHS) definition
64 */ 61 */
65struct fchs_s { 62struct fchs_s {
66#ifdef __BIGENDIAN 63#ifdef __BIG_ENDIAN
67 u32 routing:4; /* routing bits */ 64 u32 routing:4; /* routing bits */
68 u32 cat_info:4; /* category info */ 65 u32 cat_info:4; /* category info */
69#else 66#else
@@ -75,34 +72,19 @@ struct fchs_s {
75 u32 cs_ctl:8; /* class specific control */ 72 u32 cs_ctl:8; /* class specific control */
76 u32 s_id:24; /* source identifier */ 73 u32 s_id:24; /* source identifier */
77 74
78 u32 type:8; /* data structure type */ 75 u32 type:8; /* data structure type */
79 u32 f_ctl:24; /* initial frame control */ 76 u32 f_ctl:24; /* initial frame control */
80 77
81 u8 seq_id; /* sequence identifier */ 78 u8 seq_id; /* sequence identifier */
82 u8 df_ctl; /* data field control */ 79 u8 df_ctl; /* data field control */
83 u16 seq_cnt; /* sequence count */ 80 u16 seq_cnt; /* sequence count */
84 81
85 u16 ox_id; /* originator exchange ID */ 82 __be16 ox_id; /* originator exchange ID */
86 u16 rx_id; /* responder exchange ID */ 83 u16 rx_id; /* responder exchange ID */
87 84
88 u32 ro; /* relative offset */ 85 u32 ro; /* relative offset */
89}; 86};
90 87
91#define FC_SOF_LEN 4
92#define FC_EOF_LEN 4
93#define FC_CRC_LEN 4
94
95/*
96 * Fibre Channel BB_E Header Structure
97 */
98struct fcbbehs_s {
99 u16 ver_rsvd;
100 u32 rsvd[2];
101 u32 rsvd__sof;
102};
103
104#define FC_SEQ_ID_MAX 256
105
106/* 88/*
107 * routing bit definitions 89 * routing bit definitions
108 */ 90 */
@@ -149,22 +131,6 @@ enum {
149}; 131};
150 132
151/* 133/*
152 * information category for Link Control
153 */
154enum {
155 FC_CAT_ACK_1 = 0x00,
156 FC_CAT_ACK_0_N = 0x01,
157 FC_CAT_P_RJT = 0x02,
158 FC_CAT_F_RJT = 0x03,
159 FC_CAT_P_BSY = 0x04,
160 FC_CAT_F_BSY_DATA = 0x05,
161 FC_CAT_F_BSY_LINK_CTL = 0x06,
162 FC_CAT_F_LCR = 0x07,
163 FC_CAT_NTY = 0x08,
164 FC_CAT_END = 0x09,
165};
166
167/*
168 * Type Field Definitions. FC-PH Section 18.5 pg. 165 134 * Type Field Definitions. FC-PH Section 18.5 pg. 165
169 */ 135 */
170enum { 136enum {
@@ -182,10 +148,6 @@ enum {
182 FC_TYPE_MAX = 256, /* 256 FC-4 types */ 148 FC_TYPE_MAX = 256, /* 256 FC-4 types */
183}; 149};
184 150
185struct fc_fc4types_s {
186 u8 bits[FC_TYPE_MAX / 8];
187};
188
189/* 151/*
190 * Frame Control Definitions. FC-PH Table-45. pg. 168 152 * Frame Control Definitions. FC-PH Table-45. pg. 168
191 */ 153 */
@@ -288,7 +250,6 @@ enum {
288 FC_ELS_AUTH = 0x90, /* Authentication. Ref FC-SP */ 250 FC_ELS_AUTH = 0x90, /* Authentication. Ref FC-SP */
289 FC_ELS_RFCN = 0x97, /* Request Fabric Change Notification. Ref 251 FC_ELS_RFCN = 0x97, /* Request Fabric Change Notification. Ref
290 *FC-SP */ 252 *FC-SP */
291
292}; 253};
293 254
294/* 255/*
@@ -314,12 +275,12 @@ enum {
314 * FC-PH-x. Figure-76. pg. 308. 275 * FC-PH-x. Figure-76. pg. 308.
315 */ 276 */
316struct fc_plogi_csp_s { 277struct fc_plogi_csp_s {
317 u8 verhi; /* FC-PH high version */ 278 u8 verhi; /* FC-PH high version */
318 u8 verlo; /* FC-PH low version */ 279 u8 verlo; /* FC-PH low version */
319 u16 bbcred; /* BB_Credit */ 280 __be16 bbcred; /* BB_Credit */
320 281
321#ifdef __BIGENDIAN 282#ifdef __BIG_ENDIAN
322 u8 ciro:1, /* continuously increasing RO */ 283 u8 ciro:1, /* continuously increasing RO */
323 rro:1, /* random relative offset */ 284 rro:1, /* random relative offset */
324 npiv_supp:1, /* NPIV supported */ 285 npiv_supp:1, /* NPIV supported */
325 port_type:1, /* N_Port/F_port */ 286 port_type:1, /* N_Port/F_port */
@@ -328,7 +289,7 @@ struct fc_plogi_csp_s {
328 vvl_info:1, /* VVL Info included */ 289 vvl_info:1, /* VVL Info included */
329 reserved1:1; 290 reserved1:1;
330 291
331 u8 hg_supp:1, 292 u8 hg_supp:1,
332 query_dbc:1, 293 query_dbc:1,
333 security:1, 294 security:1,
334 sync_cap:1, 295 sync_cap:1,
@@ -337,7 +298,7 @@ struct fc_plogi_csp_s {
337 cisc:1, /* continuously increasing seq count */ 298 cisc:1, /* continuously increasing seq count */
338 payload:1; 299 payload:1;
339#else 300#else
340 u8 reserved2:2, 301 u8 reserved2:2,
341 resolution:1, /* ms/ns ED_TOV resolution */ 302 resolution:1, /* ms/ns ED_TOV resolution */
342 altbbcred:1, /* alternate BB_Credit */ 303 altbbcred:1, /* alternate BB_Credit */
343 port_type:1, /* N_Port/F_port */ 304 port_type:1, /* N_Port/F_port */
@@ -345,7 +306,7 @@ struct fc_plogi_csp_s {
345 rro:1, /* random relative offset */ 306 rro:1, /* random relative offset */
346 ciro:1; /* continuously increasing RO */ 307 ciro:1; /* continuously increasing RO */
347 308
348 u8 payload:1, 309 u8 payload:1,
349 cisc:1, /* continuously increasing seq count */ 310 cisc:1, /* continuously increasing seq count */
350 dh_dup_supp:1, 311 dh_dup_supp:1,
351 r_t_tov:1, 312 r_t_tov:1,
@@ -354,13 +315,10 @@ struct fc_plogi_csp_s {
354 query_dbc:1, 315 query_dbc:1,
355 hg_supp:1; 316 hg_supp:1;
356#endif 317#endif
357 318 __be16 rxsz; /* recieve data_field size */
358 u16 rxsz; /* recieve data_field size */ 319 __be16 conseq;
359 320 __be16 ro_bitmap;
360 u16 conseq; 321 __be32 e_d_tov;
361 u16 ro_bitmap;
362
363 u32 e_d_tov;
364}; 322};
365 323
366/* 324/*
@@ -368,12 +326,11 @@ struct fc_plogi_csp_s {
368 * FC-PH-x. Figure 78. pg. 318. 326 * FC-PH-x. Figure 78. pg. 318.
369 */ 327 */
370struct fc_plogi_clp_s { 328struct fc_plogi_clp_s {
371#ifdef __BIGENDIAN 329#ifdef __BIG_ENDIAN
372 u32 class_valid:1; 330 u32 class_valid:1;
373 u32 intermix:1; /* class intermix supported if set =1. 331 u32 intermix:1; /* class intermix supported if set =1.
374 * valid only for class1. Reserved for 332 * valid only for class1. Reserved for
375 * class2 & class3 333 * class2 & class3 */
376 */
377 u32 reserved1:2; 334 u32 reserved1:2;
378 u32 sequential:1; 335 u32 sequential:1;
379 u32 reserved2:3; 336 u32 reserved2:3;
@@ -382,12 +339,10 @@ struct fc_plogi_clp_s {
382 u32 sequential:1; 339 u32 sequential:1;
383 u32 reserved1:2; 340 u32 reserved1:2;
384 u32 intermix:1; /* class intermix supported if set =1. 341 u32 intermix:1; /* class intermix supported if set =1.
385 * valid only for class1. Reserved for 342 * valid only for class1. Reserved for
386 * class2 & class3 343 * class2 & class3 */
387 */
388 u32 class_valid:1; 344 u32 class_valid:1;
389#endif 345#endif
390
391 u32 reserved3:24; 346 u32 reserved3:24;
392 347
393 u32 reserved4:16; 348 u32 reserved4:16;
@@ -395,7 +350,7 @@ struct fc_plogi_clp_s {
395 350
396 u32 reserved5:8; 351 u32 reserved5:8;
397 u32 conseq:8; 352 u32 conseq:8;
398 u32 e2e_credit:16; /* end to end credit */ 353 u32 e2e_credit:16; /* end to end credit */
399 354
400 u32 reserved7:8; 355 u32 reserved7:8;
401 u32 ospx:8; 356 u32 ospx:8;
@@ -409,24 +364,24 @@ struct fc_plogi_clp_s {
409 * PLOGI els command and reply payload 364 * PLOGI els command and reply payload
410 */ 365 */
411struct fc_logi_s { 366struct fc_logi_s {
412 struct fc_els_cmd_s els_cmd; /* ELS command code */ 367 struct fc_els_cmd_s els_cmd; /* ELS command code */
413 struct fc_plogi_csp_s csp; /* common service params */ 368 struct fc_plogi_csp_s csp; /* common service params */
414 wwn_t port_name; 369 wwn_t port_name;
415 wwn_t node_name; 370 wwn_t node_name;
416 struct fc_plogi_clp_s class1; /* class 1 service parameters */ 371 struct fc_plogi_clp_s class1; /* class 1 service parameters */
417 struct fc_plogi_clp_s class2; /* class 2 service parameters */ 372 struct fc_plogi_clp_s class2; /* class 2 service parameters */
418 struct fc_plogi_clp_s class3; /* class 3 service parameters */ 373 struct fc_plogi_clp_s class3; /* class 3 service parameters */
419 struct fc_plogi_clp_s class4; /* class 4 service parameters */ 374 struct fc_plogi_clp_s class4; /* class 4 service parameters */
420 u8 vvl[16]; /* vendor version level */ 375 u8 vvl[16]; /* vendor version level */
421}; 376};
422 377
423/* 378/*
424 * LOGO els command payload 379 * LOGO els command payload
425 */ 380 */
426struct fc_logo_s { 381struct fc_logo_s {
427 struct fc_els_cmd_s els_cmd; /* ELS command code */ 382 struct fc_els_cmd_s els_cmd; /* ELS command code */
428 u32 res1:8; 383 u32 res1:8;
429 u32 nport_id:24; /* N_Port identifier of source */ 384 u32 nport_id:24; /* N_Port identifier of source */
430 wwn_t orig_port_name; /* Port name of the LOGO originator */ 385 wwn_t orig_port_name; /* Port name of the LOGO originator */
431}; 386};
432 387
@@ -435,12 +390,12 @@ struct fc_logo_s {
435 */ 390 */
436struct fc_adisc_s { 391struct fc_adisc_s {
437 struct fc_els_cmd_s els_cmd; /* ELS command code */ 392 struct fc_els_cmd_s els_cmd; /* ELS command code */
438 u32 res1:8; 393 u32 res1:8;
439 u32 orig_HA:24; /* originator hard address */ 394 u32 orig_HA:24; /* originator hard address */
440 wwn_t orig_port_name; /* originator port name */ 395 wwn_t orig_port_name; /* originator port name */
441 wwn_t orig_node_name; /* originator node name */ 396 wwn_t orig_node_name; /* originator node name */
442 u32 res2:8; 397 u32 res2:8;
443 u32 nport_id:24; /* originator NPortID */ 398 u32 nport_id:24; /* originator NPortID */
444}; 399};
445 400
446/* 401/*
@@ -466,7 +421,7 @@ struct fc_exch_status_blk_s {
466struct fc_res_s { 421struct fc_res_s {
467 struct fc_els_cmd_s els_cmd; /* ELS command code */ 422 struct fc_els_cmd_s els_cmd; /* ELS command code */
468 u32 res1:8; 423 u32 res1:8;
469 u32 nport_id:24; /* N_Port identifier of source */ 424 u32 nport_id:24; /* N_Port identifier of source */
470 u32 oxid:16; 425 u32 oxid:16;
471 u32 rxid:16; 426 u32 rxid:16;
472 u8 assoc_hdr[32]; 427 u8 assoc_hdr[32];
@@ -512,8 +467,8 @@ struct fc_rec_acc_s {
512 u32 orig_id:24; /* N_Port id of exchange originator */ 467 u32 orig_id:24; /* N_Port id of exchange originator */
513 u32 res2:8; 468 u32 res2:8;
514 u32 resp_id:24; /* N_Port id of exchange responder */ 469 u32 resp_id:24; /* N_Port id of exchange responder */
515 u32 count; /* data transfer count */ 470 u32 count; /* data transfer count */
516 u32 e_stat; /* exchange status */ 471 u32 e_stat; /* exchange status */
517}; 472};
518 473
519/* 474/*
@@ -533,7 +488,7 @@ struct fc_rsi_s {
533 */ 488 */
534struct fc_prli_params_s { 489struct fc_prli_params_s {
535 u32 reserved:16; 490 u32 reserved:16;
536#ifdef __BIGENDIAN 491#ifdef __BIG_ENDIAN
537 u32 reserved1:5; 492 u32 reserved1:5;
538 u32 rec_support:1; 493 u32 rec_support:1;
539 u32 task_retry_id:1; 494 u32 task_retry_id:1;
@@ -575,7 +530,7 @@ enum {
575struct fc_prli_params_page_s { 530struct fc_prli_params_page_s {
576 u32 type:8; 531 u32 type:8;
577 u32 codext:8; 532 u32 codext:8;
578#ifdef __BIGENDIAN 533#ifdef __BIG_ENDIAN
579 u32 origprocasv:1; 534 u32 origprocasv:1;
580 u32 rsppav:1; 535 u32 rsppav:1;
581 u32 imagepair:1; 536 u32 imagepair:1;
@@ -611,18 +566,14 @@ struct fc_prli_s {
611struct fc_prlo_params_page_s { 566struct fc_prlo_params_page_s {
612 u32 type:8; 567 u32 type:8;
613 u32 type_ext:8; 568 u32 type_ext:8;
614#ifdef __BIGENDIAN 569#ifdef __BIG_ENDIAN
615 u32 opa_valid:1; /* originator process associator 570 u32 opa_valid:1; /* originator process associator valid */
616 * valid
617 */
618 u32 rpa_valid:1; /* responder process associator valid */ 571 u32 rpa_valid:1; /* responder process associator valid */
619 u32 res1:14; 572 u32 res1:14;
620#else 573#else
621 u32 res1:14; 574 u32 res1:14;
622 u32 rpa_valid:1; /* responder process associator valid */ 575 u32 rpa_valid:1; /* responder process associator valid */
623 u32 opa_valid:1; /* originator process associator 576 u32 opa_valid:1; /* originator process associator valid */
624 * valid
625 */
626#endif 577#endif
627 u32 orig_process_assc; 578 u32 orig_process_assc;
628 u32 resp_process_assc; 579 u32 resp_process_assc;
@@ -647,18 +598,14 @@ struct fc_prlo_acc_params_page_s {
647 u32 type:8; 598 u32 type:8;
648 u32 type_ext:8; 599 u32 type_ext:8;
649 600
650#ifdef __BIGENDIAN 601#ifdef __BIG_ENDIAN
651 u32 opa_valid:1; /* originator process associator 602 u32 opa_valid:1; /* originator process associator valid */
652 * valid
653 */
654 u32 rpa_valid:1; /* responder process associator valid */ 603 u32 rpa_valid:1; /* responder process associator valid */
655 u32 res1:14; 604 u32 res1:14;
656#else 605#else
657 u32 res1:14; 606 u32 res1:14;
658 u32 rpa_valid:1; /* responder process associator valid */ 607 u32 rpa_valid:1; /* responder process associator valid */
659 u32 opa_valid:1; /* originator process associator 608 u32 opa_valid:1; /* originator process associator valid */
660 * valid
661 */
662#endif 609#endif
663 u32 orig_process_assc; 610 u32 orig_process_assc;
664 u32 resp_process_assc; 611 u32 resp_process_assc;
@@ -715,9 +662,9 @@ enum {
715 * LS_RJT els reply payload 662 * LS_RJT els reply payload
716 */ 663 */
717struct fc_ls_rjt_s { 664struct fc_ls_rjt_s {
718 struct fc_els_cmd_s els_cmd; /* ELS command code */ 665 struct fc_els_cmd_s els_cmd; /* ELS command code */
719 u32 res1:8; 666 u32 res1:8;
720 u32 reason_code:8; /* Reason code for reject */ 667 u32 reason_code:8; /* Reason code for reject */
721 u32 reason_code_expl:8; /* Reason code explanation */ 668 u32 reason_code_expl:8; /* Reason code explanation */
722 u32 vendor_unique:8; /* Vendor specific */ 669 u32 vendor_unique:8; /* Vendor specific */
723}; 670};
@@ -779,12 +726,12 @@ struct fc_rrq_s {
779 */ 726 */
780struct fc_ba_acc_s { 727struct fc_ba_acc_s {
781 u32 seq_id_valid:8; /* set to 0x00 for Abort Exchange */ 728 u32 seq_id_valid:8; /* set to 0x00 for Abort Exchange */
782 u32 seq_id:8; /* invalid for Abort Exchange */ 729 u32 seq_id:8; /* invalid for Abort Exchange */
783 u32 res2:16; 730 u32 res2:16;
784 u32 ox_id:16; /* OX_ID from ABTS frame */ 731 u32 ox_id:16; /* OX_ID from ABTS frame */
785 u32 rx_id:16; /* RX_ID from ABTS frame */ 732 u32 rx_id:16; /* RX_ID from ABTS frame */
786 u32 low_seq_cnt:16; /* set to 0x0000 for Abort Exchange */ 733 u32 low_seq_cnt:16; /* set to 0x0000 for Abort Exchange */
787 u32 high_seq_cnt:16;/* set to 0xFFFF for Abort Exchange */ 734 u32 high_seq_cnt:16; /* set to 0xFFFF for Abort Exchange */
788}; 735};
789 736
790/* 737/*
@@ -794,17 +741,17 @@ struct fc_ba_rjt_s {
794 u32 res1:8; /* Reserved */ 741 u32 res1:8; /* Reserved */
795 u32 reason_code:8; /* reason code for reject */ 742 u32 reason_code:8; /* reason code for reject */
796 u32 reason_expl:8; /* reason code explanation */ 743 u32 reason_expl:8; /* reason code explanation */
797 u32 vendor_unique:8;/* vendor unique reason code,set to 0 */ 744 u32 vendor_unique:8; /* vendor unique reason code,set to 0 */
798}; 745};
799 746
800/* 747/*
801 * TPRLO logout parameter page 748 * TPRLO logout parameter page
802 */ 749 */
803struct fc_tprlo_params_page_s { 750struct fc_tprlo_params_page_s {
804u32 type:8; 751 u32 type:8;
805u32 type_ext:8; 752 u32 type_ext:8;
806 753
807#ifdef __BIGENDIAN 754#ifdef __BIG_ENDIAN
808 u32 opa_valid:1; 755 u32 opa_valid:1;
809 u32 rpa_valid:1; 756 u32 rpa_valid:1;
810 u32 tpo_nport_valid:1; 757 u32 tpo_nport_valid:1;
@@ -864,16 +811,16 @@ enum fc_rscn_format {
864}; 811};
865 812
866struct fc_rscn_event_s { 813struct fc_rscn_event_s {
867 u32 format:2; 814 u32 format:2;
868 u32 qualifier:4; 815 u32 qualifier:4;
869 u32 resvd:2; 816 u32 resvd:2;
870 u32 portid:24; 817 u32 portid:24;
871}; 818};
872 819
873struct fc_rscn_pl_s { 820struct fc_rscn_pl_s {
874 u8 command; 821 u8 command;
875 u8 pagelen; 822 u8 pagelen;
876 u16 payldlen; 823 __be16 payldlen;
877 struct fc_rscn_event_s event[1]; 824 struct fc_rscn_event_s event[1];
878}; 825};
879 826
@@ -887,7 +834,6 @@ struct fc_echo_s {
887/* 834/*
888 * RNID els command 835 * RNID els command
889 */ 836 */
890
891#define RNID_NODEID_DATA_FORMAT_COMMON 0x00 837#define RNID_NODEID_DATA_FORMAT_COMMON 0x00
892#define RNID_NODEID_DATA_FORMAT_FCP3 0x08 838#define RNID_NODEID_DATA_FORMAT_FCP3 0x08
893#define RNID_NODEID_DATA_FORMAT_DISCOVERY 0xDF 839#define RNID_NODEID_DATA_FORMAT_DISCOVERY 0xDF
@@ -920,15 +866,15 @@ struct fc_rnid_cmd_s {
920 */ 866 */
921 867
922struct fc_rnid_common_id_data_s { 868struct fc_rnid_common_id_data_s {
923 wwn_t port_name; 869 wwn_t port_name;
924 wwn_t node_name; 870 wwn_t node_name;
925}; 871};
926 872
927struct fc_rnid_general_topology_data_s { 873struct fc_rnid_general_topology_data_s {
928 u32 vendor_unique[4]; 874 u32 vendor_unique[4];
929 u32 asso_type; 875 __be32 asso_type;
930 u32 phy_port_num; 876 u32 phy_port_num;
931 u32 num_attached_nodes; 877 __be32 num_attached_nodes;
932 u32 node_mgmt:8; 878 u32 node_mgmt:8;
933 u32 ip_version:8; 879 u32 ip_version:8;
934 u32 udp_tcp_port_num:16; 880 u32 udp_tcp_port_num:16;
@@ -980,59 +926,17 @@ enum fc_rpsc_op_speed {
980 RPSC_OP_SPEED_8G = 0x0800, 926 RPSC_OP_SPEED_8G = 0x0800,
981 RPSC_OP_SPEED_16G = 0x0400, 927 RPSC_OP_SPEED_16G = 0x0400,
982 928
983 RPSC_OP_SPEED_NOT_EST = 0x0001, /*! speed not established */ 929 RPSC_OP_SPEED_NOT_EST = 0x0001, /* speed not established */
984}; 930};
985 931
986struct fc_rpsc_speed_info_s { 932struct fc_rpsc_speed_info_s {
987 u16 port_speed_cap; /*! see enum fc_rpsc_speed_cap */ 933 __be16 port_speed_cap; /* see enum fc_rpsc_speed_cap */
988 u16 port_op_speed; /*! see enum fc_rpsc_op_speed */ 934 __be16 port_op_speed; /* see enum fc_rpsc_op_speed */
989};
990
991enum link_e2e_beacon_subcmd {
992 LINK_E2E_BEACON_ON = 1,
993 LINK_E2E_BEACON_OFF = 2
994};
995
996enum beacon_type {
997 BEACON_TYPE_NORMAL = 1, /*! Normal Beaconing. Green */
998 BEACON_TYPE_WARN = 2, /*! Warning Beaconing. Yellow/Amber */
999 BEACON_TYPE_CRITICAL = 3 /*! Critical Beaconing. Red */
1000};
1001
1002struct link_e2e_beacon_param_s {
1003 u8 beacon_type; /* Beacon Type. See enum beacon_type */
1004 u8 beacon_frequency;
1005 /* Beacon frequency. Number of blinks
1006 * per 10 seconds
1007 */
1008 u16 beacon_duration;/* Beacon duration (in Seconds). The
1009 * command operation should be
1010 * terminated at the end of this
1011 * timeout value.
1012 *
1013 * Ignored if diag_sub_cmd is
1014 * LINK_E2E_BEACON_OFF.
1015 *
1016 * If 0, beaconing will continue till a
1017 * BEACON OFF request is received
1018 */
1019};
1020
1021/*
1022 * Link E2E beacon request/good response format.
1023 * For LS_RJTs use struct fc_ls_rjt_s
1024 */
1025struct link_e2e_beacon_req_s {
1026 u32 ls_code; /*! FC_ELS_E2E_LBEACON in requests *
1027 *or FC_ELS_ACC in good replies */
1028 u32 ls_sub_cmd; /*! See enum link_e2e_beacon_subcmd */
1029 struct link_e2e_beacon_param_s beacon_parm;
1030}; 935};
1031 936
1032/* 937/*
1033 * If RPSC request is sent to the Domain Controller, the request is for 938 * If RPSC request is sent to the Domain Controller, the request is for
1034 * all the ports within that domain (TODO - I don't think FOS implements 939 * all the ports within that domain.
1035 * this...).
1036 */ 940 */
1037struct fc_rpsc_cmd_s { 941struct fc_rpsc_cmd_s {
1038 struct fc_els_cmd_s els_cmd; 942 struct fc_els_cmd_s els_cmd;
@@ -1056,9 +960,9 @@ struct fc_rpsc_acc_s {
1056 960
1057struct fc_rpsc2_cmd_s { 961struct fc_rpsc2_cmd_s {
1058 struct fc_els_cmd_s els_cmd; 962 struct fc_els_cmd_s els_cmd;
1059 u32 token; 963 __be32 token;
1060 u16 resvd; 964 u16 resvd;
1061 u16 num_pids; /* Number of pids in the request */ 965 __be16 num_pids; /* Number of pids in the request */
1062 struct { 966 struct {
1063 u32 rsvd1:8; 967 u32 rsvd1:8;
1064 u32 pid:24; /* port identifier */ 968 u32 pid:24; /* port identifier */
@@ -1072,16 +976,17 @@ enum fc_rpsc2_port_type {
1072 RPSC2_PORT_TYPE_NPIV_PORT = 0x5f, 976 RPSC2_PORT_TYPE_NPIV_PORT = 0x5f,
1073 RPSC2_PORT_TYPE_NPORT_TRUNK = 0x6f, 977 RPSC2_PORT_TYPE_NPORT_TRUNK = 0x6f,
1074}; 978};
979
1075/* 980/*
1076 * RPSC2 portInfo entry structure 981 * RPSC2 portInfo entry structure
1077 */ 982 */
1078struct fc_rpsc2_port_info_s { 983struct fc_rpsc2_port_info_s {
1079 u32 pid; /* PID */ 984 __be32 pid; /* PID */
1080 u16 resvd1; 985 u16 resvd1;
1081 u16 index; /* port number / index */ 986 __be16 index; /* port number / index */
1082 u8 resvd2; 987 u8 resvd2;
1083 u8 type; /* port type N/NL/... */ 988 u8 type; /* port type N/NL/... */
1084 u16 speed; /* port Operating Speed */ 989 __be16 speed; /* port Operating Speed */
1085}; 990};
1086 991
1087/* 992/*
@@ -1090,8 +995,8 @@ struct fc_rpsc2_port_info_s {
1090struct fc_rpsc2_acc_s { 995struct fc_rpsc2_acc_s {
1091 u8 els_cmd; 996 u8 els_cmd;
1092 u8 resvd; 997 u8 resvd;
1093 u16 num_pids; /* Number of pids in the request */ 998 __be16 num_pids; /* Number of pids in the request */
1094 struct fc_rpsc2_port_info_s port_info[1]; /* port information */ 999 struct fc_rpsc2_port_info_s port_info[1]; /* port information */
1095}; 1000};
1096 1001
1097/* 1002/*
@@ -1110,18 +1015,14 @@ struct fc_symname_s {
1110 u8 symname[FC_SYMNAME_MAX]; 1015 u8 symname[FC_SYMNAME_MAX];
1111}; 1016};
1112 1017
1113struct fc_alpabm_s {
1114 u8 alpa_bm[FC_ALPA_MAX / 8];
1115};
1116
1117/* 1018/*
1118 * protocol default timeout values 1019 * protocol default timeout values
1119 */ 1020 */
1120#define FC_ED_TOV 2 1021#define FC_ED_TOV 2
1121#define FC_REC_TOV (FC_ED_TOV + 1) 1022#define FC_REC_TOV (FC_ED_TOV + 1)
1122#define FC_RA_TOV 10 1023#define FC_RA_TOV 10
1123#define FC_ELS_TOV (2 * FC_RA_TOV) 1024#define FC_ELS_TOV (2 * FC_RA_TOV)
1124#define FC_FCCT_TOV (3 * FC_RA_TOV) 1025#define FC_FCCT_TOV (3 * FC_RA_TOV)
1125 1026
1126/* 1027/*
1127 * virtual fabric related defines 1028 * virtual fabric related defines
@@ -1157,50 +1058,34 @@ enum {
1157}; 1058};
1158 1059
1159/* 1060/*
1160 * SRR FC-4 LS payload
1161 */
1162struct fc_srr_s {
1163 u32 ls_cmd;
1164 u32 ox_id:16; /* ox-id */
1165 u32 rx_id:16; /* rx-id */
1166 u32 ro; /* relative offset */
1167 u32 r_ctl:8; /* R_CTL for I.U. */
1168 u32 res:24;
1169};
1170
1171
1172/*
1173 * FCP_CMND definitions 1061 * FCP_CMND definitions
1174 */ 1062 */
1175#define FCP_CMND_CDB_LEN 16 1063#define FCP_CMND_CDB_LEN 16
1176#define FCP_CMND_LUN_LEN 8 1064#define FCP_CMND_LUN_LEN 8
1177 1065
1178struct fcp_cmnd_s { 1066struct fcp_cmnd_s {
1179 lun_t lun; /* 64-bit LU number */ 1067 struct scsi_lun lun; /* 64-bit LU number */
1180 u8 crn; /* command reference number */ 1068 u8 crn; /* command reference number */
1181#ifdef __BIGENDIAN 1069#ifdef __BIG_ENDIAN
1182 u8 resvd:1, 1070 u8 resvd:1,
1183 priority:4, /* FCP-3: SAM-3 priority */ 1071 priority:4, /* FCP-3: SAM-3 priority */
1184 taskattr:3; /* scsi task attribute */ 1072 taskattr:3; /* scsi task attribute */
1185#else 1073#else
1186 u8 taskattr:3, /* scsi task attribute */ 1074 u8 taskattr:3, /* scsi task attribute */
1187 priority:4, /* FCP-3: SAM-3 priority */ 1075 priority:4, /* FCP-3: SAM-3 priority */
1188 resvd:1; 1076 resvd:1;
1189#endif 1077#endif
1190 u8 tm_flags; /* task management flags */ 1078 u8 tm_flags; /* task management flags */
1191#ifdef __BIGENDIAN 1079#ifdef __BIG_ENDIAN
1192 u8 addl_cdb_len:6, /* additional CDB length words */ 1080 u8 addl_cdb_len:6, /* additional CDB length words */
1193 iodir:2; /* read/write FCP_DATA IUs */ 1081 iodir:2; /* read/write FCP_DATA IUs */
1194#else 1082#else
1195 u8 iodir:2, /* read/write FCP_DATA IUs */ 1083 u8 iodir:2, /* read/write FCP_DATA IUs */
1196 addl_cdb_len:6; /* additional CDB length */ 1084 addl_cdb_len:6; /* additional CDB length */
1197#endif 1085#endif
1198 scsi_cdb_t cdb; 1086 struct scsi_cdb_s cdb;
1199 1087
1200 /* 1088 __be32 fcp_dl; /* bytes to be transferred */
1201 * !!! additional cdb bytes follows here!!!
1202 */
1203 u32 fcp_dl; /* bytes to be transferred */
1204}; 1089};
1205 1090
1206#define fcp_cmnd_cdb_len(_cmnd) ((_cmnd)->addl_cdb_len * 4 + FCP_CMND_CDB_LEN) 1091#define fcp_cmnd_cdb_len(_cmnd) ((_cmnd)->addl_cdb_len * 4 + FCP_CMND_CDB_LEN)
@@ -1210,21 +1095,10 @@ struct fcp_cmnd_s {
1210 * struct fcp_cmnd_s .iodir field values 1095 * struct fcp_cmnd_s .iodir field values
1211 */ 1096 */
1212enum fcp_iodir { 1097enum fcp_iodir {
1213 FCP_IODIR_NONE = 0, 1098 FCP_IODIR_NONE = 0,
1214 FCP_IODIR_WRITE = 1, 1099 FCP_IODIR_WRITE = 1,
1215 FCP_IODIR_READ = 2, 1100 FCP_IODIR_READ = 2,
1216 FCP_IODIR_RW = 3, 1101 FCP_IODIR_RW = 3,
1217};
1218
1219/*
1220 * Task attribute field
1221 */
1222enum {
1223 FCP_TASK_ATTR_SIMPLE = 0,
1224 FCP_TASK_ATTR_HOQ = 1,
1225 FCP_TASK_ATTR_ORDERED = 2,
1226 FCP_TASK_ATTR_ACA = 4,
1227 FCP_TASK_ATTR_UNTAGGED = 5, /* obsolete in FCP-3 */
1228}; 1102};
1229 1103
1230/* 1104/*
@@ -1239,58 +1113,40 @@ enum fcp_tm_cmnd {
1239}; 1113};
1240 1114
1241/* 1115/*
1242 * FCP_XFER_RDY IU defines
1243 */
1244struct fcp_xfer_rdy_s {
1245 u32 data_ro;
1246 u32 burst_len;
1247 u32 reserved;
1248};
1249
1250/*
1251 * FCP_RSP residue flags 1116 * FCP_RSP residue flags
1252 */ 1117 */
1253enum fcp_residue { 1118enum fcp_residue {
1254 FCP_NO_RESIDUE = 0, /* no residue */ 1119 FCP_NO_RESIDUE = 0, /* no residue */
1255 FCP_RESID_OVER = 1, /* more data left that was not sent */ 1120 FCP_RESID_OVER = 1, /* more data left that was not sent */
1256 FCP_RESID_UNDER = 2, /* less data than requested */ 1121 FCP_RESID_UNDER = 2, /* less data than requested */
1257};
1258
1259enum {
1260 FCP_RSPINFO_GOOD = 0,
1261 FCP_RSPINFO_DATALEN_MISMATCH = 1,
1262 FCP_RSPINFO_CMND_INVALID = 2,
1263 FCP_RSPINFO_ROLEN_MISMATCH = 3,
1264 FCP_RSPINFO_TM_NOT_SUPP = 4,
1265 FCP_RSPINFO_TM_FAILED = 5,
1266}; 1122};
1267 1123
1268struct fcp_rspinfo_s { 1124struct fcp_rspinfo_s {
1269 u32 res0:24; 1125 u32 res0:24;
1270 u32 rsp_code:8; /* response code (as above) */ 1126 u32 rsp_code:8; /* response code (as above) */
1271 u32 res1; 1127 u32 res1;
1272}; 1128};
1273 1129
1274struct fcp_resp_s { 1130struct fcp_resp_s {
1275 u32 reserved[2]; /* 2 words reserved */ 1131 u32 reserved[2]; /* 2 words reserved */
1276 u16 reserved2; 1132 u16 reserved2;
1277#ifdef __BIGENDIAN 1133#ifdef __BIG_ENDIAN
1278 u8 reserved3:3; 1134 u8 reserved3:3;
1279 u8 fcp_conf_req:1; /* FCP_CONF is requested */ 1135 u8 fcp_conf_req:1; /* FCP_CONF is requested */
1280 u8 resid_flags:2; /* underflow/overflow */ 1136 u8 resid_flags:2; /* underflow/overflow */
1281 u8 sns_len_valid:1;/* sense len is valid */ 1137 u8 sns_len_valid:1; /* sense len is valid */
1282 u8 rsp_len_valid:1;/* response len is valid */ 1138 u8 rsp_len_valid:1; /* response len is valid */
1283#else 1139#else
1284 u8 rsp_len_valid:1;/* response len is valid */ 1140 u8 rsp_len_valid:1; /* response len is valid */
1285 u8 sns_len_valid:1;/* sense len is valid */ 1141 u8 sns_len_valid:1; /* sense len is valid */
1286 u8 resid_flags:2; /* underflow/overflow */ 1142 u8 resid_flags:2; /* underflow/overflow */
1287 u8 fcp_conf_req:1; /* FCP_CONF is requested */ 1143 u8 fcp_conf_req:1; /* FCP_CONF is requested */
1288 u8 reserved3:3; 1144 u8 reserved3:3;
1289#endif 1145#endif
1290 u8 scsi_status; /* one byte SCSI status */ 1146 u8 scsi_status; /* one byte SCSI status */
1291 u32 residue; /* residual data bytes */ 1147 u32 residue; /* residual data bytes */
1292 u32 sns_len; /* length od sense info */ 1148 u32 sns_len; /* length od sense info */
1293 u32 rsp_len; /* length of response info */ 1149 u32 rsp_len; /* length of response info */
1294}; 1150};
1295 1151
1296#define fcp_snslen(__fcprsp) ((__fcprsp)->sns_len_valid ? \ 1152#define fcp_snslen(__fcprsp) ((__fcprsp)->sns_len_valid ? \
@@ -1300,12 +1156,6 @@ struct fcp_resp_s {
1300#define fcp_rspinfo(__fcprsp) ((struct fcp_rspinfo_s *)((__fcprsp) + 1)) 1156#define fcp_rspinfo(__fcprsp) ((struct fcp_rspinfo_s *)((__fcprsp) + 1))
1301#define fcp_snsinfo(__fcprsp) (((u8 *)fcp_rspinfo(__fcprsp)) + \ 1157#define fcp_snsinfo(__fcprsp) (((u8 *)fcp_rspinfo(__fcprsp)) + \
1302 fcp_rsplen(__fcprsp)) 1158 fcp_rsplen(__fcprsp))
1303
1304struct fcp_cmnd_fr_s {
1305 struct fchs_s fchs;
1306 struct fcp_cmnd_s fcp;
1307};
1308
1309/* 1159/*
1310 * CT 1160 * CT
1311 */ 1161 */
@@ -1379,7 +1229,7 @@ enum {
1379 CT_RSN_LOGICAL_BUSY = 0x05, 1229 CT_RSN_LOGICAL_BUSY = 0x05,
1380 CT_RSN_PROTO_ERR = 0x07, 1230 CT_RSN_PROTO_ERR = 0x07,
1381 CT_RSN_UNABLE_TO_PERF = 0x09, 1231 CT_RSN_UNABLE_TO_PERF = 0x09,
1382 CT_RSN_NOT_SUPP = 0x0B, 1232 CT_RSN_NOT_SUPP = 0x0B,
1383 CT_RSN_SERVER_NOT_AVBL = 0x0D, 1233 CT_RSN_SERVER_NOT_AVBL = 0x0D,
1384 CT_RSN_SESSION_COULD_NOT_BE_ESTBD = 0x0E, 1234 CT_RSN_SESSION_COULD_NOT_BE_ESTBD = 0x0E,
1385 CT_RSN_VENDOR_SPECIFIC = 0xFF, 1235 CT_RSN_VENDOR_SPECIFIC = 0xFF,
@@ -1419,10 +1269,10 @@ enum {
1419 * defintions for the explanation code for all servers 1269 * defintions for the explanation code for all servers
1420 */ 1270 */
1421enum { 1271enum {
1422 CT_EXP_AUTH_EXCEPTION = 0xF1, 1272 CT_EXP_AUTH_EXCEPTION = 0xF1,
1423 CT_EXP_DB_FULL = 0xF2, 1273 CT_EXP_DB_FULL = 0xF2,
1424 CT_EXP_DB_EMPTY = 0xF3, 1274 CT_EXP_DB_EMPTY = 0xF3,
1425 CT_EXP_PROCESSING_REQ = 0xF4, 1275 CT_EXP_PROCESSING_REQ = 0xF4,
1426 CT_EXP_UNABLE_TO_VERIFY_CONN = 0xF5, 1276 CT_EXP_UNABLE_TO_VERIFY_CONN = 0xF5,
1427 CT_EXP_DEVICES_NOT_IN_CMN_ZONE = 0xF6 1277 CT_EXP_DEVICES_NOT_IN_CMN_ZONE = 0xF6
1428}; 1278};
@@ -1446,7 +1296,7 @@ enum {
1446 GS_RFF_ID = 0x021F, /* Register FC4 Feature */ 1296 GS_RFF_ID = 0x021F, /* Register FC4 Feature */
1447}; 1297};
1448 1298
1449struct fcgs_id_req_s{ 1299struct fcgs_id_req_s {
1450 u32 rsvd:8; 1300 u32 rsvd:8;
1451 u32 dap:24; /* port identifier */ 1301 u32 dap:24; /* port identifier */
1452}; 1302};
@@ -1460,7 +1310,7 @@ struct fcgs_gidpn_req_s {
1460 1310
1461struct fcgs_gidpn_resp_s { 1311struct fcgs_gidpn_resp_s {
1462 u32 rsvd:8; 1312 u32 rsvd:8;
1463 u32 dap:24; /* port identifier */ 1313 u32 dap:24; /* port identifier */
1464}; 1314};
1465 1315
1466/* 1316/*
@@ -1469,22 +1319,21 @@ struct fcgs_gidpn_resp_s {
1469struct fcgs_rftid_req_s { 1319struct fcgs_rftid_req_s {
1470 u32 rsvd:8; 1320 u32 rsvd:8;
1471 u32 dap:24; /* port identifier */ 1321 u32 dap:24; /* port identifier */
1472 u32 fc4_type[8]; /* fc4 types */ 1322 __be32 fc4_type[8]; /* fc4 types */
1473}; 1323};
1474 1324
1475/* 1325/*
1476 * RFF_ID : Register FC4 features. 1326 * RFF_ID : Register FC4 features.
1477 */ 1327 */
1478
1479#define FC_GS_FCP_FC4_FEATURE_INITIATOR 0x02 1328#define FC_GS_FCP_FC4_FEATURE_INITIATOR 0x02
1480#define FC_GS_FCP_FC4_FEATURE_TARGET 0x01 1329#define FC_GS_FCP_FC4_FEATURE_TARGET 0x01
1481 1330
1482struct fcgs_rffid_req_s { 1331struct fcgs_rffid_req_s {
1483 u32 rsvd:8; 1332 u32 rsvd:8;
1484 u32 dap:24; /* port identifier */ 1333 u32 dap:24; /* port identifier */
1485 u32 rsvd1:16; 1334 u32 rsvd1:16;
1486 u32 fc4ftr_bits:8; /* fc4 feature bits */ 1335 u32 fc4ftr_bits:8; /* fc4 feature bits */
1487 u32 fc4_type:8; /* corresponding FC4 Type */ 1336 u32 fc4_type:8; /* corresponding FC4 Type */
1488}; 1337};
1489 1338
1490/* 1339/*
@@ -1495,16 +1344,16 @@ struct fcgs_gidft_req_s {
1495 u8 domain_id; /* domain, 0 - all fabric */ 1344 u8 domain_id; /* domain, 0 - all fabric */
1496 u8 area_id; /* area, 0 - whole domain */ 1345 u8 area_id; /* area, 0 - whole domain */
1497 u8 fc4_type; /* FC_TYPE_FCP for SCSI devices */ 1346 u8 fc4_type; /* FC_TYPE_FCP for SCSI devices */
1498}; /* GID_FT Request */ 1347};
1499 1348
1500/* 1349/*
1501 * GID_FT Response 1350 * GID_FT Response
1502 */ 1351 */
1503struct fcgs_gidft_resp_s { 1352struct fcgs_gidft_resp_s {
1504 u8 last:1; /* last port identifier flag */ 1353 u8 last:1; /* last port identifier flag */
1505 u8 reserved:7; 1354 u8 reserved:7;
1506 u32 pid:24; /* port identifier */ 1355 u32 pid:24; /* port identifier */
1507}; /* GID_FT Response */ 1356};
1508 1357
1509/* 1358/*
1510 * RSPN_ID 1359 * RSPN_ID
@@ -1512,8 +1361,8 @@ struct fcgs_gidft_resp_s {
1512struct fcgs_rspnid_req_s { 1361struct fcgs_rspnid_req_s {
1513 u32 rsvd:8; 1362 u32 rsvd:8;
1514 u32 dap:24; /* port identifier */ 1363 u32 dap:24; /* port identifier */
1515 u8 spn_len; /* symbolic port name length */ 1364 u8 spn_len; /* symbolic port name length */
1516 u8 spn[256]; /* symbolic port name */ 1365 u8 spn[256]; /* symbolic port name */
1517}; 1366};
1518 1367
1519/* 1368/*
@@ -1522,7 +1371,7 @@ struct fcgs_rspnid_req_s {
1522struct fcgs_rpnid_req_s { 1371struct fcgs_rpnid_req_s {
1523 u32 rsvd:8; 1372 u32 rsvd:8;
1524 u32 port_id:24; 1373 u32 port_id:24;
1525 wwn_t port_name; 1374 wwn_t port_name;
1526}; 1375};
1527 1376
1528/* 1377/*
@@ -1531,7 +1380,7 @@ struct fcgs_rpnid_req_s {
1531struct fcgs_rnnid_req_s { 1380struct fcgs_rnnid_req_s {
1532 u32 rsvd:8; 1381 u32 rsvd:8;
1533 u32 port_id:24; 1382 u32 port_id:24;
1534 wwn_t node_name; 1383 wwn_t node_name;
1535}; 1384};
1536 1385
1537/* 1386/*
@@ -1565,8 +1414,8 @@ struct fcgs_ganxt_req_s {
1565 * GA_NXT Response 1414 * GA_NXT Response
1566 */ 1415 */
1567struct fcgs_ganxt_rsp_s { 1416struct fcgs_ganxt_rsp_s {
1568 u32 port_type:8; /* Port Type */ 1417 u32 port_type:8; /* Port Type */
1569 u32 port_id:24; /* Port Identifier */ 1418 u32 port_id:24; /* Port Identifier */
1570 wwn_t port_name; /* Port Name */ 1419 wwn_t port_name; /* Port Name */
1571 u8 spn_len; /* Length of Symbolic Port Name */ 1420 u8 spn_len; /* Length of Symbolic Port Name */
1572 char spn[255]; /* Symbolic Port Name */ 1421 char spn[255]; /* Symbolic Port Name */
@@ -1575,19 +1424,14 @@ struct fcgs_ganxt_rsp_s {
1575 char snn[255]; /* Symbolic Node Name */ 1424 char snn[255]; /* Symbolic Node Name */
1576 u8 ipa[8]; /* Initial Process Associator */ 1425 u8 ipa[8]; /* Initial Process Associator */
1577 u8 ip[16]; /* IP Address */ 1426 u8 ip[16]; /* IP Address */
1578 u32 cos; /* Class of Service */ 1427 u32 cos; /* Class of Service */
1579 u32 fc4types[8]; /* FC-4 TYPEs */ 1428 u32 fc4types[8]; /* FC-4 TYPEs */
1580 wwn_t fabric_port_name; 1429 wwn_t fabric_port_name; /* Fabric Port Name */
1581 /* Fabric Port Name */ 1430 u32 rsvd:8; /* Reserved */
1582 u32 rsvd:8; /* Reserved */ 1431 u32 hard_addr:24; /* Hard Address */
1583 u32 hard_addr:24; /* Hard Address */
1584}; 1432};
1585 1433
1586/* 1434/*
1587 * Fabric Config Server
1588 */
1589
1590/*
1591 * Command codes for Fabric Configuration Server 1435 * Command codes for Fabric Configuration Server
1592 */ 1436 */
1593enum { 1437enum {
@@ -1598,159 +1442,9 @@ enum {
1598}; 1442};
1599 1443
1600/* 1444/*
1601 * Source or Destination Port Tags.
1602 */
1603enum {
1604 GS_FTRACE_TAG_NPORT_ID = 1,
1605 GS_FTRACE_TAG_NPORT_NAME = 2,
1606};
1607
1608/*
1609* Port Value : Could be a Port id or wwn
1610 */
1611union fcgs_port_val_u {
1612 u32 nport_id;
1613 wwn_t nport_wwn;
1614};
1615
1616#define GS_FTRACE_MAX_HOP_COUNT 20
1617#define GS_FTRACE_REVISION 1
1618
1619/*
1620 * Ftrace Related Structures.
1621 */
1622
1623/*
1624 * STR (Switch Trace) Reject Reason Codes. From FC-SW.
1625 */
1626enum {
1627 GS_FTRACE_STR_CMD_COMPLETED_SUCC = 0,
1628 GS_FTRACE_STR_CMD_NOT_SUPP_IN_NEXT_SWITCH,
1629 GS_FTRACE_STR_NO_RESP_FROM_NEXT_SWITCH,
1630 GS_FTRACE_STR_MAX_HOP_CNT_REACHED,
1631 GS_FTRACE_STR_SRC_PORT_NOT_FOUND,
1632 GS_FTRACE_STR_DST_PORT_NOT_FOUND,
1633 GS_FTRACE_STR_DEVICES_NOT_IN_COMMON_ZONE,
1634 GS_FTRACE_STR_NO_ROUTE_BW_PORTS,
1635 GS_FTRACE_STR_NO_ADDL_EXPLN,
1636 GS_FTRACE_STR_FABRIC_BUSY,
1637 GS_FTRACE_STR_FABRIC_BUILD_IN_PROGRESS,
1638 GS_FTRACE_STR_VENDOR_SPECIFIC_ERR_START = 0xf0,
1639 GS_FTRACE_STR_VENDOR_SPECIFIC_ERR_END = 0xff,
1640};
1641
1642/*
1643 * Ftrace Request
1644 */
1645struct fcgs_ftrace_req_s {
1646 u32 revision;
1647 u16 src_port_tag; /* Source Port tag */
1648 u16 src_port_len; /* Source Port len */
1649 union fcgs_port_val_u src_port_val; /* Source Port value */
1650 u16 dst_port_tag; /* Destination Port tag */
1651 u16 dst_port_len; /* Destination Port len */
1652 union fcgs_port_val_u dst_port_val; /* Destination Port value */
1653 u32 token;
1654 u8 vendor_id[8]; /* T10 Vendor Identifier */
1655 u8 vendor_info[8]; /* Vendor specific Info */
1656 u32 max_hop_cnt; /* Max Hop Count */
1657};
1658
1659/*
1660 * Path info structure
1661 */
1662struct fcgs_ftrace_path_info_s {
1663 wwn_t switch_name; /* Switch WWN */
1664 u32 domain_id;
1665 wwn_t ingress_port_name; /* Ingress ports wwn */
1666 u32 ingress_phys_port_num; /* Ingress ports physical port
1667 * number
1668 */
1669 wwn_t egress_port_name; /* Ingress ports wwn */
1670 u32 egress_phys_port_num; /* Ingress ports physical port
1671 * number
1672 */
1673};
1674
1675/*
1676 * Ftrace Acc Response
1677 */
1678struct fcgs_ftrace_resp_s {
1679 u32 revision;
1680 u32 token;
1681 u8 vendor_id[8]; /* T10 Vendor Identifier */
1682 u8 vendor_info[8]; /* Vendor specific Info */
1683 u32 str_rej_reason_code; /* STR Reject Reason Code */
1684 u32 num_path_info_entries; /* No. of path info entries */
1685 /*
1686 * path info entry/entries.
1687 */
1688 struct fcgs_ftrace_path_info_s path_info[1];
1689
1690};
1691
1692/*
1693* Fabric Config Server : FCPing
1694 */
1695
1696/*
1697 * FC Ping Request
1698 */
1699struct fcgs_fcping_req_s {
1700 u32 revision;
1701 u16 port_tag;
1702 u16 port_len; /* Port len */
1703 union fcgs_port_val_u port_val; /* Port value */
1704 u32 token;
1705};
1706
1707/*
1708 * FC Ping Response
1709 */
1710struct fcgs_fcping_resp_s {
1711 u32 token;
1712};
1713
1714/*
1715 * Command codes for zone server query.
1716 */
1717enum {
1718 ZS_GZME = 0x0124, /* Get zone member extended */
1719};
1720
1721/*
1722 * ZS GZME request
1723 */
1724#define ZS_GZME_ZNAMELEN 32
1725struct zs_gzme_req_s {
1726 u8 znamelen;
1727 u8 rsvd[3];
1728 u8 zname[ZS_GZME_ZNAMELEN];
1729};
1730
1731enum zs_mbr_type {
1732 ZS_MBR_TYPE_PWWN = 1,
1733 ZS_MBR_TYPE_DOMPORT = 2,
1734 ZS_MBR_TYPE_PORTID = 3,
1735 ZS_MBR_TYPE_NWWN = 4,
1736};
1737
1738struct zs_mbr_wwn_s {
1739 u8 mbr_type;
1740 u8 rsvd[3];
1741 wwn_t wwn;
1742};
1743
1744struct zs_query_resp_s {
1745 u32 nmbrs; /* number of zone members */
1746 struct zs_mbr_wwn_s mbr[1];
1747};
1748
1749/*
1750 * GMAL Command ( Get ( interconnect Element) Management Address List) 1445 * GMAL Command ( Get ( interconnect Element) Management Address List)
1751 * To retrieve the IP Address of a Switch. 1446 * To retrieve the IP Address of a Switch.
1752 */ 1447 */
1753
1754#define CT_GMAL_RESP_PREFIX_TELNET "telnet://" 1448#define CT_GMAL_RESP_PREFIX_TELNET "telnet://"
1755#define CT_GMAL_RESP_PREFIX_HTTP "http://" 1449#define CT_GMAL_RESP_PREFIX_HTTP "http://"
1756 1450
@@ -1764,7 +1458,7 @@ struct fcgs_req_s {
1764 1458
1765/* Accept Response to GMAL */ 1459/* Accept Response to GMAL */
1766struct fcgs_gmal_resp_s { 1460struct fcgs_gmal_resp_s {
1767 u32 ms_len; /* Num of entries */ 1461 __be32 ms_len; /* Num of entries */
1768 u8 ms_ma[256]; 1462 u8 ms_ma[256];
1769}; 1463};
1770 1464
@@ -1775,9 +1469,6 @@ struct fcgs_gmal_entry_s {
1775}; 1469};
1776 1470
1777/* 1471/*
1778 * FDMI
1779 */
1780/*
1781 * FDMI Command Codes 1472 * FDMI Command Codes
1782 */ 1473 */
1783#define FDMI_GRHL 0x0100 1474#define FDMI_GRHL 0x0100
@@ -1856,8 +1547,8 @@ enum fdmi_port_attribute_type {
1856 * FDMI attribute 1547 * FDMI attribute
1857 */ 1548 */
1858struct fdmi_attr_s { 1549struct fdmi_attr_s {
1859 u16 type; 1550 __be16 type;
1860 u16 len; 1551 __be16 len;
1861 u8 value[1]; 1552 u8 value[1];
1862}; 1553};
1863 1554
@@ -1865,7 +1556,7 @@ struct fdmi_attr_s {
1865 * HBA Attribute Block 1556 * HBA Attribute Block
1866 */ 1557 */
1867struct fdmi_hba_attr_s { 1558struct fdmi_hba_attr_s {
1868 u32 attr_count; /* # of attributes */ 1559 __be32 attr_count; /* # of attributes */
1869 struct fdmi_attr_s hba_attr; /* n attributes */ 1560 struct fdmi_attr_s hba_attr; /* n attributes */
1870}; 1561};
1871 1562
@@ -1873,15 +1564,15 @@ struct fdmi_hba_attr_s {
1873 * Registered Port List 1564 * Registered Port List
1874 */ 1565 */
1875struct fdmi_port_list_s { 1566struct fdmi_port_list_s {
1876 u32 num_ports; /* number Of Port Entries */ 1567 __be32 num_ports; /* number Of Port Entries */
1877 wwn_t port_entry; /* one or more */ 1568 wwn_t port_entry; /* one or more */
1878}; 1569};
1879 1570
1880/* 1571/*
1881 * Port Attribute Block 1572 * Port Attribute Block
1882 */ 1573 */
1883struct fdmi_port_attr_s { 1574struct fdmi_port_attr_s {
1884 u32 attr_count; /* # of attributes */ 1575 __be32 attr_count; /* # of attributes */
1885 struct fdmi_attr_s port_attr; /* n attributes */ 1576 struct fdmi_attr_s port_attr; /* n attributes */
1886}; 1577};
1887 1578
@@ -1889,7 +1580,7 @@ struct fdmi_port_attr_s {
1889 * FDMI Register HBA Attributes 1580 * FDMI Register HBA Attributes
1890 */ 1581 */
1891struct fdmi_rhba_s { 1582struct fdmi_rhba_s {
1892 wwn_t hba_id; /* HBA Identifier */ 1583 wwn_t hba_id; /* HBA Identifier */
1893 struct fdmi_port_list_s port_list; /* Registered Port List */ 1584 struct fdmi_port_list_s port_list; /* Registered Port List */
1894 struct fdmi_hba_attr_s hba_attr_blk; /* HBA attribute block */ 1585 struct fdmi_hba_attr_s hba_attr_blk; /* HBA attribute block */
1895}; 1586};
@@ -1898,8 +1589,8 @@ struct fdmi_rhba_s {
1898 * FDMI Register Port 1589 * FDMI Register Port
1899 */ 1590 */
1900struct fdmi_rprt_s { 1591struct fdmi_rprt_s {
1901 wwn_t hba_id; /* HBA Identifier */ 1592 wwn_t hba_id; /* HBA Identifier */
1902 wwn_t port_name; /* Port wwn */ 1593 wwn_t port_name; /* Port wwn */
1903 struct fdmi_port_attr_s port_attr_blk; /* Port Attr Block */ 1594 struct fdmi_port_attr_s port_attr_blk; /* Port Attr Block */
1904}; 1595};
1905 1596
@@ -1907,7 +1598,7 @@ struct fdmi_rprt_s {
1907 * FDMI Register Port Attributes 1598 * FDMI Register Port Attributes
1908 */ 1599 */
1909struct fdmi_rpa_s { 1600struct fdmi_rpa_s {
1910 wwn_t port_name; /* port wwn */ 1601 wwn_t port_name; /* port wwn */
1911 struct fdmi_port_attr_s port_attr_blk; /* Port Attr Block */ 1602 struct fdmi_port_attr_s port_attr_blk; /* Port Attr Block */
1912}; 1603};
1913 1604
diff --git a/drivers/scsi/bfa/bfa_fcbuild.c b/drivers/scsi/bfa/bfa_fcbuild.c
index 9c725314b513..b7e253451654 100644
--- a/drivers/scsi/bfa/bfa_fcbuild.c
+++ b/drivers/scsi/bfa/bfa_fcbuild.c
@@ -18,16 +18,16 @@
18 * fcbuild.c - FC link service frame building and parsing routines 18 * fcbuild.c - FC link service frame building and parsing routines
19 */ 19 */
20 20
21#include "bfa_os_inc.h" 21#include "bfad_drv.h"
22#include "bfa_fcbuild.h" 22#include "bfa_fcbuild.h"
23 23
24/* 24/*
25 * static build functions 25 * static build functions
26 */ 26 */
27static void fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, 27static void fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
28 u16 ox_id); 28 __be16 ox_id);
29static void fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, 29static void fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
30 u16 ox_id); 30 __be16 ox_id);
31static struct fchs_s fc_els_req_tmpl; 31static struct fchs_s fc_els_req_tmpl;
32static struct fchs_s fc_els_rsp_tmpl; 32static struct fchs_s fc_els_rsp_tmpl;
33static struct fchs_s fc_bls_req_tmpl; 33static struct fchs_s fc_bls_req_tmpl;
@@ -48,7 +48,7 @@ fcbuild_init(void)
48 fc_els_req_tmpl.cat_info = FC_CAT_LD_REQUEST; 48 fc_els_req_tmpl.cat_info = FC_CAT_LD_REQUEST;
49 fc_els_req_tmpl.type = FC_TYPE_ELS; 49 fc_els_req_tmpl.type = FC_TYPE_ELS;
50 fc_els_req_tmpl.f_ctl = 50 fc_els_req_tmpl.f_ctl =
51 bfa_os_hton3b(FCTL_SEQ_INI | FCTL_FS_EXCH | FCTL_END_SEQ | 51 bfa_hton3b(FCTL_SEQ_INI | FCTL_FS_EXCH | FCTL_END_SEQ |
52 FCTL_SI_XFER); 52 FCTL_SI_XFER);
53 fc_els_req_tmpl.rx_id = FC_RXID_ANY; 53 fc_els_req_tmpl.rx_id = FC_RXID_ANY;
54 54
@@ -59,7 +59,7 @@ fcbuild_init(void)
59 fc_els_rsp_tmpl.cat_info = FC_CAT_LD_REPLY; 59 fc_els_rsp_tmpl.cat_info = FC_CAT_LD_REPLY;
60 fc_els_rsp_tmpl.type = FC_TYPE_ELS; 60 fc_els_rsp_tmpl.type = FC_TYPE_ELS;
61 fc_els_rsp_tmpl.f_ctl = 61 fc_els_rsp_tmpl.f_ctl =
62 bfa_os_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH | 62 bfa_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH |
63 FCTL_END_SEQ | FCTL_SI_XFER); 63 FCTL_END_SEQ | FCTL_SI_XFER);
64 fc_els_rsp_tmpl.rx_id = FC_RXID_ANY; 64 fc_els_rsp_tmpl.rx_id = FC_RXID_ANY;
65 65
@@ -68,7 +68,7 @@ fcbuild_init(void)
68 */ 68 */
69 fc_bls_req_tmpl.routing = FC_RTG_BASIC_LINK; 69 fc_bls_req_tmpl.routing = FC_RTG_BASIC_LINK;
70 fc_bls_req_tmpl.type = FC_TYPE_BLS; 70 fc_bls_req_tmpl.type = FC_TYPE_BLS;
71 fc_bls_req_tmpl.f_ctl = bfa_os_hton3b(FCTL_END_SEQ | FCTL_SI_XFER); 71 fc_bls_req_tmpl.f_ctl = bfa_hton3b(FCTL_END_SEQ | FCTL_SI_XFER);
72 fc_bls_req_tmpl.rx_id = FC_RXID_ANY; 72 fc_bls_req_tmpl.rx_id = FC_RXID_ANY;
73 73
74 /* 74 /*
@@ -78,7 +78,7 @@ fcbuild_init(void)
78 fc_bls_rsp_tmpl.cat_info = FC_CAT_BA_ACC; 78 fc_bls_rsp_tmpl.cat_info = FC_CAT_BA_ACC;
79 fc_bls_rsp_tmpl.type = FC_TYPE_BLS; 79 fc_bls_rsp_tmpl.type = FC_TYPE_BLS;
80 fc_bls_rsp_tmpl.f_ctl = 80 fc_bls_rsp_tmpl.f_ctl =
81 bfa_os_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH | 81 bfa_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH |
82 FCTL_END_SEQ | FCTL_SI_XFER); 82 FCTL_END_SEQ | FCTL_SI_XFER);
83 fc_bls_rsp_tmpl.rx_id = FC_RXID_ANY; 83 fc_bls_rsp_tmpl.rx_id = FC_RXID_ANY;
84 84
@@ -129,7 +129,7 @@ fcbuild_init(void)
129 fcp_fchs_tmpl.cat_info = FC_CAT_UNSOLICIT_CMD; 129 fcp_fchs_tmpl.cat_info = FC_CAT_UNSOLICIT_CMD;
130 fcp_fchs_tmpl.type = FC_TYPE_FCP; 130 fcp_fchs_tmpl.type = FC_TYPE_FCP;
131 fcp_fchs_tmpl.f_ctl = 131 fcp_fchs_tmpl.f_ctl =
132 bfa_os_hton3b(FCTL_FS_EXCH | FCTL_END_SEQ | FCTL_SI_XFER); 132 bfa_hton3b(FCTL_FS_EXCH | FCTL_END_SEQ | FCTL_SI_XFER);
133 fcp_fchs_tmpl.seq_id = 1; 133 fcp_fchs_tmpl.seq_id = 1;
134 fcp_fchs_tmpl.rx_id = FC_RXID_ANY; 134 fcp_fchs_tmpl.rx_id = FC_RXID_ANY;
135} 135}
@@ -143,7 +143,7 @@ fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u32 ox_id)
143 fchs->cat_info = FC_CAT_UNSOLICIT_CTRL; 143 fchs->cat_info = FC_CAT_UNSOLICIT_CTRL;
144 fchs->type = FC_TYPE_SERVICES; 144 fchs->type = FC_TYPE_SERVICES;
145 fchs->f_ctl = 145 fchs->f_ctl =
146 bfa_os_hton3b(FCTL_SEQ_INI | FCTL_FS_EXCH | FCTL_END_SEQ | 146 bfa_hton3b(FCTL_SEQ_INI | FCTL_FS_EXCH | FCTL_END_SEQ |
147 FCTL_SI_XFER); 147 FCTL_SI_XFER);
148 fchs->rx_id = FC_RXID_ANY; 148 fchs->rx_id = FC_RXID_ANY;
149 fchs->d_id = (d_id); 149 fchs->d_id = (d_id);
@@ -157,7 +157,7 @@ fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u32 ox_id)
157} 157}
158 158
159void 159void
160fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id) 160fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id)
161{ 161{
162 memcpy(fchs, &fc_els_req_tmpl, sizeof(struct fchs_s)); 162 memcpy(fchs, &fc_els_req_tmpl, sizeof(struct fchs_s));
163 fchs->d_id = (d_id); 163 fchs->d_id = (d_id);
@@ -166,7 +166,7 @@ fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
166} 166}
167 167
168static void 168static void
169fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id) 169fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id)
170{ 170{
171 memcpy(fchs, &fc_els_rsp_tmpl, sizeof(struct fchs_s)); 171 memcpy(fchs, &fc_els_rsp_tmpl, sizeof(struct fchs_s));
172 fchs->d_id = d_id; 172 fchs->d_id = d_id;
@@ -196,7 +196,7 @@ fc_els_rsp_parse(struct fchs_s *fchs, int len)
196} 196}
197 197
198static void 198static void
199fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id) 199fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id)
200{ 200{
201 memcpy(fchs, &fc_bls_rsp_tmpl, sizeof(struct fchs_s)); 201 memcpy(fchs, &fc_bls_rsp_tmpl, sizeof(struct fchs_s));
202 fchs->d_id = d_id; 202 fchs->d_id = d_id;
@@ -206,7 +206,7 @@ fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
206 206
207static u16 207static u16
208fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, 208fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
209 u16 ox_id, wwn_t port_name, wwn_t node_name, 209 __be16 ox_id, wwn_t port_name, wwn_t node_name,
210 u16 pdu_size, u8 els_code) 210 u16 pdu_size, u8 els_code)
211{ 211{
212 struct fc_logi_s *plogi = (struct fc_logi_s *) (pld); 212 struct fc_logi_s *plogi = (struct fc_logi_s *) (pld);
@@ -232,8 +232,8 @@ fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
232 u16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size, 232 u16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size,
233 u8 set_npiv, u8 set_auth, u16 local_bb_credits) 233 u8 set_npiv, u8 set_auth, u16 local_bb_credits)
234{ 234{
235 u32 d_id = bfa_os_hton3b(FC_FABRIC_PORT); 235 u32 d_id = bfa_hton3b(FC_FABRIC_PORT);
236 u32 *vvl_info; 236 __be32 *vvl_info;
237 237
238 memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s)); 238 memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
239 239
@@ -267,7 +267,7 @@ fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
267 267
268u16 268u16
269fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id, 269fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
270 u16 ox_id, wwn_t port_name, wwn_t node_name, 270 __be16 ox_id, wwn_t port_name, wwn_t node_name,
271 u16 pdu_size, u16 local_bb_credits) 271 u16 pdu_size, u16 local_bb_credits)
272{ 272{
273 u32 d_id = 0; 273 u32 d_id = 0;
@@ -289,7 +289,7 @@ u16
289fc_fdisc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id, 289fc_fdisc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
290 u16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size) 290 u16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size)
291{ 291{
292 u32 d_id = bfa_os_hton3b(FC_FABRIC_PORT); 292 u32 d_id = bfa_hton3b(FC_FABRIC_PORT);
293 293
294 memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s)); 294 memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
295 295
@@ -392,7 +392,7 @@ fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
392 392
393u16 393u16
394fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, 394fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
395 u16 ox_id, enum bfa_lport_role role) 395 __be16 ox_id, enum bfa_lport_role role)
396{ 396{
397 struct fc_prli_s *prli = (struct fc_prli_s *) (pld); 397 struct fc_prli_s *prli = (struct fc_prli_s *) (pld);
398 398
@@ -456,9 +456,9 @@ fc_logo_build(struct fchs_s *fchs, struct fc_logo_s *logo, u32 d_id, u32 s_id,
456 return sizeof(struct fc_logo_s); 456 return sizeof(struct fc_logo_s);
457} 457}
458 458
459static u16 459static u16
460fc_adisc_x_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id, 460fc_adisc_x_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id,
461 u32 s_id, u16 ox_id, wwn_t port_name, 461 u32 s_id, __be16 ox_id, wwn_t port_name,
462 wwn_t node_name, u8 els_code) 462 wwn_t node_name, u8 els_code)
463{ 463{
464 memset(adisc, '\0', sizeof(struct fc_adisc_s)); 464 memset(adisc, '\0', sizeof(struct fc_adisc_s));
@@ -480,7 +480,7 @@ fc_adisc_x_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id,
480 480
481u16 481u16
482fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id, 482fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id,
483 u32 s_id, u16 ox_id, wwn_t port_name, wwn_t node_name) 483 u32 s_id, __be16 ox_id, wwn_t port_name, wwn_t node_name)
484{ 484{
485 return fc_adisc_x_build(fchs, adisc, d_id, s_id, ox_id, port_name, 485 return fc_adisc_x_build(fchs, adisc, d_id, s_id, ox_id, port_name,
486 node_name, FC_ELS_ADISC); 486 node_name, FC_ELS_ADISC);
@@ -488,7 +488,7 @@ fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id,
488 488
489u16 489u16
490fc_adisc_acc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id, 490fc_adisc_acc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id,
491 u32 s_id, u16 ox_id, wwn_t port_name, 491 u32 s_id, __be16 ox_id, wwn_t port_name,
492 wwn_t node_name) 492 wwn_t node_name)
493{ 493{
494 return fc_adisc_x_build(fchs, adisc, d_id, s_id, ox_id, port_name, 494 return fc_adisc_x_build(fchs, adisc, d_id, s_id, ox_id, port_name,
@@ -592,7 +592,7 @@ fc_rrq_build(struct fchs_s *fchs, struct fc_rrq_s *rrq, u32 d_id, u32 s_id,
592 592
593u16 593u16
594fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, 594fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
595 u16 ox_id) 595 __be16 ox_id)
596{ 596{
597 struct fc_els_cmd_s *acc = pld; 597 struct fc_els_cmd_s *acc = pld;
598 598
@@ -606,7 +606,7 @@ fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
606 606
607u16 607u16
608fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt, u32 d_id, 608fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt, u32 d_id,
609 u32 s_id, u16 ox_id, u8 reason_code, 609 u32 s_id, __be16 ox_id, u8 reason_code,
610 u8 reason_code_expl) 610 u8 reason_code_expl)
611{ 611{
612 fc_els_rsp_build(fchs, d_id, s_id, ox_id); 612 fc_els_rsp_build(fchs, d_id, s_id, ox_id);
@@ -622,7 +622,7 @@ fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt, u32 d_id,
622 622
623u16 623u16
624fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id, 624fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id,
625 u32 s_id, u16 ox_id, u16 rx_id) 625 u32 s_id, __be16 ox_id, u16 rx_id)
626{ 626{
627 fc_bls_rsp_build(fchs, d_id, s_id, ox_id); 627 fc_bls_rsp_build(fchs, d_id, s_id, ox_id);
628 628
@@ -638,7 +638,7 @@ fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id,
638 638
639u16 639u16
640fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd, u32 d_id, 640fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd, u32 d_id,
641 u32 s_id, u16 ox_id) 641 u32 s_id, __be16 ox_id)
642{ 642{
643 fc_els_rsp_build(fchs, d_id, s_id, ox_id); 643 fc_els_rsp_build(fchs, d_id, s_id, ox_id);
644 memset(els_cmd, 0, sizeof(struct fc_els_cmd_s)); 644 memset(els_cmd, 0, sizeof(struct fc_els_cmd_s));
@@ -666,7 +666,7 @@ fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code)
666 666
667u16 667u16
668fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc, 668fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc,
669 u32 d_id, u32 s_id, u16 ox_id, int num_pages) 669 u32 d_id, u32 s_id, __be16 ox_id, int num_pages)
670{ 670{
671 int page; 671 int page;
672 672
@@ -690,7 +690,7 @@ fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc,
690 690
691u16 691u16
692fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc, u32 d_id, 692fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc, u32 d_id,
693 u32 s_id, u16 ox_id, int num_pages) 693 u32 s_id, __be16 ox_id, int num_pages)
694{ 694{
695 int page; 695 int page;
696 696
@@ -728,7 +728,7 @@ fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid, u32 d_id,
728 728
729u16 729u16
730fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc, u32 d_id, 730fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc, u32 d_id,
731 u32 s_id, u16 ox_id, u32 data_format, 731 u32 s_id, __be16 ox_id, u32 data_format,
732 struct fc_rnid_common_id_data_s *common_id_data, 732 struct fc_rnid_common_id_data_s *common_id_data,
733 struct fc_rnid_general_topology_data_s *gen_topo_data) 733 struct fc_rnid_general_topology_data_s *gen_topo_data)
734{ 734{
@@ -770,10 +770,10 @@ u16
770fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rpsc2, u32 d_id, 770fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rpsc2, u32 d_id,
771 u32 s_id, u32 *pid_list, u16 npids) 771 u32 s_id, u32 *pid_list, u16 npids)
772{ 772{
773 u32 dctlr_id = FC_DOMAIN_CTRLR(bfa_os_hton3b(d_id)); 773 u32 dctlr_id = FC_DOMAIN_CTRLR(bfa_hton3b(d_id));
774 int i = 0; 774 int i = 0;
775 775
776 fc_els_req_build(fchs, bfa_os_hton3b(dctlr_id), s_id, 0); 776 fc_els_req_build(fchs, bfa_hton3b(dctlr_id), s_id, 0);
777 777
778 memset(rpsc2, 0, sizeof(struct fc_rpsc2_cmd_s)); 778 memset(rpsc2, 0, sizeof(struct fc_rpsc2_cmd_s));
779 779
@@ -788,7 +788,7 @@ fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rpsc2, u32 d_id,
788 788
789u16 789u16
790fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc, 790fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc,
791 u32 d_id, u32 s_id, u16 ox_id, 791 u32 d_id, u32 s_id, __be16 ox_id,
792 struct fc_rpsc_speed_info_s *oper_speed) 792 struct fc_rpsc_speed_info_s *oper_speed)
793{ 793{
794 memset(rpsc_acc, 0, sizeof(struct fc_rpsc_acc_s)); 794 memset(rpsc_acc, 0, sizeof(struct fc_rpsc_acc_s));
@@ -807,11 +807,6 @@ fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc,
807 return sizeof(struct fc_rpsc_acc_s); 807 return sizeof(struct fc_rpsc_acc_s);
808} 808}
809 809
810/*
811 * TBD -
812 * . get rid of unnecessary memsets
813 */
814
815u16 810u16
816fc_logo_rsp_parse(struct fchs_s *fchs, int len) 811fc_logo_rsp_parse(struct fchs_s *fchs, int len)
817{ 812{
@@ -995,7 +990,7 @@ fc_rrq_rsp_parse(struct fchs_s *fchs, int len)
995} 990}
996 991
997u16 992u16
998fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id, 993fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id,
999 u32 reason_code, u32 reason_expl) 994 u32 reason_code, u32 reason_expl)
1000{ 995{
1001 struct fc_ba_rjt_s *ba_rjt = (struct fc_ba_rjt_s *) (fchs + 1); 996 struct fc_ba_rjt_s *ba_rjt = (struct fc_ba_rjt_s *) (fchs + 1);
@@ -1045,7 +1040,7 @@ fc_gidpn_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
1045{ 1040{
1046 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; 1041 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1047 struct fcgs_gidpn_req_s *gidpn = (struct fcgs_gidpn_req_s *)(cthdr + 1); 1042 struct fcgs_gidpn_req_s *gidpn = (struct fcgs_gidpn_req_s *)(cthdr + 1);
1048 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); 1043 u32 d_id = bfa_hton3b(FC_NAME_SERVER);
1049 1044
1050 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); 1045 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
1051 fc_gs_cthdr_build(cthdr, s_id, GS_GID_PN); 1046 fc_gs_cthdr_build(cthdr, s_id, GS_GID_PN);
@@ -1061,7 +1056,7 @@ fc_gpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
1061{ 1056{
1062 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; 1057 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1063 fcgs_gpnid_req_t *gpnid = (fcgs_gpnid_req_t *) (cthdr + 1); 1058 fcgs_gpnid_req_t *gpnid = (fcgs_gpnid_req_t *) (cthdr + 1);
1064 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); 1059 u32 d_id = bfa_hton3b(FC_NAME_SERVER);
1065 1060
1066 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); 1061 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
1067 fc_gs_cthdr_build(cthdr, s_id, GS_GPN_ID); 1062 fc_gs_cthdr_build(cthdr, s_id, GS_GPN_ID);
@@ -1077,7 +1072,7 @@ fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
1077{ 1072{
1078 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; 1073 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1079 fcgs_gnnid_req_t *gnnid = (fcgs_gnnid_req_t *) (cthdr + 1); 1074 fcgs_gnnid_req_t *gnnid = (fcgs_gnnid_req_t *) (cthdr + 1);
1080 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); 1075 u32 d_id = bfa_hton3b(FC_NAME_SERVER);
1081 1076
1082 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); 1077 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
1083 fc_gs_cthdr_build(cthdr, s_id, GS_GNN_ID); 1078 fc_gs_cthdr_build(cthdr, s_id, GS_GNN_ID);
@@ -1104,7 +1099,7 @@ u16
1104fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr, 1099fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr,
1105 u8 set_br_reg, u32 s_id, u16 ox_id) 1100 u8 set_br_reg, u32 s_id, u16 ox_id)
1106{ 1101{
1107 u32 d_id = bfa_os_hton3b(FC_FABRIC_CONTROLLER); 1102 u32 d_id = bfa_hton3b(FC_FABRIC_CONTROLLER);
1108 1103
1109 fc_els_req_build(fchs, d_id, s_id, ox_id); 1104 fc_els_req_build(fchs, d_id, s_id, ox_id);
1110 1105
@@ -1121,7 +1116,7 @@ u16
1121fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn, 1116fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn,
1122 u32 s_id, u16 ox_id) 1117 u32 s_id, u16 ox_id)
1123{ 1118{
1124 u32 d_id = bfa_os_hton3b(FC_FABRIC_CONTROLLER); 1119 u32 d_id = bfa_hton3b(FC_FABRIC_CONTROLLER);
1125 u16 payldlen; 1120 u16 payldlen;
1126 1121
1127 fc_els_req_build(fchs, d_id, s_id, ox_id); 1122 fc_els_req_build(fchs, d_id, s_id, ox_id);
@@ -1143,7 +1138,7 @@ fc_rftid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
1143{ 1138{
1144 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; 1139 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1145 struct fcgs_rftid_req_s *rftid = (struct fcgs_rftid_req_s *)(cthdr + 1); 1140 struct fcgs_rftid_req_s *rftid = (struct fcgs_rftid_req_s *)(cthdr + 1);
1146 u32 type_value, d_id = bfa_os_hton3b(FC_NAME_SERVER); 1141 u32 type_value, d_id = bfa_hton3b(FC_NAME_SERVER);
1147 u8 index; 1142 u8 index;
1148 1143
1149 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); 1144 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
@@ -1167,7 +1162,7 @@ fc_rftid_build_sol(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
1167{ 1162{
1168 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; 1163 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1169 struct fcgs_rftid_req_s *rftid = (struct fcgs_rftid_req_s *)(cthdr + 1); 1164 struct fcgs_rftid_req_s *rftid = (struct fcgs_rftid_req_s *)(cthdr + 1);
1170 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); 1165 u32 d_id = bfa_hton3b(FC_NAME_SERVER);
1171 1166
1172 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); 1167 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
1173 fc_gs_cthdr_build(cthdr, s_id, GS_RFT_ID); 1168 fc_gs_cthdr_build(cthdr, s_id, GS_RFT_ID);
@@ -1187,7 +1182,7 @@ fc_rffid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
1187{ 1182{
1188 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; 1183 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1189 struct fcgs_rffid_req_s *rffid = (struct fcgs_rffid_req_s *)(cthdr + 1); 1184 struct fcgs_rffid_req_s *rffid = (struct fcgs_rffid_req_s *)(cthdr + 1);
1190 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); 1185 u32 d_id = bfa_hton3b(FC_NAME_SERVER);
1191 1186
1192 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); 1187 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
1193 fc_gs_cthdr_build(cthdr, s_id, GS_RFF_ID); 1188 fc_gs_cthdr_build(cthdr, s_id, GS_RFF_ID);
@@ -1209,7 +1204,7 @@ fc_rspnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
1209 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; 1204 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1210 struct fcgs_rspnid_req_s *rspnid = 1205 struct fcgs_rspnid_req_s *rspnid =
1211 (struct fcgs_rspnid_req_s *)(cthdr + 1); 1206 (struct fcgs_rspnid_req_s *)(cthdr + 1);
1212 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); 1207 u32 d_id = bfa_hton3b(FC_NAME_SERVER);
1213 1208
1214 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); 1209 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
1215 fc_gs_cthdr_build(cthdr, s_id, GS_RSPN_ID); 1210 fc_gs_cthdr_build(cthdr, s_id, GS_RSPN_ID);
@@ -1229,7 +1224,7 @@ fc_gid_ft_build(struct fchs_s *fchs, void *pyld, u32 s_id, u8 fc4_type)
1229 1224
1230 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; 1225 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1231 struct fcgs_gidft_req_s *gidft = (struct fcgs_gidft_req_s *)(cthdr + 1); 1226 struct fcgs_gidft_req_s *gidft = (struct fcgs_gidft_req_s *)(cthdr + 1);
1232 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); 1227 u32 d_id = bfa_hton3b(FC_NAME_SERVER);
1233 1228
1234 fc_gs_fchdr_build(fchs, d_id, s_id, 0); 1229 fc_gs_fchdr_build(fchs, d_id, s_id, 0);
1235 1230
@@ -1249,7 +1244,7 @@ fc_rpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
1249{ 1244{
1250 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; 1245 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1251 struct fcgs_rpnid_req_s *rpnid = (struct fcgs_rpnid_req_s *)(cthdr + 1); 1246 struct fcgs_rpnid_req_s *rpnid = (struct fcgs_rpnid_req_s *)(cthdr + 1);
1252 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); 1247 u32 d_id = bfa_hton3b(FC_NAME_SERVER);
1253 1248
1254 fc_gs_fchdr_build(fchs, d_id, s_id, 0); 1249 fc_gs_fchdr_build(fchs, d_id, s_id, 0);
1255 fc_gs_cthdr_build(cthdr, s_id, GS_RPN_ID); 1250 fc_gs_cthdr_build(cthdr, s_id, GS_RPN_ID);
@@ -1267,7 +1262,7 @@ fc_rnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
1267{ 1262{
1268 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; 1263 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1269 struct fcgs_rnnid_req_s *rnnid = (struct fcgs_rnnid_req_s *)(cthdr + 1); 1264 struct fcgs_rnnid_req_s *rnnid = (struct fcgs_rnnid_req_s *)(cthdr + 1);
1270 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); 1265 u32 d_id = bfa_hton3b(FC_NAME_SERVER);
1271 1266
1272 fc_gs_fchdr_build(fchs, d_id, s_id, 0); 1267 fc_gs_fchdr_build(fchs, d_id, s_id, 0);
1273 fc_gs_cthdr_build(cthdr, s_id, GS_RNN_ID); 1268 fc_gs_cthdr_build(cthdr, s_id, GS_RNN_ID);
@@ -1286,7 +1281,7 @@ fc_rcsid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
1286 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; 1281 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1287 struct fcgs_rcsid_req_s *rcsid = 1282 struct fcgs_rcsid_req_s *rcsid =
1288 (struct fcgs_rcsid_req_s *) (cthdr + 1); 1283 (struct fcgs_rcsid_req_s *) (cthdr + 1);
1289 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); 1284 u32 d_id = bfa_hton3b(FC_NAME_SERVER);
1290 1285
1291 fc_gs_fchdr_build(fchs, d_id, s_id, 0); 1286 fc_gs_fchdr_build(fchs, d_id, s_id, 0);
1292 fc_gs_cthdr_build(cthdr, s_id, GS_RCS_ID); 1287 fc_gs_cthdr_build(cthdr, s_id, GS_RCS_ID);
@@ -1304,7 +1299,7 @@ fc_rptid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
1304{ 1299{
1305 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; 1300 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1306 struct fcgs_rptid_req_s *rptid = (struct fcgs_rptid_req_s *)(cthdr + 1); 1301 struct fcgs_rptid_req_s *rptid = (struct fcgs_rptid_req_s *)(cthdr + 1);
1307 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); 1302 u32 d_id = bfa_hton3b(FC_NAME_SERVER);
1308 1303
1309 fc_gs_fchdr_build(fchs, d_id, s_id, 0); 1304 fc_gs_fchdr_build(fchs, d_id, s_id, 0);
1310 fc_gs_cthdr_build(cthdr, s_id, GS_RPT_ID); 1305 fc_gs_cthdr_build(cthdr, s_id, GS_RPT_ID);
@@ -1321,7 +1316,7 @@ fc_ganxt_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id)
1321{ 1316{
1322 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; 1317 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1323 struct fcgs_ganxt_req_s *ganxt = (struct fcgs_ganxt_req_s *)(cthdr + 1); 1318 struct fcgs_ganxt_req_s *ganxt = (struct fcgs_ganxt_req_s *)(cthdr + 1);
1324 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); 1319 u32 d_id = bfa_hton3b(FC_NAME_SERVER);
1325 1320
1326 fc_gs_fchdr_build(fchs, d_id, s_id, 0); 1321 fc_gs_fchdr_build(fchs, d_id, s_id, 0);
1327 fc_gs_cthdr_build(cthdr, s_id, GS_GA_NXT); 1322 fc_gs_cthdr_build(cthdr, s_id, GS_GA_NXT);
@@ -1341,7 +1336,7 @@ fc_fdmi_reqhdr_build(struct fchs_s *fchs, void *pyld, u32 s_id,
1341{ 1336{
1342 1337
1343 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; 1338 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1344 u32 d_id = bfa_os_hton3b(FC_MGMT_SERVER); 1339 u32 d_id = bfa_hton3b(FC_MGMT_SERVER);
1345 1340
1346 fc_gs_fchdr_build(fchs, d_id, s_id, 0); 1341 fc_gs_fchdr_build(fchs, d_id, s_id, 0);
1347 fc_gs_fdmi_cthdr_build(cthdr, s_id, cmd_code); 1342 fc_gs_fdmi_cthdr_build(cthdr, s_id, cmd_code);
@@ -1356,7 +1351,7 @@ void
1356fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask) 1351fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask)
1357{ 1352{
1358 u8 index; 1353 u8 index;
1359 u32 *ptr = (u32 *) bit_mask; 1354 __be32 *ptr = (__be32 *) bit_mask;
1360 u32 type_value; 1355 u32 type_value;
1361 1356
1362 /* 1357 /*
@@ -1377,7 +1372,7 @@ fc_gmal_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn)
1377{ 1372{
1378 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; 1373 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1379 fcgs_gmal_req_t *gmal = (fcgs_gmal_req_t *) (cthdr + 1); 1374 fcgs_gmal_req_t *gmal = (fcgs_gmal_req_t *) (cthdr + 1);
1380 u32 d_id = bfa_os_hton3b(FC_MGMT_SERVER); 1375 u32 d_id = bfa_hton3b(FC_MGMT_SERVER);
1381 1376
1382 fc_gs_fchdr_build(fchs, d_id, s_id, 0); 1377 fc_gs_fchdr_build(fchs, d_id, s_id, 0);
1383 fc_gs_ms_cthdr_build(cthdr, s_id, GS_FC_GMAL_CMD, 1378 fc_gs_ms_cthdr_build(cthdr, s_id, GS_FC_GMAL_CMD,
@@ -1397,7 +1392,7 @@ fc_gfn_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn)
1397{ 1392{
1398 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; 1393 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1399 fcgs_gfn_req_t *gfn = (fcgs_gfn_req_t *) (cthdr + 1); 1394 fcgs_gfn_req_t *gfn = (fcgs_gfn_req_t *) (cthdr + 1);
1400 u32 d_id = bfa_os_hton3b(FC_MGMT_SERVER); 1395 u32 d_id = bfa_hton3b(FC_MGMT_SERVER);
1401 1396
1402 fc_gs_fchdr_build(fchs, d_id, s_id, 0); 1397 fc_gs_fchdr_build(fchs, d_id, s_id, 0);
1403 fc_gs_ms_cthdr_build(cthdr, s_id, GS_FC_GFN_CMD, 1398 fc_gs_ms_cthdr_build(cthdr, s_id, GS_FC_GFN_CMD,
diff --git a/drivers/scsi/bfa/bfa_fcbuild.h b/drivers/scsi/bfa/bfa_fcbuild.h
index 73abd02e53cc..ece51ec7620b 100644
--- a/drivers/scsi/bfa/bfa_fcbuild.h
+++ b/drivers/scsi/bfa/bfa_fcbuild.h
@@ -21,7 +21,7 @@
21#ifndef __FCBUILD_H__ 21#ifndef __FCBUILD_H__
22#define __FCBUILD_H__ 22#define __FCBUILD_H__
23 23
24#include "bfa_os_inc.h" 24#include "bfad_drv.h"
25#include "bfa_fc.h" 25#include "bfa_fc.h"
26#include "bfa_defs_fcs.h" 26#include "bfa_defs_fcs.h"
27 27
@@ -138,7 +138,7 @@ u16 fc_fdisc_build(struct fchs_s *buf, struct fc_logi_s *flogi, u32 s_id,
138 u16 pdu_size); 138 u16 pdu_size);
139 139
140u16 fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, 140u16 fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi,
141 u32 s_id, u16 ox_id, 141 u32 s_id, __be16 ox_id,
142 wwn_t port_name, wwn_t node_name, 142 wwn_t port_name, wwn_t node_name,
143 u16 pdu_size, 143 u16 pdu_size,
144 u16 local_bb_credits); 144 u16 local_bb_credits);
@@ -186,7 +186,7 @@ u16 fc_plogi_acc_build(struct fchs_s *fchs, void *pld, u32 d_id,
186 u16 pdu_size); 186 u16 pdu_size);
187 187
188u16 fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, 188u16 fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc,
189 u32 d_id, u32 s_id, u16 ox_id, wwn_t port_name, 189 u32 d_id, u32 s_id, __be16 ox_id, wwn_t port_name,
190 wwn_t node_name); 190 wwn_t node_name);
191 191
192enum fc_parse_status fc_adisc_parse(struct fchs_s *fchs, void *pld, 192enum fc_parse_status fc_adisc_parse(struct fchs_s *fchs, void *pld,
@@ -196,20 +196,20 @@ enum fc_parse_status fc_adisc_rsp_parse(struct fc_adisc_s *adisc, int len,
196 wwn_t port_name, wwn_t node_name); 196 wwn_t port_name, wwn_t node_name);
197 197
198u16 fc_adisc_acc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, 198u16 fc_adisc_acc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc,
199 u32 d_id, u32 s_id, u16 ox_id, 199 u32 d_id, u32 s_id, __be16 ox_id,
200 wwn_t port_name, wwn_t node_name); 200 wwn_t port_name, wwn_t node_name);
201u16 fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt, 201u16 fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt,
202 u32 d_id, u32 s_id, u16 ox_id, 202 u32 d_id, u32 s_id, __be16 ox_id,
203 u8 reason_code, u8 reason_code_expl); 203 u8 reason_code, u8 reason_code_expl);
204u16 fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd, 204u16 fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd,
205 u32 d_id, u32 s_id, u16 ox_id); 205 u32 d_id, u32 s_id, __be16 ox_id);
206u16 fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id, 206u16 fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id,
207 u32 s_id, u16 ox_id); 207 u32 s_id, u16 ox_id);
208 208
209enum fc_parse_status fc_prli_rsp_parse(struct fc_prli_s *prli, int len); 209enum fc_parse_status fc_prli_rsp_parse(struct fc_prli_s *prli, int len);
210 210
211u16 fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, 211u16 fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id,
212 u32 s_id, u16 ox_id, 212 u32 s_id, __be16 ox_id,
213 enum bfa_lport_role role); 213 enum bfa_lport_role role);
214 214
215u16 fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid, 215u16 fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid,
@@ -218,7 +218,7 @@ u16 fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid,
218 218
219u16 fc_rnid_acc_build(struct fchs_s *fchs, 219u16 fc_rnid_acc_build(struct fchs_s *fchs,
220 struct fc_rnid_acc_s *rnid_acc, u32 d_id, u32 s_id, 220 struct fc_rnid_acc_s *rnid_acc, u32 d_id, u32 s_id,
221 u16 ox_id, u32 data_format, 221 __be16 ox_id, u32 data_format,
222 struct fc_rnid_common_id_data_s *common_id_data, 222 struct fc_rnid_common_id_data_s *common_id_data,
223 struct fc_rnid_general_topology_data_s *gen_topo_data); 223 struct fc_rnid_general_topology_data_s *gen_topo_data);
224 224
@@ -228,7 +228,7 @@ u16 fc_rpsc_build(struct fchs_s *fchs, struct fc_rpsc_cmd_s *rpsc,
228 u32 d_id, u32 s_id, u16 ox_id); 228 u32 d_id, u32 s_id, u16 ox_id);
229u16 fc_rpsc_acc_build(struct fchs_s *fchs, 229u16 fc_rpsc_acc_build(struct fchs_s *fchs,
230 struct fc_rpsc_acc_s *rpsc_acc, u32 d_id, u32 s_id, 230 struct fc_rpsc_acc_s *rpsc_acc, u32 d_id, u32 s_id,
231 u16 ox_id, struct fc_rpsc_speed_info_s *oper_speed); 231 __be16 ox_id, struct fc_rpsc_speed_info_s *oper_speed);
232u16 fc_gid_ft_build(struct fchs_s *fchs, void *pld, u32 s_id, 232u16 fc_gid_ft_build(struct fchs_s *fchs, void *pld, u32 s_id,
233 u8 fc4_type); 233 u8 fc4_type);
234 234
@@ -251,7 +251,7 @@ u16 fc_logo_build(struct fchs_s *fchs, struct fc_logo_s *logo, u32 d_id,
251 u32 s_id, u16 ox_id, wwn_t port_name); 251 u32 s_id, u16 ox_id, wwn_t port_name);
252 252
253u16 fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, 253u16 fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id,
254 u32 s_id, u16 ox_id); 254 u32 s_id, __be16 ox_id);
255 255
256u16 fc_fdmi_reqhdr_build(struct fchs_s *fchs, void *pyld, u32 s_id, 256u16 fc_fdmi_reqhdr_build(struct fchs_s *fchs, void *pyld, u32 s_id,
257 u16 cmd_code); 257 u16 cmd_code);
@@ -261,7 +261,7 @@ u16 fc_gfn_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn);
261void fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask); 261void fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask);
262 262
263void fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, 263void fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
264 u16 ox_id); 264 __be16 ox_id);
265 265
266enum fc_parse_status fc_els_rsp_parse(struct fchs_s *fchs, int len); 266enum fc_parse_status fc_els_rsp_parse(struct fchs_s *fchs, int len);
267 267
@@ -274,15 +274,15 @@ enum fc_parse_status fc_pdisc_parse(struct fchs_s *fchs, wwn_t node_name,
274 wwn_t port_name); 274 wwn_t port_name);
275 275
276u16 fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id, 276u16 fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id,
277 u32 s_id, u16 ox_id, u16 rx_id); 277 u32 s_id, __be16 ox_id, u16 rx_id);
278 278
279int fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code); 279int fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code);
280 280
281u16 fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc, 281u16 fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc,
282 u32 d_id, u32 s_id, u16 ox_id, int num_pages); 282 u32 d_id, u32 s_id, __be16 ox_id, int num_pages);
283 283
284u16 fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc, 284u16 fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc,
285 u32 d_id, u32 s_id, u16 ox_id, int num_pages); 285 u32 d_id, u32 s_id, __be16 ox_id, int num_pages);
286 286
287u16 fc_logo_rsp_parse(struct fchs_s *fchs, int len); 287u16 fc_logo_rsp_parse(struct fchs_s *fchs, int len);
288 288
@@ -304,7 +304,7 @@ u16 fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
304u16 fc_tprlo_rsp_parse(struct fchs_s *fchs, int len); 304u16 fc_tprlo_rsp_parse(struct fchs_s *fchs, int len);
305 305
306u16 fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, 306u16 fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
307 u16 ox_id, u32 reason_code, u32 reason_expl); 307 __be16 ox_id, u32 reason_code, u32 reason_expl);
308 308
309u16 fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, 309u16 fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
310 u32 port_id); 310 u32 port_id);
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index 135c4427801c..9c410b21db6d 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -15,17 +15,12 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18#include "bfad_drv.h"
18#include "bfa_modules.h" 19#include "bfa_modules.h"
19#include "bfa_cb_ioim.h"
20 20
21BFA_TRC_FILE(HAL, FCPIM); 21BFA_TRC_FILE(HAL, FCPIM);
22BFA_MODULE(fcpim); 22BFA_MODULE(fcpim);
23 23
24
25#define bfa_fcpim_add_iostats(__l, __r, __stats) \
26 (__l->__stats += __r->__stats)
27
28
29/* 24/*
30 * BFA ITNIM Related definitions 25 * BFA ITNIM Related definitions
31 */ 26 */
@@ -37,12 +32,12 @@ static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
37#define bfa_fcpim_additn(__itnim) \ 32#define bfa_fcpim_additn(__itnim) \
38 list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q) 33 list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q)
39#define bfa_fcpim_delitn(__itnim) do { \ 34#define bfa_fcpim_delitn(__itnim) do { \
40 bfa_assert(bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim)); \ 35 WARN_ON(!bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim)); \
41 bfa_itnim_update_del_itn_stats(__itnim); \ 36 bfa_itnim_update_del_itn_stats(__itnim); \
42 list_del(&(__itnim)->qe); \ 37 list_del(&(__itnim)->qe); \
43 bfa_assert(list_empty(&(__itnim)->io_q)); \ 38 WARN_ON(!list_empty(&(__itnim)->io_q)); \
44 bfa_assert(list_empty(&(__itnim)->io_cleanup_q)); \ 39 WARN_ON(!list_empty(&(__itnim)->io_cleanup_q)); \
45 bfa_assert(list_empty(&(__itnim)->pending_q)); \ 40 WARN_ON(!list_empty(&(__itnim)->pending_q)); \
46} while (0) 41} while (0)
47 42
48#define bfa_itnim_online_cb(__itnim) do { \ 43#define bfa_itnim_online_cb(__itnim) do { \
@@ -73,10 +68,8 @@ static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
73} while (0) 68} while (0)
74 69
75/* 70/*
76 * bfa_itnim_sm BFA itnim state machine 71 * itnim state machine event
77 */ 72 */
78
79
80enum bfa_itnim_event { 73enum bfa_itnim_event {
81 BFA_ITNIM_SM_CREATE = 1, /* itnim is created */ 74 BFA_ITNIM_SM_CREATE = 1, /* itnim is created */
82 BFA_ITNIM_SM_ONLINE = 2, /* itnim is online */ 75 BFA_ITNIM_SM_ONLINE = 2, /* itnim is online */
@@ -107,9 +100,6 @@ enum bfa_itnim_event {
107 if ((__fcpim)->profile_start) \ 100 if ((__fcpim)->profile_start) \
108 (__fcpim)->profile_start(__ioim); \ 101 (__fcpim)->profile_start(__ioim); \
109} while (0) 102} while (0)
110/*
111 * hal_ioim_sm
112 */
113 103
114/* 104/*
115 * IO state machine events 105 * IO state machine events
@@ -221,8 +211,7 @@ static void bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
221 * forward declaration for BFA IOIM functions 211 * forward declaration for BFA IOIM functions
222 */ 212 */
223static bfa_boolean_t bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim); 213static bfa_boolean_t bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
224static bfa_boolean_t bfa_ioim_sge_setup(struct bfa_ioim_s *ioim); 214static bfa_boolean_t bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim);
225static void bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim);
226static bfa_boolean_t bfa_ioim_send_abort(struct bfa_ioim_s *ioim); 215static bfa_boolean_t bfa_ioim_send_abort(struct bfa_ioim_s *ioim);
227static void bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim); 216static void bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim);
228static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete); 217static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete);
@@ -232,7 +221,6 @@ static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
232static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete); 221static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
233static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim); 222static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);
234 223
235
236/* 224/*
237 * forward declaration of BFA IO state machine 225 * forward declaration of BFA IO state machine
238 */ 226 */
@@ -260,14 +248,13 @@ static void bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
260 enum bfa_ioim_event event); 248 enum bfa_ioim_event event);
261static void bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, 249static void bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim,
262 enum bfa_ioim_event event); 250 enum bfa_ioim_event event);
263
264/* 251/*
265 * forward declaration for BFA TSKIM functions 252 * forward declaration for BFA TSKIM functions
266 */ 253 */
267static void __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete); 254static void __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
268static void __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete); 255static void __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete);
269static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim, 256static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim,
270 lun_t lun); 257 struct scsi_lun lun);
271static void bfa_tskim_gather_ios(struct bfa_tskim_s *tskim); 258static void bfa_tskim_gather_ios(struct bfa_tskim_s *tskim);
272static void bfa_tskim_cleanp_comp(void *tskim_cbarg); 259static void bfa_tskim_cleanp_comp(void *tskim_cbarg);
273static void bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim); 260static void bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim);
@@ -275,7 +262,6 @@ static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim);
275static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim); 262static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim);
276static void bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim); 263static void bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);
277 264
278
279/* 265/*
280 * forward declaration of BFA TSKIM state machine 266 * forward declaration of BFA TSKIM state machine
281 */ 267 */
@@ -293,13 +279,12 @@ static void bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
293 enum bfa_tskim_event event); 279 enum bfa_tskim_event event);
294static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, 280static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
295 enum bfa_tskim_event event); 281 enum bfa_tskim_event event);
296
297/* 282/*
298 * hal_fcpim_mod BFA FCP Initiator Mode module 283 * BFA FCP Initiator Mode module
299 */ 284 */
300 285
301/* 286/*
302 * Compute and return memory needed by FCP(im) module. 287 * Compute and return memory needed by FCP(im) module.
303 */ 288 */
304static void 289static void
305bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, 290bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
@@ -357,10 +342,6 @@ bfa_fcpim_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
357static void 342static void
358bfa_fcpim_detach(struct bfa_s *bfa) 343bfa_fcpim_detach(struct bfa_s *bfa)
359{ 344{
360 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
361
362 bfa_ioim_detach(fcpim);
363 bfa_tskim_detach(fcpim);
364} 345}
365 346
366static void 347static void
@@ -387,56 +368,6 @@ bfa_fcpim_iocdisable(struct bfa_s *bfa)
387} 368}
388 369
389void 370void
390bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
391 struct bfa_itnim_iostats_s *rstats)
392{
393 bfa_fcpim_add_iostats(lstats, rstats, total_ios);
394 bfa_fcpim_add_iostats(lstats, rstats, qresumes);
395 bfa_fcpim_add_iostats(lstats, rstats, no_iotags);
396 bfa_fcpim_add_iostats(lstats, rstats, io_aborts);
397 bfa_fcpim_add_iostats(lstats, rstats, no_tskims);
398 bfa_fcpim_add_iostats(lstats, rstats, iocomp_ok);
399 bfa_fcpim_add_iostats(lstats, rstats, iocomp_underrun);
400 bfa_fcpim_add_iostats(lstats, rstats, iocomp_overrun);
401 bfa_fcpim_add_iostats(lstats, rstats, iocomp_aborted);
402 bfa_fcpim_add_iostats(lstats, rstats, iocomp_timedout);
403 bfa_fcpim_add_iostats(lstats, rstats, iocom_nexus_abort);
404 bfa_fcpim_add_iostats(lstats, rstats, iocom_proto_err);
405 bfa_fcpim_add_iostats(lstats, rstats, iocom_dif_err);
406 bfa_fcpim_add_iostats(lstats, rstats, iocom_sqer_needed);
407 bfa_fcpim_add_iostats(lstats, rstats, iocom_res_free);
408 bfa_fcpim_add_iostats(lstats, rstats, iocom_hostabrts);
409 bfa_fcpim_add_iostats(lstats, rstats, iocom_utags);
410 bfa_fcpim_add_iostats(lstats, rstats, io_cleanups);
411 bfa_fcpim_add_iostats(lstats, rstats, io_tmaborts);
412 bfa_fcpim_add_iostats(lstats, rstats, onlines);
413 bfa_fcpim_add_iostats(lstats, rstats, offlines);
414 bfa_fcpim_add_iostats(lstats, rstats, creates);
415 bfa_fcpim_add_iostats(lstats, rstats, deletes);
416 bfa_fcpim_add_iostats(lstats, rstats, create_comps);
417 bfa_fcpim_add_iostats(lstats, rstats, delete_comps);
418 bfa_fcpim_add_iostats(lstats, rstats, sler_events);
419 bfa_fcpim_add_iostats(lstats, rstats, fw_create);
420 bfa_fcpim_add_iostats(lstats, rstats, fw_delete);
421 bfa_fcpim_add_iostats(lstats, rstats, ioc_disabled);
422 bfa_fcpim_add_iostats(lstats, rstats, cleanup_comps);
423 bfa_fcpim_add_iostats(lstats, rstats, tm_cmnds);
424 bfa_fcpim_add_iostats(lstats, rstats, tm_fw_rsps);
425 bfa_fcpim_add_iostats(lstats, rstats, tm_success);
426 bfa_fcpim_add_iostats(lstats, rstats, tm_failures);
427 bfa_fcpim_add_iostats(lstats, rstats, tm_io_comps);
428 bfa_fcpim_add_iostats(lstats, rstats, tm_qresumes);
429 bfa_fcpim_add_iostats(lstats, rstats, tm_iocdowns);
430 bfa_fcpim_add_iostats(lstats, rstats, tm_cleanups);
431 bfa_fcpim_add_iostats(lstats, rstats, tm_cleanup_comps);
432 bfa_fcpim_add_iostats(lstats, rstats, io_comps);
433 bfa_fcpim_add_iostats(lstats, rstats, input_reqs);
434 bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
435 bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
436 bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
437}
438
439void
440bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov) 371bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov)
441{ 372{
442 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); 373 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
@@ -454,128 +385,6 @@ bfa_fcpim_path_tov_get(struct bfa_s *bfa)
454 return fcpim->path_tov / 1000; 385 return fcpim->path_tov / 1000;
455} 386}
456 387
457bfa_status_t
458bfa_fcpim_port_iostats(struct bfa_s *bfa, struct bfa_itnim_iostats_s *stats,
459 u8 lp_tag)
460{
461 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
462 struct list_head *qe, *qen;
463 struct bfa_itnim_s *itnim;
464
465 /* accumulate IO stats from itnim */
466 memset(stats, 0, sizeof(struct bfa_itnim_iostats_s));
467 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
468 itnim = (struct bfa_itnim_s *) qe;
469 if (itnim->rport->rport_info.lp_tag != lp_tag)
470 continue;
471 bfa_fcpim_add_stats(stats, &(itnim->stats));
472 }
473 return BFA_STATUS_OK;
474}
475bfa_status_t
476bfa_fcpim_get_modstats(struct bfa_s *bfa, struct bfa_itnim_iostats_s *modstats)
477{
478 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
479 struct list_head *qe, *qen;
480 struct bfa_itnim_s *itnim;
481
482 /* accumulate IO stats from itnim */
483 memset(modstats, 0, sizeof(struct bfa_itnim_iostats_s));
484 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
485 itnim = (struct bfa_itnim_s *) qe;
486 bfa_fcpim_add_stats(modstats, &(itnim->stats));
487 }
488 return BFA_STATUS_OK;
489}
490
491bfa_status_t
492bfa_fcpim_get_del_itn_stats(struct bfa_s *bfa,
493 struct bfa_fcpim_del_itn_stats_s *modstats)
494{
495 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
496
497 *modstats = fcpim->del_itn_stats;
498
499 return BFA_STATUS_OK;
500}
501
502
503bfa_status_t
504bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time)
505{
506 struct bfa_itnim_s *itnim;
507 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
508 struct list_head *qe, *qen;
509
510 /* accumulate IO stats from itnim */
511 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
512 itnim = (struct bfa_itnim_s *) qe;
513 bfa_itnim_clear_stats(itnim);
514 }
515 fcpim->io_profile = BFA_TRUE;
516 fcpim->io_profile_start_time = time;
517 fcpim->profile_comp = bfa_ioim_profile_comp;
518 fcpim->profile_start = bfa_ioim_profile_start;
519
520 return BFA_STATUS_OK;
521}
522bfa_status_t
523bfa_fcpim_profile_off(struct bfa_s *bfa)
524{
525 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
526 fcpim->io_profile = BFA_FALSE;
527 fcpim->io_profile_start_time = 0;
528 fcpim->profile_comp = NULL;
529 fcpim->profile_start = NULL;
530 return BFA_STATUS_OK;
531}
532
533bfa_status_t
534bfa_fcpim_port_clear_iostats(struct bfa_s *bfa, u8 lp_tag)
535{
536 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
537 struct list_head *qe, *qen;
538 struct bfa_itnim_s *itnim;
539
540 /* clear IO stats from all active itnims */
541 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
542 itnim = (struct bfa_itnim_s *) qe;
543 if (itnim->rport->rport_info.lp_tag != lp_tag)
544 continue;
545 bfa_itnim_clear_stats(itnim);
546 }
547 return BFA_STATUS_OK;
548
549}
550
551bfa_status_t
552bfa_fcpim_clr_modstats(struct bfa_s *bfa)
553{
554 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
555 struct list_head *qe, *qen;
556 struct bfa_itnim_s *itnim;
557
558 /* clear IO stats from all active itnims */
559 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
560 itnim = (struct bfa_itnim_s *) qe;
561 bfa_itnim_clear_stats(itnim);
562 }
563 memset(&fcpim->del_itn_stats, 0,
564 sizeof(struct bfa_fcpim_del_itn_stats_s));
565
566 return BFA_STATUS_OK;
567}
568
569void
570bfa_fcpim_qdepth_set(struct bfa_s *bfa, u16 q_depth)
571{
572 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
573
574 bfa_assert(q_depth <= BFA_IOCFC_QDEPTH_MAX);
575
576 fcpim->q_depth = q_depth;
577}
578
579u16 388u16
580bfa_fcpim_qdepth_get(struct bfa_s *bfa) 389bfa_fcpim_qdepth_get(struct bfa_s *bfa)
581{ 390{
@@ -584,32 +393,12 @@ bfa_fcpim_qdepth_get(struct bfa_s *bfa)
584 return fcpim->q_depth; 393 return fcpim->q_depth;
585} 394}
586 395
587void
588bfa_fcpim_update_ioredirect(struct bfa_s *bfa)
589{
590 bfa_boolean_t ioredirect;
591
592 /*
593 * IO redirection is turned off when QoS is enabled and vice versa
594 */
595 ioredirect = bfa_fcport_is_qos_enabled(bfa) ? BFA_FALSE : BFA_TRUE;
596}
597
598void
599bfa_fcpim_set_ioredirect(struct bfa_s *bfa, bfa_boolean_t state)
600{
601 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
602 fcpim->ioredirect = state;
603}
604
605
606
607/* 396/*
608 * BFA ITNIM module state machine functions 397 * BFA ITNIM module state machine functions
609 */ 398 */
610 399
611/* 400/*
612 * Beginning/unallocated state - no events expected. 401 * Beginning/unallocated state - no events expected.
613 */ 402 */
614static void 403static void
615bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) 404bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
@@ -630,7 +419,7 @@ bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
630} 419}
631 420
632/* 421/*
633 * Beginning state, only online event expected. 422 * Beginning state, only online event expected.
634 */ 423 */
635static void 424static void
636bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) 425bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
@@ -733,7 +522,7 @@ bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
733} 522}
734 523
735/* 524/*
736 * Waiting for itnim create response from firmware, a delete is pending. 525 * Waiting for itnim create response from firmware, a delete is pending.
737 */ 526 */
738static void 527static void
739bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim, 528bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
@@ -761,7 +550,7 @@ bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
761} 550}
762 551
763/* 552/*
764 * Online state - normal parking state. 553 * Online state - normal parking state.
765 */ 554 */
766static void 555static void
767bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) 556bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
@@ -803,7 +592,7 @@ bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
803} 592}
804 593
805/* 594/*
806 * Second level error recovery need. 595 * Second level error recovery need.
807 */ 596 */
808static void 597static void
809bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) 598bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
@@ -834,7 +623,7 @@ bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
834} 623}
835 624
836/* 625/*
837 * Going offline. Waiting for active IO cleanup. 626 * Going offline. Waiting for active IO cleanup.
838 */ 627 */
839static void 628static void
840bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim, 629bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
@@ -871,7 +660,7 @@ bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
871} 660}
872 661
873/* 662/*
874 * Deleting itnim. Waiting for active IO cleanup. 663 * Deleting itnim. Waiting for active IO cleanup.
875 */ 664 */
876static void 665static void
877bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim, 666bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
@@ -956,7 +745,7 @@ bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
956} 745}
957 746
958/* 747/*
959 * Offline state. 748 * Offline state.
960 */ 749 */
961static void 750static void
962bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) 751bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
@@ -987,9 +776,6 @@ bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
987 } 776 }
988} 777}
989 778
990/*
991 * IOC h/w failed state.
992 */
993static void 779static void
994bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim, 780bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
995 enum bfa_itnim_event event) 781 enum bfa_itnim_event event)
@@ -1024,7 +810,7 @@ bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
1024} 810}
1025 811
1026/* 812/*
1027 * Itnim is deleted, waiting for firmware response to delete. 813 * Itnim is deleted, waiting for firmware response to delete.
1028 */ 814 */
1029static void 815static void
1030bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) 816bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
@@ -1069,7 +855,7 @@ bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
1069} 855}
1070 856
1071/* 857/*
1072 * Initiate cleanup of all IOs on an IOC failure. 858 * Initiate cleanup of all IOs on an IOC failure.
1073 */ 859 */
1074static void 860static void
1075bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim) 861bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
@@ -1103,7 +889,7 @@ bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
1103} 889}
1104 890
1105/* 891/*
1106 * IO cleanup completion 892 * IO cleanup completion
1107 */ 893 */
1108static void 894static void
1109bfa_itnim_cleanp_comp(void *itnim_cbarg) 895bfa_itnim_cleanp_comp(void *itnim_cbarg)
@@ -1115,7 +901,7 @@ bfa_itnim_cleanp_comp(void *itnim_cbarg)
1115} 901}
1116 902
1117/* 903/*
1118 * Initiate cleanup of all IOs. 904 * Initiate cleanup of all IOs.
1119 */ 905 */
1120static void 906static void
1121bfa_itnim_cleanup(struct bfa_itnim_s *itnim) 907bfa_itnim_cleanup(struct bfa_itnim_s *itnim)
@@ -1187,9 +973,6 @@ bfa_itnim_qresume(void *cbarg)
1187 bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME); 973 bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME);
1188} 974}
1189 975
1190
1191
1192
1193/* 976/*
1194 * bfa_itnim_public 977 * bfa_itnim_public
1195 */ 978 */
@@ -1401,7 +1184,7 @@ bfa_itnim_iotov_start(struct bfa_itnim_s *itnim)
1401 if (itnim->fcpim->path_tov > 0) { 1184 if (itnim->fcpim->path_tov > 0) {
1402 1185
1403 itnim->iotov_active = BFA_TRUE; 1186 itnim->iotov_active = BFA_TRUE;
1404 bfa_assert(bfa_itnim_hold_io(itnim)); 1187 WARN_ON(!bfa_itnim_hold_io(itnim));
1405 bfa_timer_start(itnim->bfa, &itnim->timer, 1188 bfa_timer_start(itnim->bfa, &itnim->timer,
1406 bfa_itnim_iotov, itnim, itnim->fcpim->path_tov); 1189 bfa_itnim_iotov, itnim, itnim->fcpim->path_tov);
1407 } 1190 }
@@ -1457,14 +1240,12 @@ bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim)
1457 fcpim->del_itn_stats.del_tm_iocdowns += itnim->stats.tm_iocdowns; 1240 fcpim->del_itn_stats.del_tm_iocdowns += itnim->stats.tm_iocdowns;
1458} 1241}
1459 1242
1460
1461
1462/* 1243/*
1463 * bfa_itnim_public 1244 * bfa_itnim_public
1464 */ 1245 */
1465 1246
1466/* 1247/*
1467 * Itnim interrupt processing. 1248 * Itnim interrupt processing.
1468 */ 1249 */
1469void 1250void
1470bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) 1251bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
@@ -1481,7 +1262,7 @@ bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
1481 case BFI_ITNIM_I2H_CREATE_RSP: 1262 case BFI_ITNIM_I2H_CREATE_RSP:
1482 itnim = BFA_ITNIM_FROM_TAG(fcpim, 1263 itnim = BFA_ITNIM_FROM_TAG(fcpim,
1483 msg.create_rsp->bfa_handle); 1264 msg.create_rsp->bfa_handle);
1484 bfa_assert(msg.create_rsp->status == BFA_STATUS_OK); 1265 WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
1485 bfa_stats(itnim, create_comps); 1266 bfa_stats(itnim, create_comps);
1486 bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP); 1267 bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
1487 break; 1268 break;
@@ -1489,7 +1270,7 @@ bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
1489 case BFI_ITNIM_I2H_DELETE_RSP: 1270 case BFI_ITNIM_I2H_DELETE_RSP:
1490 itnim = BFA_ITNIM_FROM_TAG(fcpim, 1271 itnim = BFA_ITNIM_FROM_TAG(fcpim,
1491 msg.delete_rsp->bfa_handle); 1272 msg.delete_rsp->bfa_handle);
1492 bfa_assert(msg.delete_rsp->status == BFA_STATUS_OK); 1273 WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
1493 bfa_stats(itnim, delete_comps); 1274 bfa_stats(itnim, delete_comps);
1494 bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP); 1275 bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
1495 break; 1276 break;
@@ -1503,14 +1284,12 @@ bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
1503 1284
1504 default: 1285 default:
1505 bfa_trc(bfa, m->mhdr.msg_id); 1286 bfa_trc(bfa, m->mhdr.msg_id);
1506 bfa_assert(0); 1287 WARN_ON(1);
1507 } 1288 }
1508} 1289}
1509 1290
1510
1511
1512/* 1291/*
1513 * bfa_itnim_api 1292 * bfa_itnim_api
1514 */ 1293 */
1515 1294
1516struct bfa_itnim_s * 1295struct bfa_itnim_s *
@@ -1520,7 +1299,7 @@ bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn)
1520 struct bfa_itnim_s *itnim; 1299 struct bfa_itnim_s *itnim;
1521 1300
1522 itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag); 1301 itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag);
1523 bfa_assert(itnim->rport == rport); 1302 WARN_ON(itnim->rport != rport);
1524 1303
1525 itnim->ditn = ditn; 1304 itnim->ditn = ditn;
1526 1305
@@ -1568,31 +1347,6 @@ bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
1568 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable)); 1347 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable));
1569} 1348}
1570 1349
1571bfa_status_t
1572bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
1573 struct bfa_itnim_ioprofile_s *ioprofile)
1574{
1575 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(itnim->bfa);
1576 if (!fcpim->io_profile)
1577 return BFA_STATUS_IOPROFILE_OFF;
1578
1579 itnim->ioprofile.index = BFA_IOBUCKET_MAX;
1580 itnim->ioprofile.io_profile_start_time =
1581 bfa_io_profile_start_time(itnim->bfa);
1582 itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul;
1583 itnim->ioprofile.clock_res_div = bfa_io_lat_clock_res_div;
1584 *ioprofile = itnim->ioprofile;
1585
1586 return BFA_STATUS_OK;
1587}
1588
1589void
1590bfa_itnim_get_stats(struct bfa_itnim_s *itnim,
1591 struct bfa_itnim_iostats_s *stats)
1592{
1593 *stats = itnim->stats;
1594}
1595
1596void 1350void
1597bfa_itnim_clear_stats(struct bfa_itnim_s *itnim) 1351bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
1598{ 1352{
@@ -1608,14 +1362,11 @@ bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
1608 */ 1362 */
1609 1363
1610/* 1364/*
1611 * IO is not started (unallocated). 1365 * IO is not started (unallocated).
1612 */ 1366 */
1613static void 1367static void
1614bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) 1368bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1615{ 1369{
1616 bfa_trc_fp(ioim->bfa, ioim->iotag);
1617 bfa_trc_fp(ioim->bfa, event);
1618
1619 switch (event) { 1370 switch (event) {
1620 case BFA_IOIM_SM_START: 1371 case BFA_IOIM_SM_START:
1621 if (!bfa_itnim_is_online(ioim->itnim)) { 1372 if (!bfa_itnim_is_online(ioim->itnim)) {
@@ -1635,7 +1386,7 @@ bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1635 } 1386 }
1636 1387
1637 if (ioim->nsges > BFI_SGE_INLINE) { 1388 if (ioim->nsges > BFI_SGE_INLINE) {
1638 if (!bfa_ioim_sge_setup(ioim)) { 1389 if (!bfa_ioim_sgpg_alloc(ioim)) {
1639 bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc); 1390 bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
1640 return; 1391 return;
1641 } 1392 }
@@ -1662,7 +1413,7 @@ bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1662 * requests immediately. 1413 * requests immediately.
1663 */ 1414 */
1664 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); 1415 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1665 bfa_assert(bfa_q_is_on_q(&ioim->itnim->pending_q, ioim)); 1416 WARN_ON(!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
1666 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, 1417 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1667 __bfa_cb_ioim_abort, ioim); 1418 __bfa_cb_ioim_abort, ioim);
1668 break; 1419 break;
@@ -1673,7 +1424,7 @@ bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1673} 1424}
1674 1425
1675/* 1426/*
1676 * IO is waiting for SG pages. 1427 * IO is waiting for SG pages.
1677 */ 1428 */
1678static void 1429static void
1679bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) 1430bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
@@ -1720,14 +1471,11 @@ bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1720} 1471}
1721 1472
1722/* 1473/*
1723 * IO is active. 1474 * IO is active.
1724 */ 1475 */
1725static void 1476static void
1726bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) 1477bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1727{ 1478{
1728 bfa_trc_fp(ioim->bfa, ioim->iotag);
1729 bfa_trc_fp(ioim->bfa, event);
1730
1731 switch (event) { 1479 switch (event) {
1732 case BFA_IOIM_SM_COMP_GOOD: 1480 case BFA_IOIM_SM_COMP_GOOD:
1733 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); 1481 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
@@ -1786,8 +1534,8 @@ bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1786 break; 1534 break;
1787 1535
1788 case BFA_IOIM_SM_SQRETRY: 1536 case BFA_IOIM_SM_SQRETRY:
1789 if (bfa_ioim_get_iotag(ioim) != BFA_TRUE) { 1537 if (bfa_ioim_maxretry_reached(ioim)) {
1790 /* max retry completed free IO */ 1538 /* max retry reached, free IO */
1791 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); 1539 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1792 bfa_ioim_move_to_comp_q(ioim); 1540 bfa_ioim_move_to_comp_q(ioim);
1793 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, 1541 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
@@ -1804,17 +1552,15 @@ bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1804} 1552}
1805 1553
1806/* 1554/*
1807* IO is retried with new tag. 1555 * IO is retried with new tag.
1808*/ 1556 */
1809static void 1557static void
1810bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) 1558bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1811{ 1559{
1812 bfa_trc_fp(ioim->bfa, ioim->iotag);
1813 bfa_trc_fp(ioim->bfa, event);
1814
1815 switch (event) { 1560 switch (event) {
1816 case BFA_IOIM_SM_FREE: 1561 case BFA_IOIM_SM_FREE:
1817 /* abts and rrq done. Now retry the IO with new tag */ 1562 /* abts and rrq done. Now retry the IO with new tag */
1563 bfa_ioim_update_iotag(ioim);
1818 if (!bfa_ioim_send_ioreq(ioim)) { 1564 if (!bfa_ioim_send_ioreq(ioim)) {
1819 bfa_sm_set_state(ioim, bfa_ioim_sm_qfull); 1565 bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
1820 break; 1566 break;
@@ -1858,7 +1604,7 @@ bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1858} 1604}
1859 1605
1860/* 1606/*
1861 * IO is being aborted, waiting for completion from firmware. 1607 * IO is being aborted, waiting for completion from firmware.
1862 */ 1608 */
1863static void 1609static void
1864bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) 1610bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
@@ -1894,7 +1640,7 @@ bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1894 break; 1640 break;
1895 1641
1896 case BFA_IOIM_SM_CLEANUP: 1642 case BFA_IOIM_SM_CLEANUP:
1897 bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE); 1643 WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
1898 ioim->iosp->abort_explicit = BFA_FALSE; 1644 ioim->iosp->abort_explicit = BFA_FALSE;
1899 1645
1900 if (bfa_ioim_send_abort(ioim)) 1646 if (bfa_ioim_send_abort(ioim))
@@ -1981,7 +1727,7 @@ bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1981} 1727}
1982 1728
1983/* 1729/*
1984 * IO is waiting for room in request CQ 1730 * IO is waiting for room in request CQ
1985 */ 1731 */
1986static void 1732static void
1987bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) 1733bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
@@ -2025,7 +1771,7 @@ bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2025} 1771}
2026 1772
2027/* 1773/*
2028 * Active IO is being aborted, waiting for room in request CQ. 1774 * Active IO is being aborted, waiting for room in request CQ.
2029 */ 1775 */
2030static void 1776static void
2031bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) 1777bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
@@ -2040,7 +1786,7 @@ bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2040 break; 1786 break;
2041 1787
2042 case BFA_IOIM_SM_CLEANUP: 1788 case BFA_IOIM_SM_CLEANUP:
2043 bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE); 1789 WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
2044 ioim->iosp->abort_explicit = BFA_FALSE; 1790 ioim->iosp->abort_explicit = BFA_FALSE;
2045 bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull); 1791 bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
2046 break; 1792 break;
@@ -2076,7 +1822,7 @@ bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2076} 1822}
2077 1823
2078/* 1824/*
2079 * Active IO is being cleaned up, waiting for room in request CQ. 1825 * Active IO is being cleaned up, waiting for room in request CQ.
2080 */ 1826 */
2081static void 1827static void
2082bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) 1828bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
@@ -2131,9 +1877,6 @@ bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2131static void 1877static void
2132bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) 1878bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2133{ 1879{
2134 bfa_trc_fp(ioim->bfa, ioim->iotag);
2135 bfa_trc_fp(ioim->bfa, event);
2136
2137 switch (event) { 1880 switch (event) {
2138 case BFA_IOIM_SM_HCB: 1881 case BFA_IOIM_SM_HCB:
2139 bfa_sm_set_state(ioim, bfa_ioim_sm_uninit); 1882 bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
@@ -2213,11 +1956,6 @@ bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2213} 1956}
2214 1957
2215 1958
2216
2217/*
2218 * hal_ioim_private
2219 */
2220
2221static void 1959static void
2222__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete) 1960__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
2223{ 1961{
@@ -2323,7 +2061,7 @@ bfa_ioim_sgpg_alloced(void *cbarg)
2323 2061
2324 ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges); 2062 ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
2325 list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q); 2063 list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
2326 bfa_ioim_sgpg_setup(ioim); 2064 ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
2327 bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED); 2065 bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
2328} 2066}
2329 2067
@@ -2335,13 +2073,16 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
2335{ 2073{
2336 struct bfa_itnim_s *itnim = ioim->itnim; 2074 struct bfa_itnim_s *itnim = ioim->itnim;
2337 struct bfi_ioim_req_s *m; 2075 struct bfi_ioim_req_s *m;
2338 static struct fcp_cmnd_s cmnd_z0 = { 0 }; 2076 static struct fcp_cmnd_s cmnd_z0 = { { { 0 } } };
2339 struct bfi_sge_s *sge; 2077 struct bfi_sge_s *sge, *sgpge;
2340 u32 pgdlen = 0; 2078 u32 pgdlen = 0;
2341 u32 fcp_dl; 2079 u32 fcp_dl;
2342 u64 addr; 2080 u64 addr;
2343 struct scatterlist *sg; 2081 struct scatterlist *sg;
2082 struct bfa_sgpg_s *sgpg;
2344 struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio; 2083 struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
2084 u32 i, sge_id, pgcumsz;
2085 enum dma_data_direction dmadir;
2345 2086
2346 /* 2087 /*
2347 * check for room in queue to send request now 2088 * check for room in queue to send request now
@@ -2359,22 +2100,61 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
2359 */ 2100 */
2360 m->io_tag = cpu_to_be16(ioim->iotag); 2101 m->io_tag = cpu_to_be16(ioim->iotag);
2361 m->rport_hdl = ioim->itnim->rport->fw_handle; 2102 m->rport_hdl = ioim->itnim->rport->fw_handle;
2362 m->io_timeout = bfa_cb_ioim_get_timeout(ioim->dio); 2103 m->io_timeout = 0;
2363 2104
2364 /*
2365 * build inline IO SG element here
2366 */
2367 sge = &m->sges[0]; 2105 sge = &m->sges[0];
2368 if (ioim->nsges) { 2106 sgpg = ioim->sgpg;
2369 sg = (struct scatterlist *)scsi_sglist(cmnd); 2107 sge_id = 0;
2370 addr = bfa_os_sgaddr(sg_dma_address(sg)); 2108 sgpge = NULL;
2371 sge->sga = *(union bfi_addr_u *) &addr; 2109 pgcumsz = 0;
2372 pgdlen = sg_dma_len(sg); 2110 scsi_for_each_sg(cmnd, sg, ioim->nsges, i) {
2373 sge->sg_len = pgdlen; 2111 if (i == 0) {
2374 sge->flags = (ioim->nsges > BFI_SGE_INLINE) ? 2112 /* build inline IO SG element */
2113 addr = bfa_sgaddr_le(sg_dma_address(sg));
2114 sge->sga = *(union bfi_addr_u *) &addr;
2115 pgdlen = sg_dma_len(sg);
2116 sge->sg_len = pgdlen;
2117 sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
2375 BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST; 2118 BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
2376 bfa_sge_to_be(sge); 2119 bfa_sge_to_be(sge);
2377 sge++; 2120 sge++;
2121 } else {
2122 if (sge_id == 0)
2123 sgpge = sgpg->sgpg->sges;
2124
2125 addr = bfa_sgaddr_le(sg_dma_address(sg));
2126 sgpge->sga = *(union bfi_addr_u *) &addr;
2127 sgpge->sg_len = sg_dma_len(sg);
2128 pgcumsz += sgpge->sg_len;
2129
2130 /* set flags */
2131 if (i < (ioim->nsges - 1) &&
2132 sge_id < (BFI_SGPG_DATA_SGES - 1))
2133 sgpge->flags = BFI_SGE_DATA;
2134 else if (i < (ioim->nsges - 1))
2135 sgpge->flags = BFI_SGE_DATA_CPL;
2136 else
2137 sgpge->flags = BFI_SGE_DATA_LAST;
2138
2139 bfa_sge_to_le(sgpge);
2140
2141 sgpge++;
2142 if (i == (ioim->nsges - 1)) {
2143 sgpge->flags = BFI_SGE_PGDLEN;
2144 sgpge->sga.a32.addr_lo = 0;
2145 sgpge->sga.a32.addr_hi = 0;
2146 sgpge->sg_len = pgcumsz;
2147 bfa_sge_to_le(sgpge);
2148 } else if (++sge_id == BFI_SGPG_DATA_SGES) {
2149 sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);
2150 sgpge->flags = BFI_SGE_LINK;
2151 sgpge->sga = sgpg->sgpg_pa;
2152 sgpge->sg_len = pgcumsz;
2153 bfa_sge_to_le(sgpge);
2154 sge_id = 0;
2155 pgcumsz = 0;
2156 }
2157 }
2378 } 2158 }
2379 2159
2380 if (ioim->nsges > BFI_SGE_INLINE) { 2160 if (ioim->nsges > BFI_SGE_INLINE) {
@@ -2391,10 +2171,17 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
2391 * set up I/O command parameters 2171 * set up I/O command parameters
2392 */ 2172 */
2393 m->cmnd = cmnd_z0; 2173 m->cmnd = cmnd_z0;
2394 m->cmnd.lun = bfa_cb_ioim_get_lun(ioim->dio); 2174 int_to_scsilun(cmnd->device->lun, &m->cmnd.lun);
2395 m->cmnd.iodir = bfa_cb_ioim_get_iodir(ioim->dio); 2175 dmadir = cmnd->sc_data_direction;
2396 m->cmnd.cdb = *(scsi_cdb_t *)bfa_cb_ioim_get_cdb(ioim->dio); 2176 if (dmadir == DMA_TO_DEVICE)
2397 fcp_dl = bfa_cb_ioim_get_size(ioim->dio); 2177 m->cmnd.iodir = FCP_IODIR_WRITE;
2178 else if (dmadir == DMA_FROM_DEVICE)
2179 m->cmnd.iodir = FCP_IODIR_READ;
2180 else
2181 m->cmnd.iodir = FCP_IODIR_NONE;
2182
2183 m->cmnd.cdb = *(struct scsi_cdb_s *) cmnd->cmnd;
2184 fcp_dl = scsi_bufflen(cmnd);
2398 m->cmnd.fcp_dl = cpu_to_be32(fcp_dl); 2185 m->cmnd.fcp_dl = cpu_to_be32(fcp_dl);
2399 2186
2400 /* 2187 /*
@@ -2418,28 +2205,9 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
2418 bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa)); 2205 bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
2419 } 2206 }
2420 if (itnim->seq_rec || 2207 if (itnim->seq_rec ||
2421 (bfa_cb_ioim_get_size(ioim->dio) & (sizeof(u32) - 1))) 2208 (scsi_bufflen(cmnd) & (sizeof(u32) - 1)))
2422 bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa)); 2209 bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
2423 2210
2424#ifdef IOIM_ADVANCED
2425 m->cmnd.crn = bfa_cb_ioim_get_crn(ioim->dio);
2426 m->cmnd.priority = bfa_cb_ioim_get_priority(ioim->dio);
2427 m->cmnd.taskattr = bfa_cb_ioim_get_taskattr(ioim->dio);
2428
2429 /*
2430 * Handle large CDB (>16 bytes).
2431 */
2432 m->cmnd.addl_cdb_len = (bfa_cb_ioim_get_cdblen(ioim->dio) -
2433 FCP_CMND_CDB_LEN) / sizeof(u32);
2434 if (m->cmnd.addl_cdb_len) {
2435 memcpy(&m->cmnd.cdb + 1, (scsi_cdb_t *)
2436 bfa_cb_ioim_get_cdb(ioim->dio) + 1,
2437 m->cmnd.addl_cdb_len * sizeof(u32));
2438 fcp_cmnd_fcpdl(&m->cmnd) =
2439 cpu_to_be32(bfa_cb_ioim_get_size(ioim->dio));
2440 }
2441#endif
2442
2443 /* 2211 /*
2444 * queue I/O message to firmware 2212 * queue I/O message to firmware
2445 */ 2213 */
@@ -2452,11 +2220,11 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
2452 * at queuing time. 2220 * at queuing time.
2453 */ 2221 */
2454static bfa_boolean_t 2222static bfa_boolean_t
2455bfa_ioim_sge_setup(struct bfa_ioim_s *ioim) 2223bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim)
2456{ 2224{
2457 u16 nsgpgs; 2225 u16 nsgpgs;
2458 2226
2459 bfa_assert(ioim->nsges > BFI_SGE_INLINE); 2227 WARN_ON(ioim->nsges <= BFI_SGE_INLINE);
2460 2228
2461 /* 2229 /*
2462 * allocate SG pages needed 2230 * allocate SG pages needed
@@ -2472,73 +2240,11 @@ bfa_ioim_sge_setup(struct bfa_ioim_s *ioim)
2472 } 2240 }
2473 2241
2474 ioim->nsgpgs = nsgpgs; 2242 ioim->nsgpgs = nsgpgs;
2475 bfa_ioim_sgpg_setup(ioim); 2243 ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
2476 2244
2477 return BFA_TRUE; 2245 return BFA_TRUE;
2478} 2246}
2479 2247
2480static void
2481bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim)
2482{
2483 int sgeid, nsges, i;
2484 struct bfi_sge_s *sge;
2485 struct bfa_sgpg_s *sgpg;
2486 u32 pgcumsz;
2487 u64 addr;
2488 struct scatterlist *sg;
2489 struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
2490
2491 sgeid = BFI_SGE_INLINE;
2492 ioim->sgpg = sgpg = bfa_q_first(&ioim->sgpg_q);
2493
2494 sg = scsi_sglist(cmnd);
2495 sg = sg_next(sg);
2496
2497 do {
2498 sge = sgpg->sgpg->sges;
2499 nsges = ioim->nsges - sgeid;
2500 if (nsges > BFI_SGPG_DATA_SGES)
2501 nsges = BFI_SGPG_DATA_SGES;
2502
2503 pgcumsz = 0;
2504 for (i = 0; i < nsges; i++, sge++, sgeid++, sg = sg_next(sg)) {
2505 addr = bfa_os_sgaddr(sg_dma_address(sg));
2506 sge->sga = *(union bfi_addr_u *) &addr;
2507 sge->sg_len = sg_dma_len(sg);
2508 pgcumsz += sge->sg_len;
2509
2510 /*
2511 * set flags
2512 */
2513 if (i < (nsges - 1))
2514 sge->flags = BFI_SGE_DATA;
2515 else if (sgeid < (ioim->nsges - 1))
2516 sge->flags = BFI_SGE_DATA_CPL;
2517 else
2518 sge->flags = BFI_SGE_DATA_LAST;
2519
2520 bfa_sge_to_le(sge);
2521 }
2522
2523 sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);
2524
2525 /*
2526 * set the link element of each page
2527 */
2528 if (sgeid == ioim->nsges) {
2529 sge->flags = BFI_SGE_PGDLEN;
2530 sge->sga.a32.addr_lo = 0;
2531 sge->sga.a32.addr_hi = 0;
2532 } else {
2533 sge->flags = BFI_SGE_LINK;
2534 sge->sga = sgpg->sgpg_pa;
2535 }
2536 sge->sg_len = pgcumsz;
2537
2538 bfa_sge_to_le(sge);
2539 } while (sgeid < ioim->nsges);
2540}
2541
2542/* 2248/*
2543 * Send I/O abort request to firmware. 2249 * Send I/O abort request to firmware.
2544 */ 2250 */
@@ -2605,7 +2311,7 @@ bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
2605 } 2311 }
2606 bfa_itnim_iodone(ioim->itnim); 2312 bfa_itnim_iodone(ioim->itnim);
2607 } else 2313 } else
2608 bfa_tskim_iodone(ioim->iosp->tskim); 2314 bfa_wc_down(&ioim->iosp->tskim->wc);
2609} 2315}
2610 2316
2611static bfa_boolean_t 2317static bfa_boolean_t
@@ -2623,9 +2329,6 @@ bfa_ioim_is_abortable(struct bfa_ioim_s *ioim)
2623 return BFA_TRUE; 2329 return BFA_TRUE;
2624} 2330}
2625 2331
2626/*
2627 * or after the link comes back.
2628 */
2629void 2332void
2630bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov) 2333bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
2631{ 2334{
@@ -2653,11 +2356,6 @@ bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
2653} 2356}
2654 2357
2655 2358
2656
2657/*
2658 * hal_ioim_friend
2659 */
2660
2661/* 2359/*
2662 * Memory allocation and initialization. 2360 * Memory allocation and initialization.
2663 */ 2361 */
@@ -2722,14 +2420,6 @@ bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
2722 } 2420 }
2723} 2421}
2724 2422
2725/*
2726 * Driver detach time call.
2727 */
2728void
2729bfa_ioim_detach(struct bfa_fcpim_mod_s *fcpim)
2730{
2731}
2732
2733void 2423void
2734bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) 2424bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2735{ 2425{
@@ -2742,7 +2432,7 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2742 iotag = be16_to_cpu(rsp->io_tag); 2432 iotag = be16_to_cpu(rsp->io_tag);
2743 2433
2744 ioim = BFA_IOIM_FROM_TAG(fcpim, iotag); 2434 ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
2745 bfa_assert(ioim->iotag == iotag); 2435 WARN_ON(ioim->iotag != iotag);
2746 2436
2747 bfa_trc(ioim->bfa, ioim->iotag); 2437 bfa_trc(ioim->bfa, ioim->iotag);
2748 bfa_trc(ioim->bfa, rsp->io_status); 2438 bfa_trc(ioim->bfa, rsp->io_status);
@@ -2773,13 +2463,13 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2773 2463
2774 case BFI_IOIM_STS_PROTO_ERR: 2464 case BFI_IOIM_STS_PROTO_ERR:
2775 bfa_stats(ioim->itnim, iocom_proto_err); 2465 bfa_stats(ioim->itnim, iocom_proto_err);
2776 bfa_assert(rsp->reuse_io_tag); 2466 WARN_ON(!rsp->reuse_io_tag);
2777 evt = BFA_IOIM_SM_COMP; 2467 evt = BFA_IOIM_SM_COMP;
2778 break; 2468 break;
2779 2469
2780 case BFI_IOIM_STS_SQER_NEEDED: 2470 case BFI_IOIM_STS_SQER_NEEDED:
2781 bfa_stats(ioim->itnim, iocom_sqer_needed); 2471 bfa_stats(ioim->itnim, iocom_sqer_needed);
2782 bfa_assert(rsp->reuse_io_tag == 0); 2472 WARN_ON(rsp->reuse_io_tag != 0);
2783 evt = BFA_IOIM_SM_SQRETRY; 2473 evt = BFA_IOIM_SM_SQRETRY;
2784 break; 2474 break;
2785 2475
@@ -2808,7 +2498,7 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2808 break; 2498 break;
2809 2499
2810 default: 2500 default:
2811 bfa_assert(0); 2501 WARN_ON(1);
2812 } 2502 }
2813 2503
2814 bfa_sm_send_event(ioim, evt); 2504 bfa_sm_send_event(ioim, evt);
@@ -2825,39 +2515,12 @@ bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2825 iotag = be16_to_cpu(rsp->io_tag); 2515 iotag = be16_to_cpu(rsp->io_tag);
2826 2516
2827 ioim = BFA_IOIM_FROM_TAG(fcpim, iotag); 2517 ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
2828 bfa_assert(ioim->iotag == iotag); 2518 WARN_ON(BFA_IOIM_TAG_2_ID(ioim->iotag) != iotag);
2829 2519
2830 bfa_trc_fp(ioim->bfa, ioim->iotag);
2831 bfa_ioim_cb_profile_comp(fcpim, ioim); 2520 bfa_ioim_cb_profile_comp(fcpim, ioim);
2832
2833 bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD); 2521 bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
2834} 2522}
2835 2523
2836void
2837bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
2838{
2839 ioim->start_time = jiffies;
2840}
2841
2842void
2843bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
2844{
2845 u32 fcp_dl = bfa_cb_ioim_get_size(ioim->dio);
2846 u32 index = bfa_ioim_get_index(fcp_dl);
2847 u64 end_time = jiffies;
2848 struct bfa_itnim_latency_s *io_lat =
2849 &(ioim->itnim->ioprofile.io_latency);
2850 u32 val = (u32)(end_time - ioim->start_time);
2851
2852 bfa_itnim_ioprofile_update(ioim->itnim, index);
2853
2854 io_lat->count[index]++;
2855 io_lat->min[index] = (io_lat->min[index] < val) ?
2856 io_lat->min[index] : val;
2857 io_lat->max[index] = (io_lat->max[index] > val) ?
2858 io_lat->max[index] : val;
2859 io_lat->avg[index] += val;
2860}
2861/* 2524/*
2862 * Called by itnim to clean up IO while going offline. 2525 * Called by itnim to clean up IO while going offline.
2863 */ 2526 */
@@ -2903,11 +2566,6 @@ bfa_ioim_tov(struct bfa_ioim_s *ioim)
2903} 2566}
2904 2567
2905 2568
2906
2907/*
2908 * hal_ioim_api
2909 */
2910
2911/* 2569/*
2912 * Allocate IOIM resource for initiator mode I/O request. 2570 * Allocate IOIM resource for initiator mode I/O request.
2913 */ 2571 */
@@ -2936,7 +2594,6 @@ bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
2936 fcpim->ios_active++; 2594 fcpim->ios_active++;
2937 2595
2938 list_add_tail(&ioim->qe, &itnim->io_q); 2596 list_add_tail(&ioim->qe, &itnim->io_q);
2939 bfa_trc_fp(ioim->bfa, ioim->iotag);
2940 2597
2941 return ioim; 2598 return ioim;
2942} 2599}
@@ -2946,18 +2603,13 @@ bfa_ioim_free(struct bfa_ioim_s *ioim)
2946{ 2603{
2947 struct bfa_fcpim_mod_s *fcpim = ioim->fcpim; 2604 struct bfa_fcpim_mod_s *fcpim = ioim->fcpim;
2948 2605
2949 bfa_trc_fp(ioim->bfa, ioim->iotag);
2950 bfa_assert_fp(bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit));
2951
2952 bfa_assert_fp(list_empty(&ioim->sgpg_q) ||
2953 (ioim->nsges > BFI_SGE_INLINE));
2954
2955 if (ioim->nsgpgs > 0) 2606 if (ioim->nsgpgs > 0)
2956 bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs); 2607 bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);
2957 2608
2958 bfa_stats(ioim->itnim, io_comps); 2609 bfa_stats(ioim->itnim, io_comps);
2959 fcpim->ios_active--; 2610 fcpim->ios_active--;
2960 2611
2612 ioim->iotag &= BFA_IOIM_IOTAG_MASK;
2961 list_del(&ioim->qe); 2613 list_del(&ioim->qe);
2962 list_add_tail(&ioim->qe, &fcpim->ioim_free_q); 2614 list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
2963} 2615}
@@ -2965,16 +2617,13 @@ bfa_ioim_free(struct bfa_ioim_s *ioim)
2965void 2617void
2966bfa_ioim_start(struct bfa_ioim_s *ioim) 2618bfa_ioim_start(struct bfa_ioim_s *ioim)
2967{ 2619{
2968 bfa_trc_fp(ioim->bfa, ioim->iotag);
2969
2970 bfa_ioim_cb_profile_start(ioim->fcpim, ioim); 2620 bfa_ioim_cb_profile_start(ioim->fcpim, ioim);
2971 2621
2972 /* 2622 /*
2973 * Obtain the queue over which this request has to be issued 2623 * Obtain the queue over which this request has to be issued
2974 */ 2624 */
2975 ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ? 2625 ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
2976 bfa_cb_ioim_get_reqq(ioim->dio) : 2626 BFA_FALSE : bfa_itnim_get_reqq(ioim);
2977 bfa_itnim_get_reqq(ioim);
2978 2627
2979 bfa_sm_send_event(ioim, BFA_IOIM_SM_START); 2628 bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
2980} 2629}
@@ -2997,13 +2646,12 @@ bfa_ioim_abort(struct bfa_ioim_s *ioim)
2997 return BFA_STATUS_OK; 2646 return BFA_STATUS_OK;
2998} 2647}
2999 2648
3000
3001/* 2649/*
3002 * BFA TSKIM state machine functions 2650 * BFA TSKIM state machine functions
3003 */ 2651 */
3004 2652
3005/* 2653/*
3006 * Task management command beginning state. 2654 * Task management command beginning state.
3007 */ 2655 */
3008static void 2656static void
3009bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) 2657bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
@@ -3040,9 +2688,8 @@ bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3040} 2688}
3041 2689
3042/* 2690/*
3043 * brief 2691 * TM command is active, awaiting completion from firmware to
3044 * TM command is active, awaiting completion from firmware to 2692 * cleanup IO requests in TM scope.
3045 * cleanup IO requests in TM scope.
3046 */ 2693 */
3047static void 2694static void
3048bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) 2695bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
@@ -3077,8 +2724,8 @@ bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3077} 2724}
3078 2725
3079/* 2726/*
3080 * An active TM is being cleaned up since ITN is offline. Awaiting cleanup 2727 * An active TM is being cleaned up since ITN is offline. Awaiting cleanup
3081 * completion event from firmware. 2728 * completion event from firmware.
3082 */ 2729 */
3083static void 2730static void
3084bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) 2731bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
@@ -3138,7 +2785,7 @@ bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3138} 2785}
3139 2786
3140/* 2787/*
3141 * Task management command is waiting for room in request CQ 2788 * Task management command is waiting for room in request CQ
3142 */ 2789 */
3143static void 2790static void
3144bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) 2791bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
@@ -3173,8 +2820,8 @@ bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3173} 2820}
3174 2821
3175/* 2822/*
3176 * Task management command is active, awaiting for room in request CQ 2823 * Task management command is active, awaiting for room in request CQ
3177 * to send clean up request. 2824 * to send clean up request.
3178 */ 2825 */
3179static void 2826static void
3180bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim, 2827bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
@@ -3186,10 +2833,8 @@ bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
3186 case BFA_TSKIM_SM_DONE: 2833 case BFA_TSKIM_SM_DONE:
3187 bfa_reqq_wcancel(&tskim->reqq_wait); 2834 bfa_reqq_wcancel(&tskim->reqq_wait);
3188 /* 2835 /*
3189 *
3190 * Fall through !!! 2836 * Fall through !!!
3191 */ 2837 */
3192
3193 case BFA_TSKIM_SM_QRESUME: 2838 case BFA_TSKIM_SM_QRESUME:
3194 bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup); 2839 bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
3195 bfa_tskim_send_abort(tskim); 2840 bfa_tskim_send_abort(tskim);
@@ -3208,7 +2853,7 @@ bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
3208} 2853}
3209 2854
3210/* 2855/*
3211 * BFA callback is pending 2856 * BFA callback is pending
3212 */ 2857 */
3213static void 2858static void
3214bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) 2859bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
@@ -3233,12 +2878,6 @@ bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3233 } 2878 }
3234} 2879}
3235 2880
3236
3237
3238/*
3239 * hal_tskim_private
3240 */
3241
3242static void 2881static void
3243__bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete) 2882__bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete)
3244{ 2883{
@@ -3268,8 +2907,8 @@ __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete)
3268 BFI_TSKIM_STS_FAILED); 2907 BFI_TSKIM_STS_FAILED);
3269} 2908}
3270 2909
3271static bfa_boolean_t 2910static bfa_boolean_t
3272bfa_tskim_match_scope(struct bfa_tskim_s *tskim, lun_t lun) 2911bfa_tskim_match_scope(struct bfa_tskim_s *tskim, struct scsi_lun lun)
3273{ 2912{
3274 switch (tskim->tm_cmnd) { 2913 switch (tskim->tm_cmnd) {
3275 case FCP_TM_TARGET_RESET: 2914 case FCP_TM_TARGET_RESET:
@@ -3279,24 +2918,26 @@ bfa_tskim_match_scope(struct bfa_tskim_s *tskim, lun_t lun)
3279 case FCP_TM_CLEAR_TASK_SET: 2918 case FCP_TM_CLEAR_TASK_SET:
3280 case FCP_TM_LUN_RESET: 2919 case FCP_TM_LUN_RESET:
3281 case FCP_TM_CLEAR_ACA: 2920 case FCP_TM_CLEAR_ACA:
3282 return (tskim->lun == lun); 2921 return !memcmp(&tskim->lun, &lun, sizeof(lun));
3283 2922
3284 default: 2923 default:
3285 bfa_assert(0); 2924 WARN_ON(1);
3286 } 2925 }
3287 2926
3288 return BFA_FALSE; 2927 return BFA_FALSE;
3289} 2928}
3290 2929
3291/* 2930/*
3292 * Gather affected IO requests and task management commands. 2931 * Gather affected IO requests and task management commands.
3293 */ 2932 */
3294static void 2933static void
3295bfa_tskim_gather_ios(struct bfa_tskim_s *tskim) 2934bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
3296{ 2935{
3297 struct bfa_itnim_s *itnim = tskim->itnim; 2936 struct bfa_itnim_s *itnim = tskim->itnim;
3298 struct bfa_ioim_s *ioim; 2937 struct bfa_ioim_s *ioim;
3299 struct list_head *qe, *qen; 2938 struct list_head *qe, *qen;
2939 struct scsi_cmnd *cmnd;
2940 struct scsi_lun scsilun;
3300 2941
3301 INIT_LIST_HEAD(&tskim->io_q); 2942 INIT_LIST_HEAD(&tskim->io_q);
3302 2943
@@ -3305,8 +2946,9 @@ bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
3305 */ 2946 */
3306 list_for_each_safe(qe, qen, &itnim->io_q) { 2947 list_for_each_safe(qe, qen, &itnim->io_q) {
3307 ioim = (struct bfa_ioim_s *) qe; 2948 ioim = (struct bfa_ioim_s *) qe;
3308 if (bfa_tskim_match_scope 2949 cmnd = (struct scsi_cmnd *) ioim->dio;
3309 (tskim, bfa_cb_ioim_get_lun(ioim->dio))) { 2950 int_to_scsilun(cmnd->device->lun, &scsilun);
2951 if (bfa_tskim_match_scope(tskim, scsilun)) {
3310 list_del(&ioim->qe); 2952 list_del(&ioim->qe);
3311 list_add_tail(&ioim->qe, &tskim->io_q); 2953 list_add_tail(&ioim->qe, &tskim->io_q);
3312 } 2954 }
@@ -3317,8 +2959,9 @@ bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
3317 */ 2959 */
3318 list_for_each_safe(qe, qen, &itnim->pending_q) { 2960 list_for_each_safe(qe, qen, &itnim->pending_q) {
3319 ioim = (struct bfa_ioim_s *) qe; 2961 ioim = (struct bfa_ioim_s *) qe;
3320 if (bfa_tskim_match_scope 2962 cmnd = (struct scsi_cmnd *) ioim->dio;
3321 (tskim, bfa_cb_ioim_get_lun(ioim->dio))) { 2963 int_to_scsilun(cmnd->device->lun, &scsilun);
2964 if (bfa_tskim_match_scope(tskim, scsilun)) {
3322 list_del(&ioim->qe); 2965 list_del(&ioim->qe);
3323 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q); 2966 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
3324 bfa_ioim_tov(ioim); 2967 bfa_ioim_tov(ioim);
@@ -3327,7 +2970,7 @@ bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
3327} 2970}
3328 2971
3329/* 2972/*
3330 * IO cleanup completion 2973 * IO cleanup completion
3331 */ 2974 */
3332static void 2975static void
3333bfa_tskim_cleanp_comp(void *tskim_cbarg) 2976bfa_tskim_cleanp_comp(void *tskim_cbarg)
@@ -3339,7 +2982,7 @@ bfa_tskim_cleanp_comp(void *tskim_cbarg)
3339} 2982}
3340 2983
3341/* 2984/*
3342 * Gather affected IO requests and task management commands. 2985 * Gather affected IO requests and task management commands.
3343 */ 2986 */
3344static void 2987static void
3345bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim) 2988bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
@@ -3359,7 +3002,7 @@ bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
3359} 3002}
3360 3003
3361/* 3004/*
3362 * Send task management request to firmware. 3005 * Send task management request to firmware.
3363 */ 3006 */
3364static bfa_boolean_t 3007static bfa_boolean_t
3365bfa_tskim_send(struct bfa_tskim_s *tskim) 3008bfa_tskim_send(struct bfa_tskim_s *tskim)
@@ -3394,7 +3037,7 @@ bfa_tskim_send(struct bfa_tskim_s *tskim)
3394} 3037}
3395 3038
3396/* 3039/*
3397 * Send abort request to cleanup an active TM to firmware. 3040 * Send abort request to cleanup an active TM to firmware.
3398 */ 3041 */
3399static bfa_boolean_t 3042static bfa_boolean_t
3400bfa_tskim_send_abort(struct bfa_tskim_s *tskim) 3043bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
@@ -3425,7 +3068,7 @@ bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
3425} 3068}
3426 3069
3427/* 3070/*
3428 * Call to resume task management cmnd waiting for room in request queue. 3071 * Call to resume task management cmnd waiting for room in request queue.
3429 */ 3072 */
3430static void 3073static void
3431bfa_tskim_qresume(void *cbarg) 3074bfa_tskim_qresume(void *cbarg)
@@ -3451,12 +3094,6 @@ bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim)
3451 } 3094 }
3452} 3095}
3453 3096
3454
3455
3456/*
3457 * hal_tskim_friend
3458 */
3459
3460/* 3097/*
3461 * Notification on completions from related ioim. 3098 * Notification on completions from related ioim.
3462 */ 3099 */
@@ -3489,7 +3126,7 @@ bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
3489} 3126}
3490 3127
3491/* 3128/*
3492 * Memory allocation and initialization. 3129 * Memory allocation and initialization.
3493 */ 3130 */
3494void 3131void
3495bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo) 3132bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
@@ -3522,14 +3159,6 @@ bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
3522} 3159}
3523 3160
3524void 3161void
3525bfa_tskim_detach(struct bfa_fcpim_mod_s *fcpim)
3526{
3527 /*
3528 * @todo
3529 */
3530}
3531
3532void
3533bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) 3162bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
3534{ 3163{
3535 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); 3164 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
@@ -3538,7 +3167,7 @@ bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
3538 u16 tsk_tag = be16_to_cpu(rsp->tsk_tag); 3167 u16 tsk_tag = be16_to_cpu(rsp->tsk_tag);
3539 3168
3540 tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag); 3169 tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag);
3541 bfa_assert(tskim->tsk_tag == tsk_tag); 3170 WARN_ON(tskim->tsk_tag != tsk_tag);
3542 3171
3543 tskim->tsk_status = rsp->tsk_status; 3172 tskim->tsk_status = rsp->tsk_status;
3544 3173
@@ -3556,12 +3185,6 @@ bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
3556} 3185}
3557 3186
3558 3187
3559
3560/*
3561 * hal_tskim_api
3562 */
3563
3564
3565struct bfa_tskim_s * 3188struct bfa_tskim_s *
3566bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk) 3189bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
3567{ 3190{
@@ -3579,13 +3202,13 @@ bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
3579void 3202void
3580bfa_tskim_free(struct bfa_tskim_s *tskim) 3203bfa_tskim_free(struct bfa_tskim_s *tskim)
3581{ 3204{
3582 bfa_assert(bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe)); 3205 WARN_ON(!bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe));
3583 list_del(&tskim->qe); 3206 list_del(&tskim->qe);
3584 list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q); 3207 list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
3585} 3208}
3586 3209
3587/* 3210/*
3588 * Start a task management command. 3211 * Start a task management command.
3589 * 3212 *
3590 * @param[in] tskim BFA task management command instance 3213 * @param[in] tskim BFA task management command instance
3591 * @param[in] itnim i-t nexus for the task management command 3214 * @param[in] itnim i-t nexus for the task management command
@@ -3596,7 +3219,8 @@ bfa_tskim_free(struct bfa_tskim_s *tskim)
3596 * @return None. 3219 * @return None.
3597 */ 3220 */
3598void 3221void
3599bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim, lun_t lun, 3222bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim,
3223 struct scsi_lun lun,
3600 enum fcp_tm_cmnd tm_cmnd, u8 tsecs) 3224 enum fcp_tm_cmnd tm_cmnd, u8 tsecs)
3601{ 3225{
3602 tskim->itnim = itnim; 3226 tskim->itnim = itnim;
diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
index db53717eeb4b..1e38dade8423 100644
--- a/drivers/scsi/bfa/bfa_fcpim.h
+++ b/drivers/scsi/bfa/bfa_fcpim.h
@@ -41,7 +41,7 @@
41 (__itnim->ioprofile.iocomps[__index]++) 41 (__itnim->ioprofile.iocomps[__index]++)
42 42
43#define BFA_IOIM_RETRY_TAG_OFFSET 11 43#define BFA_IOIM_RETRY_TAG_OFFSET 11
44#define BFA_IOIM_RETRY_TAG_MASK 0x07ff /* 2K IOs */ 44#define BFA_IOIM_IOTAG_MASK 0x07ff /* 2K IOs */
45#define BFA_IOIM_RETRY_MAX 7 45#define BFA_IOIM_RETRY_MAX 7
46 46
47/* Buckets are are 512 bytes to 2MB */ 47/* Buckets are are 512 bytes to 2MB */
@@ -94,12 +94,12 @@ struct bfa_fcpim_mod_s {
94 struct list_head ioim_resfree_q; /* IOs waiting for f/w */ 94 struct list_head ioim_resfree_q; /* IOs waiting for f/w */
95 struct list_head ioim_comp_q; /* IO global comp Q */ 95 struct list_head ioim_comp_q; /* IO global comp Q */
96 struct list_head tskim_free_q; 96 struct list_head tskim_free_q;
97 u32 ios_active; /* current active IOs */ 97 u32 ios_active; /* current active IOs */
98 u32 delay_comp; 98 u32 delay_comp;
99 struct bfa_fcpim_del_itn_stats_s del_itn_stats; 99 struct bfa_fcpim_del_itn_stats_s del_itn_stats;
100 bfa_boolean_t ioredirect; 100 bfa_boolean_t ioredirect;
101 bfa_boolean_t io_profile; 101 bfa_boolean_t io_profile;
102 u32 io_profile_start_time; 102 u32 io_profile_start_time;
103 bfa_fcpim_profile_t profile_comp; 103 bfa_fcpim_profile_t profile_comp;
104 bfa_fcpim_profile_t profile_start; 104 bfa_fcpim_profile_t profile_start;
105}; 105};
@@ -114,25 +114,24 @@ struct bfa_ioim_s {
114 struct bfa_fcpim_mod_s *fcpim; /* parent fcpim module */ 114 struct bfa_fcpim_mod_s *fcpim; /* parent fcpim module */
115 struct bfa_itnim_s *itnim; /* i-t-n nexus for this IO */ 115 struct bfa_itnim_s *itnim; /* i-t-n nexus for this IO */
116 struct bfad_ioim_s *dio; /* driver IO handle */ 116 struct bfad_ioim_s *dio; /* driver IO handle */
117 u16 iotag; /* FWI IO tag */ 117 u16 iotag; /* FWI IO tag */
118 u16 abort_tag; /* unqiue abort request tag */ 118 u16 abort_tag; /* unqiue abort request tag */
119 u16 nsges; /* number of SG elements */ 119 u16 nsges; /* number of SG elements */
120 u16 nsgpgs; /* number of SG pages */ 120 u16 nsgpgs; /* number of SG pages */
121 struct bfa_sgpg_s *sgpg; /* first SG page */ 121 struct bfa_sgpg_s *sgpg; /* first SG page */
122 struct list_head sgpg_q; /* allocated SG pages */ 122 struct list_head sgpg_q; /* allocated SG pages */
123 struct bfa_cb_qe_s hcb_qe; /* bfa callback qelem */ 123 struct bfa_cb_qe_s hcb_qe; /* bfa callback qelem */
124 bfa_cb_cbfn_t io_cbfn; /* IO completion handler */ 124 bfa_cb_cbfn_t io_cbfn; /* IO completion handler */
125 struct bfa_ioim_sp_s *iosp; /* slow-path IO handling */ 125 struct bfa_ioim_sp_s *iosp; /* slow-path IO handling */
126 u8 reqq; /* Request queue for I/O */ 126 u8 reqq; /* Request queue for I/O */
127 u64 start_time; /* IO's Profile start val */ 127 u64 start_time; /* IO's Profile start val */
128}; 128};
129 129
130
131struct bfa_ioim_sp_s { 130struct bfa_ioim_sp_s {
132 struct bfi_msg_s comp_rspmsg; /* IO comp f/w response */ 131 struct bfi_msg_s comp_rspmsg; /* IO comp f/w response */
133 u8 *snsinfo; /* sense info for this IO */ 132 u8 *snsinfo; /* sense info for this IO */
134 struct bfa_sgpg_wqe_s sgpg_wqe; /* waitq elem for sgpg */ 133 struct bfa_sgpg_wqe_s sgpg_wqe; /* waitq elem for sgpg */
135 struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */ 134 struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
136 bfa_boolean_t abort_explicit; /* aborted by OS */ 135 bfa_boolean_t abort_explicit; /* aborted by OS */
137 struct bfa_tskim_s *tskim; /* Relevant TM cmd */ 136 struct bfa_tskim_s *tskim; /* Relevant TM cmd */
138}; 137};
@@ -143,35 +142,34 @@ struct bfa_ioim_sp_s {
143struct bfa_tskim_s { 142struct bfa_tskim_s {
144 struct list_head qe; 143 struct list_head qe;
145 bfa_sm_t sm; 144 bfa_sm_t sm;
146 struct bfa_s *bfa; /* BFA module */ 145 struct bfa_s *bfa; /* BFA module */
147 struct bfa_fcpim_mod_s *fcpim; /* parent fcpim module */ 146 struct bfa_fcpim_mod_s *fcpim; /* parent fcpim module */
148 struct bfa_itnim_s *itnim; /* i-t-n nexus for this IO */ 147 struct bfa_itnim_s *itnim; /* i-t-n nexus for this IO */
149 struct bfad_tskim_s *dtsk; /* driver task mgmt cmnd */ 148 struct bfad_tskim_s *dtsk; /* driver task mgmt cmnd */
150 bfa_boolean_t notify; /* notify itnim on TM comp */ 149 bfa_boolean_t notify; /* notify itnim on TM comp */
151 lun_t lun; /* lun if applicable */ 150 struct scsi_lun lun; /* lun if applicable */
152 enum fcp_tm_cmnd tm_cmnd; /* task management command */ 151 enum fcp_tm_cmnd tm_cmnd; /* task management command */
153 u16 tsk_tag; /* FWI IO tag */ 152 u16 tsk_tag; /* FWI IO tag */
154 u8 tsecs; /* timeout in seconds */ 153 u8 tsecs; /* timeout in seconds */
155 struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */ 154 struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
156 struct list_head io_q; /* queue of affected IOs */ 155 struct list_head io_q; /* queue of affected IOs */
157 struct bfa_wc_s wc; /* waiting counter */ 156 struct bfa_wc_s wc; /* waiting counter */
158 struct bfa_cb_qe_s hcb_qe; /* bfa callback qelem */ 157 struct bfa_cb_qe_s hcb_qe; /* bfa callback qelem */
159 enum bfi_tskim_status tsk_status; /* TM status */ 158 enum bfi_tskim_status tsk_status; /* TM status */
160}; 159};
161 160
162
163/* 161/*
164 * BFA i-t-n (initiator mode) 162 * BFA i-t-n (initiator mode)
165 */ 163 */
166struct bfa_itnim_s { 164struct bfa_itnim_s {
167 struct list_head qe; /* queue element */ 165 struct list_head qe; /* queue element */
168 bfa_sm_t sm; /* i-t-n im BFA state machine */ 166 bfa_sm_t sm; /* i-t-n im BFA state machine */
169 struct bfa_s *bfa; /* bfa instance */ 167 struct bfa_s *bfa; /* bfa instance */
170 struct bfa_rport_s *rport; /* bfa rport */ 168 struct bfa_rport_s *rport; /* bfa rport */
171 void *ditn; /* driver i-t-n structure */ 169 void *ditn; /* driver i-t-n structure */
172 struct bfi_mhdr_s mhdr; /* pre-built mhdr */ 170 struct bfi_mhdr_s mhdr; /* pre-built mhdr */
173 u8 msg_no; /* itnim/rport firmware handle */ 171 u8 msg_no; /* itnim/rport firmware handle */
174 u8 reqq; /* CQ for requests */ 172 u8 reqq; /* CQ for requests */
175 struct bfa_cb_qe_s hcb_qe; /* bfa callback qelem */ 173 struct bfa_cb_qe_s hcb_qe; /* bfa callback qelem */
176 struct list_head pending_q; /* queue of pending IO requests */ 174 struct list_head pending_q; /* queue of pending IO requests */
177 struct list_head io_q; /* queue of active IO requests */ 175 struct list_head io_q; /* queue of active IO requests */
@@ -181,19 +179,19 @@ struct bfa_itnim_s {
181 bfa_boolean_t seq_rec; /* SQER supported */ 179 bfa_boolean_t seq_rec; /* SQER supported */
182 bfa_boolean_t is_online; /* itnim is ONLINE for IO */ 180 bfa_boolean_t is_online; /* itnim is ONLINE for IO */
183 bfa_boolean_t iotov_active; /* IO TOV timer is active */ 181 bfa_boolean_t iotov_active; /* IO TOV timer is active */
184 struct bfa_wc_s wc; /* waiting counter */ 182 struct bfa_wc_s wc; /* waiting counter */
185 struct bfa_timer_s timer; /* pending IO TOV */ 183 struct bfa_timer_s timer; /* pending IO TOV */
186 struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */ 184 struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
187 struct bfa_fcpim_mod_s *fcpim; /* fcpim module */ 185 struct bfa_fcpim_mod_s *fcpim; /* fcpim module */
188 struct bfa_itnim_iostats_s stats; 186 struct bfa_itnim_iostats_s stats;
189 struct bfa_itnim_ioprofile_s ioprofile; 187 struct bfa_itnim_ioprofile_s ioprofile;
190}; 188};
191 189
192
193#define bfa_itnim_is_online(_itnim) ((_itnim)->is_online) 190#define bfa_itnim_is_online(_itnim) ((_itnim)->is_online)
194#define BFA_FCPIM_MOD(_hal) (&(_hal)->modules.fcpim_mod) 191#define BFA_FCPIM_MOD(_hal) (&(_hal)->modules.fcpim_mod)
192#define BFA_IOIM_TAG_2_ID(_iotag) ((_iotag) & BFA_IOIM_IOTAG_MASK)
195#define BFA_IOIM_FROM_TAG(_fcpim, _iotag) \ 193#define BFA_IOIM_FROM_TAG(_fcpim, _iotag) \
196 (&fcpim->ioim_arr[(_iotag & BFA_IOIM_RETRY_TAG_MASK)]) 194 (&fcpim->ioim_arr[(_iotag & BFA_IOIM_IOTAG_MASK)])
197#define BFA_TSKIM_FROM_TAG(_fcpim, _tmtag) \ 195#define BFA_TSKIM_FROM_TAG(_fcpim, _tmtag) \
198 (&fcpim->tskim_arr[_tmtag & (fcpim->num_tskim_reqs - 1)]) 196 (&fcpim->tskim_arr[_tmtag & (fcpim->num_tskim_reqs - 1)])
199 197
@@ -201,26 +199,26 @@ struct bfa_itnim_s {
201 (_bfa->modules.fcpim_mod.io_profile_start_time) 199 (_bfa->modules.fcpim_mod.io_profile_start_time)
202#define bfa_fcpim_get_io_profile(_bfa) \ 200#define bfa_fcpim_get_io_profile(_bfa) \
203 (_bfa->modules.fcpim_mod.io_profile) 201 (_bfa->modules.fcpim_mod.io_profile)
202#define bfa_ioim_update_iotag(__ioim) do { \
203 uint16_t k = (__ioim)->iotag >> BFA_IOIM_RETRY_TAG_OFFSET; \
204 k++; (__ioim)->iotag &= BFA_IOIM_IOTAG_MASK; \
205 (__ioim)->iotag |= k << BFA_IOIM_RETRY_TAG_OFFSET; \
206} while (0)
204 207
205static inline bfa_boolean_t 208static inline bfa_boolean_t
206bfa_ioim_get_iotag(struct bfa_ioim_s *ioim) 209bfa_ioim_maxretry_reached(struct bfa_ioim_s *ioim)
207{ 210{
208 u16 k = ioim->iotag; 211 uint16_t k = ioim->iotag >> BFA_IOIM_RETRY_TAG_OFFSET;
209 212 if (k < BFA_IOIM_RETRY_MAX)
210 k >>= BFA_IOIM_RETRY_TAG_OFFSET; k++;
211
212 if (k > BFA_IOIM_RETRY_MAX)
213 return BFA_FALSE; 213 return BFA_FALSE;
214 ioim->iotag &= BFA_IOIM_RETRY_TAG_MASK;
215 ioim->iotag |= k<<BFA_IOIM_RETRY_TAG_OFFSET;
216 return BFA_TRUE; 214 return BFA_TRUE;
217} 215}
216
218/* 217/*
219 * function prototypes 218 * function prototypes
220 */ 219 */
221void bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, 220void bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim,
222 struct bfa_meminfo_s *minfo); 221 struct bfa_meminfo_s *minfo);
223void bfa_ioim_detach(struct bfa_fcpim_mod_s *fcpim);
224void bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); 222void bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
225void bfa_ioim_good_comp_isr(struct bfa_s *bfa, 223void bfa_ioim_good_comp_isr(struct bfa_s *bfa,
226 struct bfi_msg_s *msg); 224 struct bfi_msg_s *msg);
@@ -232,7 +230,6 @@ void bfa_ioim_tov(struct bfa_ioim_s *ioim);
232 230
233void bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, 231void bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim,
234 struct bfa_meminfo_s *minfo); 232 struct bfa_meminfo_s *minfo);
235void bfa_tskim_detach(struct bfa_fcpim_mod_s *fcpim);
236void bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); 233void bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
237void bfa_tskim_iodone(struct bfa_tskim_s *tskim); 234void bfa_tskim_iodone(struct bfa_tskim_s *tskim);
238void bfa_tskim_iocdisable(struct bfa_tskim_s *tskim); 235void bfa_tskim_iocdisable(struct bfa_tskim_s *tskim);
@@ -248,32 +245,14 @@ void bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
248void bfa_itnim_iodone(struct bfa_itnim_s *itnim); 245void bfa_itnim_iodone(struct bfa_itnim_s *itnim);
249void bfa_itnim_tskdone(struct bfa_itnim_s *itnim); 246void bfa_itnim_tskdone(struct bfa_itnim_s *itnim);
250bfa_boolean_t bfa_itnim_hold_io(struct bfa_itnim_s *itnim); 247bfa_boolean_t bfa_itnim_hold_io(struct bfa_itnim_s *itnim);
251void bfa_ioim_profile_comp(struct bfa_ioim_s *ioim);
252void bfa_ioim_profile_start(struct bfa_ioim_s *ioim);
253
254 248
255/* 249/*
256 * bfa fcpim module API functions 250 * bfa fcpim module API functions
257 */ 251 */
258void bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov); 252void bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov);
259u16 bfa_fcpim_path_tov_get(struct bfa_s *bfa); 253u16 bfa_fcpim_path_tov_get(struct bfa_s *bfa);
260void bfa_fcpim_qdepth_set(struct bfa_s *bfa, u16 q_depth);
261u16 bfa_fcpim_qdepth_get(struct bfa_s *bfa); 254u16 bfa_fcpim_qdepth_get(struct bfa_s *bfa);
262bfa_status_t bfa_fcpim_get_modstats(struct bfa_s *bfa, 255
263 struct bfa_itnim_iostats_s *modstats);
264bfa_status_t bfa_fcpim_port_iostats(struct bfa_s *bfa,
265 struct bfa_itnim_iostats_s *stats, u8 lp_tag);
266bfa_status_t bfa_fcpim_get_del_itn_stats(struct bfa_s *bfa,
267 struct bfa_fcpim_del_itn_stats_s *modstats);
268bfa_status_t bfa_fcpim_port_clear_iostats(struct bfa_s *bfa, u8 lp_tag);
269void bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *fcpim_stats,
270 struct bfa_itnim_iostats_s *itnim_stats);
271bfa_status_t bfa_fcpim_clr_modstats(struct bfa_s *bfa);
272void bfa_fcpim_set_ioredirect(struct bfa_s *bfa,
273 bfa_boolean_t state);
274void bfa_fcpim_update_ioredirect(struct bfa_s *bfa);
275bfa_status_t bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time);
276bfa_status_t bfa_fcpim_profile_off(struct bfa_s *bfa);
277#define bfa_fcpim_ioredirect_enabled(__bfa) \ 256#define bfa_fcpim_ioredirect_enabled(__bfa) \
278 (((struct bfa_fcpim_mod_s *)(BFA_FCPIM_MOD(__bfa)))->ioredirect) 257 (((struct bfa_fcpim_mod_s *)(BFA_FCPIM_MOD(__bfa)))->ioredirect)
279 258
@@ -291,48 +270,33 @@ bfa_status_t bfa_fcpim_profile_off(struct bfa_s *bfa);
291 * bfa itnim API functions 270 * bfa itnim API functions
292 */ 271 */
293struct bfa_itnim_s *bfa_itnim_create(struct bfa_s *bfa, 272struct bfa_itnim_s *bfa_itnim_create(struct bfa_s *bfa,
294 struct bfa_rport_s *rport, void *itnim); 273 struct bfa_rport_s *rport, void *itnim);
295void bfa_itnim_delete(struct bfa_itnim_s *itnim); 274void bfa_itnim_delete(struct bfa_itnim_s *itnim);
296void bfa_itnim_online(struct bfa_itnim_s *itnim, 275void bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec);
297 bfa_boolean_t seq_rec); 276void bfa_itnim_offline(struct bfa_itnim_s *itnim);
298void bfa_itnim_offline(struct bfa_itnim_s *itnim); 277void bfa_itnim_clear_stats(struct bfa_itnim_s *itnim);
299void bfa_itnim_get_stats(struct bfa_itnim_s *itnim, 278bfa_status_t bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
300 struct bfa_itnim_iostats_s *stats); 279 struct bfa_itnim_ioprofile_s *ioprofile);
301void bfa_itnim_clear_stats(struct bfa_itnim_s *itnim); 280
302bfa_status_t bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
303 struct bfa_itnim_ioprofile_s *ioprofile);
304#define bfa_itnim_get_reqq(__ioim) (((struct bfa_ioim_s *)__ioim)->itnim->reqq) 281#define bfa_itnim_get_reqq(__ioim) (((struct bfa_ioim_s *)__ioim)->itnim->reqq)
305 282
306/* 283/*
307 * BFA completion callback for bfa_itnim_online(). 284 * BFA completion callback for bfa_itnim_online().
308 *
309 * @param[in] itnim FCS or driver itnim instance
310 *
311 * return None
312 */ 285 */
313void bfa_cb_itnim_online(void *itnim); 286void bfa_cb_itnim_online(void *itnim);
314 287
315/* 288/*
316 * BFA completion callback for bfa_itnim_offline(). 289 * BFA completion callback for bfa_itnim_offline().
317 *
318 * @param[in] itnim FCS or driver itnim instance
319 *
320 * return None
321 */ 290 */
322void bfa_cb_itnim_offline(void *itnim); 291void bfa_cb_itnim_offline(void *itnim);
323void bfa_cb_itnim_tov_begin(void *itnim); 292void bfa_cb_itnim_tov_begin(void *itnim);
324void bfa_cb_itnim_tov(void *itnim); 293void bfa_cb_itnim_tov(void *itnim);
325 294
326/* 295/*
327 * BFA notification to FCS/driver for second level error recovery. 296 * BFA notification to FCS/driver for second level error recovery.
328 *
329 * Atleast one I/O request has timedout and target is unresponsive to 297 * Atleast one I/O request has timedout and target is unresponsive to
330 * repeated abort requests. Second level error recovery should be initiated 298 * repeated abort requests. Second level error recovery should be initiated
331 * by starting implicit logout and recovery procedures. 299 * by starting implicit logout and recovery procedures.
332 *
333 * @param[in] itnim FCS or driver itnim instance
334 *
335 * return None
336 */ 300 */
337void bfa_cb_itnim_sler(void *itnim); 301void bfa_cb_itnim_sler(void *itnim);
338 302
@@ -349,10 +313,8 @@ void bfa_ioim_start(struct bfa_ioim_s *ioim);
349bfa_status_t bfa_ioim_abort(struct bfa_ioim_s *ioim); 313bfa_status_t bfa_ioim_abort(struct bfa_ioim_s *ioim);
350void bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, 314void bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim,
351 bfa_boolean_t iotov); 315 bfa_boolean_t iotov);
352
353
354/* 316/*
355 * I/O completion notification. 317 * I/O completion notification.
356 * 318 *
357 * @param[in] dio driver IO structure 319 * @param[in] dio driver IO structure
358 * @param[in] io_status IO completion status 320 * @param[in] io_status IO completion status
@@ -363,39 +325,31 @@ void bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim,
363 * 325 *
364 * @return None 326 * @return None
365 */ 327 */
366void bfa_cb_ioim_done(void *bfad, struct bfad_ioim_s *dio, 328void bfa_cb_ioim_done(void *bfad, struct bfad_ioim_s *dio,
367 enum bfi_ioim_status io_status, 329 enum bfi_ioim_status io_status,
368 u8 scsi_status, int sns_len, 330 u8 scsi_status, int sns_len,
369 u8 *sns_info, s32 residue); 331 u8 *sns_info, s32 residue);
370 332
371/* 333/*
372 * I/O good completion notification. 334 * I/O good completion notification.
373 *
374 * @param[in] dio driver IO structure
375 *
376 * @return None
377 */ 335 */
378void bfa_cb_ioim_good_comp(void *bfad, struct bfad_ioim_s *dio); 336void bfa_cb_ioim_good_comp(void *bfad, struct bfad_ioim_s *dio);
379 337
380/* 338/*
381 * I/O abort completion notification 339 * I/O abort completion notification
382 *
383 * @param[in] dio driver IO that was aborted
384 *
385 * @return None
386 */ 340 */
387void bfa_cb_ioim_abort(void *bfad, struct bfad_ioim_s *dio); 341void bfa_cb_ioim_abort(void *bfad, struct bfad_ioim_s *dio);
388 342
389/* 343/*
390 * bfa tskim API functions 344 * bfa tskim API functions
391 */ 345 */
392struct bfa_tskim_s *bfa_tskim_alloc(struct bfa_s *bfa, 346struct bfa_tskim_s *bfa_tskim_alloc(struct bfa_s *bfa,
393 struct bfad_tskim_s *dtsk); 347 struct bfad_tskim_s *dtsk);
394void bfa_tskim_free(struct bfa_tskim_s *tskim); 348void bfa_tskim_free(struct bfa_tskim_s *tskim);
395void bfa_tskim_start(struct bfa_tskim_s *tskim, 349void bfa_tskim_start(struct bfa_tskim_s *tskim,
396 struct bfa_itnim_s *itnim, lun_t lun, 350 struct bfa_itnim_s *itnim, struct scsi_lun lun,
397 enum fcp_tm_cmnd tm, u8 t_secs); 351 enum fcp_tm_cmnd tm, u8 t_secs);
398void bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk, 352void bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
399 enum bfi_tskim_status tsk_status); 353 enum bfi_tskim_status tsk_status);
400 354
401#endif /* __BFA_FCPIM_H__ */ 355#endif /* __BFA_FCPIM_H__ */
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
index 045d7e87b632..f674f9318629 100644
--- a/drivers/scsi/bfa/bfa_fcs.c
+++ b/drivers/scsi/bfa/bfa_fcs.c
@@ -19,9 +19,9 @@
19 * bfa_fcs.c BFA FCS main 19 * bfa_fcs.c BFA FCS main
20 */ 20 */
21 21
22#include "bfad_drv.h"
22#include "bfa_fcs.h" 23#include "bfa_fcs.h"
23#include "bfa_fcbuild.h" 24#include "bfa_fcbuild.h"
24#include "bfad_drv.h"
25 25
26BFA_TRC_FILE(FCS, FCS); 26BFA_TRC_FILE(FCS, FCS);
27 27
@@ -76,7 +76,7 @@ bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
76 fcs->bfad = bfad; 76 fcs->bfad = bfad;
77 fcs->min_cfg = min_cfg; 77 fcs->min_cfg = min_cfg;
78 78
79 bfa_attach_fcs(bfa); 79 bfa->fcs = BFA_TRUE;
80 fcbuild_init(); 80 fcbuild_init();
81 81
82 for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) { 82 for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) {
@@ -110,14 +110,6 @@ bfa_fcs_init(struct bfa_fcs_s *fcs)
110 } 110 }
111} 111}
112 112
113/*
114 * Start FCS operations.
115 */
116void
117bfa_fcs_start(struct bfa_fcs_s *fcs)
118{
119 bfa_fcs_fabric_modstart(fcs);
120}
121 113
122/* 114/*
123 * brief 115 * brief
@@ -140,22 +132,6 @@ bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
140 132
141/* 133/*
142 * brief 134 * brief
143 * FCS FDMI Driver Parameter Initialization
144 *
145 * param[in] fcs FCS instance
146 * param[in] fdmi_enable TRUE/FALSE
147 *
148 * return None
149 */
150void
151bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable)
152{
153
154 fcs->fdmi_enabled = fdmi_enable;
155
156}
157/*
158 * brief
159 * FCS instance cleanup and exit. 135 * FCS instance cleanup and exit.
160 * 136 *
161 * param[in] fcs FCS instance 137 * param[in] fcs FCS instance
@@ -184,18 +160,6 @@ bfa_fcs_exit(struct bfa_fcs_s *fcs)
184} 160}
185 161
186 162
187void
188bfa_fcs_trc_init(struct bfa_fcs_s *fcs, struct bfa_trc_mod_s *trcmod)
189{
190 fcs->trcmod = trcmod;
191}
192
193void
194bfa_fcs_modexit_comp(struct bfa_fcs_s *fcs)
195{
196 bfa_wc_down(&fcs->wc);
197}
198
199/* 163/*
200 * Fabric module implementation. 164 * Fabric module implementation.
201 */ 165 */
@@ -232,31 +196,6 @@ static void bfa_fcs_fabric_flogiacc_comp(void *fcsarg,
232 u32 rsp_len, 196 u32 rsp_len,
233 u32 resid_len, 197 u32 resid_len,
234 struct fchs_s *rspfchs); 198 struct fchs_s *rspfchs);
235/*
236 * fcs_fabric_sm fabric state machine functions
237 */
238
239/*
240 * Fabric state machine events
241 */
242enum bfa_fcs_fabric_event {
243 BFA_FCS_FABRIC_SM_CREATE = 1, /* create from driver */
244 BFA_FCS_FABRIC_SM_DELETE = 2, /* delete from driver */
245 BFA_FCS_FABRIC_SM_LINK_DOWN = 3, /* link down from port */
246 BFA_FCS_FABRIC_SM_LINK_UP = 4, /* link up from port */
247 BFA_FCS_FABRIC_SM_CONT_OP = 5, /* flogi/auth continue op */
248 BFA_FCS_FABRIC_SM_RETRY_OP = 6, /* flogi/auth retry op */
249 BFA_FCS_FABRIC_SM_NO_FABRIC = 7, /* from flogi/auth */
250 BFA_FCS_FABRIC_SM_PERF_EVFP = 8, /* from flogi/auth */
251 BFA_FCS_FABRIC_SM_ISOLATE = 9, /* from EVFP processing */
252 BFA_FCS_FABRIC_SM_NO_TAGGING = 10, /* no VFT tagging from EVFP */
253 BFA_FCS_FABRIC_SM_DELAYED = 11, /* timeout delay event */
254 BFA_FCS_FABRIC_SM_AUTH_FAILED = 12, /* auth failed */
255 BFA_FCS_FABRIC_SM_AUTH_SUCCESS = 13, /* auth successful */
256 BFA_FCS_FABRIC_SM_DELCOMP = 14, /* all vports deleted event */
257 BFA_FCS_FABRIC_SM_LOOPBACK = 15, /* Received our own FLOGI */
258 BFA_FCS_FABRIC_SM_START = 16, /* from driver */
259};
260 199
261static void bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric, 200static void bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric,
262 enum bfa_fcs_fabric_event event); 201 enum bfa_fcs_fabric_event event);
@@ -270,14 +209,8 @@ static void bfa_fcs_fabric_sm_flogi_retry(struct bfa_fcs_fabric_s *fabric,
270 enum bfa_fcs_fabric_event event); 209 enum bfa_fcs_fabric_event event);
271static void bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric, 210static void bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric,
272 enum bfa_fcs_fabric_event event); 211 enum bfa_fcs_fabric_event event);
273static void bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric,
274 enum bfa_fcs_fabric_event event);
275static void bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric,
276 enum bfa_fcs_fabric_event event);
277static void bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric, 212static void bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric,
278 enum bfa_fcs_fabric_event event); 213 enum bfa_fcs_fabric_event event);
279static void bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
280 enum bfa_fcs_fabric_event event);
281static void bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric, 214static void bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric,
282 enum bfa_fcs_fabric_event event); 215 enum bfa_fcs_fabric_event event);
283static void bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric, 216static void bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric,
@@ -337,7 +270,7 @@ bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric,
337 270
338 case BFA_FCS_FABRIC_SM_DELETE: 271 case BFA_FCS_FABRIC_SM_DELETE:
339 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit); 272 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit);
340 bfa_fcs_modexit_comp(fabric->fcs); 273 bfa_wc_down(&fabric->fcs->wc);
341 break; 274 break;
342 275
343 default: 276 default:
@@ -410,7 +343,7 @@ bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric,
410 343
411 case BFA_FCS_FABRIC_SM_LOOPBACK: 344 case BFA_FCS_FABRIC_SM_LOOPBACK:
412 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_loopback); 345 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_loopback);
413 bfa_lps_discard(fabric->lps); 346 bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
414 bfa_fcs_fabric_set_opertype(fabric); 347 bfa_fcs_fabric_set_opertype(fabric);
415 break; 348 break;
416 349
@@ -424,12 +357,12 @@ bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric,
424 357
425 case BFA_FCS_FABRIC_SM_LINK_DOWN: 358 case BFA_FCS_FABRIC_SM_LINK_DOWN:
426 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); 359 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
427 bfa_lps_discard(fabric->lps); 360 bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
428 break; 361 break;
429 362
430 case BFA_FCS_FABRIC_SM_DELETE: 363 case BFA_FCS_FABRIC_SM_DELETE:
431 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); 364 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
432 bfa_lps_discard(fabric->lps); 365 bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
433 bfa_fcs_fabric_delete(fabric); 366 bfa_fcs_fabric_delete(fabric);
434 break; 367 break;
435 368
@@ -481,7 +414,7 @@ bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric,
481 switch (event) { 414 switch (event) {
482 case BFA_FCS_FABRIC_SM_AUTH_FAILED: 415 case BFA_FCS_FABRIC_SM_AUTH_FAILED:
483 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed); 416 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed);
484 bfa_lps_discard(fabric->lps); 417 bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
485 break; 418 break;
486 419
487 case BFA_FCS_FABRIC_SM_AUTH_SUCCESS: 420 case BFA_FCS_FABRIC_SM_AUTH_SUCCESS:
@@ -495,7 +428,7 @@ bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric,
495 428
496 case BFA_FCS_FABRIC_SM_LINK_DOWN: 429 case BFA_FCS_FABRIC_SM_LINK_DOWN:
497 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); 430 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
498 bfa_lps_discard(fabric->lps); 431 bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
499 break; 432 break;
500 433
501 case BFA_FCS_FABRIC_SM_DELETE: 434 case BFA_FCS_FABRIC_SM_DELETE:
@@ -511,7 +444,7 @@ bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric,
511/* 444/*
512 * Authentication failed 445 * Authentication failed
513 */ 446 */
514static void 447void
515bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric, 448bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric,
516 enum bfa_fcs_fabric_event event) 449 enum bfa_fcs_fabric_event event)
517{ 450{
@@ -537,7 +470,7 @@ bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric,
537/* 470/*
538 * Port is in loopback mode. 471 * Port is in loopback mode.
539 */ 472 */
540static void 473void
541bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric, 474bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric,
542 enum bfa_fcs_fabric_event event) 475 enum bfa_fcs_fabric_event event)
543{ 476{
@@ -573,7 +506,7 @@ bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric,
573 switch (event) { 506 switch (event) {
574 case BFA_FCS_FABRIC_SM_LINK_DOWN: 507 case BFA_FCS_FABRIC_SM_LINK_DOWN:
575 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); 508 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
576 bfa_lps_discard(fabric->lps); 509 bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
577 bfa_fcs_fabric_notify_offline(fabric); 510 bfa_fcs_fabric_notify_offline(fabric);
578 break; 511 break;
579 512
@@ -596,7 +529,7 @@ bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric,
596/* 529/*
597 * Fabric is online - normal operating state. 530 * Fabric is online - normal operating state.
598 */ 531 */
599static void 532void
600bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric, 533bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
601 enum bfa_fcs_fabric_event event) 534 enum bfa_fcs_fabric_event event)
602{ 535{
@@ -606,7 +539,7 @@ bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
606 switch (event) { 539 switch (event) {
607 case BFA_FCS_FABRIC_SM_LINK_DOWN: 540 case BFA_FCS_FABRIC_SM_LINK_DOWN:
608 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); 541 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
609 bfa_lps_discard(fabric->lps); 542 bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
610 bfa_fcs_fabric_notify_offline(fabric); 543 bfa_fcs_fabric_notify_offline(fabric);
611 break; 544 break;
612 545
@@ -617,7 +550,7 @@ bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
617 550
618 case BFA_FCS_FABRIC_SM_AUTH_FAILED: 551 case BFA_FCS_FABRIC_SM_AUTH_FAILED:
619 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed); 552 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed);
620 bfa_lps_discard(fabric->lps); 553 bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
621 break; 554 break;
622 555
623 case BFA_FCS_FABRIC_SM_AUTH_SUCCESS: 556 case BFA_FCS_FABRIC_SM_AUTH_SUCCESS:
@@ -697,7 +630,7 @@ bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric,
697 switch (event) { 630 switch (event) {
698 case BFA_FCS_FABRIC_SM_DELCOMP: 631 case BFA_FCS_FABRIC_SM_DELCOMP:
699 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit); 632 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit);
700 bfa_fcs_modexit_comp(fabric->fcs); 633 bfa_wc_down(&fabric->fcs->wc);
701 break; 634 break;
702 635
703 case BFA_FCS_FABRIC_SM_LINK_UP: 636 case BFA_FCS_FABRIC_SM_LINK_UP:
@@ -724,8 +657,8 @@ bfa_fcs_fabric_init(struct bfa_fcs_fabric_s *fabric)
724 struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg; 657 struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg;
725 658
726 port_cfg->roles = BFA_LPORT_ROLE_FCP_IM; 659 port_cfg->roles = BFA_LPORT_ROLE_FCP_IM;
727 port_cfg->nwwn = bfa_ioc_get_nwwn(&fabric->fcs->bfa->ioc); 660 port_cfg->nwwn = fabric->fcs->bfa->ioc.attr->nwwn;
728 port_cfg->pwwn = bfa_ioc_get_pwwn(&fabric->fcs->bfa->ioc); 661 port_cfg->pwwn = fabric->fcs->bfa->ioc.attr->pwwn;
729} 662}
730 663
731/* 664/*
@@ -813,7 +746,7 @@ bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status)
813 return; 746 return;
814 747
815 case BFA_STATUS_EPROTOCOL: 748 case BFA_STATUS_EPROTOCOL:
816 switch (bfa_lps_get_extstatus(fabric->lps)) { 749 switch (fabric->lps->ext_status) {
817 case BFA_EPROTO_BAD_ACCEPT: 750 case BFA_EPROTO_BAD_ACCEPT:
818 fabric->stats.flogi_acc_err++; 751 fabric->stats.flogi_acc_err++;
819 break; 752 break;
@@ -840,26 +773,26 @@ bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status)
840 return; 773 return;
841 } 774 }
842 775
843 fabric->bb_credit = bfa_lps_get_peer_bbcredit(fabric->lps); 776 fabric->bb_credit = fabric->lps->pr_bbcred;
844 bfa_trc(fabric->fcs, fabric->bb_credit); 777 bfa_trc(fabric->fcs, fabric->bb_credit);
845 778
846 if (!bfa_lps_is_brcd_fabric(fabric->lps)) 779 if (!(fabric->lps->brcd_switch))
847 fabric->fabric_name = bfa_lps_get_peer_nwwn(fabric->lps); 780 fabric->fabric_name = fabric->lps->pr_nwwn;
848 781
849 /* 782 /*
850 * Check port type. It should be 1 = F-port. 783 * Check port type. It should be 1 = F-port.
851 */ 784 */
852 if (bfa_lps_is_fport(fabric->lps)) { 785 if (fabric->lps->fport) {
853 fabric->bport.pid = bfa_lps_get_pid(fabric->lps); 786 fabric->bport.pid = fabric->lps->lp_pid;
854 fabric->is_npiv = bfa_lps_is_npiv_en(fabric->lps); 787 fabric->is_npiv = fabric->lps->npiv_en;
855 fabric->is_auth = bfa_lps_is_authreq(fabric->lps); 788 fabric->is_auth = fabric->lps->auth_req;
856 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_CONT_OP); 789 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_CONT_OP);
857 } else { 790 } else {
858 /* 791 /*
859 * Nport-2-Nport direct attached 792 * Nport-2-Nport direct attached
860 */ 793 */
861 fabric->bport.port_topo.pn2n.rem_port_wwn = 794 fabric->bport.port_topo.pn2n.rem_port_wwn =
862 bfa_lps_get_peer_pwwn(fabric->lps); 795 fabric->lps->pr_pwwn;
863 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_NO_FABRIC); 796 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_NO_FABRIC);
864 } 797 }
865 798
@@ -987,7 +920,7 @@ bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs)
987 INIT_LIST_HEAD(&fabric->vport_q); 920 INIT_LIST_HEAD(&fabric->vport_q);
988 INIT_LIST_HEAD(&fabric->vf_q); 921 INIT_LIST_HEAD(&fabric->vf_q);
989 fabric->lps = bfa_lps_alloc(fcs->bfa); 922 fabric->lps = bfa_lps_alloc(fcs->bfa);
990 bfa_assert(fabric->lps); 923 WARN_ON(!fabric->lps);
991 924
992 /* 925 /*
993 * Initialize fabric delete completion handler. Fabric deletion is 926 * Initialize fabric delete completion handler. Fabric deletion is
@@ -1038,31 +971,6 @@ bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs)
1038 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_START); 971 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_START);
1039} 972}
1040 973
1041/*
1042 * Suspend fabric activity as part of driver suspend.
1043 */
1044void
1045bfa_fcs_fabric_modsusp(struct bfa_fcs_s *fcs)
1046{
1047}
1048
1049bfa_boolean_t
1050bfa_fcs_fabric_is_loopback(struct bfa_fcs_fabric_s *fabric)
1051{
1052 return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_loopback);
1053}
1054
1055bfa_boolean_t
1056bfa_fcs_fabric_is_auth_failed(struct bfa_fcs_fabric_s *fabric)
1057{
1058 return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_auth_failed);
1059}
1060
1061enum bfa_port_type
1062bfa_fcs_fabric_port_type(struct bfa_fcs_fabric_s *fabric)
1063{
1064 return fabric->oper_type;
1065}
1066 974
1067/* 975/*
1068 * Link up notification from BFA physical port module. 976 * Link up notification from BFA physical port module.
@@ -1123,40 +1031,6 @@ bfa_fcs_fabric_delvport(struct bfa_fcs_fabric_s *fabric,
1123 bfa_wc_down(&fabric->wc); 1031 bfa_wc_down(&fabric->wc);
1124} 1032}
1125 1033
1126/*
1127 * Base port is deleted.
1128 */
1129void
1130bfa_fcs_fabric_port_delete_comp(struct bfa_fcs_fabric_s *fabric)
1131{
1132 bfa_wc_down(&fabric->wc);
1133}
1134
1135
1136/*
1137 * Check if fabric is online.
1138 *
1139 * param[in] fabric - Fabric instance. This can be a base fabric or vf.
1140 *
1141 * @return TRUE/FALSE
1142 */
1143int
1144bfa_fcs_fabric_is_online(struct bfa_fcs_fabric_s *fabric)
1145{
1146 return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_online);
1147}
1148
1149/*
1150 * brief
1151 *
1152 */
1153bfa_status_t
1154bfa_fcs_fabric_addvf(struct bfa_fcs_fabric_s *vf, struct bfa_fcs_s *fcs,
1155 struct bfa_lport_cfg_s *port_cfg, struct bfad_vf_s *vf_drv)
1156{
1157 bfa_sm_set_state(vf, bfa_fcs_fabric_sm_uninit);
1158 return BFA_STATUS_OK;
1159}
1160 1034
1161/* 1035/*
1162 * Lookup for a vport withing a fabric given its pwwn 1036 * Lookup for a vport withing a fabric given its pwwn
@@ -1176,18 +1050,6 @@ bfa_fcs_fabric_vport_lookup(struct bfa_fcs_fabric_s *fabric, wwn_t pwwn)
1176 return NULL; 1050 return NULL;
1177} 1051}
1178 1052
1179/*
1180 * In a given fabric, return the number of lports.
1181 *
1182 * param[in] fabric - Fabric instance. This can be a base fabric or vf.
1183 *
1184 * @return : 1 or more.
1185 */
1186u16
1187bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric)
1188{
1189 return fabric->num_vports;
1190}
1191 1053
1192/* 1054/*
1193 * Get OUI of the attached switch. 1055 * Get OUI of the attached switch.
@@ -1207,7 +1069,7 @@ bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric)
1207 u8 *tmp; 1069 u8 *tmp;
1208 u16 oui; 1070 u16 oui;
1209 1071
1210 fab_nwwn = bfa_lps_get_peer_nwwn(fabric->lps); 1072 fab_nwwn = fabric->lps->pr_nwwn;
1211 1073
1212 tmp = (u8 *)&fab_nwwn; 1074 tmp = (u8 *)&fab_nwwn;
1213 oui = (tmp[3] << 8) | tmp[4]; 1075 oui = (tmp[3] << 8) | tmp[4];
@@ -1235,7 +1097,7 @@ bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
1235 * external loopback cable is in place. Our own FLOGI frames are 1097 * external loopback cable is in place. Our own FLOGI frames are
1236 * sometimes looped back when switch port gets temporarily bypassed. 1098 * sometimes looped back when switch port gets temporarily bypassed.
1237 */ 1099 */
1238 if ((pid == bfa_os_ntoh3b(FC_FABRIC_PORT)) && 1100 if ((pid == bfa_ntoh3b(FC_FABRIC_PORT)) &&
1239 (els_cmd->els_code == FC_ELS_FLOGI) && 1101 (els_cmd->els_code == FC_ELS_FLOGI) &&
1240 (flogi->port_name == bfa_fcs_lport_get_pwwn(&fabric->bport))) { 1102 (flogi->port_name == bfa_fcs_lport_get_pwwn(&fabric->bport))) {
1241 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LOOPBACK); 1103 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LOOPBACK);
@@ -1245,7 +1107,7 @@ bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
1245 /* 1107 /*
1246 * FLOGI/EVFP exchanges should be consumed by base fabric. 1108 * FLOGI/EVFP exchanges should be consumed by base fabric.
1247 */ 1109 */
1248 if (fchs->d_id == bfa_os_hton3b(FC_FABRIC_PORT)) { 1110 if (fchs->d_id == bfa_hton3b(FC_FABRIC_PORT)) {
1249 bfa_trc(fabric->fcs, pid); 1111 bfa_trc(fabric->fcs, pid);
1250 bfa_fcs_fabric_process_uf(fabric, fchs, len); 1112 bfa_fcs_fabric_process_uf(fabric, fchs, len);
1251 return; 1113 return;
@@ -1358,13 +1220,13 @@ bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric)
1358 return; 1220 return;
1359 1221
1360 reqlen = fc_flogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), 1222 reqlen = fc_flogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
1361 bfa_os_hton3b(FC_FABRIC_PORT), 1223 bfa_hton3b(FC_FABRIC_PORT),
1362 n2n_port->reply_oxid, pcfg->pwwn, 1224 n2n_port->reply_oxid, pcfg->pwwn,
1363 pcfg->nwwn, 1225 pcfg->nwwn,
1364 bfa_fcport_get_maxfrsize(bfa), 1226 bfa_fcport_get_maxfrsize(bfa),
1365 bfa_fcport_get_rx_bbcredit(bfa)); 1227 bfa_fcport_get_rx_bbcredit(bfa));
1366 1228
1367 bfa_fcxp_send(fcxp, NULL, fabric->vf_id, bfa_lps_get_tag(fabric->lps), 1229 bfa_fcxp_send(fcxp, NULL, fabric->vf_id, fabric->lps->lp_tag,
1368 BFA_FALSE, FC_CLASS_3, 1230 BFA_FALSE, FC_CLASS_3,
1369 reqlen, &fchs, bfa_fcs_fabric_flogiacc_comp, fabric, 1231 reqlen, &fchs, bfa_fcs_fabric_flogiacc_comp, fabric,
1370 FC_MAX_PDUSZ, 0); 1232 FC_MAX_PDUSZ, 0);
@@ -1455,7 +1317,7 @@ bfa_fcs_port_event_handler(void *cbarg, enum bfa_port_linkstate event)
1455 break; 1317 break;
1456 1318
1457 default: 1319 default:
1458 bfa_assert(0); 1320 WARN_ON(1);
1459 } 1321 }
1460} 1322}
1461 1323
@@ -1502,7 +1364,7 @@ bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf)
1502 * drop frame if vfid is unknown 1364 * drop frame if vfid is unknown
1503 */ 1365 */
1504 if (!fabric) { 1366 if (!fabric) {
1505 bfa_assert(0); 1367 WARN_ON(1);
1506 bfa_stats(fcs, uf.vfid_unknown); 1368 bfa_stats(fcs, uf.vfid_unknown);
1507 bfa_uf_free(uf); 1369 bfa_uf_free(uf);
1508 return; 1370 return;
diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h
index 9cb6a55977c3..0fd63168573f 100644
--- a/drivers/scsi/bfa/bfa_fcs.h
+++ b/drivers/scsi/bfa/bfa_fcs.h
@@ -27,6 +27,22 @@
27#define BFA_FCS_OS_STR_LEN 64 27#define BFA_FCS_OS_STR_LEN 64
28 28
29/* 29/*
30 * lps_pvt BFA LPS private functions
31 */
32
33enum bfa_lps_event {
34 BFA_LPS_SM_LOGIN = 1, /* login request from user */
35 BFA_LPS_SM_LOGOUT = 2, /* logout request from user */
36 BFA_LPS_SM_FWRSP = 3, /* f/w response to login/logout */
37 BFA_LPS_SM_RESUME = 4, /* space present in reqq queue */
38 BFA_LPS_SM_DELETE = 5, /* lps delete from user */
39 BFA_LPS_SM_OFFLINE = 6, /* Link is offline */
40 BFA_LPS_SM_RX_CVL = 7, /* Rx clear virtual link */
41 BFA_LPS_SM_SET_N2N_PID = 8, /* Set assigned PID for n2n */
42};
43
44
45/*
30 * !!! Only append to the enums defined here to avoid any versioning 46 * !!! Only append to the enums defined here to avoid any versioning
31 * !!! needed between trace utility and driver version 47 * !!! needed between trace utility and driver version
32 */ 48 */
@@ -41,13 +57,12 @@ enum {
41struct bfa_fcs_s; 57struct bfa_fcs_s;
42 58
43#define __fcs_min_cfg(__fcs) ((__fcs)->min_cfg) 59#define __fcs_min_cfg(__fcs) ((__fcs)->min_cfg)
44void bfa_fcs_modexit_comp(struct bfa_fcs_s *fcs);
45 60
46#define BFA_FCS_BRCD_SWITCH_OUI 0x051e 61#define BFA_FCS_BRCD_SWITCH_OUI 0x051e
47#define N2N_LOCAL_PID 0x010000 62#define N2N_LOCAL_PID 0x010000
48#define N2N_REMOTE_PID 0x020000 63#define N2N_REMOTE_PID 0x020000
49#define BFA_FCS_RETRY_TIMEOUT 2000 64#define BFA_FCS_RETRY_TIMEOUT 2000
50#define BFA_FCS_PID_IS_WKA(pid) ((bfa_os_ntoh3b(pid) > 0xFFF000) ? 1 : 0) 65#define BFA_FCS_PID_IS_WKA(pid) ((bfa_ntoh3b(pid) > 0xFFF000) ? 1 : 0)
51 66
52 67
53 68
@@ -109,7 +124,7 @@ struct bfa_fcs_lport_loop_s {
109 124
110struct bfa_fcs_lport_n2n_s { 125struct bfa_fcs_lport_n2n_s {
111 u32 rsvd; 126 u32 rsvd;
112 u16 reply_oxid; /* ox_id from the req flogi to be 127 __be16 reply_oxid; /* ox_id from the req flogi to be
113 *used in flogi acc */ 128 *used in flogi acc */
114 wwn_t rem_port_wwn; /* Attached port's wwn */ 129 wwn_t rem_port_wwn; /* Attached port's wwn */
115}; 130};
@@ -316,8 +331,6 @@ void bfa_fcs_lport_add_rport(struct bfa_fcs_lport_s *port,
316 struct bfa_fcs_rport_s *rport); 331 struct bfa_fcs_rport_s *rport);
317void bfa_fcs_lport_del_rport(struct bfa_fcs_lport_s *port, 332void bfa_fcs_lport_del_rport(struct bfa_fcs_lport_s *port,
318 struct bfa_fcs_rport_s *rport); 333 struct bfa_fcs_rport_s *rport);
319void bfa_fcs_lport_modinit(struct bfa_fcs_s *fcs);
320void bfa_fcs_lport_modexit(struct bfa_fcs_s *fcs);
321void bfa_fcs_lport_ns_init(struct bfa_fcs_lport_s *vport); 334void bfa_fcs_lport_ns_init(struct bfa_fcs_lport_s *vport);
322void bfa_fcs_lport_ns_offline(struct bfa_fcs_lport_s *vport); 335void bfa_fcs_lport_ns_offline(struct bfa_fcs_lport_s *vport);
323void bfa_fcs_lport_ns_online(struct bfa_fcs_lport_s *vport); 336void bfa_fcs_lport_ns_online(struct bfa_fcs_lport_s *vport);
@@ -359,9 +372,6 @@ bfa_status_t bfa_fcs_vport_start(struct bfa_fcs_vport_s *vport);
359bfa_status_t bfa_fcs_vport_stop(struct bfa_fcs_vport_s *vport); 372bfa_status_t bfa_fcs_vport_stop(struct bfa_fcs_vport_s *vport);
360void bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport, 373void bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport,
361 struct bfa_vport_attr_s *vport_attr); 374 struct bfa_vport_attr_s *vport_attr);
362void bfa_fcs_vport_get_stats(struct bfa_fcs_vport_s *vport,
363 struct bfa_vport_stats_s *vport_stats);
364void bfa_fcs_vport_clr_stats(struct bfa_fcs_vport_s *vport);
365struct bfa_fcs_vport_s *bfa_fcs_vport_lookup(struct bfa_fcs_s *fcs, 375struct bfa_fcs_vport_s *bfa_fcs_vport_lookup(struct bfa_fcs_s *fcs,
366 u16 vf_id, wwn_t vpwwn); 376 u16 vf_id, wwn_t vpwwn);
367void bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport); 377void bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport);
@@ -406,7 +416,7 @@ struct bfa_fcs_rport_s {
406 struct bfad_rport_s *rp_drv; /* driver peer instance */ 416 struct bfad_rport_s *rp_drv; /* driver peer instance */
407 u32 pid; /* port ID of rport */ 417 u32 pid; /* port ID of rport */
408 u16 maxfrsize; /* maximum frame size */ 418 u16 maxfrsize; /* maximum frame size */
409 u16 reply_oxid; /* OX_ID of inbound requests */ 419 __be16 reply_oxid; /* OX_ID of inbound requests */
410 enum fc_cos fc_cos; /* FC classes of service supp */ 420 enum fc_cos fc_cos; /* FC classes of service supp */
411 bfa_boolean_t cisc; /* CISC capable device */ 421 bfa_boolean_t cisc; /* CISC capable device */
412 bfa_boolean_t prlo; /* processing prlo or LOGO */ 422 bfa_boolean_t prlo; /* processing prlo or LOGO */
@@ -437,32 +447,18 @@ bfa_fcs_rport_get_halrport(struct bfa_fcs_rport_s *rport)
437/* 447/*
438 * bfa fcs rport API functions 448 * bfa fcs rport API functions
439 */ 449 */
440bfa_status_t bfa_fcs_rport_add(struct bfa_fcs_lport_s *port, wwn_t *pwwn,
441 struct bfa_fcs_rport_s *rport,
442 struct bfad_rport_s *rport_drv);
443bfa_status_t bfa_fcs_rport_remove(struct bfa_fcs_rport_s *rport);
444void bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
445 struct bfa_rport_attr_s *attr);
446void bfa_fcs_rport_get_stats(struct bfa_fcs_rport_s *rport,
447 struct bfa_rport_stats_s *stats);
448void bfa_fcs_rport_clear_stats(struct bfa_fcs_rport_s *rport);
449struct bfa_fcs_rport_s *bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port, 450struct bfa_fcs_rport_s *bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port,
450 wwn_t rpwwn); 451 wwn_t rpwwn);
451struct bfa_fcs_rport_s *bfa_fcs_rport_lookup_by_nwwn( 452struct bfa_fcs_rport_s *bfa_fcs_rport_lookup_by_nwwn(
452 struct bfa_fcs_lport_s *port, wwn_t rnwwn); 453 struct bfa_fcs_lport_s *port, wwn_t rnwwn);
453void bfa_fcs_rport_set_del_timeout(u8 rport_tmo); 454void bfa_fcs_rport_set_del_timeout(u8 rport_tmo);
454 455
455void bfa_fcs_rport_set_speed(struct bfa_fcs_rport_s *rport,
456 enum bfa_port_speed speed);
457void bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport, 456void bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport,
458 struct fchs_s *fchs, u16 len); 457 struct fchs_s *fchs, u16 len);
459void bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport); 458void bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport);
460 459
461struct bfa_fcs_rport_s *bfa_fcs_rport_create(struct bfa_fcs_lport_s *port, 460struct bfa_fcs_rport_s *bfa_fcs_rport_create(struct bfa_fcs_lport_s *port,
462 u32 pid); 461 u32 pid);
463void bfa_fcs_rport_delete(struct bfa_fcs_rport_s *rport);
464void bfa_fcs_rport_online(struct bfa_fcs_rport_s *rport);
465void bfa_fcs_rport_offline(struct bfa_fcs_rport_s *rport);
466void bfa_fcs_rport_start(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs, 462void bfa_fcs_rport_start(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs,
467 struct fc_logi_s *plogi_rsp); 463 struct fc_logi_s *plogi_rsp);
468void bfa_fcs_rport_plogi_create(struct bfa_fcs_lport_s *port, 464void bfa_fcs_rport_plogi_create(struct bfa_fcs_lport_s *port,
@@ -470,10 +466,8 @@ void bfa_fcs_rport_plogi_create(struct bfa_fcs_lport_s *port,
470 struct fc_logi_s *plogi); 466 struct fc_logi_s *plogi);
471void bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs, 467void bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs,
472 struct fc_logi_s *plogi); 468 struct fc_logi_s *plogi);
473void bfa_fcs_rport_logo_imp(struct bfa_fcs_rport_s *rport); 469void bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, __be16 ox_id);
474void bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, u16 ox_id);
475 470
476void bfa_fcs_rport_itnim_ack(struct bfa_fcs_rport_s *rport);
477void bfa_fcs_rport_itntm_ack(struct bfa_fcs_rport_s *rport); 471void bfa_fcs_rport_itntm_ack(struct bfa_fcs_rport_s *rport);
478void bfa_fcs_rport_fcptm_offline_done(struct bfa_fcs_rport_s *rport); 472void bfa_fcs_rport_fcptm_offline_done(struct bfa_fcs_rport_s *rport);
479int bfa_fcs_rport_get_state(struct bfa_fcs_rport_s *rport); 473int bfa_fcs_rport_get_state(struct bfa_fcs_rport_s *rport);
@@ -618,7 +612,7 @@ struct bfa_fcs_fdmi_hba_attr_s {
618 u8 option_rom_ver[BFA_VERSION_LEN]; 612 u8 option_rom_ver[BFA_VERSION_LEN];
619 u8 fw_version[8]; 613 u8 fw_version[8];
620 u8 os_name[256]; 614 u8 os_name[256];
621 u32 max_ct_pyld; 615 __be32 max_ct_pyld;
622}; 616};
623 617
624/* 618/*
@@ -626,9 +620,9 @@ struct bfa_fcs_fdmi_hba_attr_s {
626 */ 620 */
627struct bfa_fcs_fdmi_port_attr_s { 621struct bfa_fcs_fdmi_port_attr_s {
628 u8 supp_fc4_types[32]; /* supported FC4 types */ 622 u8 supp_fc4_types[32]; /* supported FC4 types */
629 u32 supp_speed; /* supported speed */ 623 __be32 supp_speed; /* supported speed */
630 u32 curr_speed; /* current Speed */ 624 __be32 curr_speed; /* current Speed */
631 u32 max_frm_size; /* max frame size */ 625 __be32 max_frm_size; /* max frame size */
632 u8 os_device_name[256]; /* OS device Name */ 626 u8 os_device_name[256]; /* OS device Name */
633 u8 host_name[256]; /* host name */ 627 u8 host_name[256]; /* host name */
634}; 628};
@@ -664,6 +658,57 @@ struct bfa_fcs_s {
664}; 658};
665 659
666/* 660/*
661 * fcs_fabric_sm fabric state machine functions
662 */
663
664/*
665 * Fabric state machine events
666 */
667enum bfa_fcs_fabric_event {
668 BFA_FCS_FABRIC_SM_CREATE = 1, /* create from driver */
669 BFA_FCS_FABRIC_SM_DELETE = 2, /* delete from driver */
670 BFA_FCS_FABRIC_SM_LINK_DOWN = 3, /* link down from port */
671 BFA_FCS_FABRIC_SM_LINK_UP = 4, /* link up from port */
672 BFA_FCS_FABRIC_SM_CONT_OP = 5, /* flogi/auth continue op */
673 BFA_FCS_FABRIC_SM_RETRY_OP = 6, /* flogi/auth retry op */
674 BFA_FCS_FABRIC_SM_NO_FABRIC = 7, /* from flogi/auth */
675 BFA_FCS_FABRIC_SM_PERF_EVFP = 8, /* from flogi/auth */
676 BFA_FCS_FABRIC_SM_ISOLATE = 9, /* from EVFP processing */
677 BFA_FCS_FABRIC_SM_NO_TAGGING = 10, /* no VFT tagging from EVFP */
678 BFA_FCS_FABRIC_SM_DELAYED = 11, /* timeout delay event */
679 BFA_FCS_FABRIC_SM_AUTH_FAILED = 12, /* auth failed */
680 BFA_FCS_FABRIC_SM_AUTH_SUCCESS = 13, /* auth successful */
681 BFA_FCS_FABRIC_SM_DELCOMP = 14, /* all vports deleted event */
682 BFA_FCS_FABRIC_SM_LOOPBACK = 15, /* Received our own FLOGI */
683 BFA_FCS_FABRIC_SM_START = 16, /* from driver */
684};
685
686/*
687 * fcs_rport_sm FCS rport state machine events
688 */
689
690enum rport_event {
691 RPSM_EVENT_PLOGI_SEND = 1, /* new rport; start with PLOGI */
692 RPSM_EVENT_PLOGI_RCVD = 2, /* Inbound PLOGI from remote port */
693 RPSM_EVENT_PLOGI_COMP = 3, /* PLOGI completed to rport */
694 RPSM_EVENT_LOGO_RCVD = 4, /* LOGO from remote device */
695 RPSM_EVENT_LOGO_IMP = 5, /* implicit logo for SLER */
696 RPSM_EVENT_FCXP_SENT = 6, /* Frame from has been sent */
697 RPSM_EVENT_DELETE = 7, /* RPORT delete request */
698 RPSM_EVENT_SCN = 8, /* state change notification */
699 RPSM_EVENT_ACCEPTED = 9, /* Good response from remote device */
700 RPSM_EVENT_FAILED = 10, /* Request to rport failed. */
701 RPSM_EVENT_TIMEOUT = 11, /* Rport SM timeout event */
702 RPSM_EVENT_HCB_ONLINE = 12, /* BFA rport online callback */
703 RPSM_EVENT_HCB_OFFLINE = 13, /* BFA rport offline callback */
704 RPSM_EVENT_FC4_OFFLINE = 14, /* FC-4 offline complete */
705 RPSM_EVENT_ADDRESS_CHANGE = 15, /* Rport's PID has changed */
706 RPSM_EVENT_ADDRESS_DISC = 16, /* Need to Discover rport's PID */
707 RPSM_EVENT_PRLO_RCVD = 17, /* PRLO from remote device */
708 RPSM_EVENT_PLOGI_RETRY = 18, /* Retry PLOGI continously */
709};
710
711/*
667 * bfa fcs API functions 712 * bfa fcs API functions
668 */ 713 */
669void bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, 714void bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa,
@@ -672,16 +717,12 @@ void bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa,
672void bfa_fcs_init(struct bfa_fcs_s *fcs); 717void bfa_fcs_init(struct bfa_fcs_s *fcs);
673void bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs, 718void bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
674 struct bfa_fcs_driver_info_s *driver_info); 719 struct bfa_fcs_driver_info_s *driver_info);
675void bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable);
676void bfa_fcs_exit(struct bfa_fcs_s *fcs); 720void bfa_fcs_exit(struct bfa_fcs_s *fcs);
677void bfa_fcs_trc_init(struct bfa_fcs_s *fcs, struct bfa_trc_mod_s *trcmod);
678void bfa_fcs_start(struct bfa_fcs_s *fcs);
679 721
680/* 722/*
681 * bfa fcs vf public functions 723 * bfa fcs vf public functions
682 */ 724 */
683bfa_fcs_vf_t *bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id); 725bfa_fcs_vf_t *bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id);
684u16 bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric);
685 726
686/* 727/*
687 * fabric protected interface functions 728 * fabric protected interface functions
@@ -689,32 +730,29 @@ u16 bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric);
689void bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs); 730void bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs);
690void bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs); 731void bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs);
691void bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs); 732void bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs);
692void bfa_fcs_fabric_modsusp(struct bfa_fcs_s *fcs);
693void bfa_fcs_fabric_link_up(struct bfa_fcs_fabric_s *fabric); 733void bfa_fcs_fabric_link_up(struct bfa_fcs_fabric_s *fabric);
694void bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric); 734void bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric);
695void bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric, 735void bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric,
696 struct bfa_fcs_vport_s *vport); 736 struct bfa_fcs_vport_s *vport);
697void bfa_fcs_fabric_delvport(struct bfa_fcs_fabric_s *fabric, 737void bfa_fcs_fabric_delvport(struct bfa_fcs_fabric_s *fabric,
698 struct bfa_fcs_vport_s *vport); 738 struct bfa_fcs_vport_s *vport);
699int bfa_fcs_fabric_is_online(struct bfa_fcs_fabric_s *fabric);
700struct bfa_fcs_vport_s *bfa_fcs_fabric_vport_lookup( 739struct bfa_fcs_vport_s *bfa_fcs_fabric_vport_lookup(
701 struct bfa_fcs_fabric_s *fabric, wwn_t pwwn); 740 struct bfa_fcs_fabric_s *fabric, wwn_t pwwn);
702void bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs); 741void bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs);
703void bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, 742void bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric,
704 struct fchs_s *fchs, u16 len); 743 struct fchs_s *fchs, u16 len);
705bfa_boolean_t bfa_fcs_fabric_is_loopback(struct bfa_fcs_fabric_s *fabric);
706bfa_boolean_t bfa_fcs_fabric_is_auth_failed(struct bfa_fcs_fabric_s *fabric);
707enum bfa_port_type bfa_fcs_fabric_port_type(struct bfa_fcs_fabric_s *fabric);
708void bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric); 744void bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric);
709void bfa_fcs_fabric_port_delete_comp(struct bfa_fcs_fabric_s *fabric);
710bfa_status_t bfa_fcs_fabric_addvf(struct bfa_fcs_fabric_s *vf,
711 struct bfa_fcs_s *fcs, struct bfa_lport_cfg_s *port_cfg,
712 struct bfad_vf_s *vf_drv);
713void bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric, 745void bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
714 wwn_t fabric_name); 746 wwn_t fabric_name);
715u16 bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric); 747u16 bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric);
716void bfa_fcs_uf_attach(struct bfa_fcs_s *fcs); 748void bfa_fcs_uf_attach(struct bfa_fcs_s *fcs);
717void bfa_fcs_port_attach(struct bfa_fcs_s *fcs); 749void bfa_fcs_port_attach(struct bfa_fcs_s *fcs);
750void bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
751 enum bfa_fcs_fabric_event event);
752void bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric,
753 enum bfa_fcs_fabric_event event);
754void bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric,
755 enum bfa_fcs_fabric_event event);
718 756
719/* 757/*
720 * BFA FCS callback interfaces 758 * BFA FCS callback interfaces
diff --git a/drivers/scsi/bfa/bfa_fcs_fcpim.c b/drivers/scsi/bfa/bfa_fcs_fcpim.c
index 413b58eef93a..e7b49f4cb51f 100644
--- a/drivers/scsi/bfa/bfa_fcs_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcs_fcpim.c
@@ -19,9 +19,9 @@
19 * fcpim.c - FCP initiator mode i-t nexus state machine 19 * fcpim.c - FCP initiator mode i-t nexus state machine
20 */ 20 */
21 21
22#include "bfad_drv.h"
22#include "bfa_fcs.h" 23#include "bfa_fcs.h"
23#include "bfa_fcbuild.h" 24#include "bfa_fcbuild.h"
24#include "bfad_drv.h"
25#include "bfad_im.h" 25#include "bfad_im.h"
26 26
27BFA_TRC_FILE(FCS, FCPIM); 27BFA_TRC_FILE(FCS, FCPIM);
@@ -103,7 +103,7 @@ bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim,
103 break; 103 break;
104 104
105 case BFA_FCS_ITNIM_SM_OFFLINE: 105 case BFA_FCS_ITNIM_SM_OFFLINE:
106 bfa_fcs_rport_itnim_ack(itnim->rport); 106 bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
107 break; 107 break;
108 108
109 case BFA_FCS_ITNIM_SM_INITIATOR: 109 case BFA_FCS_ITNIM_SM_INITIATOR:
@@ -140,7 +140,7 @@ bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim,
140 case BFA_FCS_ITNIM_SM_OFFLINE: 140 case BFA_FCS_ITNIM_SM_OFFLINE:
141 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); 141 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
142 bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe); 142 bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe);
143 bfa_fcs_rport_itnim_ack(itnim->rport); 143 bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
144 break; 144 break;
145 145
146 case BFA_FCS_ITNIM_SM_DELETE: 146 case BFA_FCS_ITNIM_SM_DELETE:
@@ -181,7 +181,7 @@ bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim,
181 case BFA_FCS_ITNIM_SM_OFFLINE: 181 case BFA_FCS_ITNIM_SM_OFFLINE:
182 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); 182 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
183 bfa_fcxp_discard(itnim->fcxp); 183 bfa_fcxp_discard(itnim->fcxp);
184 bfa_fcs_rport_itnim_ack(itnim->rport); 184 bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
185 break; 185 break;
186 186
187 case BFA_FCS_ITNIM_SM_INITIATOR: 187 case BFA_FCS_ITNIM_SM_INITIATOR:
@@ -217,7 +217,7 @@ bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim,
217 } else { 217 } else {
218 /* invoke target offline */ 218 /* invoke target offline */
219 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); 219 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
220 bfa_fcs_rport_logo_imp(itnim->rport); 220 bfa_sm_send_event(itnim->rport, RPSM_EVENT_LOGO_IMP);
221 } 221 }
222 break; 222 break;
223 223
@@ -225,7 +225,7 @@ bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim,
225 case BFA_FCS_ITNIM_SM_OFFLINE: 225 case BFA_FCS_ITNIM_SM_OFFLINE:
226 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); 226 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
227 bfa_timer_stop(&itnim->timer); 227 bfa_timer_stop(&itnim->timer);
228 bfa_fcs_rport_itnim_ack(itnim->rport); 228 bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
229 break; 229 break;
230 230
231 case BFA_FCS_ITNIM_SM_INITIATOR: 231 case BFA_FCS_ITNIM_SM_INITIATOR:
@@ -269,7 +269,7 @@ bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
269 case BFA_FCS_ITNIM_SM_OFFLINE: 269 case BFA_FCS_ITNIM_SM_OFFLINE:
270 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); 270 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
271 bfa_itnim_offline(itnim->bfa_itnim); 271 bfa_itnim_offline(itnim->bfa_itnim);
272 bfa_fcs_rport_itnim_ack(itnim->rport); 272 bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
273 break; 273 break;
274 274
275 case BFA_FCS_ITNIM_SM_DELETE: 275 case BFA_FCS_ITNIM_SM_DELETE:
@@ -330,7 +330,7 @@ bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim,
330 switch (event) { 330 switch (event) {
331 case BFA_FCS_ITNIM_SM_HCB_OFFLINE: 331 case BFA_FCS_ITNIM_SM_HCB_OFFLINE:
332 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); 332 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
333 bfa_fcs_rport_itnim_ack(itnim->rport); 333 bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
334 break; 334 break;
335 335
336 case BFA_FCS_ITNIM_SM_DELETE: 336 case BFA_FCS_ITNIM_SM_DELETE:
@@ -358,7 +358,7 @@ bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
358 switch (event) { 358 switch (event) {
359 case BFA_FCS_ITNIM_SM_OFFLINE: 359 case BFA_FCS_ITNIM_SM_OFFLINE:
360 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); 360 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
361 bfa_fcs_rport_itnim_ack(itnim->rport); 361 bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
362 break; 362 break;
363 363
364 case BFA_FCS_ITNIM_SM_RSP_ERROR: 364 case BFA_FCS_ITNIM_SM_RSP_ERROR:
@@ -536,7 +536,7 @@ bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport)
536 if (bfa_itnim == NULL) { 536 if (bfa_itnim == NULL) {
537 bfa_trc(port->fcs, rport->pwwn); 537 bfa_trc(port->fcs, rport->pwwn);
538 bfa_fcb_itnim_free(port->fcs->bfad, itnim_drv); 538 bfa_fcb_itnim_free(port->fcs->bfad, itnim_drv);
539 bfa_assert(0); 539 WARN_ON(1);
540 return NULL; 540 return NULL;
541 } 541 }
542 542
@@ -688,7 +688,7 @@ bfa_cb_itnim_sler(void *cb_arg)
688 688
689 itnim->stats.sler++; 689 itnim->stats.sler++;
690 bfa_trc(itnim->fcs, itnim->rport->pwwn); 690 bfa_trc(itnim->fcs, itnim->rport->pwwn);
691 bfa_fcs_rport_logo_imp(itnim->rport); 691 bfa_sm_send_event(itnim->rport, RPSM_EVENT_LOGO_IMP);
692} 692}
693 693
694struct bfa_fcs_itnim_s * 694struct bfa_fcs_itnim_s *
@@ -700,7 +700,7 @@ bfa_fcs_itnim_lookup(struct bfa_fcs_lport_s *port, wwn_t rpwwn)
700 if (!rport) 700 if (!rport)
701 return NULL; 701 return NULL;
702 702
703 bfa_assert(rport->itnim != NULL); 703 WARN_ON(rport->itnim == NULL);
704 return rport->itnim; 704 return rport->itnim;
705} 705}
706 706
@@ -729,7 +729,7 @@ bfa_fcs_itnim_stats_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn,
729{ 729{
730 struct bfa_fcs_itnim_s *itnim = NULL; 730 struct bfa_fcs_itnim_s *itnim = NULL;
731 731
732 bfa_assert(port != NULL); 732 WARN_ON(port == NULL);
733 733
734 itnim = bfa_fcs_itnim_lookup(port, rpwwn); 734 itnim = bfa_fcs_itnim_lookup(port, rpwwn);
735 735
@@ -746,7 +746,7 @@ bfa_fcs_itnim_stats_clear(struct bfa_fcs_lport_s *port, wwn_t rpwwn)
746{ 746{
747 struct bfa_fcs_itnim_s *itnim = NULL; 747 struct bfa_fcs_itnim_s *itnim = NULL;
748 748
749 bfa_assert(port != NULL); 749 WARN_ON(port == NULL);
750 750
751 itnim = bfa_fcs_itnim_lookup(port, rpwwn); 751 itnim = bfa_fcs_itnim_lookup(port, rpwwn);
752 752
@@ -778,6 +778,6 @@ bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim,
778 break; 778 break;
779 779
780 default: 780 default:
781 bfa_assert(0); 781 WARN_ON(1);
782 } 782 }
783} 783}
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index 8d651309302b..4e2eb92ba028 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -15,10 +15,10 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18#include "bfad_drv.h"
18#include "bfa_fcs.h" 19#include "bfa_fcs.h"
19#include "bfa_fcbuild.h" 20#include "bfa_fcbuild.h"
20#include "bfa_fc.h" 21#include "bfa_fc.h"
21#include "bfad_drv.h"
22 22
23BFA_TRC_FILE(FCS, PORT); 23BFA_TRC_FILE(FCS, PORT);
24 24
@@ -159,7 +159,7 @@ bfa_fcs_lport_sm_online(
159 bfa_sm_set_state(port, bfa_fcs_lport_sm_deleting); 159 bfa_sm_set_state(port, bfa_fcs_lport_sm_deleting);
160 list_for_each_safe(qe, qen, &port->rport_q) { 160 list_for_each_safe(qe, qen, &port->rport_q) {
161 rport = (struct bfa_fcs_rport_s *) qe; 161 rport = (struct bfa_fcs_rport_s *) qe;
162 bfa_fcs_rport_delete(rport); 162 bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
163 } 163 }
164 } 164 }
165 break; 165 break;
@@ -197,7 +197,7 @@ bfa_fcs_lport_sm_offline(
197 bfa_sm_set_state(port, bfa_fcs_lport_sm_deleting); 197 bfa_sm_set_state(port, bfa_fcs_lport_sm_deleting);
198 list_for_each_safe(qe, qen, &port->rport_q) { 198 list_for_each_safe(qe, qen, &port->rport_q) {
199 rport = (struct bfa_fcs_rport_s *) qe; 199 rport = (struct bfa_fcs_rport_s *) qe;
200 bfa_fcs_rport_delete(rport); 200 bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
201 } 201 }
202 } 202 }
203 break; 203 break;
@@ -309,6 +309,7 @@ bfa_fcs_lport_plogi(struct bfa_fcs_lport_s *port,
309 return; 309 return;
310 } 310 }
311 port->pid = rx_fchs->d_id; 311 port->pid = rx_fchs->d_id;
312 bfa_lps_set_n2n_pid(port->fabric->lps, rx_fchs->d_id);
312 } 313 }
313 314
314 /* 315 /*
@@ -323,6 +324,7 @@ bfa_fcs_lport_plogi(struct bfa_fcs_lport_s *port,
323 (memcmp((void *)&bfa_fcs_lport_get_pwwn(port), 324 (memcmp((void *)&bfa_fcs_lport_get_pwwn(port),
324 (void *)&plogi->port_name, sizeof(wwn_t)) < 0)) { 325 (void *)&plogi->port_name, sizeof(wwn_t)) < 0)) {
325 port->pid = rx_fchs->d_id; 326 port->pid = rx_fchs->d_id;
327 bfa_lps_set_n2n_pid(port->fabric->lps, rx_fchs->d_id);
326 rport->pid = rx_fchs->s_id; 328 rport->pid = rx_fchs->s_id;
327 } 329 }
328 bfa_fcs_rport_plogi(rport, rx_fchs, plogi); 330 bfa_fcs_rport_plogi(rport, rx_fchs, plogi);
@@ -349,8 +351,8 @@ bfa_fcs_lport_plogi(struct bfa_fcs_lport_s *port,
349 * This is a different device with the same pid. Old device 351 * This is a different device with the same pid. Old device
350 * disappeared. Send implicit LOGO to old device. 352 * disappeared. Send implicit LOGO to old device.
351 */ 353 */
352 bfa_assert(rport->pwwn != plogi->port_name); 354 WARN_ON(rport->pwwn == plogi->port_name);
353 bfa_fcs_rport_logo_imp(rport); 355 bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP);
354 356
355 /* 357 /*
356 * Inbound PLOGI from a new device (with old PID). 358 * Inbound PLOGI from a new device (with old PID).
@@ -362,7 +364,7 @@ bfa_fcs_lport_plogi(struct bfa_fcs_lport_s *port,
362 /* 364 /*
363 * PLOGI crossing each other. 365 * PLOGI crossing each other.
364 */ 366 */
365 bfa_assert(rport->pwwn == WWN_NULL); 367 WARN_ON(rport->pwwn != WWN_NULL);
366 bfa_fcs_rport_plogi(rport, rx_fchs, plogi); 368 bfa_fcs_rport_plogi(rport, rx_fchs, plogi);
367} 369}
368 370
@@ -511,7 +513,8 @@ bfa_fcs_lport_offline_actions(struct bfa_fcs_lport_s *port)
511 __port_action[port->fabric->fab_type].offline(port); 513 __port_action[port->fabric->fab_type].offline(port);
512 514
513 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port)); 515 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
514 if (bfa_fcs_fabric_is_online(port->fabric) == BFA_TRUE) 516 if (bfa_sm_cmp_state(port->fabric,
517 bfa_fcs_fabric_sm_online) == BFA_TRUE)
515 BFA_LOG(KERN_ERR, bfad, bfa_log_level, 518 BFA_LOG(KERN_ERR, bfad, bfa_log_level,
516 "Logical port lost fabric connectivity: WWN = %s Role = %s\n", 519 "Logical port lost fabric connectivity: WWN = %s Role = %s\n",
517 lpwwn_buf, "Initiator"); 520 lpwwn_buf, "Initiator");
@@ -522,26 +525,26 @@ bfa_fcs_lport_offline_actions(struct bfa_fcs_lport_s *port)
522 525
523 list_for_each_safe(qe, qen, &port->rport_q) { 526 list_for_each_safe(qe, qen, &port->rport_q) {
524 rport = (struct bfa_fcs_rport_s *) qe; 527 rport = (struct bfa_fcs_rport_s *) qe;
525 bfa_fcs_rport_offline(rport); 528 bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP);
526 } 529 }
527} 530}
528 531
529static void 532static void
530bfa_fcs_lport_unknown_init(struct bfa_fcs_lport_s *port) 533bfa_fcs_lport_unknown_init(struct bfa_fcs_lport_s *port)
531{ 534{
532 bfa_assert(0); 535 WARN_ON(1);
533} 536}
534 537
535static void 538static void
536bfa_fcs_lport_unknown_online(struct bfa_fcs_lport_s *port) 539bfa_fcs_lport_unknown_online(struct bfa_fcs_lport_s *port)
537{ 540{
538 bfa_assert(0); 541 WARN_ON(1);
539} 542}
540 543
541static void 544static void
542bfa_fcs_lport_unknown_offline(struct bfa_fcs_lport_s *port) 545bfa_fcs_lport_unknown_offline(struct bfa_fcs_lport_s *port)
543{ 546{
544 bfa_assert(0); 547 WARN_ON(1);
545} 548}
546 549
547static void 550static void
@@ -584,33 +587,11 @@ bfa_fcs_lport_deleted(struct bfa_fcs_lport_s *port)
584 port->vport ? port->vport->vport_drv : NULL); 587 port->vport ? port->vport->vport_drv : NULL);
585 bfa_fcs_vport_delete_comp(port->vport); 588 bfa_fcs_vport_delete_comp(port->vport);
586 } else { 589 } else {
587 bfa_fcs_fabric_port_delete_comp(port->fabric); 590 bfa_wc_down(&port->fabric->wc);
588 } 591 }
589} 592}
590 593
591 594
592
593/*
594 * fcs_lport_api BFA FCS port API
595 */
596/*
597 * Module initialization
598 */
599void
600bfa_fcs_lport_modinit(struct bfa_fcs_s *fcs)
601{
602
603}
604
605/*
606 * Module cleanup
607 */
608void
609bfa_fcs_lport_modexit(struct bfa_fcs_s *fcs)
610{
611 bfa_fcs_modexit_comp(fcs);
612}
613
614/* 595/*
615 * Unsolicited frame receive handling. 596 * Unsolicited frame receive handling.
616 */ 597 */
@@ -623,6 +604,7 @@ bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport,
623 struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1); 604 struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
624 605
625 bfa_stats(lport, uf_recvs); 606 bfa_stats(lport, uf_recvs);
607 bfa_trc(lport->fcs, fchs->type);
626 608
627 if (!bfa_fcs_lport_is_online(lport)) { 609 if (!bfa_fcs_lport_is_online(lport)) {
628 bfa_stats(lport, uf_recv_drops); 610 bfa_stats(lport, uf_recv_drops);
@@ -682,8 +664,11 @@ bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport,
682 * Only handles ELS frames for now. 664 * Only handles ELS frames for now.
683 */ 665 */
684 if (fchs->type != FC_TYPE_ELS) { 666 if (fchs->type != FC_TYPE_ELS) {
685 bfa_trc(lport->fcs, fchs->type); 667 bfa_trc(lport->fcs, fchs->s_id);
686 bfa_assert(0); 668 bfa_trc(lport->fcs, fchs->d_id);
669 /* ignore type FC_TYPE_FC_FSS */
670 if (fchs->type != FC_TYPE_FC_FSS)
671 bfa_sm_fault(lport->fcs, fchs->type);
687 return; 672 return;
688 } 673 }
689 674
@@ -792,7 +777,7 @@ bfa_fcs_lport_del_rport(
792 struct bfa_fcs_lport_s *port, 777 struct bfa_fcs_lport_s *port,
793 struct bfa_fcs_rport_s *rport) 778 struct bfa_fcs_rport_s *rport)
794{ 779{
795 bfa_assert(bfa_q_is_on_q(&port->rport_q, rport)); 780 WARN_ON(!bfa_q_is_on_q(&port->rport_q, rport));
796 list_del(&rport->qe); 781 list_del(&rport->qe);
797 port->num_rports--; 782 port->num_rports--;
798 783
@@ -850,8 +835,8 @@ bfa_fcs_lport_attach(struct bfa_fcs_lport_s *lport, struct bfa_fcs_s *fcs,
850 lport->fcs = fcs; 835 lport->fcs = fcs;
851 lport->fabric = bfa_fcs_vf_lookup(fcs, vf_id); 836 lport->fabric = bfa_fcs_vf_lookup(fcs, vf_id);
852 lport->vport = vport; 837 lport->vport = vport;
853 lport->lp_tag = (vport) ? bfa_lps_get_tag(vport->lps) : 838 lport->lp_tag = (vport) ? vport->lps->lp_tag :
854 bfa_lps_get_tag(lport->fabric->lps); 839 lport->fabric->lps->lp_tag;
855 840
856 INIT_LIST_HEAD(&lport->rport_q); 841 INIT_LIST_HEAD(&lport->rport_q);
857 lport->num_rports = 0; 842 lport->num_rports = 0;
@@ -903,10 +888,12 @@ bfa_fcs_lport_get_attr(
903 port_attr->port_cfg = port->port_cfg; 888 port_attr->port_cfg = port->port_cfg;
904 889
905 if (port->fabric) { 890 if (port->fabric) {
906 port_attr->port_type = bfa_fcs_fabric_port_type(port->fabric); 891 port_attr->port_type = port->fabric->oper_type;
907 port_attr->loopback = bfa_fcs_fabric_is_loopback(port->fabric); 892 port_attr->loopback = bfa_sm_cmp_state(port->fabric,
893 bfa_fcs_fabric_sm_loopback);
908 port_attr->authfail = 894 port_attr->authfail =
909 bfa_fcs_fabric_is_auth_failed(port->fabric); 895 bfa_sm_cmp_state(port->fabric,
896 bfa_fcs_fabric_sm_auth_failed);
910 port_attr->fabric_name = bfa_fcs_lport_get_fabric_name(port); 897 port_attr->fabric_name = bfa_fcs_lport_get_fabric_name(port);
911 memcpy(port_attr->fabric_ip_addr, 898 memcpy(port_attr->fabric_ip_addr,
912 bfa_fcs_lport_get_fabric_ipaddr(port), 899 bfa_fcs_lport_get_fabric_ipaddr(port),
@@ -915,10 +902,10 @@ bfa_fcs_lport_get_attr(
915 if (port->vport != NULL) { 902 if (port->vport != NULL) {
916 port_attr->port_type = BFA_PORT_TYPE_VPORT; 903 port_attr->port_type = BFA_PORT_TYPE_VPORT;
917 port_attr->fpma_mac = 904 port_attr->fpma_mac =
918 bfa_lps_get_lp_mac(port->vport->lps); 905 port->vport->lps->lp_mac;
919 } else { 906 } else {
920 port_attr->fpma_mac = 907 port_attr->fpma_mac =
921 bfa_lps_get_lp_mac(port->fabric->lps); 908 port->fabric->lps->lp_mac;
922 } 909 }
923 } else { 910 } else {
924 port_attr->port_type = BFA_PORT_TYPE_UNKNOWN; 911 port_attr->port_type = BFA_PORT_TYPE_UNKNOWN;
@@ -998,6 +985,7 @@ bfa_fcs_lport_n2n_online(struct bfa_fcs_lport_s *port)
998 ((void *)&pcfg->pwwn, (void *)&n2n_port->rem_port_wwn, 985 ((void *)&pcfg->pwwn, (void *)&n2n_port->rem_port_wwn,
999 sizeof(wwn_t)) > 0) { 986 sizeof(wwn_t)) > 0) {
1000 port->pid = N2N_LOCAL_PID; 987 port->pid = N2N_LOCAL_PID;
988 bfa_lps_set_n2n_pid(port->fabric->lps, N2N_LOCAL_PID);
1001 /* 989 /*
1002 * First, check if we know the device by pwwn. 990 * First, check if we know the device by pwwn.
1003 */ 991 */
@@ -1007,7 +995,7 @@ bfa_fcs_lport_n2n_online(struct bfa_fcs_lport_s *port)
1007 bfa_trc(port->fcs, rport->pid); 995 bfa_trc(port->fcs, rport->pid);
1008 bfa_trc(port->fcs, rport->pwwn); 996 bfa_trc(port->fcs, rport->pwwn);
1009 rport->pid = N2N_REMOTE_PID; 997 rport->pid = N2N_REMOTE_PID;
1010 bfa_fcs_rport_online(rport); 998 bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_SEND);
1011 return; 999 return;
1012 } 1000 }
1013 1001
@@ -1017,10 +1005,10 @@ bfa_fcs_lport_n2n_online(struct bfa_fcs_lport_s *port)
1017 */ 1005 */
1018 if (port->num_rports > 0) { 1006 if (port->num_rports > 0) {
1019 rport = bfa_fcs_lport_get_rport_by_pid(port, 0); 1007 rport = bfa_fcs_lport_get_rport_by_pid(port, 0);
1020 bfa_assert(rport != NULL); 1008 WARN_ON(rport == NULL);
1021 if (rport) { 1009 if (rport) {
1022 bfa_trc(port->fcs, rport->pwwn); 1010 bfa_trc(port->fcs, rport->pwwn);
1023 bfa_fcs_rport_delete(rport); 1011 bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
1024 } 1012 }
1025 } 1013 }
1026 bfa_fcs_rport_create(port, N2N_REMOTE_PID); 1014 bfa_fcs_rport_create(port, N2N_REMOTE_PID);
@@ -1569,6 +1557,7 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
1569 struct fdmi_attr_s *attr; 1557 struct fdmi_attr_s *attr;
1570 u8 *curr_ptr; 1558 u8 *curr_ptr;
1571 u16 len, count; 1559 u16 len, count;
1560 u16 templen;
1572 1561
1573 /* 1562 /*
1574 * get hba attributes 1563 * get hba attributes
@@ -1594,69 +1583,69 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
1594 */ 1583 */
1595 attr = (struct fdmi_attr_s *) curr_ptr; 1584 attr = (struct fdmi_attr_s *) curr_ptr;
1596 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_NODENAME); 1585 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_NODENAME);
1597 attr->len = sizeof(wwn_t); 1586 templen = sizeof(wwn_t);
1598 memcpy(attr->value, &bfa_fcs_lport_get_nwwn(port), attr->len); 1587 memcpy(attr->value, &bfa_fcs_lport_get_nwwn(port), templen);
1599 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1588 curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
1600 len += attr->len; 1589 len += templen;
1601 count++; 1590 count++;
1602 attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + 1591 attr->len = cpu_to_be16(templen + sizeof(attr->type) +
1603 sizeof(attr->len)); 1592 sizeof(templen));
1604 1593
1605 /* 1594 /*
1606 * Manufacturer 1595 * Manufacturer
1607 */ 1596 */
1608 attr = (struct fdmi_attr_s *) curr_ptr; 1597 attr = (struct fdmi_attr_s *) curr_ptr;
1609 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MANUFACTURER); 1598 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MANUFACTURER);
1610 attr->len = (u16) strlen(fcs_hba_attr->manufacturer); 1599 templen = (u16) strlen(fcs_hba_attr->manufacturer);
1611 memcpy(attr->value, fcs_hba_attr->manufacturer, attr->len); 1600 memcpy(attr->value, fcs_hba_attr->manufacturer, templen);
1612 attr->len = fc_roundup(attr->len, sizeof(u32)); 1601 templen = fc_roundup(templen, sizeof(u32));
1613 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1602 curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
1614 len += attr->len; 1603 len += templen;
1615 count++; 1604 count++;
1616 attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + 1605 attr->len = cpu_to_be16(templen + sizeof(attr->type) +
1617 sizeof(attr->len)); 1606 sizeof(templen));
1618 1607
1619 /* 1608 /*
1620 * Serial Number 1609 * Serial Number
1621 */ 1610 */
1622 attr = (struct fdmi_attr_s *) curr_ptr; 1611 attr = (struct fdmi_attr_s *) curr_ptr;
1623 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_SERIALNUM); 1612 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_SERIALNUM);
1624 attr->len = (u16) strlen(fcs_hba_attr->serial_num); 1613 templen = (u16) strlen(fcs_hba_attr->serial_num);
1625 memcpy(attr->value, fcs_hba_attr->serial_num, attr->len); 1614 memcpy(attr->value, fcs_hba_attr->serial_num, templen);
1626 attr->len = fc_roundup(attr->len, sizeof(u32)); 1615 templen = fc_roundup(templen, sizeof(u32));
1627 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1616 curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
1628 len += attr->len; 1617 len += templen;
1629 count++; 1618 count++;
1630 attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + 1619 attr->len = cpu_to_be16(templen + sizeof(attr->type) +
1631 sizeof(attr->len)); 1620 sizeof(templen));
1632 1621
1633 /* 1622 /*
1634 * Model 1623 * Model
1635 */ 1624 */
1636 attr = (struct fdmi_attr_s *) curr_ptr; 1625 attr = (struct fdmi_attr_s *) curr_ptr;
1637 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL); 1626 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL);
1638 attr->len = (u16) strlen(fcs_hba_attr->model); 1627 templen = (u16) strlen(fcs_hba_attr->model);
1639 memcpy(attr->value, fcs_hba_attr->model, attr->len); 1628 memcpy(attr->value, fcs_hba_attr->model, templen);
1640 attr->len = fc_roundup(attr->len, sizeof(u32)); 1629 templen = fc_roundup(templen, sizeof(u32));
1641 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1630 curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
1642 len += attr->len; 1631 len += templen;
1643 count++; 1632 count++;
1644 attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + 1633 attr->len = cpu_to_be16(templen + sizeof(attr->type) +
1645 sizeof(attr->len)); 1634 sizeof(templen));
1646 1635
1647 /* 1636 /*
1648 * Model Desc 1637 * Model Desc
1649 */ 1638 */
1650 attr = (struct fdmi_attr_s *) curr_ptr; 1639 attr = (struct fdmi_attr_s *) curr_ptr;
1651 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL_DESC); 1640 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL_DESC);
1652 attr->len = (u16) strlen(fcs_hba_attr->model_desc); 1641 templen = (u16) strlen(fcs_hba_attr->model_desc);
1653 memcpy(attr->value, fcs_hba_attr->model_desc, attr->len); 1642 memcpy(attr->value, fcs_hba_attr->model_desc, templen);
1654 attr->len = fc_roundup(attr->len, sizeof(u32)); 1643 templen = fc_roundup(templen, sizeof(u32));
1655 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1644 curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
1656 len += attr->len; 1645 len += templen;
1657 count++; 1646 count++;
1658 attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + 1647 attr->len = cpu_to_be16(templen + sizeof(attr->type) +
1659 sizeof(attr->len)); 1648 sizeof(templen));
1660 1649
1661 /* 1650 /*
1662 * H/W Version 1651 * H/W Version
@@ -1664,14 +1653,14 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
1664 if (fcs_hba_attr->hw_version[0] != '\0') { 1653 if (fcs_hba_attr->hw_version[0] != '\0') {
1665 attr = (struct fdmi_attr_s *) curr_ptr; 1654 attr = (struct fdmi_attr_s *) curr_ptr;
1666 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_HW_VERSION); 1655 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_HW_VERSION);
1667 attr->len = (u16) strlen(fcs_hba_attr->hw_version); 1656 templen = (u16) strlen(fcs_hba_attr->hw_version);
1668 memcpy(attr->value, fcs_hba_attr->hw_version, attr->len); 1657 memcpy(attr->value, fcs_hba_attr->hw_version, templen);
1669 attr->len = fc_roundup(attr->len, sizeof(u32)); 1658 templen = fc_roundup(templen, sizeof(u32));
1670 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1659 curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
1671 len += attr->len; 1660 len += templen;
1672 count++; 1661 count++;
1673 attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + 1662 attr->len = cpu_to_be16(templen + sizeof(attr->type) +
1674 sizeof(attr->len)); 1663 sizeof(templen));
1675 } 1664 }
1676 1665
1677 /* 1666 /*
@@ -1679,14 +1668,14 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
1679 */ 1668 */
1680 attr = (struct fdmi_attr_s *) curr_ptr; 1669 attr = (struct fdmi_attr_s *) curr_ptr;
1681 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_DRIVER_VERSION); 1670 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_DRIVER_VERSION);
1682 attr->len = (u16) strlen(fcs_hba_attr->driver_version); 1671 templen = (u16) strlen(fcs_hba_attr->driver_version);
1683 memcpy(attr->value, fcs_hba_attr->driver_version, attr->len); 1672 memcpy(attr->value, fcs_hba_attr->driver_version, templen);
1684 attr->len = fc_roundup(attr->len, sizeof(u32)); 1673 templen = fc_roundup(templen, sizeof(u32));
1685 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1674 curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
1686 len += attr->len;; 1675 len += templen;;
1687 count++; 1676 count++;
1688 attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + 1677 attr->len = cpu_to_be16(templen + sizeof(attr->type) +
1689 sizeof(attr->len)); 1678 sizeof(templen));
1690 1679
1691 /* 1680 /*
1692 * Option Rom Version 1681 * Option Rom Version
@@ -1694,14 +1683,14 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
1694 if (fcs_hba_attr->option_rom_ver[0] != '\0') { 1683 if (fcs_hba_attr->option_rom_ver[0] != '\0') {
1695 attr = (struct fdmi_attr_s *) curr_ptr; 1684 attr = (struct fdmi_attr_s *) curr_ptr;
1696 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_ROM_VERSION); 1685 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_ROM_VERSION);
1697 attr->len = (u16) strlen(fcs_hba_attr->option_rom_ver); 1686 templen = (u16) strlen(fcs_hba_attr->option_rom_ver);
1698 memcpy(attr->value, fcs_hba_attr->option_rom_ver, attr->len); 1687 memcpy(attr->value, fcs_hba_attr->option_rom_ver, templen);
1699 attr->len = fc_roundup(attr->len, sizeof(u32)); 1688 templen = fc_roundup(templen, sizeof(u32));
1700 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1689 curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
1701 len += attr->len; 1690 len += templen;
1702 count++; 1691 count++;
1703 attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + 1692 attr->len = cpu_to_be16(templen + sizeof(attr->type) +
1704 sizeof(attr->len)); 1693 sizeof(templen));
1705 } 1694 }
1706 1695
1707 /* 1696 /*
@@ -1709,14 +1698,14 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
1709 */ 1698 */
1710 attr = (struct fdmi_attr_s *) curr_ptr; 1699 attr = (struct fdmi_attr_s *) curr_ptr;
1711 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_FW_VERSION); 1700 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_FW_VERSION);
1712 attr->len = (u16) strlen(fcs_hba_attr->driver_version); 1701 templen = (u16) strlen(fcs_hba_attr->driver_version);
1713 memcpy(attr->value, fcs_hba_attr->driver_version, attr->len); 1702 memcpy(attr->value, fcs_hba_attr->driver_version, templen);
1714 attr->len = fc_roundup(attr->len, sizeof(u32)); 1703 templen = fc_roundup(templen, sizeof(u32));
1715 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1704 curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
1716 len += attr->len; 1705 len += templen;
1717 count++; 1706 count++;
1718 attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + 1707 attr->len = cpu_to_be16(templen + sizeof(attr->type) +
1719 sizeof(attr->len)); 1708 sizeof(templen));
1720 1709
1721 /* 1710 /*
1722 * OS Name 1711 * OS Name
@@ -1724,14 +1713,14 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
1724 if (fcs_hba_attr->os_name[0] != '\0') { 1713 if (fcs_hba_attr->os_name[0] != '\0') {
1725 attr = (struct fdmi_attr_s *) curr_ptr; 1714 attr = (struct fdmi_attr_s *) curr_ptr;
1726 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_OS_NAME); 1715 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_OS_NAME);
1727 attr->len = (u16) strlen(fcs_hba_attr->os_name); 1716 templen = (u16) strlen(fcs_hba_attr->os_name);
1728 memcpy(attr->value, fcs_hba_attr->os_name, attr->len); 1717 memcpy(attr->value, fcs_hba_attr->os_name, templen);
1729 attr->len = fc_roundup(attr->len, sizeof(u32)); 1718 templen = fc_roundup(templen, sizeof(u32));
1730 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1719 curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
1731 len += attr->len; 1720 len += templen;
1732 count++; 1721 count++;
1733 attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + 1722 attr->len = cpu_to_be16(templen + sizeof(attr->type) +
1734 sizeof(attr->len)); 1723 sizeof(templen));
1735 } 1724 }
1736 1725
1737 /* 1726 /*
@@ -1739,12 +1728,12 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
1739 */ 1728 */
1740 attr = (struct fdmi_attr_s *) curr_ptr; 1729 attr = (struct fdmi_attr_s *) curr_ptr;
1741 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MAX_CT); 1730 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MAX_CT);
1742 attr->len = sizeof(fcs_hba_attr->max_ct_pyld); 1731 templen = sizeof(fcs_hba_attr->max_ct_pyld);
1743 memcpy(attr->value, &fcs_hba_attr->max_ct_pyld, attr->len); 1732 memcpy(attr->value, &fcs_hba_attr->max_ct_pyld, templen);
1744 len += attr->len; 1733 len += templen;
1745 count++; 1734 count++;
1746 attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + 1735 attr->len = cpu_to_be16(templen + sizeof(attr->type) +
1747 sizeof(attr->len)); 1736 sizeof(templen));
1748 1737
1749 /* 1738 /*
1750 * Update size of payload 1739 * Update size of payload
@@ -1845,6 +1834,7 @@ bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi,
1845 u8 *curr_ptr; 1834 u8 *curr_ptr;
1846 u16 len; 1835 u16 len;
1847 u8 count = 0; 1836 u8 count = 0;
1837 u16 templen;
1848 1838
1849 /* 1839 /*
1850 * get port attributes 1840 * get port attributes
@@ -1863,54 +1853,54 @@ bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi,
1863 */ 1853 */
1864 attr = (struct fdmi_attr_s *) curr_ptr; 1854 attr = (struct fdmi_attr_s *) curr_ptr;
1865 attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_FC4_TYPES); 1855 attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_FC4_TYPES);
1866 attr->len = sizeof(fcs_port_attr.supp_fc4_types); 1856 templen = sizeof(fcs_port_attr.supp_fc4_types);
1867 memcpy(attr->value, fcs_port_attr.supp_fc4_types, attr->len); 1857 memcpy(attr->value, fcs_port_attr.supp_fc4_types, templen);
1868 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1858 curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
1869 len += attr->len; 1859 len += templen;
1870 ++count; 1860 ++count;
1871 attr->len = 1861 attr->len =
1872 cpu_to_be16(attr->len + sizeof(attr->type) + 1862 cpu_to_be16(templen + sizeof(attr->type) +
1873 sizeof(attr->len)); 1863 sizeof(templen));
1874 1864
1875 /* 1865 /*
1876 * Supported Speed 1866 * Supported Speed
1877 */ 1867 */
1878 attr = (struct fdmi_attr_s *) curr_ptr; 1868 attr = (struct fdmi_attr_s *) curr_ptr;
1879 attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_SUPP_SPEED); 1869 attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_SUPP_SPEED);
1880 attr->len = sizeof(fcs_port_attr.supp_speed); 1870 templen = sizeof(fcs_port_attr.supp_speed);
1881 memcpy(attr->value, &fcs_port_attr.supp_speed, attr->len); 1871 memcpy(attr->value, &fcs_port_attr.supp_speed, templen);
1882 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1872 curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
1883 len += attr->len; 1873 len += templen;
1884 ++count; 1874 ++count;
1885 attr->len = 1875 attr->len =
1886 cpu_to_be16(attr->len + sizeof(attr->type) + 1876 cpu_to_be16(templen + sizeof(attr->type) +
1887 sizeof(attr->len)); 1877 sizeof(templen));
1888 1878
1889 /* 1879 /*
1890 * current Port Speed 1880 * current Port Speed
1891 */ 1881 */
1892 attr = (struct fdmi_attr_s *) curr_ptr; 1882 attr = (struct fdmi_attr_s *) curr_ptr;
1893 attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_SPEED); 1883 attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_SPEED);
1894 attr->len = sizeof(fcs_port_attr.curr_speed); 1884 templen = sizeof(fcs_port_attr.curr_speed);
1895 memcpy(attr->value, &fcs_port_attr.curr_speed, attr->len); 1885 memcpy(attr->value, &fcs_port_attr.curr_speed, templen);
1896 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1886 curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
1897 len += attr->len; 1887 len += templen;
1898 ++count; 1888 ++count;
1899 attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + 1889 attr->len = cpu_to_be16(templen + sizeof(attr->type) +
1900 sizeof(attr->len)); 1890 sizeof(templen));
1901 1891
1902 /* 1892 /*
1903 * max frame size 1893 * max frame size
1904 */ 1894 */
1905 attr = (struct fdmi_attr_s *) curr_ptr; 1895 attr = (struct fdmi_attr_s *) curr_ptr;
1906 attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_FRAME_SIZE); 1896 attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_FRAME_SIZE);
1907 attr->len = sizeof(fcs_port_attr.max_frm_size); 1897 templen = sizeof(fcs_port_attr.max_frm_size);
1908 memcpy(attr->value, &fcs_port_attr.max_frm_size, attr->len); 1898 memcpy(attr->value, &fcs_port_attr.max_frm_size, templen);
1909 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1899 curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
1910 len += attr->len; 1900 len += templen;
1911 ++count; 1901 ++count;
1912 attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + 1902 attr->len = cpu_to_be16(templen + sizeof(attr->type) +
1913 sizeof(attr->len)); 1903 sizeof(templen));
1914 1904
1915 /* 1905 /*
1916 * OS Device Name 1906 * OS Device Name
@@ -1918,14 +1908,14 @@ bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi,
1918 if (fcs_port_attr.os_device_name[0] != '\0') { 1908 if (fcs_port_attr.os_device_name[0] != '\0') {
1919 attr = (struct fdmi_attr_s *) curr_ptr; 1909 attr = (struct fdmi_attr_s *) curr_ptr;
1920 attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_DEV_NAME); 1910 attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_DEV_NAME);
1921 attr->len = (u16) strlen(fcs_port_attr.os_device_name); 1911 templen = (u16) strlen(fcs_port_attr.os_device_name);
1922 memcpy(attr->value, fcs_port_attr.os_device_name, attr->len); 1912 memcpy(attr->value, fcs_port_attr.os_device_name, templen);
1923 attr->len = fc_roundup(attr->len, sizeof(u32)); 1913 templen = fc_roundup(templen, sizeof(u32));
1924 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1914 curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
1925 len += attr->len; 1915 len += templen;
1926 ++count; 1916 ++count;
1927 attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + 1917 attr->len = cpu_to_be16(templen + sizeof(attr->type) +
1928 sizeof(attr->len)); 1918 sizeof(templen));
1929 } 1919 }
1930 /* 1920 /*
1931 * Host Name 1921 * Host Name
@@ -1933,14 +1923,14 @@ bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi,
1933 if (fcs_port_attr.host_name[0] != '\0') { 1923 if (fcs_port_attr.host_name[0] != '\0') {
1934 attr = (struct fdmi_attr_s *) curr_ptr; 1924 attr = (struct fdmi_attr_s *) curr_ptr;
1935 attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_HOST_NAME); 1925 attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_HOST_NAME);
1936 attr->len = (u16) strlen(fcs_port_attr.host_name); 1926 templen = (u16) strlen(fcs_port_attr.host_name);
1937 memcpy(attr->value, fcs_port_attr.host_name, attr->len); 1927 memcpy(attr->value, fcs_port_attr.host_name, templen);
1938 attr->len = fc_roundup(attr->len, sizeof(u32)); 1928 templen = fc_roundup(templen, sizeof(u32));
1939 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1929 curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
1940 len += attr->len; 1930 len += templen;
1941 ++count; 1931 ++count;
1942 attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + 1932 attr->len = cpu_to_be16(templen + sizeof(attr->type) +
1943 sizeof(attr->len)); 1933 sizeof(templen));
1944 } 1934 }
1945 1935
1946 /* 1936 /*
@@ -2103,7 +2093,7 @@ bfa_fcs_lport_fdmi_timeout(void *arg)
2103 bfa_sm_send_event(fdmi, FDMISM_EVENT_TIMEOUT); 2093 bfa_sm_send_event(fdmi, FDMISM_EVENT_TIMEOUT);
2104} 2094}
2105 2095
2106void 2096static void
2107bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi, 2097bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
2108 struct bfa_fcs_fdmi_hba_attr_s *hba_attr) 2098 struct bfa_fcs_fdmi_hba_attr_s *hba_attr)
2109{ 2099{
@@ -2147,7 +2137,7 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
2147 hba_attr->max_ct_pyld = cpu_to_be32(FC_MAX_PDUSZ); 2137 hba_attr->max_ct_pyld = cpu_to_be32(FC_MAX_PDUSZ);
2148} 2138}
2149 2139
2150void 2140static void
2151bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi, 2141bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
2152 struct bfa_fcs_fdmi_port_attr_s *port_attr) 2142 struct bfa_fcs_fdmi_port_attr_s *port_attr)
2153{ 2143{
@@ -2560,7 +2550,7 @@ bfa_fcs_lport_ms_send_gmal(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
2560 2550
2561 len = fc_gmal_req_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), 2551 len = fc_gmal_req_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
2562 bfa_fcs_lport_get_fcid(port), 2552 bfa_fcs_lport_get_fcid(port),
2563 bfa_lps_get_peer_nwwn(port->fabric->lps)); 2553 port->fabric->lps->pr_nwwn);
2564 2554
2565 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 2555 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
2566 FC_CLASS_3, len, &fchs, 2556 FC_CLASS_3, len, &fchs,
@@ -2760,7 +2750,7 @@ bfa_fcs_lport_ms_send_gfn(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
2760 2750
2761 len = fc_gfn_req_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), 2751 len = fc_gfn_req_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
2762 bfa_fcs_lport_get_fcid(port), 2752 bfa_fcs_lport_get_fcid(port),
2763 bfa_lps_get_peer_nwwn(port->fabric->lps)); 2753 port->fabric->lps->pr_nwwn);
2764 2754
2765 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 2755 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
2766 FC_CLASS_3, len, &fchs, 2756 FC_CLASS_3, len, &fchs,
@@ -2836,7 +2826,7 @@ bfa_fcs_lport_ms_send_plogi(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
2836 ms->fcxp = fcxp; 2826 ms->fcxp = fcxp;
2837 2827
2838 len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), 2828 len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
2839 bfa_os_hton3b(FC_MGMT_SERVER), 2829 bfa_hton3b(FC_MGMT_SERVER),
2840 bfa_fcs_lport_get_fcid(port), 0, 2830 bfa_fcs_lport_get_fcid(port), 0,
2841 port->port_cfg.pwwn, port->port_cfg.nwwn, 2831 port->port_cfg.pwwn, port->port_cfg.nwwn,
2842 bfa_fcport_get_maxfrsize(port->fcs->bfa)); 2832 bfa_fcport_get_maxfrsize(port->fcs->bfa));
@@ -3593,7 +3583,7 @@ fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
3593 ns->fcxp = fcxp; 3583 ns->fcxp = fcxp;
3594 3584
3595 len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), 3585 len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
3596 bfa_os_hton3b(FC_NAME_SERVER), 3586 bfa_hton3b(FC_NAME_SERVER),
3597 bfa_fcs_lport_get_fcid(port), 0, 3587 bfa_fcs_lport_get_fcid(port), 0,
3598 port->port_cfg.pwwn, port->port_cfg.nwwn, 3588 port->port_cfg.pwwn, port->port_cfg.nwwn,
3599 bfa_fcport_get_maxfrsize(port->fcs->bfa)); 3589 bfa_fcport_get_maxfrsize(port->fcs->bfa));
@@ -4150,7 +4140,7 @@ bfa_fcs_lport_ns_query(struct bfa_fcs_lport_s *port)
4150 bfa_sm_send_event(ns, NSSM_EVENT_NS_QUERY); 4140 bfa_sm_send_event(ns, NSSM_EVENT_NS_QUERY);
4151} 4141}
4152 4142
4153void 4143static void
4154bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port) 4144bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port)
4155{ 4145{
4156 4146
@@ -4163,7 +4153,7 @@ bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port)
4163 4153
4164 for (ii = 0 ; ii < nwwns; ++ii) { 4154 for (ii = 0 ; ii < nwwns; ++ii) {
4165 rport = bfa_fcs_rport_create_by_wwn(port, wwns[ii]); 4155 rport = bfa_fcs_rport_create_by_wwn(port, wwns[ii]);
4166 bfa_assert(rport); 4156 WARN_ON(!rport);
4167 } 4157 }
4168} 4158}
4169 4159
@@ -4352,8 +4342,8 @@ bfa_fcs_lport_scn_send_scr(void *scn_cbarg, struct bfa_fcxp_s *fcxp_alloced)
4352 /* Handle VU registrations for Base port only */ 4342 /* Handle VU registrations for Base port only */
4353 if ((!port->vport) && bfa_ioc_get_fcmode(&port->fcs->bfa->ioc)) { 4343 if ((!port->vport) && bfa_ioc_get_fcmode(&port->fcs->bfa->ioc)) {
4354 len = fc_scr_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), 4344 len = fc_scr_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
4355 bfa_lps_is_brcd_fabric(port->fabric->lps), 4345 port->fabric->lps->brcd_switch,
4356 port->pid, 0); 4346 port->pid, 0);
4357 } else { 4347 } else {
4358 len = fc_scr_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), 4348 len = fc_scr_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
4359 BFA_FALSE, 4349 BFA_FALSE,
@@ -4626,7 +4616,7 @@ bfa_fcs_lport_scn_process_rscn(struct bfa_fcs_lport_s *port,
4626 4616
4627 4617
4628 default: 4618 default:
4629 bfa_assert(0); 4619 WARN_ON(1);
4630 nsquery = BFA_TRUE; 4620 nsquery = BFA_TRUE;
4631 } 4621 }
4632 } 4622 }
@@ -4672,7 +4662,7 @@ bfa_fcs_lport_get_rport(struct bfa_fcs_lport_s *port, wwn_t wwn, int index,
4672 4662
4673 while ((qe != qh) && (i < nrports)) { 4663 while ((qe != qh) && (i < nrports)) {
4674 rport = (struct bfa_fcs_rport_s *) qe; 4664 rport = (struct bfa_fcs_rport_s *) qe;
4675 if (bfa_os_ntoh3b(rport->pid) > 0xFFF000) { 4665 if (bfa_ntoh3b(rport->pid) > 0xFFF000) {
4676 qe = bfa_q_next(qe); 4666 qe = bfa_q_next(qe);
4677 bfa_trc(fcs, (u32) rport->pwwn); 4667 bfa_trc(fcs, (u32) rport->pwwn);
4678 bfa_trc(fcs, rport->pid); 4668 bfa_trc(fcs, rport->pid);
@@ -4720,7 +4710,7 @@ bfa_fcs_lport_get_rports(struct bfa_fcs_lport_s *port,
4720 4710
4721 while ((qe != qh) && (i < *nrports)) { 4711 while ((qe != qh) && (i < *nrports)) {
4722 rport = (struct bfa_fcs_rport_s *) qe; 4712 rport = (struct bfa_fcs_rport_s *) qe;
4723 if (bfa_os_ntoh3b(rport->pid) > 0xFFF000) { 4713 if (bfa_ntoh3b(rport->pid) > 0xFFF000) {
4724 qe = bfa_q_next(qe); 4714 qe = bfa_q_next(qe);
4725 bfa_trc(fcs, (u32) rport->pwwn); 4715 bfa_trc(fcs, (u32) rport->pwwn);
4726 bfa_trc(fcs, rport->pid); 4716 bfa_trc(fcs, rport->pid);
@@ -4771,7 +4761,7 @@ bfa_fcs_lport_get_rport_max_speed(bfa_fcs_lport_t *port)
4771 4761
4772 while (qe != qh) { 4762 while (qe != qh) {
4773 rport = (struct bfa_fcs_rport_s *) qe; 4763 rport = (struct bfa_fcs_rport_s *) qe;
4774 if ((bfa_os_ntoh3b(rport->pid) > 0xFFF000) || 4764 if ((bfa_ntoh3b(rport->pid) > 0xFFF000) ||
4775 (bfa_fcs_rport_get_state(rport) == 4765 (bfa_fcs_rport_get_state(rport) ==
4776 BFA_RPORT_OFFLINE)) { 4766 BFA_RPORT_OFFLINE)) {
4777 qe = bfa_q_next(qe); 4767 qe = bfa_q_next(qe);
@@ -4807,7 +4797,7 @@ bfa_fcs_lookup_port(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t lpwwn)
4807 struct bfa_fcs_vport_s *vport; 4797 struct bfa_fcs_vport_s *vport;
4808 bfa_fcs_vf_t *vf; 4798 bfa_fcs_vf_t *vf;
4809 4799
4810 bfa_assert(fcs != NULL); 4800 WARN_ON(fcs == NULL);
4811 4801
4812 vf = bfa_fcs_vf_lookup(fcs, vf_id); 4802 vf = bfa_fcs_vf_lookup(fcs, vf_id);
4813 if (vf == NULL) { 4803 if (vf == NULL) {
@@ -4853,7 +4843,7 @@ bfa_fcs_lport_get_info(struct bfa_fcs_lport_s *port,
4853 port_info->max_vports_supp = 4843 port_info->max_vports_supp =
4854 bfa_lps_get_max_vport(port->fcs->bfa); 4844 bfa_lps_get_max_vport(port->fcs->bfa);
4855 port_info->num_vports_inuse = 4845 port_info->num_vports_inuse =
4856 bfa_fcs_fabric_vport_count(port->fabric); 4846 port->fabric->num_vports;
4857 port_info->max_rports_supp = BFA_FCS_MAX_RPORTS_SUPP; 4847 port_info->max_rports_supp = BFA_FCS_MAX_RPORTS_SUPP;
4858 port_info->num_rports_inuse = port->num_rports; 4848 port_info->num_rports_inuse = port->num_rports;
4859 } else { 4849 } else {
@@ -4997,7 +4987,8 @@ bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport,
4997 4987
4998 switch (event) { 4988 switch (event) {
4999 case BFA_FCS_VPORT_SM_START: 4989 case BFA_FCS_VPORT_SM_START:
5000 if (bfa_fcs_fabric_is_online(__vport_fabric(vport)) 4990 if (bfa_sm_cmp_state(__vport_fabric(vport),
4991 bfa_fcs_fabric_sm_online)
5001 && bfa_fcs_fabric_npiv_capable(__vport_fabric(vport))) { 4992 && bfa_fcs_fabric_npiv_capable(__vport_fabric(vport))) {
5002 bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc); 4993 bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc);
5003 bfa_fcs_vport_do_fdisc(vport); 4994 bfa_fcs_vport_do_fdisc(vport);
@@ -5080,13 +5071,13 @@ bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport,
5080 switch (event) { 5071 switch (event) {
5081 case BFA_FCS_VPORT_SM_DELETE: 5072 case BFA_FCS_VPORT_SM_DELETE:
5082 bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup); 5073 bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
5083 bfa_lps_discard(vport->lps); 5074 bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
5084 bfa_fcs_lport_delete(&vport->lport); 5075 bfa_fcs_lport_delete(&vport->lport);
5085 break; 5076 break;
5086 5077
5087 case BFA_FCS_VPORT_SM_OFFLINE: 5078 case BFA_FCS_VPORT_SM_OFFLINE:
5088 bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline); 5079 bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
5089 bfa_lps_discard(vport->lps); 5080 bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
5090 break; 5081 break;
5091 5082
5092 case BFA_FCS_VPORT_SM_RSP_OK: 5083 case BFA_FCS_VPORT_SM_RSP_OK:
@@ -5166,7 +5157,7 @@ bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport,
5166 5157
5167 case BFA_FCS_VPORT_SM_OFFLINE: 5158 case BFA_FCS_VPORT_SM_OFFLINE:
5168 bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline); 5159 bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
5169 bfa_lps_discard(vport->lps); 5160 bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
5170 bfa_fcs_lport_offline(&vport->lport); 5161 bfa_fcs_lport_offline(&vport->lport);
5171 break; 5162 break;
5172 5163
@@ -5266,7 +5257,7 @@ bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport,
5266 5257
5267 switch (event) { 5258 switch (event) {
5268 case BFA_FCS_VPORT_SM_OFFLINE: 5259 case BFA_FCS_VPORT_SM_OFFLINE:
5269 bfa_lps_discard(vport->lps); 5260 bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
5270 /* 5261 /*
5271 * !!! fall through !!! 5262 * !!! fall through !!!
5272 */ 5263 */
@@ -5305,14 +5296,14 @@ bfa_fcs_vport_do_fdisc(struct bfa_fcs_vport_s *vport)
5305static void 5296static void
5306bfa_fcs_vport_fdisc_rejected(struct bfa_fcs_vport_s *vport) 5297bfa_fcs_vport_fdisc_rejected(struct bfa_fcs_vport_s *vport)
5307{ 5298{
5308 u8 lsrjt_rsn = bfa_lps_get_lsrjt_rsn(vport->lps); 5299 u8 lsrjt_rsn = vport->lps->lsrjt_rsn;
5309 u8 lsrjt_expl = bfa_lps_get_lsrjt_expl(vport->lps); 5300 u8 lsrjt_expl = vport->lps->lsrjt_expl;
5310 5301
5311 bfa_trc(__vport_fcs(vport), lsrjt_rsn); 5302 bfa_trc(__vport_fcs(vport), lsrjt_rsn);
5312 bfa_trc(__vport_fcs(vport), lsrjt_expl); 5303 bfa_trc(__vport_fcs(vport), lsrjt_expl);
5313 5304
5314 /* For certain reason codes, we don't want to retry. */ 5305 /* For certain reason codes, we don't want to retry. */
5315 switch (bfa_lps_get_lsrjt_expl(vport->lps)) { 5306 switch (vport->lps->lsrjt_expl) {
5316 case FC_LS_RJT_EXP_INV_PORT_NAME: /* by brocade */ 5307 case FC_LS_RJT_EXP_INV_PORT_NAME: /* by brocade */
5317 case FC_LS_RJT_EXP_INVALID_NPORT_ID: /* by Cisco */ 5308 case FC_LS_RJT_EXP_INVALID_NPORT_ID: /* by Cisco */
5318 if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES) 5309 if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES)
@@ -5476,7 +5467,7 @@ bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs,
5476 if (bfa_fcs_vport_lookup(fcs, vf_id, vport_cfg->pwwn) != NULL) 5467 if (bfa_fcs_vport_lookup(fcs, vf_id, vport_cfg->pwwn) != NULL)
5477 return BFA_STATUS_VPORT_EXISTS; 5468 return BFA_STATUS_VPORT_EXISTS;
5478 5469
5479 if (bfa_fcs_fabric_vport_count(&fcs->fabric) == 5470 if (fcs->fabric.num_vports ==
5480 bfa_lps_get_max_vport(fcs->bfa)) 5471 bfa_lps_get_max_vport(fcs->bfa))
5481 return BFA_STATUS_VPORT_MAX; 5472 return BFA_STATUS_VPORT_MAX;
5482 5473
@@ -5618,33 +5609,6 @@ bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport,
5618 attr->vport_state = bfa_sm_to_state(vport_sm_table, vport->sm); 5609 attr->vport_state = bfa_sm_to_state(vport_sm_table, vport->sm);
5619} 5610}
5620 5611
5621/*
5622 * Use this function to get vport's statistics.
5623 *
5624 * param[in] vport pointer to bfa_fcs_vport_t.
5625 * param[out] stats pointer to return vport statistics in
5626 *
5627 * return None
5628 */
5629void
5630bfa_fcs_vport_get_stats(struct bfa_fcs_vport_s *vport,
5631 struct bfa_vport_stats_s *stats)
5632{
5633 *stats = vport->vport_stats;
5634}
5635
5636/*
5637 * Use this function to clear vport's statistics.
5638 *
5639 * param[in] vport pointer to bfa_fcs_vport_t.
5640 *
5641 * return None
5642 */
5643void
5644bfa_fcs_vport_clr_stats(struct bfa_fcs_vport_s *vport)
5645{
5646 memset(&vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s));
5647}
5648 5612
5649/* 5613/*
5650 * Lookup a virtual port. Excludes base port from lookup. 5614 * Lookup a virtual port. Excludes base port from lookup.
@@ -5684,7 +5648,7 @@ bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status)
5684 /* 5648 /*
5685 * Initialiaze the V-Port fields 5649 * Initialiaze the V-Port fields
5686 */ 5650 */
5687 __vport_fcid(vport) = bfa_lps_get_pid(vport->lps); 5651 __vport_fcid(vport) = vport->lps->lp_pid;
5688 vport->vport_stats.fdisc_accepts++; 5652 vport->vport_stats.fdisc_accepts++;
5689 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK); 5653 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK);
5690 break; 5654 break;
@@ -5697,7 +5661,7 @@ bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status)
5697 break; 5661 break;
5698 5662
5699 case BFA_STATUS_EPROTOCOL: 5663 case BFA_STATUS_EPROTOCOL:
5700 switch (bfa_lps_get_extstatus(vport->lps)) { 5664 switch (vport->lps->ext_status) {
5701 case BFA_EPROTO_BAD_ACCEPT: 5665 case BFA_EPROTO_BAD_ACCEPT:
5702 vport->vport_stats.fdisc_acc_bad++; 5666 vport->vport_stats.fdisc_acc_bad++;
5703 break; 5667 break;
diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c
index cf4a6e73e60d..caaee6f06937 100644
--- a/drivers/scsi/bfa/bfa_fcs_rport.c
+++ b/drivers/scsi/bfa/bfa_fcs_rport.c
@@ -19,9 +19,9 @@
19 * rport.c Remote port implementation. 19 * rport.c Remote port implementation.
20 */ 20 */
21 21
22#include "bfad_drv.h"
22#include "bfa_fcs.h" 23#include "bfa_fcs.h"
23#include "bfa_fcbuild.h" 24#include "bfa_fcbuild.h"
24#include "bfad_drv.h"
25 25
26BFA_TRC_FILE(FCS, RPORT); 26BFA_TRC_FILE(FCS, RPORT);
27 27
@@ -75,30 +75,6 @@ static void bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport,
75static void bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport, 75static void bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport,
76 struct fchs_s *rx_fchs, u16 len); 76 struct fchs_s *rx_fchs, u16 len);
77static void bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport); 77static void bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport);
78/*
79 * fcs_rport_sm FCS rport state machine events
80 */
81
82enum rport_event {
83 RPSM_EVENT_PLOGI_SEND = 1, /* new rport; start with PLOGI */
84 RPSM_EVENT_PLOGI_RCVD = 2, /* Inbound PLOGI from remote port */
85 RPSM_EVENT_PLOGI_COMP = 3, /* PLOGI completed to rport */
86 RPSM_EVENT_LOGO_RCVD = 4, /* LOGO from remote device */
87 RPSM_EVENT_LOGO_IMP = 5, /* implicit logo for SLER */
88 RPSM_EVENT_FCXP_SENT = 6, /* Frame from has been sent */
89 RPSM_EVENT_DELETE = 7, /* RPORT delete request */
90 RPSM_EVENT_SCN = 8, /* state change notification */
91 RPSM_EVENT_ACCEPTED = 9, /* Good response from remote device */
92 RPSM_EVENT_FAILED = 10, /* Request to rport failed. */
93 RPSM_EVENT_TIMEOUT = 11, /* Rport SM timeout event */
94 RPSM_EVENT_HCB_ONLINE = 12, /* BFA rport online callback */
95 RPSM_EVENT_HCB_OFFLINE = 13, /* BFA rport offline callback */
96 RPSM_EVENT_FC4_OFFLINE = 14, /* FC-4 offline complete */
97 RPSM_EVENT_ADDRESS_CHANGE = 15, /* Rport's PID has changed */
98 RPSM_EVENT_ADDRESS_DISC = 16, /* Need to Discover rport's PID */
99 RPSM_EVENT_PRLO_RCVD = 17, /* PRLO from remote device */
100 RPSM_EVENT_PLOGI_RETRY = 18, /* Retry PLOGI continously */
101};
102 78
103static void bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport, 79static void bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport,
104 enum rport_event event); 80 enum rport_event event);
@@ -498,24 +474,24 @@ bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
498 474
499 case RPSM_EVENT_LOGO_RCVD: 475 case RPSM_EVENT_LOGO_RCVD:
500 bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logorcv); 476 bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logorcv);
501 bfa_rport_offline(rport->bfa_rport); 477 bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
502 break; 478 break;
503 479
504 case RPSM_EVENT_LOGO_IMP: 480 case RPSM_EVENT_LOGO_IMP:
505 case RPSM_EVENT_ADDRESS_CHANGE: 481 case RPSM_EVENT_ADDRESS_CHANGE:
506 bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline); 482 bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline);
507 bfa_rport_offline(rport->bfa_rport); 483 bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
508 break; 484 break;
509 485
510 case RPSM_EVENT_PLOGI_RCVD: 486 case RPSM_EVENT_PLOGI_RCVD:
511 bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); 487 bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
512 bfa_rport_offline(rport->bfa_rport); 488 bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
513 bfa_fcs_rport_send_plogiacc(rport, NULL); 489 bfa_fcs_rport_send_plogiacc(rport, NULL);
514 break; 490 break;
515 491
516 case RPSM_EVENT_DELETE: 492 case RPSM_EVENT_DELETE:
517 bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logosend); 493 bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logosend);
518 bfa_rport_offline(rport->bfa_rport); 494 bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
519 break; 495 break;
520 496
521 case RPSM_EVENT_SCN: 497 case RPSM_EVENT_SCN:
@@ -824,7 +800,7 @@ bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport,
824 switch (event) { 800 switch (event) {
825 case RPSM_EVENT_FC4_OFFLINE: 801 case RPSM_EVENT_FC4_OFFLINE:
826 bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logorcv); 802 bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logorcv);
827 bfa_rport_offline(rport->bfa_rport); 803 bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
828 break; 804 break;
829 805
830 case RPSM_EVENT_DELETE: 806 case RPSM_EVENT_DELETE:
@@ -856,7 +832,7 @@ bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport,
856 switch (event) { 832 switch (event) {
857 case RPSM_EVENT_FC4_OFFLINE: 833 case RPSM_EVENT_FC4_OFFLINE:
858 bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logosend); 834 bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logosend);
859 bfa_rport_offline(rport->bfa_rport); 835 bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
860 break; 836 break;
861 837
862 default: 838 default:
@@ -878,7 +854,7 @@ bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport,
878 switch (event) { 854 switch (event) {
879 case RPSM_EVENT_FC4_OFFLINE: 855 case RPSM_EVENT_FC4_OFFLINE:
880 bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline); 856 bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline);
881 bfa_rport_offline(rport->bfa_rport); 857 bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
882 break; 858 break;
883 859
884 case RPSM_EVENT_SCN: 860 case RPSM_EVENT_SCN:
@@ -1459,7 +1435,7 @@ bfa_fcs_rport_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
1459 twin->stats.plogi_rcvd += rport->stats.plogi_rcvd; 1435 twin->stats.plogi_rcvd += rport->stats.plogi_rcvd;
1460 twin->stats.plogi_accs++; 1436 twin->stats.plogi_accs++;
1461 1437
1462 bfa_fcs_rport_delete(rport); 1438 bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
1463 1439
1464 bfa_fcs_rport_update(twin, plogi_rsp); 1440 bfa_fcs_rport_update(twin, plogi_rsp);
1465 twin->pid = rsp_fchs->s_id; 1441 twin->pid = rsp_fchs->s_id;
@@ -1992,13 +1968,14 @@ bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid)
1992 /* 1968 /*
1993 * allocate FC-4s 1969 * allocate FC-4s
1994 */ 1970 */
1995 bfa_assert(bfa_fcs_lport_is_initiator(port)); 1971 WARN_ON(!bfa_fcs_lport_is_initiator(port));
1996 1972
1997 if (bfa_fcs_lport_is_initiator(port)) { 1973 if (bfa_fcs_lport_is_initiator(port)) {
1998 rport->itnim = bfa_fcs_itnim_create(rport); 1974 rport->itnim = bfa_fcs_itnim_create(rport);
1999 if (!rport->itnim) { 1975 if (!rport->itnim) {
2000 bfa_trc(fcs, rpid); 1976 bfa_trc(fcs, rpid);
2001 bfa_rport_delete(rport->bfa_rport); 1977 bfa_sm_send_event(rport->bfa_rport,
1978 BFA_RPORT_SM_DELETE);
2002 kfree(rport_drv); 1979 kfree(rport_drv);
2003 return NULL; 1980 return NULL;
2004 } 1981 }
@@ -2032,7 +2009,7 @@ bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport)
2032 bfa_fcs_rpf_rport_offline(rport); 2009 bfa_fcs_rpf_rport_offline(rport);
2033 } 2010 }
2034 2011
2035 bfa_rport_delete(rport->bfa_rport); 2012 bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_DELETE);
2036 bfa_fcs_lport_del_rport(port, rport); 2013 bfa_fcs_lport_del_rport(port, rport);
2037 kfree(rport->rp_drv); 2014 kfree(rport->rp_drv);
2038} 2015}
@@ -2307,40 +2284,8 @@ bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
2307 bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD); 2284 bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD);
2308} 2285}
2309 2286
2310/*
2311 * Called by bport/vport to delete a remote port instance.
2312 *
2313 * Rport delete is called under the following conditions:
2314 * - vport is deleted
2315 * - vf is deleted
2316 * - explicit request from OS to delete rport
2317 */
2318void
2319bfa_fcs_rport_delete(struct bfa_fcs_rport_s *rport)
2320{
2321 bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
2322}
2323 2287
2324/* 2288/*
2325 * Called by bport/vport to when a target goes offline.
2326 *
2327 */
2328void
2329bfa_fcs_rport_offline(struct bfa_fcs_rport_s *rport)
2330{
2331 bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP);
2332}
2333
2334/*
2335 * Called by bport in n2n when a target (attached port) becomes online.
2336 *
2337 */
2338void
2339bfa_fcs_rport_online(struct bfa_fcs_rport_s *rport)
2340{
2341 bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_SEND);
2342}
2343/*
2344 * Called by bport/vport to notify SCN for the remote port 2289 * Called by bport/vport to notify SCN for the remote port
2345 */ 2290 */
2346void 2291void
@@ -2350,23 +2295,6 @@ bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport)
2350 bfa_sm_send_event(rport, RPSM_EVENT_SCN); 2295 bfa_sm_send_event(rport, RPSM_EVENT_SCN);
2351} 2296}
2352 2297
2353/*
2354 * Called by fcpim to notify that the ITN cleanup is done.
2355 */
2356void
2357bfa_fcs_rport_itnim_ack(struct bfa_fcs_rport_s *rport)
2358{
2359 bfa_sm_send_event(rport, RPSM_EVENT_FC4_OFFLINE);
2360}
2361
2362/*
2363 * Called by fcptm to notify that the ITN cleanup is done.
2364 */
2365void
2366bfa_fcs_rport_tin_ack(struct bfa_fcs_rport_s *rport)
2367{
2368 bfa_sm_send_event(rport, RPSM_EVENT_FC4_OFFLINE);
2369}
2370 2298
2371/* 2299/*
2372 * brief 2300 * brief
@@ -2465,15 +2393,6 @@ bfa_cb_rport_qos_scn_prio(void *cbarg,
2465 * Called to process any unsolicted frames from this remote port 2393 * Called to process any unsolicted frames from this remote port
2466 */ 2394 */
2467void 2395void
2468bfa_fcs_rport_logo_imp(struct bfa_fcs_rport_s *rport)
2469{
2470 bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP);
2471}
2472
2473/*
2474 * Called to process any unsolicted frames from this remote port
2475 */
2476void
2477bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport, 2396bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport,
2478 struct fchs_s *fchs, u16 len) 2397 struct fchs_s *fchs, u16 len)
2479{ 2398{
@@ -2586,6 +2505,7 @@ bfa_fcs_rport_get_state(struct bfa_fcs_rport_s *rport)
2586 return bfa_sm_to_state(rport_sm_table, rport->sm); 2505 return bfa_sm_to_state(rport_sm_table, rport->sm);
2587} 2506}
2588 2507
2508
2589/* 2509/*
2590 * brief 2510 * brief
2591 * Called by the Driver to set rport delete/ageout timeout 2511 * Called by the Driver to set rport delete/ageout timeout
@@ -2602,7 +2522,7 @@ bfa_fcs_rport_set_del_timeout(u8 rport_tmo)
2602 bfa_fcs_rport_del_timeout = rport_tmo * 1000; 2522 bfa_fcs_rport_del_timeout = rport_tmo * 1000;
2603} 2523}
2604void 2524void
2605bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, u16 ox_id) 2525bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, __be16 ox_id)
2606{ 2526{
2607 bfa_trc(rport->fcs, rport->pid); 2527 bfa_trc(rport->fcs, rport->pid);
2608 2528
@@ -2621,106 +2541,6 @@ bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, u16 ox_id)
2621 * fcs_rport_api FCS rport API. 2541 * fcs_rport_api FCS rport API.
2622 */ 2542 */
2623 2543
2624/*
2625 * Direct API to add a target by port wwn. This interface is used, for
2626 * example, by bios when target pwwn is known from boot lun configuration.
2627 */
2628bfa_status_t
2629bfa_fcs_rport_add(struct bfa_fcs_lport_s *port, wwn_t *pwwn,
2630 struct bfa_fcs_rport_s *rport, struct bfad_rport_s *rport_drv)
2631{
2632 bfa_trc(port->fcs, *pwwn);
2633
2634 return BFA_STATUS_OK;
2635}
2636
2637/*
2638 * Direct API to remove a target and its associated resources. This
2639 * interface is used, for example, by driver to remove target
2640 * ports from the target list for a VM.
2641 */
2642bfa_status_t
2643bfa_fcs_rport_remove(struct bfa_fcs_rport_s *rport_in)
2644{
2645
2646 struct bfa_fcs_rport_s *rport;
2647
2648 bfa_trc(rport_in->fcs, rport_in->pwwn);
2649
2650 rport = bfa_fcs_lport_get_rport_by_pwwn(rport_in->port, rport_in->pwwn);
2651 if (rport == NULL) {
2652 /*
2653 * TBD Error handling
2654 */
2655 bfa_trc(rport_in->fcs, rport_in->pid);
2656 return BFA_STATUS_UNKNOWN_RWWN;
2657 }
2658
2659 /*
2660 * TBD if this remote port is online, send a logo
2661 */
2662 return BFA_STATUS_OK;
2663
2664}
2665
2666/*
2667 * Remote device status for display/debug.
2668 */
2669void
2670bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
2671 struct bfa_rport_attr_s *rport_attr)
2672{
2673 struct bfa_rport_qos_attr_s qos_attr;
2674 bfa_fcs_lport_t *port = rport->port;
2675 bfa_port_speed_t rport_speed = rport->rpf.rpsc_speed;
2676
2677 memset(rport_attr, 0, sizeof(struct bfa_rport_attr_s));
2678
2679 rport_attr->pid = rport->pid;
2680 rport_attr->pwwn = rport->pwwn;
2681 rport_attr->nwwn = rport->nwwn;
2682 rport_attr->cos_supported = rport->fc_cos;
2683 rport_attr->df_sz = rport->maxfrsize;
2684 rport_attr->state = bfa_fcs_rport_get_state(rport);
2685 rport_attr->fc_cos = rport->fc_cos;
2686 rport_attr->cisc = rport->cisc;
2687 rport_attr->scsi_function = rport->scsi_function;
2688 rport_attr->curr_speed = rport->rpf.rpsc_speed;
2689 rport_attr->assigned_speed = rport->rpf.assigned_speed;
2690
2691 bfa_rport_get_qos_attr(rport->bfa_rport, &qos_attr);
2692 rport_attr->qos_attr = qos_attr;
2693
2694 rport_attr->trl_enforced = BFA_FALSE;
2695 if (bfa_fcport_is_ratelim(port->fcs->bfa)) {
2696 if (rport_speed == BFA_PORT_SPEED_UNKNOWN) {
2697 /* Use default ratelim speed setting */
2698 rport_speed =
2699 bfa_fcport_get_ratelim_speed(rport->fcs->bfa);
2700 }
2701
2702 if (rport_speed < bfa_fcs_lport_get_rport_max_speed(port))
2703 rport_attr->trl_enforced = BFA_TRUE;
2704 }
2705}
2706
2707/*
2708 * Per remote device statistics.
2709 */
2710void
2711bfa_fcs_rport_get_stats(struct bfa_fcs_rport_s *rport,
2712 struct bfa_rport_stats_s *stats)
2713{
2714 *stats = rport->stats;
2715}
2716
2717void
2718bfa_fcs_rport_clear_stats(struct bfa_fcs_rport_s *rport)
2719{
2720 memset((char *)&rport->stats, 0,
2721 sizeof(struct bfa_rport_stats_s));
2722}
2723
2724struct bfa_fcs_rport_s * 2544struct bfa_fcs_rport_s *
2725bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port, wwn_t rpwwn) 2545bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port, wwn_t rpwwn)
2726{ 2546{
@@ -2752,22 +2572,6 @@ bfa_fcs_rport_lookup_by_nwwn(struct bfa_fcs_lport_s *port, wwn_t rnwwn)
2752} 2572}
2753 2573
2754/* 2574/*
2755 * This API is to set the Rport's speed. Should be used when RPSC is not
2756 * supported by the rport.
2757 */
2758void
2759bfa_fcs_rport_set_speed(struct bfa_fcs_rport_s *rport, bfa_port_speed_t speed)
2760{
2761 rport->rpf.assigned_speed = speed;
2762
2763 /* Set this speed in f/w only if the RPSC speed is not available */
2764 if (rport->rpf.rpsc_speed == BFA_PORT_SPEED_UNKNOWN)
2765 bfa_rport_speed(rport->bfa_rport, speed);
2766}
2767
2768
2769
2770/*
2771 * Remote port features (RPF) implementation. 2575 * Remote port features (RPF) implementation.
2772 */ 2576 */
2773 2577
@@ -2827,7 +2631,7 @@ bfa_fcs_rpf_sm_uninit(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
2827 case RPFSM_EVENT_RPORT_ONLINE: 2631 case RPFSM_EVENT_RPORT_ONLINE:
2828 /* Send RPSC2 to a Brocade fabric only. */ 2632 /* Send RPSC2 to a Brocade fabric only. */
2829 if ((!BFA_FCS_PID_IS_WKA(rport->pid)) && 2633 if ((!BFA_FCS_PID_IS_WKA(rport->pid)) &&
2830 ((bfa_lps_is_brcd_fabric(rport->port->fabric->lps)) || 2634 ((rport->port->fabric->lps->brcd_switch) ||
2831 (bfa_fcs_fabric_get_switch_oui(fabric) == 2635 (bfa_fcs_fabric_get_switch_oui(fabric) ==
2832 BFA_FCS_BRCD_SWITCH_OUI))) { 2636 BFA_FCS_BRCD_SWITCH_OUI))) {
2833 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending); 2637 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending);
@@ -3093,7 +2897,7 @@ bfa_fcs_rpf_rpsc2_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
3093 num_ents = be16_to_cpu(rpsc2_acc->num_pids); 2897 num_ents = be16_to_cpu(rpsc2_acc->num_pids);
3094 bfa_trc(rport->fcs, num_ents); 2898 bfa_trc(rport->fcs, num_ents);
3095 if (num_ents > 0) { 2899 if (num_ents > 0) {
3096 bfa_assert(rpsc2_acc->port_info[0].pid != rport->pid); 2900 WARN_ON(rpsc2_acc->port_info[0].pid == rport->pid);
3097 bfa_trc(rport->fcs, 2901 bfa_trc(rport->fcs,
3098 be16_to_cpu(rpsc2_acc->port_info[0].pid)); 2902 be16_to_cpu(rpsc2_acc->port_info[0].pid));
3099 bfa_trc(rport->fcs, 2903 bfa_trc(rport->fcs,
diff --git a/drivers/scsi/bfa/bfa_hw_cb.c b/drivers/scsi/bfa/bfa_hw_cb.c
index d8464ae60070..977e681ec803 100644
--- a/drivers/scsi/bfa/bfa_hw_cb.c
+++ b/drivers/scsi/bfa/bfa_hw_cb.c
@@ -15,6 +15,7 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18#include "bfad_drv.h"
18#include "bfa_modules.h" 19#include "bfa_modules.h"
19#include "bfi_cbreg.h" 20#include "bfi_cbreg.h"
20 21
@@ -110,7 +111,7 @@ bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs)
110{ 111{
111 int i; 112 int i;
112 113
113 bfa_assert((nvecs == 1) || (nvecs == __HFN_NUMINTS)); 114 WARN_ON((nvecs != 1) && (nvecs != __HFN_NUMINTS));
114 115
115 bfa->msix.nvecs = nvecs; 116 bfa->msix.nvecs = nvecs;
116 if (nvecs == 1) { 117 if (nvecs == 1) {
diff --git a/drivers/scsi/bfa/bfa_hw_ct.c b/drivers/scsi/bfa/bfa_hw_ct.c
index b0efbc713ffe..21018d98a07b 100644
--- a/drivers/scsi/bfa/bfa_hw_ct.c
+++ b/drivers/scsi/bfa/bfa_hw_ct.c
@@ -15,6 +15,7 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18#include "bfad_drv.h"
18#include "bfa_modules.h" 19#include "bfa_modules.h"
19#include "bfi_ctreg.h" 20#include "bfi_ctreg.h"
20 21
@@ -116,7 +117,7 @@ bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
116void 117void
117bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs) 118bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs)
118{ 119{
119 bfa_assert((nvecs == 1) || (nvecs == BFA_MSIX_CT_MAX)); 120 WARN_ON((nvecs != 1) && (nvecs != BFA_MSIX_CT_MAX));
120 bfa_trc(bfa, nvecs); 121 bfa_trc(bfa, nvecs);
121 122
122 bfa->msix.nvecs = nvecs; 123 bfa->msix.nvecs = nvecs;
@@ -143,7 +144,7 @@ bfa_hwct_msix_install(struct bfa_s *bfa)
143 for (; i <= BFA_MSIX_RME_Q3; i++) 144 for (; i <= BFA_MSIX_RME_Q3; i++)
144 bfa->msix.handler[i] = bfa_msix_rspq; 145 bfa->msix.handler[i] = bfa_msix_rspq;
145 146
146 bfa_assert(i == BFA_MSIX_LPU_ERR); 147 WARN_ON(i != BFA_MSIX_LPU_ERR);
147 bfa->msix.handler[BFA_MSIX_LPU_ERR] = bfa_msix_lpu_err; 148 bfa->msix.handler[BFA_MSIX_LPU_ERR] = bfa_msix_lpu_err;
148} 149}
149 150
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 9f4aa391ea9d..c1f72c49196f 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -15,11 +15,11 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18#include "bfad_drv.h"
18#include "bfa_ioc.h" 19#include "bfa_ioc.h"
19#include "bfi_ctreg.h" 20#include "bfi_ctreg.h"
20#include "bfa_defs.h" 21#include "bfa_defs.h"
21#include "bfa_defs_svc.h" 22#include "bfa_defs_svc.h"
22#include "bfad_drv.h"
23 23
24BFA_TRC_FILE(CNA, IOC); 24BFA_TRC_FILE(CNA, IOC);
25 25
@@ -29,7 +29,7 @@ BFA_TRC_FILE(CNA, IOC);
29#define BFA_IOC_TOV 3000 /* msecs */ 29#define BFA_IOC_TOV 3000 /* msecs */
30#define BFA_IOC_HWSEM_TOV 500 /* msecs */ 30#define BFA_IOC_HWSEM_TOV 500 /* msecs */
31#define BFA_IOC_HB_TOV 500 /* msecs */ 31#define BFA_IOC_HB_TOV 500 /* msecs */
32#define BFA_IOC_HWINIT_MAX 2 32#define BFA_IOC_HWINIT_MAX 5
33#define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV 33#define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV
34 34
35#define bfa_ioc_timer_start(__ioc) \ 35#define bfa_ioc_timer_start(__ioc) \
@@ -42,11 +42,6 @@ BFA_TRC_FILE(CNA, IOC);
42 bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV) 42 bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
43#define bfa_hb_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->hb_timer) 43#define bfa_hb_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->hb_timer)
44 44
45#define BFA_DBG_FWTRC_ENTS (BFI_IOC_TRC_ENTS)
46#define BFA_DBG_FWTRC_LEN \
47 (BFA_DBG_FWTRC_ENTS * sizeof(struct bfa_trc_s) + \
48 (sizeof(struct bfa_trc_mod_s) - \
49 BFA_TRC_MAX * sizeof(struct bfa_trc_s)))
50#define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn)) 45#define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
51 46
52/* 47/*
@@ -59,17 +54,16 @@ BFA_TRC_FILE(CNA, IOC);
59 ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc)) 54 ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
60#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc)) 55#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
61#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc)) 56#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
62#define bfa_ioc_notify_hbfail(__ioc) \ 57#define bfa_ioc_notify_fail(__ioc) \
63 ((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc)) 58 ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
64 59#define bfa_ioc_sync_join(__ioc) \
65#ifdef BFA_IOC_IS_UEFI 60 ((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
66#define bfa_ioc_is_bios_optrom(__ioc) (0) 61#define bfa_ioc_sync_leave(__ioc) \
67#define bfa_ioc_is_uefi(__ioc) BFA_IOC_IS_UEFI 62 ((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
68#else 63#define bfa_ioc_sync_ack(__ioc) \
69#define bfa_ioc_is_bios_optrom(__ioc) \ 64 ((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
70 (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ) 65#define bfa_ioc_sync_complete(__ioc) \
71#define bfa_ioc_is_uefi(__ioc) (0) 66 ((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
72#endif
73 67
74#define bfa_ioc_mbox_cmd_pending(__ioc) \ 68#define bfa_ioc_mbox_cmd_pending(__ioc) \
75 (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \ 69 (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
@@ -81,29 +75,22 @@ bfa_boolean_t bfa_auto_recover = BFA_TRUE;
81 * forward declarations 75 * forward declarations
82 */ 76 */
83static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc); 77static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
84static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc);
85static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force); 78static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
86static void bfa_ioc_timeout(void *ioc); 79static void bfa_ioc_timeout(void *ioc);
87static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc); 80static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
88static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc); 81static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
89static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc); 82static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
90static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc); 83static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
91static void bfa_ioc_hb_stop(struct bfa_ioc_s *ioc);
92static void bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force);
93static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc); 84static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
94static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc); 85static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc);
95static void bfa_ioc_recover(struct bfa_ioc_s *ioc); 86static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
96static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc); 87static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc);
97static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc); 88static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
98static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc); 89static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
99static void bfa_ioc_pf_enabled(struct bfa_ioc_s *ioc); 90static void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc);
100static void bfa_ioc_pf_disabled(struct bfa_ioc_s *ioc); 91static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
101static void bfa_ioc_pf_failed(struct bfa_ioc_s *ioc);
102static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc); 92static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
103 93
104/*
105 * hal_ioc_sm
106 */
107 94
108/* 95/*
109 * IOC state machine definitions/declarations 96 * IOC state machine definitions/declarations
@@ -116,10 +103,11 @@ enum ioc_event {
116 IOC_E_ENABLED = 5, /* f/w enabled */ 103 IOC_E_ENABLED = 5, /* f/w enabled */
117 IOC_E_FWRSP_GETATTR = 6, /* IOC get attribute response */ 104 IOC_E_FWRSP_GETATTR = 6, /* IOC get attribute response */
118 IOC_E_DISABLED = 7, /* f/w disabled */ 105 IOC_E_DISABLED = 7, /* f/w disabled */
119 IOC_E_FAILED = 8, /* failure notice by iocpf sm */ 106 IOC_E_INITFAILED = 8, /* failure notice by iocpf sm */
120 IOC_E_HBFAIL = 9, /* heartbeat failure */ 107 IOC_E_PFFAILED = 9, /* failure notice by iocpf sm */
121 IOC_E_HWERROR = 10, /* hardware error interrupt */ 108 IOC_E_HBFAIL = 10, /* heartbeat failure */
122 IOC_E_TIMEOUT = 11, /* timeout */ 109 IOC_E_HWERROR = 11, /* hardware error interrupt */
110 IOC_E_TIMEOUT = 12, /* timeout */
123}; 111};
124 112
125bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event); 113bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
@@ -127,7 +115,7 @@ bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
127bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event); 115bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
128bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event); 116bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
129bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event); 117bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
130bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc_s, enum ioc_event); 118bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event);
131bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event); 119bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
132bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event); 120bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
133bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event); 121bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
@@ -138,7 +126,7 @@ static struct bfa_sm_table_s ioc_sm_table[] = {
138 {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING}, 126 {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
139 {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR}, 127 {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
140 {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL}, 128 {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
141 {BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL}, 129 {BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
142 {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL}, 130 {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
143 {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING}, 131 {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
144 {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED}, 132 {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
@@ -165,12 +153,6 @@ static struct bfa_sm_table_s ioc_sm_table[] = {
165/* 153/*
166 * Forward declareations for iocpf state machine 154 * Forward declareations for iocpf state machine
167 */ 155 */
168static void bfa_iocpf_enable(struct bfa_ioc_s *ioc);
169static void bfa_iocpf_disable(struct bfa_ioc_s *ioc);
170static void bfa_iocpf_fail(struct bfa_ioc_s *ioc);
171static void bfa_iocpf_initfail(struct bfa_ioc_s *ioc);
172static void bfa_iocpf_getattrfail(struct bfa_ioc_s *ioc);
173static void bfa_iocpf_stop(struct bfa_ioc_s *ioc);
174static void bfa_iocpf_timeout(void *ioc_arg); 156static void bfa_iocpf_timeout(void *ioc_arg);
175static void bfa_iocpf_sem_timeout(void *ioc_arg); 157static void bfa_iocpf_sem_timeout(void *ioc_arg);
176 158
@@ -213,9 +195,14 @@ bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
213bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event); 195bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
214bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event); 196bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
215bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event); 197bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
198bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf_s,
199 enum iocpf_event);
216bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event); 200bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
201bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf_s, enum iocpf_event);
217bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event); 202bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
218bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event); 203bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
204bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s,
205 enum iocpf_event);
219bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event); 206bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);
220 207
221static struct bfa_sm_table_s iocpf_sm_table[] = { 208static struct bfa_sm_table_s iocpf_sm_table[] = {
@@ -226,9 +213,12 @@ static struct bfa_sm_table_s iocpf_sm_table[] = {
226 {BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT}, 213 {BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
227 {BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT}, 214 {BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
228 {BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY}, 215 {BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
216 {BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
229 {BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL}, 217 {BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
218 {BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
230 {BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL}, 219 {BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
231 {BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING}, 220 {BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
221 {BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
232 {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED}, 222 {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
233}; 223};
234 224
@@ -301,7 +291,7 @@ bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
301static void 291static void
302bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc) 292bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
303{ 293{
304 bfa_iocpf_enable(ioc); 294 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
305} 295}
306 296
307/* 297/*
@@ -318,13 +308,13 @@ bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
318 bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr); 308 bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
319 break; 309 break;
320 310
321 case IOC_E_FAILED: 311 case IOC_E_PFFAILED:
322 bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); 312 /* !!! fall through !!! */
323 break;
324
325 case IOC_E_HWERROR: 313 case IOC_E_HWERROR:
326 bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); 314 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
327 bfa_iocpf_initfail(ioc); 315 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
316 if (event != IOC_E_PFFAILED)
317 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
328 break; 318 break;
329 319
330 case IOC_E_DISABLE: 320 case IOC_E_DISABLE:
@@ -333,7 +323,7 @@ bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
333 323
334 case IOC_E_DETACH: 324 case IOC_E_DETACH:
335 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); 325 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
336 bfa_iocpf_stop(ioc); 326 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
337 break; 327 break;
338 328
339 case IOC_E_ENABLE: 329 case IOC_E_ENABLE:
@@ -367,18 +357,16 @@ bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
367 bfa_fsm_set_state(ioc, bfa_ioc_sm_op); 357 bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
368 break; 358 break;
369 359
370 case IOC_E_FAILED:
371 bfa_ioc_timer_stop(ioc);
372 bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
373 break; 360 break;
374 361 case IOC_E_PFFAILED:
375 case IOC_E_HWERROR: 362 case IOC_E_HWERROR:
376 bfa_ioc_timer_stop(ioc); 363 bfa_ioc_timer_stop(ioc);
377 /* fall through */ 364 /* !!! fall through !!! */
378
379 case IOC_E_TIMEOUT: 365 case IOC_E_TIMEOUT:
380 bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); 366 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
381 bfa_iocpf_getattrfail(ioc); 367 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
368 if (event != IOC_E_PFFAILED)
369 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
382 break; 370 break;
383 371
384 case IOC_E_DISABLE: 372 case IOC_E_DISABLE:
@@ -415,22 +403,24 @@ bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
415 break; 403 break;
416 404
417 case IOC_E_DISABLE: 405 case IOC_E_DISABLE:
418 bfa_ioc_hb_stop(ioc); 406 bfa_hb_timer_stop(ioc);
419 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); 407 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
420 break; 408 break;
421 409
422 case IOC_E_FAILED: 410 case IOC_E_PFFAILED:
423 bfa_ioc_hb_stop(ioc);
424 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
425 break;
426
427 case IOC_E_HWERROR: 411 case IOC_E_HWERROR:
428 bfa_ioc_hb_stop(ioc); 412 bfa_hb_timer_stop(ioc);
429 /* !!! fall through !!! */ 413 /* !!! fall through !!! */
430
431 case IOC_E_HBFAIL: 414 case IOC_E_HBFAIL:
432 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); 415 bfa_ioc_fail_notify(ioc);
433 bfa_iocpf_fail(ioc); 416
417 if (ioc->iocpf.auto_recover)
418 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
419 else
420 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
421
422 if (event != IOC_E_PFFAILED)
423 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
434 break; 424 break;
435 425
436 default: 426 default:
@@ -443,7 +433,7 @@ static void
443bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc) 433bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
444{ 434{
445 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; 435 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
446 bfa_iocpf_disable(ioc); 436 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
447 BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n"); 437 BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
448} 438}
449 439
@@ -466,7 +456,7 @@ bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
466 * after iocpf sm completes failure processing and 456 * after iocpf sm completes failure processing and
467 * moves to disabled state. 457 * moves to disabled state.
468 */ 458 */
469 bfa_iocpf_fail(ioc); 459 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
470 break; 460 break;
471 461
472 default: 462 default:
@@ -499,7 +489,7 @@ bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
499 489
500 case IOC_E_DETACH: 490 case IOC_E_DETACH:
501 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); 491 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
502 bfa_iocpf_stop(ioc); 492 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
503 break; 493 break;
504 494
505 default: 495 default:
@@ -509,16 +499,16 @@ bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
509 499
510 500
511static void 501static void
512bfa_ioc_sm_initfail_entry(struct bfa_ioc_s *ioc) 502bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s *ioc)
513{ 503{
514 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); 504 bfa_trc(ioc, 0);
515} 505}
516 506
517/* 507/*
518 * Hardware initialization failed. 508 * Hardware initialization retry.
519 */ 509 */
520static void 510static void
521bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event) 511bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
522{ 512{
523 bfa_trc(ioc, event); 513 bfa_trc(ioc, event);
524 514
@@ -527,11 +517,21 @@ bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event)
527 bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr); 517 bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
528 break; 518 break;
529 519
530 case IOC_E_FAILED: 520 case IOC_E_PFFAILED:
521 case IOC_E_HWERROR:
531 /* 522 /*
532 * Initialization failure during iocpf init retry. 523 * Initialization retry failed.
533 */ 524 */
534 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); 525 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
526 if (event != IOC_E_PFFAILED)
527 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
528 break;
529
530 case IOC_E_INITFAILED:
531 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
532 break;
533
534 case IOC_E_ENABLE:
535 break; 535 break;
536 536
537 case IOC_E_DISABLE: 537 case IOC_E_DISABLE:
@@ -540,7 +540,7 @@ bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event)
540 540
541 case IOC_E_DETACH: 541 case IOC_E_DETACH:
542 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); 542 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
543 bfa_iocpf_stop(ioc); 543 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
544 break; 544 break;
545 545
546 default: 546 default:
@@ -552,21 +552,7 @@ bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event)
552static void 552static void
553bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc) 553bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
554{ 554{
555 struct list_head *qe; 555 bfa_trc(ioc, 0);
556 struct bfa_ioc_hbfail_notify_s *notify;
557 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
558
559 /*
560 * Notify driver and common modules registered for notification.
561 */
562 ioc->cbfn->hbfail_cbfn(ioc->bfa);
563 list_for_each(qe, &ioc->hb_notify_q) {
564 notify = (struct bfa_ioc_hbfail_notify_s *) qe;
565 notify->cbfn(notify->cbarg);
566 }
567
568 BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
569 "Heart Beat of IOC has failed\n");
570} 556}
571 557
572/* 558/*
@@ -579,23 +565,19 @@ bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
579 565
580 switch (event) { 566 switch (event) {
581 567
582 case IOC_E_FAILED:
583 /*
584 * Initialization failure during iocpf recovery.
585 * !!! Fall through !!!
586 */
587 case IOC_E_ENABLE: 568 case IOC_E_ENABLE:
588 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); 569 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
589 break; 570 break;
590 571
591 case IOC_E_ENABLED:
592 bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
593 break;
594
595 case IOC_E_DISABLE: 572 case IOC_E_DISABLE:
596 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); 573 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
597 break; 574 break;
598 575
576 case IOC_E_DETACH:
577 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
578 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
579 break;
580
599 case IOC_E_HWERROR: 581 case IOC_E_HWERROR:
600 /* 582 /*
601 * HB failure notification, ignore. 583 * HB failure notification, ignore.
@@ -606,13 +588,10 @@ bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
606 } 588 }
607} 589}
608 590
609
610
611/* 591/*
612 * IOCPF State Machine 592 * IOCPF State Machine
613 */ 593 */
614 594
615
616/* 595/*
617 * Reset entry actions -- initialize state machine 596 * Reset entry actions -- initialize state machine
618 */ 597 */
@@ -668,22 +647,29 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
668 switch (event) { 647 switch (event) {
669 case IOCPF_E_SEMLOCKED: 648 case IOCPF_E_SEMLOCKED:
670 if (bfa_ioc_firmware_lock(ioc)) { 649 if (bfa_ioc_firmware_lock(ioc)) {
671 iocpf->retry_count = 0; 650 if (bfa_ioc_sync_complete(ioc)) {
672 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); 651 iocpf->retry_count = 0;
652 bfa_ioc_sync_join(ioc);
653 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
654 } else {
655 bfa_ioc_firmware_unlock(ioc);
656 writel(1, ioc->ioc_regs.ioc_sem_reg);
657 bfa_sem_timer_start(ioc);
658 }
673 } else { 659 } else {
674 bfa_ioc_hw_sem_release(ioc); 660 writel(1, ioc->ioc_regs.ioc_sem_reg);
675 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch); 661 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
676 } 662 }
677 break; 663 break;
678 664
679 case IOCPF_E_DISABLE: 665 case IOCPF_E_DISABLE:
680 bfa_ioc_hw_sem_get_cancel(ioc); 666 bfa_sem_timer_stop(ioc);
681 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); 667 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
682 bfa_ioc_pf_disabled(ioc); 668 bfa_fsm_send_event(ioc, IOC_E_DISABLED);
683 break; 669 break;
684 670
685 case IOCPF_E_STOP: 671 case IOCPF_E_STOP:
686 bfa_ioc_hw_sem_get_cancel(ioc); 672 bfa_sem_timer_stop(ioc);
687 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); 673 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
688 break; 674 break;
689 675
@@ -726,7 +712,7 @@ bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
726 case IOCPF_E_DISABLE: 712 case IOCPF_E_DISABLE:
727 bfa_iocpf_timer_stop(ioc); 713 bfa_iocpf_timer_stop(ioc);
728 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); 714 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
729 bfa_ioc_pf_disabled(ioc); 715 bfa_fsm_send_event(ioc, IOC_E_DISABLED);
730 break; 716 break;
731 717
732 case IOCPF_E_STOP: 718 case IOCPF_E_STOP:
@@ -760,13 +746,18 @@ bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
760 746
761 switch (event) { 747 switch (event) {
762 case IOCPF_E_SEMLOCKED: 748 case IOCPF_E_SEMLOCKED:
763 iocpf->retry_count = 0; 749 if (bfa_ioc_sync_complete(ioc)) {
764 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); 750 bfa_ioc_sync_join(ioc);
751 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
752 } else {
753 writel(1, ioc->ioc_regs.ioc_sem_reg);
754 bfa_sem_timer_start(ioc);
755 }
765 break; 756 break;
766 757
767 case IOCPF_E_DISABLE: 758 case IOCPF_E_DISABLE:
768 bfa_ioc_hw_sem_get_cancel(ioc); 759 bfa_sem_timer_stop(ioc);
769 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); 760 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
770 break; 761 break;
771 762
772 default: 763 default:
@@ -774,12 +765,11 @@ bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
774 } 765 }
775} 766}
776 767
777
778static void 768static void
779bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf) 769bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
780{ 770{
781 bfa_iocpf_timer_start(iocpf->ioc); 771 bfa_iocpf_timer_start(iocpf->ioc);
782 bfa_ioc_reset(iocpf->ioc, BFA_FALSE); 772 bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
783} 773}
784 774
785/* 775/*
@@ -806,23 +796,16 @@ bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
806 */ 796 */
807 797
808 case IOCPF_E_TIMEOUT: 798 case IOCPF_E_TIMEOUT:
809 iocpf->retry_count++; 799 writel(1, ioc->ioc_regs.ioc_sem_reg);
810 if (iocpf->retry_count < BFA_IOC_HWINIT_MAX) {
811 bfa_iocpf_timer_start(ioc);
812 bfa_ioc_reset(ioc, BFA_TRUE);
813 break;
814 }
815
816 bfa_ioc_hw_sem_release(ioc);
817 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
818
819 if (event == IOCPF_E_TIMEOUT) 800 if (event == IOCPF_E_TIMEOUT)
820 bfa_ioc_pf_failed(ioc); 801 bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
802 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
821 break; 803 break;
822 804
823 case IOCPF_E_DISABLE: 805 case IOCPF_E_DISABLE:
824 bfa_ioc_hw_sem_release(ioc);
825 bfa_iocpf_timer_stop(ioc); 806 bfa_iocpf_timer_stop(ioc);
807 bfa_ioc_sync_leave(ioc);
808 writel(1, ioc->ioc_regs.ioc_sem_reg);
826 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); 809 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
827 break; 810 break;
828 811
@@ -831,7 +814,6 @@ bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
831 } 814 }
832} 815}
833 816
834
835static void 817static void
836bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf) 818bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
837{ 819{
@@ -853,7 +835,7 @@ bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
853 switch (event) { 835 switch (event) {
854 case IOCPF_E_FWRSP_ENABLE: 836 case IOCPF_E_FWRSP_ENABLE:
855 bfa_iocpf_timer_stop(ioc); 837 bfa_iocpf_timer_stop(ioc);
856 bfa_ioc_hw_sem_release(ioc); 838 writel(1, ioc->ioc_regs.ioc_sem_reg);
857 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready); 839 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
858 break; 840 break;
859 841
@@ -864,23 +846,15 @@ bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
864 */ 846 */
865 847
866 case IOCPF_E_TIMEOUT: 848 case IOCPF_E_TIMEOUT:
867 iocpf->retry_count++; 849 writel(1, ioc->ioc_regs.ioc_sem_reg);
868 if (iocpf->retry_count < BFA_IOC_HWINIT_MAX) {
869 writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
870 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
871 break;
872 }
873
874 bfa_ioc_hw_sem_release(ioc);
875 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
876
877 if (event == IOCPF_E_TIMEOUT) 850 if (event == IOCPF_E_TIMEOUT)
878 bfa_ioc_pf_failed(ioc); 851 bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
852 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
879 break; 853 break;
880 854
881 case IOCPF_E_DISABLE: 855 case IOCPF_E_DISABLE:
882 bfa_iocpf_timer_stop(ioc); 856 bfa_iocpf_timer_stop(ioc);
883 bfa_ioc_hw_sem_release(ioc); 857 writel(1, ioc->ioc_regs.ioc_sem_reg);
884 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling); 858 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
885 break; 859 break;
886 860
@@ -893,12 +867,10 @@ bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
893 } 867 }
894} 868}
895 869
896
897
898static void 870static void
899bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf) 871bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
900{ 872{
901 bfa_ioc_pf_enabled(iocpf->ioc); 873 bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED);
902} 874}
903 875
904static void 876static void
@@ -914,20 +886,21 @@ bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
914 break; 886 break;
915 887
916 case IOCPF_E_GETATTRFAIL: 888 case IOCPF_E_GETATTRFAIL:
917 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail); 889 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
918 break; 890 break;
919 891
920 case IOCPF_E_FAIL: 892 case IOCPF_E_FAIL:
921 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); 893 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
922 break; 894 break;
923 895
924 case IOCPF_E_FWREADY: 896 case IOCPF_E_FWREADY:
925 if (bfa_ioc_is_operational(ioc)) 897 if (bfa_ioc_is_operational(ioc)) {
926 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); 898 bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
927 else 899 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
928 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail); 900 } else {
929 901 bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
930 bfa_ioc_pf_failed(ioc); 902 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
903 }
931 break; 904 break;
932 905
933 default: 906 default:
@@ -935,7 +908,6 @@ bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
935 } 908 }
936} 909}
937 910
938
939static void 911static void
940bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf) 912bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
941{ 913{
@@ -957,7 +929,7 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
957 case IOCPF_E_FWRSP_DISABLE: 929 case IOCPF_E_FWRSP_DISABLE:
958 case IOCPF_E_FWREADY: 930 case IOCPF_E_FWREADY:
959 bfa_iocpf_timer_stop(ioc); 931 bfa_iocpf_timer_stop(ioc);
960 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); 932 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
961 break; 933 break;
962 934
963 case IOCPF_E_FAIL: 935 case IOCPF_E_FAIL:
@@ -968,7 +940,7 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
968 940
969 case IOCPF_E_TIMEOUT: 941 case IOCPF_E_TIMEOUT:
970 writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); 942 writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
971 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); 943 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
972 break; 944 break;
973 945
974 case IOCPF_E_FWRSP_ENABLE: 946 case IOCPF_E_FWRSP_ENABLE:
@@ -979,13 +951,44 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
979 } 951 }
980} 952}
981 953
954static void
955bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf_s *iocpf)
956{
957 bfa_ioc_hw_sem_get(iocpf->ioc);
958}
959
960/*
961 * IOC hb ack request is being removed.
962 */
963static void
964bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
965{
966 struct bfa_ioc_s *ioc = iocpf->ioc;
967
968 bfa_trc(ioc, event);
969
970 switch (event) {
971 case IOCPF_E_SEMLOCKED:
972 bfa_ioc_sync_leave(ioc);
973 writel(1, ioc->ioc_regs.ioc_sem_reg);
974 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
975 break;
976
977 case IOCPF_E_FAIL:
978 break;
979
980 default:
981 bfa_sm_fault(ioc, event);
982 }
983}
984
982/* 985/*
983 * IOC disable completion entry. 986 * IOC disable completion entry.
984 */ 987 */
985static void 988static void
986bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf) 989bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
987{ 990{
988 bfa_ioc_pf_disabled(iocpf->ioc); 991 bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED);
989} 992}
990 993
991static void 994static void
@@ -997,6 +1000,7 @@ bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
997 1000
998 switch (event) { 1001 switch (event) {
999 case IOCPF_E_ENABLE: 1002 case IOCPF_E_ENABLE:
1003 iocpf->retry_count = 0;
1000 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait); 1004 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
1001 break; 1005 break;
1002 1006
@@ -1010,11 +1014,64 @@ bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1010 } 1014 }
1011} 1015}
1012 1016
1017static void
1018bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf)
1019{
1020 bfa_ioc_hw_sem_get(iocpf->ioc);
1021}
1022
1023/*
1024 * Hardware initialization failed.
1025 */
1026static void
1027bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1028{
1029 struct bfa_ioc_s *ioc = iocpf->ioc;
1030
1031 bfa_trc(ioc, event);
1032
1033 switch (event) {
1034 case IOCPF_E_SEMLOCKED:
1035 bfa_ioc_notify_fail(ioc);
1036 bfa_ioc_sync_ack(ioc);
1037 iocpf->retry_count++;
1038 if (iocpf->retry_count >= BFA_IOC_HWINIT_MAX) {
1039 bfa_ioc_sync_leave(ioc);
1040 writel(1, ioc->ioc_regs.ioc_sem_reg);
1041 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
1042 } else {
1043 if (bfa_ioc_sync_complete(ioc))
1044 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
1045 else {
1046 writel(1, ioc->ioc_regs.ioc_sem_reg);
1047 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
1048 }
1049 }
1050 break;
1051
1052 case IOCPF_E_DISABLE:
1053 bfa_sem_timer_stop(ioc);
1054 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
1055 break;
1056
1057 case IOCPF_E_STOP:
1058 bfa_sem_timer_stop(ioc);
1059 bfa_ioc_firmware_unlock(ioc);
1060 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
1061 break;
1062
1063 case IOCPF_E_FAIL:
1064 break;
1065
1066 default:
1067 bfa_sm_fault(ioc, event);
1068 }
1069}
1013 1070
1014static void 1071static void
1015bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf) 1072bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
1016{ 1073{
1017 bfa_iocpf_timer_start(iocpf->ioc); 1074 bfa_fsm_send_event(iocpf->ioc, IOC_E_INITFAILED);
1018} 1075}
1019 1076
1020/* 1077/*
@@ -1029,47 +1086,77 @@ bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1029 1086
1030 switch (event) { 1087 switch (event) {
1031 case IOCPF_E_DISABLE: 1088 case IOCPF_E_DISABLE:
1032 bfa_iocpf_timer_stop(ioc);
1033 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); 1089 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
1034 break; 1090 break;
1035 1091
1036 case IOCPF_E_STOP: 1092 case IOCPF_E_STOP:
1037 bfa_iocpf_timer_stop(ioc);
1038 bfa_ioc_firmware_unlock(ioc); 1093 bfa_ioc_firmware_unlock(ioc);
1039 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); 1094 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
1040 break; 1095 break;
1041 1096
1042 case IOCPF_E_TIMEOUT:
1043 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
1044 break;
1045
1046 default: 1097 default:
1047 bfa_sm_fault(ioc, event); 1098 bfa_sm_fault(ioc, event);
1048 } 1099 }
1049} 1100}
1050 1101
1051
1052static void 1102static void
1053bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf) 1103bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf)
1054{ 1104{
1055 /* 1105 /*
1056 * Mark IOC as failed in hardware and stop firmware. 1106 * Mark IOC as failed in hardware and stop firmware.
1057 */ 1107 */
1058 bfa_ioc_lpu_stop(iocpf->ioc); 1108 bfa_ioc_lpu_stop(iocpf->ioc);
1059 writel(BFI_IOC_FAIL, iocpf->ioc->ioc_regs.ioc_fwstate);
1060
1061 /*
1062 * Notify other functions on HB failure.
1063 */
1064 bfa_ioc_notify_hbfail(iocpf->ioc);
1065 1109
1066 /* 1110 /*
1067 * Flush any queued up mailbox requests. 1111 * Flush any queued up mailbox requests.
1068 */ 1112 */
1069 bfa_ioc_mbox_hbfail(iocpf->ioc); 1113 bfa_ioc_mbox_hbfail(iocpf->ioc);
1070 1114
1071 if (iocpf->auto_recover) 1115 bfa_ioc_hw_sem_get(iocpf->ioc);
1072 bfa_iocpf_recovery_timer_start(iocpf->ioc); 1116}
1117
1118static void
1119bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1120{
1121 struct bfa_ioc_s *ioc = iocpf->ioc;
1122
1123 bfa_trc(ioc, event);
1124
1125 switch (event) {
1126 case IOCPF_E_SEMLOCKED:
1127 iocpf->retry_count = 0;
1128 bfa_ioc_sync_ack(ioc);
1129 bfa_ioc_notify_fail(ioc);
1130 if (!iocpf->auto_recover) {
1131 bfa_ioc_sync_leave(ioc);
1132 writel(1, ioc->ioc_regs.ioc_sem_reg);
1133 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1134 } else {
1135 if (bfa_ioc_sync_complete(ioc))
1136 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
1137 else {
1138 writel(1, ioc->ioc_regs.ioc_sem_reg);
1139 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
1140 }
1141 }
1142 break;
1143
1144 case IOCPF_E_DISABLE:
1145 bfa_sem_timer_stop(ioc);
1146 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
1147 break;
1148
1149 case IOCPF_E_FAIL:
1150 break;
1151
1152 default:
1153 bfa_sm_fault(ioc, event);
1154 }
1155}
1156
1157static void
1158bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
1159{
1073} 1160}
1074 1161
1075/* 1162/*
@@ -1084,24 +1171,16 @@ bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1084 1171
1085 switch (event) { 1172 switch (event) {
1086 case IOCPF_E_DISABLE: 1173 case IOCPF_E_DISABLE:
1087 if (iocpf->auto_recover)
1088 bfa_iocpf_timer_stop(ioc);
1089 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); 1174 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
1090 break; 1175 break;
1091 1176
1092 case IOCPF_E_TIMEOUT:
1093 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
1094 break;
1095
1096 default: 1177 default:
1097 bfa_sm_fault(ioc, event); 1178 bfa_sm_fault(ioc, event);
1098 } 1179 }
1099} 1180}
1100 1181
1101
1102
1103/* 1182/*
1104 * hal_ioc_pvt BFA IOC private functions 1183 * BFA IOC private functions
1105 */ 1184 */
1106 1185
1107static void 1186static void
@@ -1139,16 +1218,10 @@ bfa_ioc_sem_get(void __iomem *sem_reg)
1139 if (r32 == 0) 1218 if (r32 == 0)
1140 return BFA_TRUE; 1219 return BFA_TRUE;
1141 1220
1142 bfa_assert(cnt < BFA_SEM_SPINCNT); 1221 WARN_ON(cnt >= BFA_SEM_SPINCNT);
1143 return BFA_FALSE; 1222 return BFA_FALSE;
1144} 1223}
1145 1224
1146void
1147bfa_ioc_sem_release(void __iomem *sem_reg)
1148{
1149 writel(1, sem_reg);
1150}
1151
1152static void 1225static void
1153bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc) 1226bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
1154{ 1227{
@@ -1167,18 +1240,6 @@ bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
1167 bfa_sem_timer_start(ioc); 1240 bfa_sem_timer_start(ioc);
1168} 1241}
1169 1242
1170void
1171bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc)
1172{
1173 writel(1, ioc->ioc_regs.ioc_sem_reg);
1174}
1175
1176static void
1177bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc)
1178{
1179 bfa_sem_timer_stop(ioc);
1180}
1181
1182/* 1243/*
1183 * Initialize LPU local memory (aka secondary memory / SRAM) 1244 * Initialize LPU local memory (aka secondary memory / SRAM)
1184 */ 1245 */
@@ -1212,7 +1273,7 @@ bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
1212 * If memory initialization is not successful, IOC timeout will catch 1273 * If memory initialization is not successful, IOC timeout will catch
1213 * such failures. 1274 * such failures.
1214 */ 1275 */
1215 bfa_assert(pss_ctl & __PSS_LMEM_INIT_DONE); 1276 WARN_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
1216 bfa_trc(ioc, pss_ctl); 1277 bfa_trc(ioc, pss_ctl);
1217 1278
1218 pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN); 1279 pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
@@ -1258,8 +1319,8 @@ bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
1258 int i; 1319 int i;
1259 u32 *fwsig = (u32 *) fwhdr; 1320 u32 *fwsig = (u32 *) fwhdr;
1260 1321
1261 pgnum = bfa_ioc_smem_pgnum(ioc, loff); 1322 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1262 pgoff = bfa_ioc_smem_pgoff(ioc, loff); 1323 pgoff = PSS_SMEM_PGOFF(loff);
1263 writel(pgnum, ioc->ioc_regs.host_page_num_fn); 1324 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1264 1325
1265 for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32)); 1326 for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
@@ -1304,12 +1365,6 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
1304{ 1365{
1305 struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr; 1366 struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;
1306 1367
1307 /*
1308 * If bios/efi boot (flash based) -- return true
1309 */
1310 if (bfa_ioc_is_bios_optrom(ioc))
1311 return BFA_TRUE;
1312
1313 bfa_ioc_fwver_get(ioc, &fwhdr); 1368 bfa_ioc_fwver_get(ioc, &fwhdr);
1314 drv_fwhdr = (struct bfi_ioc_image_hdr_s *) 1369 drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
1315 bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0); 1370 bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
@@ -1342,7 +1397,6 @@ bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
1342 writel(1, ioc->ioc_regs.lpu_mbox_cmd); 1397 writel(1, ioc->ioc_regs.lpu_mbox_cmd);
1343} 1398}
1344 1399
1345
1346static void 1400static void
1347bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force) 1401bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
1348{ 1402{
@@ -1362,22 +1416,6 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
1362 boot_env = BFI_BOOT_LOADER_OS; 1416 boot_env = BFI_BOOT_LOADER_OS;
1363 1417
1364 /* 1418 /*
1365 * Flash based firmware boot BIOS env.
1366 */
1367 if (bfa_ioc_is_bios_optrom(ioc)) {
1368 boot_type = BFI_BOOT_TYPE_FLASH;
1369 boot_env = BFI_BOOT_LOADER_BIOS;
1370 }
1371
1372 /*
1373 * Flash based firmware boot UEFI env.
1374 */
1375 if (bfa_ioc_is_uefi(ioc)) {
1376 boot_type = BFI_BOOT_TYPE_FLASH;
1377 boot_env = BFI_BOOT_LOADER_UEFI;
1378 }
1379
1380 /*
1381 * check if firmware is valid 1419 * check if firmware is valid
1382 */ 1420 */
1383 fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ? 1421 fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
@@ -1405,8 +1443,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
1405 * convergence, IOC will be in operational state when 2nd driver 1443 * convergence, IOC will be in operational state when 2nd driver
1406 * is loaded. 1444 * is loaded.
1407 */ 1445 */
1408 if (ioc_fwstate == BFI_IOC_DISABLED || 1446 if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
1409 (!bfa_ioc_is_bios_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) {
1410 1447
1411 /* 1448 /*
1412 * When using MSI-X any pending firmware ready event should 1449 * When using MSI-X any pending firmware ready event should
@@ -1442,7 +1479,7 @@ bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
1442 bfa_trc(ioc, msgp[0]); 1479 bfa_trc(ioc, msgp[0]);
1443 bfa_trc(ioc, len); 1480 bfa_trc(ioc, len);
1444 1481
1445 bfa_assert(len <= BFI_IOC_MSGLEN_MAX); 1482 WARN_ON(len > BFI_IOC_MSGLEN_MAX);
1446 1483
1447 /* 1484 /*
1448 * first write msg to mailbox registers 1485 * first write msg to mailbox registers
@@ -1465,12 +1502,12 @@ static void
1465bfa_ioc_send_enable(struct bfa_ioc_s *ioc) 1502bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
1466{ 1503{
1467 struct bfi_ioc_ctrl_req_s enable_req; 1504 struct bfi_ioc_ctrl_req_s enable_req;
1468 struct bfa_timeval_s tv; 1505 struct timeval tv;
1469 1506
1470 bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ, 1507 bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
1471 bfa_ioc_portid(ioc)); 1508 bfa_ioc_portid(ioc));
1472 enable_req.ioc_class = ioc->ioc_mc; 1509 enable_req.ioc_class = ioc->ioc_mc;
1473 bfa_os_gettimeofday(&tv); 1510 do_gettimeofday(&tv);
1474 enable_req.tv_sec = be32_to_cpu(tv.tv_sec); 1511 enable_req.tv_sec = be32_to_cpu(tv.tv_sec);
1475 bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s)); 1512 bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
1476} 1513}
@@ -1504,7 +1541,6 @@ bfa_ioc_hb_check(void *cbarg)
1504 1541
1505 hb_count = readl(ioc->ioc_regs.heartbeat); 1542 hb_count = readl(ioc->ioc_regs.heartbeat);
1506 if (ioc->hb_count == hb_count) { 1543 if (ioc->hb_count == hb_count) {
1507 printk(KERN_CRIT "Firmware heartbeat failure at %d", hb_count);
1508 bfa_ioc_recover(ioc); 1544 bfa_ioc_recover(ioc);
1509 return; 1545 return;
1510 } else { 1546 } else {
@@ -1522,13 +1558,6 @@ bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
1522 bfa_hb_timer_start(ioc); 1558 bfa_hb_timer_start(ioc);
1523} 1559}
1524 1560
1525static void
1526bfa_ioc_hb_stop(struct bfa_ioc_s *ioc)
1527{
1528 bfa_hb_timer_stop(ioc);
1529}
1530
1531
1532/* 1561/*
1533 * Initiate a full firmware download. 1562 * Initiate a full firmware download.
1534 */ 1563 */
@@ -1550,8 +1579,8 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
1550 bfa_trc(ioc, bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc))); 1579 bfa_trc(ioc, bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)));
1551 fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno); 1580 fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);
1552 1581
1553 pgnum = bfa_ioc_smem_pgnum(ioc, loff); 1582 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1554 pgoff = bfa_ioc_smem_pgoff(ioc, loff); 1583 pgoff = PSS_SMEM_PGOFF(loff);
1555 1584
1556 writel(pgnum, ioc->ioc_regs.host_page_num_fn); 1585 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1557 1586
@@ -1581,7 +1610,8 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
1581 } 1610 }
1582 } 1611 }
1583 1612
1584 writel(bfa_ioc_smem_pgnum(ioc, 0), ioc->ioc_regs.host_page_num_fn); 1613 writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1614 ioc->ioc_regs.host_page_num_fn);
1585 1615
1586 /* 1616 /*
1587 * Set boot type and boot param at the end. 1617 * Set boot type and boot param at the end.
@@ -1592,11 +1622,6 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
1592 swab32(boot_env)); 1622 swab32(boot_env));
1593} 1623}
1594 1624
1595static void
1596bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force)
1597{
1598 bfa_ioc_hwinit(ioc, force);
1599}
1600 1625
1601/* 1626/*
1602 * Update BFA configuration from firmware configuration. 1627 * Update BFA configuration from firmware configuration.
@@ -1683,12 +1708,13 @@ bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc)
1683static bfa_status_t 1708static bfa_status_t
1684bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz) 1709bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
1685{ 1710{
1686 u32 pgnum, loff, r32; 1711 u32 pgnum, loff;
1712 __be32 r32;
1687 int i, len; 1713 int i, len;
1688 u32 *buf = tbuf; 1714 u32 *buf = tbuf;
1689 1715
1690 pgnum = bfa_ioc_smem_pgnum(ioc, soff); 1716 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
1691 loff = bfa_ioc_smem_pgoff(ioc, soff); 1717 loff = PSS_SMEM_PGOFF(soff);
1692 bfa_trc(ioc, pgnum); 1718 bfa_trc(ioc, pgnum);
1693 bfa_trc(ioc, loff); 1719 bfa_trc(ioc, loff);
1694 bfa_trc(ioc, sz); 1720 bfa_trc(ioc, sz);
@@ -1719,11 +1745,12 @@ bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
1719 writel(pgnum, ioc->ioc_regs.host_page_num_fn); 1745 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1720 } 1746 }
1721 } 1747 }
1722 writel(bfa_ioc_smem_pgnum(ioc, 0), ioc->ioc_regs.host_page_num_fn); 1748 writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1749 ioc->ioc_regs.host_page_num_fn);
1723 /* 1750 /*
1724 * release semaphore. 1751 * release semaphore.
1725 */ 1752 */
1726 bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg); 1753 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1727 1754
1728 bfa_trc(ioc, pgnum); 1755 bfa_trc(ioc, pgnum);
1729 return BFA_STATUS_OK; 1756 return BFA_STATUS_OK;
@@ -1742,8 +1769,8 @@ bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
1742 int i, len; 1769 int i, len;
1743 u32 pgnum, loff; 1770 u32 pgnum, loff;
1744 1771
1745 pgnum = bfa_ioc_smem_pgnum(ioc, soff); 1772 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
1746 loff = bfa_ioc_smem_pgoff(ioc, soff); 1773 loff = PSS_SMEM_PGOFF(soff);
1747 bfa_trc(ioc, pgnum); 1774 bfa_trc(ioc, pgnum);
1748 bfa_trc(ioc, loff); 1775 bfa_trc(ioc, loff);
1749 bfa_trc(ioc, sz); 1776 bfa_trc(ioc, sz);
@@ -1773,35 +1800,38 @@ bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
1773 writel(pgnum, ioc->ioc_regs.host_page_num_fn); 1800 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1774 } 1801 }
1775 } 1802 }
1776 writel(bfa_ioc_smem_pgnum(ioc, 0), ioc->ioc_regs.host_page_num_fn); 1803 writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1804 ioc->ioc_regs.host_page_num_fn);
1777 1805
1778 /* 1806 /*
1779 * release semaphore. 1807 * release semaphore.
1780 */ 1808 */
1781 bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg); 1809 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1782 bfa_trc(ioc, pgnum); 1810 bfa_trc(ioc, pgnum);
1783 return BFA_STATUS_OK; 1811 return BFA_STATUS_OK;
1784} 1812}
1785 1813
1786/*
1787 * hal iocpf to ioc interface
1788 */
1789static void 1814static void
1790bfa_ioc_pf_enabled(struct bfa_ioc_s *ioc) 1815bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
1791{ 1816{
1792 bfa_fsm_send_event(ioc, IOC_E_ENABLED); 1817 struct list_head *qe;
1793} 1818 struct bfa_ioc_hbfail_notify_s *notify;
1819 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
1794 1820
1795static void 1821 /*
1796bfa_ioc_pf_disabled(struct bfa_ioc_s *ioc) 1822 * Notify driver and common modules registered for notification.
1797{ 1823 */
1798 bfa_fsm_send_event(ioc, IOC_E_DISABLED); 1824 ioc->cbfn->hbfail_cbfn(ioc->bfa);
1799} 1825 list_for_each(qe, &ioc->hb_notify_q) {
1826 notify = (struct bfa_ioc_hbfail_notify_s *) qe;
1827 notify->cbfn(notify->cbarg);
1828 }
1829
1830 bfa_ioc_debug_save_ftrc(ioc);
1831
1832 BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
1833 "Heart Beat of IOC has failed\n");
1800 1834
1801static void
1802bfa_ioc_pf_failed(struct bfa_ioc_s *ioc)
1803{
1804 bfa_fsm_send_event(ioc, IOC_E_FAILED);
1805} 1835}
1806 1836
1807static void 1837static void
@@ -1817,12 +1847,6 @@ bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
1817 "with the driver version\n"); 1847 "with the driver version\n");
1818} 1848}
1819 1849
1820
1821
1822/*
1823 * hal_ioc_public
1824 */
1825
1826bfa_status_t 1850bfa_status_t
1827bfa_ioc_pll_init(struct bfa_ioc_s *ioc) 1851bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
1828{ 1852{
@@ -1838,7 +1862,7 @@ bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
1838 /* 1862 /*
1839 * release semaphore. 1863 * release semaphore.
1840 */ 1864 */
1841 bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg); 1865 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1842 1866
1843 return BFA_STATUS_OK; 1867 return BFA_STATUS_OK;
1844} 1868}
@@ -1909,7 +1933,7 @@ bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
1909void 1933void
1910bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg) 1934bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
1911{ 1935{
1912 u32 *msgp = mbmsg; 1936 __be32 *msgp = mbmsg;
1913 u32 r32; 1937 u32 r32;
1914 int i; 1938 int i;
1915 1939
@@ -1962,7 +1986,7 @@ bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
1962 1986
1963 default: 1987 default:
1964 bfa_trc(ioc, msg->mh.msg_id); 1988 bfa_trc(ioc, msg->mh.msg_id);
1965 bfa_assert(0); 1989 WARN_ON(1);
1966 } 1990 }
1967} 1991}
1968 1992
@@ -2043,15 +2067,6 @@ bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
2043 ioc->attr = (struct bfi_ioc_attr_s *) dm_kva; 2067 ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
2044} 2068}
2045 2069
2046/*
2047 * Return size of dma memory required.
2048 */
2049u32
2050bfa_ioc_meminfo(void)
2051{
2052 return BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
2053}
2054
2055void 2070void
2056bfa_ioc_enable(struct bfa_ioc_s *ioc) 2071bfa_ioc_enable(struct bfa_ioc_s *ioc)
2057{ 2072{
@@ -2068,18 +2083,6 @@ bfa_ioc_disable(struct bfa_ioc_s *ioc)
2068 bfa_fsm_send_event(ioc, IOC_E_DISABLE); 2083 bfa_fsm_send_event(ioc, IOC_E_DISABLE);
2069} 2084}
2070 2085
2071/*
2072 * Returns memory required for saving firmware trace in case of crash.
2073 * Driver must call this interface to allocate memory required for
2074 * automatic saving of firmware trace. Driver should call
2075 * bfa_ioc_debug_memclaim() right after bfa_ioc_attach() to setup this
2076 * trace memory.
2077 */
2078int
2079bfa_ioc_debug_trcsz(bfa_boolean_t auto_recover)
2080{
2081 return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
2082}
2083 2086
2084/* 2087/*
2085 * Initialize memory for saving firmware trace. Driver must initialize 2088 * Initialize memory for saving firmware trace. Driver must initialize
@@ -2089,19 +2092,7 @@ void
2089bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave) 2092bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
2090{ 2093{
2091 ioc->dbg_fwsave = dbg_fwsave; 2094 ioc->dbg_fwsave = dbg_fwsave;
2092 ioc->dbg_fwsave_len = bfa_ioc_debug_trcsz(ioc->iocpf.auto_recover); 2095 ioc->dbg_fwsave_len = (ioc->iocpf.auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
2093}
2094
2095u32
2096bfa_ioc_smem_pgnum(struct bfa_ioc_s *ioc, u32 fmaddr)
2097{
2098 return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
2099}
2100
2101u32
2102bfa_ioc_smem_pgoff(struct bfa_ioc_s *ioc, u32 fmaddr)
2103{
2104 return PSS_SMEM_PGOFF(fmaddr);
2105} 2096}
2106 2097
2107/* 2098/*
@@ -2265,14 +2256,13 @@ bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
2265} 2256}
2266 2257
2267/* 2258/*
2268 * Add to IOC heartbeat failure notification queue. To be used by common 2259 * Reset IOC fwstate registers.
2269 * modules such as cee, port, diag.
2270 */ 2260 */
2271void 2261void
2272bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc, 2262bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc)
2273 struct bfa_ioc_hbfail_notify_s *notify)
2274{ 2263{
2275 list_add_tail(&notify->qe, &ioc->hb_notify_q); 2264 writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
2265 writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
2276} 2266}
2277 2267
2278#define BFA_MFG_NAME "Brocade" 2268#define BFA_MFG_NAME "Brocade"
@@ -2306,7 +2296,7 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
2306 else 2296 else
2307 ad_attr->prototype = 0; 2297 ad_attr->prototype = 0;
2308 2298
2309 ad_attr->pwwn = bfa_ioc_get_pwwn(ioc); 2299 ad_attr->pwwn = ioc->attr->pwwn;
2310 ad_attr->mac = bfa_ioc_get_mac(ioc); 2300 ad_attr->mac = bfa_ioc_get_mac(ioc);
2311 2301
2312 ad_attr->pcie_gen = ioc_attr->pcie_gen; 2302 ad_attr->pcie_gen = ioc_attr->pcie_gen;
@@ -2317,7 +2307,8 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
2317 bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver); 2307 bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
2318 2308
2319 ad_attr->cna_capable = ioc->cna; 2309 ad_attr->cna_capable = ioc->cna;
2320 ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna; 2310 ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna &&
2311 !ad_attr->is_mezz;
2321} 2312}
2322 2313
2323enum bfa_ioc_type_e 2314enum bfa_ioc_type_e
@@ -2330,7 +2321,7 @@ bfa_ioc_get_type(struct bfa_ioc_s *ioc)
2330 else if (ioc->ioc_mc == BFI_MC_LL) 2321 else if (ioc->ioc_mc == BFI_MC_LL)
2331 return BFA_IOC_TYPE_LL; 2322 return BFA_IOC_TYPE_LL;
2332 else { 2323 else {
2333 bfa_assert(ioc->ioc_mc == BFI_MC_LL); 2324 WARN_ON(ioc->ioc_mc != BFI_MC_LL);
2334 return BFA_IOC_TYPE_LL; 2325 return BFA_IOC_TYPE_LL;
2335 } 2326 }
2336} 2327}
@@ -2354,7 +2345,7 @@ bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
2354void 2345void
2355bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev) 2346bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
2356{ 2347{
2357 bfa_assert(chip_rev); 2348 WARN_ON(!chip_rev);
2358 2349
2359 memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN); 2350 memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
2360 2351
@@ -2386,7 +2377,7 @@ bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
2386{ 2377{
2387 struct bfi_ioc_attr_s *ioc_attr; 2378 struct bfi_ioc_attr_s *ioc_attr;
2388 2379
2389 bfa_assert(model); 2380 WARN_ON(!model);
2390 memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN); 2381 memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
2391 2382
2392 ioc_attr = ioc->attr; 2383 ioc_attr = ioc->attr;
@@ -2455,27 +2446,6 @@ bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
2455 bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev); 2446 bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
2456} 2447}
2457 2448
2458/*
2459 * hal_wwn_public
2460 */
2461wwn_t
2462bfa_ioc_get_pwwn(struct bfa_ioc_s *ioc)
2463{
2464 return ioc->attr->pwwn;
2465}
2466
2467wwn_t
2468bfa_ioc_get_nwwn(struct bfa_ioc_s *ioc)
2469{
2470 return ioc->attr->nwwn;
2471}
2472
2473u64
2474bfa_ioc_get_adid(struct bfa_ioc_s *ioc)
2475{
2476 return ioc->attr->mfg_pwwn;
2477}
2478
2479mac_t 2449mac_t
2480bfa_ioc_get_mac(struct bfa_ioc_s *ioc) 2450bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
2481{ 2451{
@@ -2488,18 +2458,6 @@ bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
2488 return ioc->attr->mac; 2458 return ioc->attr->mac;
2489} 2459}
2490 2460
2491wwn_t
2492bfa_ioc_get_mfg_pwwn(struct bfa_ioc_s *ioc)
2493{
2494 return ioc->attr->mfg_pwwn;
2495}
2496
2497wwn_t
2498bfa_ioc_get_mfg_nwwn(struct bfa_ioc_s *ioc)
2499{
2500 return ioc->attr->mfg_nwwn;
2501}
2502
2503mac_t 2461mac_t
2504bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc) 2462bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
2505{ 2463{
@@ -2541,14 +2499,6 @@ bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2541 return BFA_STATUS_OK; 2499 return BFA_STATUS_OK;
2542} 2500}
2543 2501
2544/*
2545 * Clear saved firmware trace
2546 */
2547void
2548bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc)
2549{
2550 ioc->dbg_fwsave_once = BFA_TRUE;
2551}
2552 2502
2553/* 2503/*
2554 * Retrieve saved firmware trace from a prior IOC failure. 2504 * Retrieve saved firmware trace from a prior IOC failure.
@@ -2701,13 +2651,16 @@ bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
2701 * Save firmware trace if configured. 2651 * Save firmware trace if configured.
2702 */ 2652 */
2703static void 2653static void
2704bfa_ioc_debug_save(struct bfa_ioc_s *ioc) 2654bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc)
2705{ 2655{
2706 int tlen; 2656 int tlen;
2707 2657
2708 if (ioc->dbg_fwsave_len) { 2658 if (ioc->dbg_fwsave_once) {
2709 tlen = ioc->dbg_fwsave_len; 2659 ioc->dbg_fwsave_once = BFA_FALSE;
2710 bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen); 2660 if (ioc->dbg_fwsave_len) {
2661 tlen = ioc->dbg_fwsave_len;
2662 bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
2663 }
2711 } 2664 }
2712} 2665}
2713 2666
@@ -2717,11 +2670,6 @@ bfa_ioc_debug_save(struct bfa_ioc_s *ioc)
2717static void 2670static void
2718bfa_ioc_recover(struct bfa_ioc_s *ioc) 2671bfa_ioc_recover(struct bfa_ioc_s *ioc)
2719{ 2672{
2720 if (ioc->dbg_fwsave_once) {
2721 ioc->dbg_fwsave_once = BFA_FALSE;
2722 bfa_ioc_debug_save(ioc);
2723 }
2724
2725 bfa_ioc_stats(ioc, ioc_hbfails); 2673 bfa_ioc_stats(ioc, ioc_hbfails);
2726 bfa_fsm_send_event(ioc, IOC_E_HBFAIL); 2674 bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
2727} 2675}
@@ -2734,45 +2682,8 @@ bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
2734} 2682}
2735 2683
2736/* 2684/*
2737 * hal_iocpf_pvt BFA IOC PF private functions 2685 * BFA IOC PF private functions
2738 */ 2686 */
2739
2740static void
2741bfa_iocpf_enable(struct bfa_ioc_s *ioc)
2742{
2743 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
2744}
2745
2746static void
2747bfa_iocpf_disable(struct bfa_ioc_s *ioc)
2748{
2749 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
2750}
2751
2752static void
2753bfa_iocpf_fail(struct bfa_ioc_s *ioc)
2754{
2755 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
2756}
2757
2758static void
2759bfa_iocpf_initfail(struct bfa_ioc_s *ioc)
2760{
2761 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
2762}
2763
2764static void
2765bfa_iocpf_getattrfail(struct bfa_ioc_s *ioc)
2766{
2767 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
2768}
2769
2770static void
2771bfa_iocpf_stop(struct bfa_ioc_s *ioc)
2772{
2773 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
2774}
2775
2776static void 2687static void
2777bfa_iocpf_timeout(void *ioc_arg) 2688bfa_iocpf_timeout(void *ioc_arg)
2778{ 2689{
@@ -2794,12 +2705,6 @@ bfa_iocpf_sem_timeout(void *ioc_arg)
2794 * bfa timer function 2705 * bfa timer function
2795 */ 2706 */
2796void 2707void
2797bfa_timer_init(struct bfa_timer_mod_s *mod)
2798{
2799 INIT_LIST_HEAD(&mod->timer_q);
2800}
2801
2802void
2803bfa_timer_beat(struct bfa_timer_mod_s *mod) 2708bfa_timer_beat(struct bfa_timer_mod_s *mod)
2804{ 2709{
2805 struct list_head *qh = &mod->timer_q; 2710 struct list_head *qh = &mod->timer_q;
@@ -2843,8 +2748,8 @@ bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
2843 void (*timercb) (void *), void *arg, unsigned int timeout) 2748 void (*timercb) (void *), void *arg, unsigned int timeout)
2844{ 2749{
2845 2750
2846 bfa_assert(timercb != NULL); 2751 WARN_ON(timercb == NULL);
2847 bfa_assert(!bfa_q_is_on_q(&mod->timer_q, timer)); 2752 WARN_ON(bfa_q_is_on_q(&mod->timer_q, timer));
2848 2753
2849 timer->timeout = timeout; 2754 timer->timeout = timeout;
2850 timer->timercb = timercb; 2755 timer->timercb = timercb;
@@ -2859,7 +2764,7 @@ bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
2859void 2764void
2860bfa_timer_stop(struct bfa_timer_s *timer) 2765bfa_timer_stop(struct bfa_timer_s *timer)
2861{ 2766{
2862 bfa_assert(!list_empty(&timer->qe)); 2767 WARN_ON(list_empty(&timer->qe));
2863 2768
2864 list_del(&timer->qe); 2769 list_del(&timer->qe);
2865} 2770}
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index 9c407a87a1a1..ec9cf08b0e7f 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -18,10 +18,15 @@
18#ifndef __BFA_IOC_H__ 18#ifndef __BFA_IOC_H__
19#define __BFA_IOC_H__ 19#define __BFA_IOC_H__
20 20
21#include "bfa_os_inc.h" 21#include "bfad_drv.h"
22#include "bfa_cs.h" 22#include "bfa_cs.h"
23#include "bfi.h" 23#include "bfi.h"
24 24
25#define BFA_DBG_FWTRC_ENTS (BFI_IOC_TRC_ENTS)
26#define BFA_DBG_FWTRC_LEN \
27 (BFA_DBG_FWTRC_ENTS * sizeof(struct bfa_trc_s) + \
28 (sizeof(struct bfa_trc_mod_s) - \
29 BFA_TRC_MAX * sizeof(struct bfa_trc_s)))
25/* 30/*
26 * BFA timer declarations 31 * BFA timer declarations
27 */ 32 */
@@ -47,7 +52,6 @@ struct bfa_timer_mod_s {
47#define BFA_TIMER_FREQ 200 /* specified in millisecs */ 52#define BFA_TIMER_FREQ 200 /* specified in millisecs */
48 53
49void bfa_timer_beat(struct bfa_timer_mod_s *mod); 54void bfa_timer_beat(struct bfa_timer_mod_s *mod);
50void bfa_timer_init(struct bfa_timer_mod_s *mod);
51void bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer, 55void bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
52 bfa_timer_cbfn_t timercb, void *arg, 56 bfa_timer_cbfn_t timercb, void *arg,
53 unsigned int timeout); 57 unsigned int timeout);
@@ -70,7 +74,7 @@ struct bfa_sge_s {
70#define bfa_swap_words(_x) ( \ 74#define bfa_swap_words(_x) ( \
71 ((_x) << 32) | ((_x) >> 32)) 75 ((_x) << 32) | ((_x) >> 32))
72 76
73#ifdef __BIGENDIAN 77#ifdef __BIG_ENDIAN
74#define bfa_sge_to_be(_x) 78#define bfa_sge_to_be(_x)
75#define bfa_sge_to_le(_x) bfa_sge_word_swap(_x) 79#define bfa_sge_to_le(_x) bfa_sge_word_swap(_x)
76#define bfa_sgaddr_le(_x) bfa_swap_words(_x) 80#define bfa_sgaddr_le(_x) bfa_swap_words(_x)
@@ -115,8 +119,8 @@ struct bfa_dma_s {
115static inline void 119static inline void
116__bfa_dma_addr_set(union bfi_addr_u *dma_addr, u64 pa) 120__bfa_dma_addr_set(union bfi_addr_u *dma_addr, u64 pa)
117{ 121{
118 dma_addr->a32.addr_lo = (u32) pa; 122 dma_addr->a32.addr_lo = (__be32) pa;
119 dma_addr->a32.addr_hi = (u32) (bfa_os_u32(pa)); 123 dma_addr->a32.addr_hi = (__be32) (pa >> 32);
120} 124}
121 125
122 126
@@ -125,8 +129,8 @@ __bfa_dma_addr_set(union bfi_addr_u *dma_addr, u64 pa)
125static inline void 129static inline void
126__bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa) 130__bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa)
127{ 131{
128 dma_addr->a32.addr_lo = (u32) cpu_to_be32(pa); 132 dma_addr->a32.addr_lo = cpu_to_be32(pa);
129 dma_addr->a32.addr_hi = (u32) cpu_to_be32(bfa_os_u32(pa)); 133 dma_addr->a32.addr_hi = cpu_to_be32(pa >> 32);
130} 134}
131 135
132struct bfa_ioc_regs_s { 136struct bfa_ioc_regs_s {
@@ -145,8 +149,11 @@ struct bfa_ioc_regs_s {
145 void __iomem *host_page_num_fn; 149 void __iomem *host_page_num_fn;
146 void __iomem *heartbeat; 150 void __iomem *heartbeat;
147 void __iomem *ioc_fwstate; 151 void __iomem *ioc_fwstate;
152 void __iomem *alt_ioc_fwstate;
148 void __iomem *ll_halt; 153 void __iomem *ll_halt;
154 void __iomem *alt_ll_halt;
149 void __iomem *err_set; 155 void __iomem *err_set;
156 void __iomem *ioc_fail_sync;
150 void __iomem *shirq_isr_next; 157 void __iomem *shirq_isr_next;
151 void __iomem *shirq_msk_next; 158 void __iomem *shirq_msk_next;
152 void __iomem *smem_page_start; 159 void __iomem *smem_page_start;
@@ -254,8 +261,12 @@ struct bfa_ioc_hwif_s {
254 void (*ioc_map_port) (struct bfa_ioc_s *ioc); 261 void (*ioc_map_port) (struct bfa_ioc_s *ioc);
255 void (*ioc_isr_mode_set) (struct bfa_ioc_s *ioc, 262 void (*ioc_isr_mode_set) (struct bfa_ioc_s *ioc,
256 bfa_boolean_t msix); 263 bfa_boolean_t msix);
257 void (*ioc_notify_hbfail) (struct bfa_ioc_s *ioc); 264 void (*ioc_notify_fail) (struct bfa_ioc_s *ioc);
258 void (*ioc_ownership_reset) (struct bfa_ioc_s *ioc); 265 void (*ioc_ownership_reset) (struct bfa_ioc_s *ioc);
266 void (*ioc_sync_join) (struct bfa_ioc_s *ioc);
267 void (*ioc_sync_leave) (struct bfa_ioc_s *ioc);
268 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
269 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
259}; 270};
260 271
261#define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func) 272#define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
@@ -325,7 +336,6 @@ void bfa_ioc_auto_recover(bfa_boolean_t auto_recover);
325void bfa_ioc_detach(struct bfa_ioc_s *ioc); 336void bfa_ioc_detach(struct bfa_ioc_s *ioc);
326void bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev, 337void bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
327 enum bfi_mclass mc); 338 enum bfi_mclass mc);
328u32 bfa_ioc_meminfo(void);
329void bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa); 339void bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa);
330void bfa_ioc_enable(struct bfa_ioc_s *ioc); 340void bfa_ioc_enable(struct bfa_ioc_s *ioc);
331void bfa_ioc_disable(struct bfa_ioc_s *ioc); 341void bfa_ioc_disable(struct bfa_ioc_s *ioc);
@@ -340,6 +350,7 @@ bfa_boolean_t bfa_ioc_is_initialized(struct bfa_ioc_s *ioc);
340bfa_boolean_t bfa_ioc_is_disabled(struct bfa_ioc_s *ioc); 350bfa_boolean_t bfa_ioc_is_disabled(struct bfa_ioc_s *ioc);
341bfa_boolean_t bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc); 351bfa_boolean_t bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc);
342bfa_boolean_t bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc); 352bfa_boolean_t bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc);
353void bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc);
343enum bfa_ioc_type_e bfa_ioc_get_type(struct bfa_ioc_s *ioc); 354enum bfa_ioc_type_e bfa_ioc_get_type(struct bfa_ioc_s *ioc);
344void bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num); 355void bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num);
345void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver); 356void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver);
@@ -353,24 +364,16 @@ enum bfa_ioc_state bfa_ioc_get_state(struct bfa_ioc_s *ioc);
353void bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr); 364void bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr);
354void bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc, 365void bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
355 struct bfa_adapter_attr_s *ad_attr); 366 struct bfa_adapter_attr_s *ad_attr);
356int bfa_ioc_debug_trcsz(bfa_boolean_t auto_recover);
357void bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave); 367void bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave);
358bfa_status_t bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, 368bfa_status_t bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata,
359 int *trclen); 369 int *trclen);
360void bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc);
361bfa_status_t bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, 370bfa_status_t bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata,
362 int *trclen); 371 int *trclen);
363bfa_status_t bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf, 372bfa_status_t bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
364 u32 *offset, int *buflen); 373 u32 *offset, int *buflen);
365u32 bfa_ioc_smem_pgnum(struct bfa_ioc_s *ioc, u32 fmaddr);
366u32 bfa_ioc_smem_pgoff(struct bfa_ioc_s *ioc, u32 fmaddr);
367void bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc); 374void bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc);
368bfa_boolean_t bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc); 375bfa_boolean_t bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc);
369void bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc,
370 struct bfa_ioc_hbfail_notify_s *notify);
371bfa_boolean_t bfa_ioc_sem_get(void __iomem *sem_reg); 376bfa_boolean_t bfa_ioc_sem_get(void __iomem *sem_reg);
372void bfa_ioc_sem_release(void __iomem *sem_reg);
373void bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc);
374void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, 377void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc,
375 struct bfi_ioc_image_hdr_s *fwhdr); 378 struct bfi_ioc_image_hdr_s *fwhdr);
376bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, 379bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc,
@@ -381,13 +384,8 @@ bfa_status_t bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc);
381/* 384/*
382 * bfa mfg wwn API functions 385 * bfa mfg wwn API functions
383 */ 386 */
384wwn_t bfa_ioc_get_pwwn(struct bfa_ioc_s *ioc);
385wwn_t bfa_ioc_get_nwwn(struct bfa_ioc_s *ioc);
386mac_t bfa_ioc_get_mac(struct bfa_ioc_s *ioc); 387mac_t bfa_ioc_get_mac(struct bfa_ioc_s *ioc);
387wwn_t bfa_ioc_get_mfg_pwwn(struct bfa_ioc_s *ioc);
388wwn_t bfa_ioc_get_mfg_nwwn(struct bfa_ioc_s *ioc);
389mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc); 388mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc);
390u64 bfa_ioc_get_adid(struct bfa_ioc_s *ioc);
391 389
392/* 390/*
393 * F/W Image Size & Chunk 391 * F/W Image Size & Chunk
@@ -421,7 +419,7 @@ bfa_cb_image_get_chunk(int type, u32 off)
421 return bfi_image_ct_cna_get_chunk(off); break; 419 return bfi_image_ct_cna_get_chunk(off); break;
422 case BFI_IMAGE_CB_FC: 420 case BFI_IMAGE_CB_FC:
423 return bfi_image_cb_fc_get_chunk(off); break; 421 return bfi_image_cb_fc_get_chunk(off); break;
424 default: return 0; 422 default: return NULL;
425 } 423 }
426} 424}
427 425
diff --git a/drivers/scsi/bfa/bfa_ioc_cb.c b/drivers/scsi/bfa/bfa_ioc_cb.c
index 909945043850..e4a0713185b6 100644
--- a/drivers/scsi/bfa/bfa_ioc_cb.c
+++ b/drivers/scsi/bfa/bfa_ioc_cb.c
@@ -15,6 +15,7 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18#include "bfad_drv.h"
18#include "bfa_ioc.h" 19#include "bfa_ioc.h"
19#include "bfi_cbreg.h" 20#include "bfi_cbreg.h"
20#include "bfa_defs.h" 21#include "bfa_defs.h"
@@ -29,10 +30,14 @@ static void bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc);
29static void bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc); 30static void bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc);
30static void bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc); 31static void bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc);
31static void bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix); 32static void bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
32static void bfa_ioc_cb_notify_hbfail(struct bfa_ioc_s *ioc); 33static void bfa_ioc_cb_notify_fail(struct bfa_ioc_s *ioc);
33static void bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc); 34static void bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc);
35static void bfa_ioc_cb_sync_join(struct bfa_ioc_s *ioc);
36static void bfa_ioc_cb_sync_leave(struct bfa_ioc_s *ioc);
37static void bfa_ioc_cb_sync_ack(struct bfa_ioc_s *ioc);
38static bfa_boolean_t bfa_ioc_cb_sync_complete(struct bfa_ioc_s *ioc);
34 39
35struct bfa_ioc_hwif_s hwif_cb; 40static struct bfa_ioc_hwif_s hwif_cb;
36 41
37/* 42/*
38 * Called from bfa_ioc_attach() to map asic specific calls. 43 * Called from bfa_ioc_attach() to map asic specific calls.
@@ -46,8 +51,12 @@ bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc)
46 hwif_cb.ioc_reg_init = bfa_ioc_cb_reg_init; 51 hwif_cb.ioc_reg_init = bfa_ioc_cb_reg_init;
47 hwif_cb.ioc_map_port = bfa_ioc_cb_map_port; 52 hwif_cb.ioc_map_port = bfa_ioc_cb_map_port;
48 hwif_cb.ioc_isr_mode_set = bfa_ioc_cb_isr_mode_set; 53 hwif_cb.ioc_isr_mode_set = bfa_ioc_cb_isr_mode_set;
49 hwif_cb.ioc_notify_hbfail = bfa_ioc_cb_notify_hbfail; 54 hwif_cb.ioc_notify_fail = bfa_ioc_cb_notify_fail;
50 hwif_cb.ioc_ownership_reset = bfa_ioc_cb_ownership_reset; 55 hwif_cb.ioc_ownership_reset = bfa_ioc_cb_ownership_reset;
56 hwif_cb.ioc_sync_join = bfa_ioc_cb_sync_join;
57 hwif_cb.ioc_sync_leave = bfa_ioc_cb_sync_leave;
58 hwif_cb.ioc_sync_ack = bfa_ioc_cb_sync_ack;
59 hwif_cb.ioc_sync_complete = bfa_ioc_cb_sync_complete;
51 60
52 ioc->ioc_hwif = &hwif_cb; 61 ioc->ioc_hwif = &hwif_cb;
53} 62}
@@ -58,6 +67,21 @@ bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc)
58static bfa_boolean_t 67static bfa_boolean_t
59bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc) 68bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc)
60{ 69{
70 struct bfi_ioc_image_hdr_s fwhdr;
71 uint32_t fwstate = readl(ioc->ioc_regs.ioc_fwstate);
72
73 if (fwstate == BFI_IOC_UNINIT)
74 return BFA_TRUE;
75
76 bfa_ioc_fwver_get(ioc, &fwhdr);
77
78 if (swab32(fwhdr.exec) == BFI_BOOT_TYPE_NORMAL)
79 return BFA_TRUE;
80
81 bfa_trc(ioc, fwstate);
82 bfa_trc(ioc, fwhdr.exec);
83 writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
84
61 return BFA_TRUE; 85 return BFA_TRUE;
62} 86}
63 87
@@ -70,7 +94,7 @@ bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc)
70 * Notify other functions on HB failure. 94 * Notify other functions on HB failure.
71 */ 95 */
72static void 96static void
73bfa_ioc_cb_notify_hbfail(struct bfa_ioc_s *ioc) 97bfa_ioc_cb_notify_fail(struct bfa_ioc_s *ioc)
74{ 98{
75 writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set); 99 writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
76 readl(ioc->ioc_regs.err_set); 100 readl(ioc->ioc_regs.err_set);
@@ -108,9 +132,11 @@ bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc)
108 if (ioc->port_id == 0) { 132 if (ioc->port_id == 0) {
109 ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG; 133 ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
110 ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG; 134 ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
135 ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
111 } else { 136 } else {
112 ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG); 137 ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
113 ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG); 138 ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
139 ioc->ioc_regs.alt_ioc_fwstate = (rb + BFA_IOC0_STATE_REG);
114 } 140 }
115 141
116 /* 142 /*
@@ -181,10 +207,71 @@ bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc)
181 * will lock it instead of clearing it. 207 * will lock it instead of clearing it.
182 */ 208 */
183 readl(ioc->ioc_regs.ioc_sem_reg); 209 readl(ioc->ioc_regs.ioc_sem_reg);
184 bfa_ioc_hw_sem_release(ioc); 210 writel(1, ioc->ioc_regs.ioc_sem_reg);
185} 211}
186 212
213/*
214 * Synchronized IOC failure processing routines
215 */
216static void
217bfa_ioc_cb_sync_join(struct bfa_ioc_s *ioc)
218{
219}
187 220
221static void
222bfa_ioc_cb_sync_leave(struct bfa_ioc_s *ioc)
223{
224}
225
226static void
227bfa_ioc_cb_sync_ack(struct bfa_ioc_s *ioc)
228{
229 writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
230}
231
232static bfa_boolean_t
233bfa_ioc_cb_sync_complete(struct bfa_ioc_s *ioc)
234{
235 uint32_t fwstate, alt_fwstate;
236 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
237
238 /*
239 * At this point, this IOC is hoding the hw sem in the
240 * start path (fwcheck) OR in the disable/enable path
241 * OR to check if the other IOC has acknowledged failure.
242 *
243 * So, this IOC can be in UNINIT, INITING, DISABLED, FAIL
244 * or in MEMTEST states. In a normal scenario, this IOC
245 * can not be in OP state when this function is called.
246 *
247 * However, this IOC could still be in OP state when
248 * the OS driver is starting up, if the OptROM code has
249 * left it in that state.
250 *
251 * If we had marked this IOC's fwstate as BFI_IOC_FAIL
252 * in the failure case and now, if the fwstate is not
253 * BFI_IOC_FAIL it implies that the other PCI fn have
254 * reinitialized the ASIC or this IOC got disabled, so
255 * return TRUE.
256 */
257 if (fwstate == BFI_IOC_UNINIT ||
258 fwstate == BFI_IOC_INITING ||
259 fwstate == BFI_IOC_DISABLED ||
260 fwstate == BFI_IOC_MEMTEST ||
261 fwstate == BFI_IOC_OP)
262 return BFA_TRUE;
263 else {
264 alt_fwstate = readl(ioc->ioc_regs.alt_ioc_fwstate);
265 if (alt_fwstate == BFI_IOC_FAIL ||
266 alt_fwstate == BFI_IOC_DISABLED ||
267 alt_fwstate == BFI_IOC_UNINIT ||
268 alt_fwstate == BFI_IOC_INITING ||
269 alt_fwstate == BFI_IOC_MEMTEST)
270 return BFA_TRUE;
271 else
272 return BFA_FALSE;
273 }
274}
188 275
189bfa_status_t 276bfa_status_t
190bfa_ioc_cb_pll_init(void __iomem *rb, bfa_boolean_t fcmode) 277bfa_ioc_cb_pll_init(void __iomem *rb, bfa_boolean_t fcmode)
diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c
index 115730c0aa77..008d129ddfcd 100644
--- a/drivers/scsi/bfa/bfa_ioc_ct.c
+++ b/drivers/scsi/bfa/bfa_ioc_ct.c
@@ -15,12 +15,22 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18#include "bfad_drv.h"
18#include "bfa_ioc.h" 19#include "bfa_ioc.h"
19#include "bfi_ctreg.h" 20#include "bfi_ctreg.h"
20#include "bfa_defs.h" 21#include "bfa_defs.h"
21 22
22BFA_TRC_FILE(CNA, IOC_CT); 23BFA_TRC_FILE(CNA, IOC_CT);
23 24
25#define bfa_ioc_ct_sync_pos(__ioc) \
26 ((uint32_t) (1 << bfa_ioc_pcifn(__ioc)))
27#define BFA_IOC_SYNC_REQD_SH 16
28#define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff)
29#define bfa_ioc_ct_clear_sync_ackd(__val) (__val & 0xffff0000)
30#define bfa_ioc_ct_get_sync_reqd(__val) (__val >> BFA_IOC_SYNC_REQD_SH)
31#define bfa_ioc_ct_sync_reqd_pos(__ioc) \
32 (bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)
33
24/* 34/*
25 * forward declarations 35 * forward declarations
26 */ 36 */
@@ -29,10 +39,14 @@ static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc);
29static void bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc); 39static void bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc);
30static void bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc); 40static void bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc);
31static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix); 41static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
32static void bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc); 42static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc);
33static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc); 43static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
44static void bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc);
45static void bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc);
46static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc);
47static bfa_boolean_t bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc);
34 48
35struct bfa_ioc_hwif_s hwif_ct; 49static struct bfa_ioc_hwif_s hwif_ct;
36 50
37/* 51/*
38 * Called from bfa_ioc_attach() to map asic specific calls. 52 * Called from bfa_ioc_attach() to map asic specific calls.
@@ -46,8 +60,12 @@ bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
46 hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init; 60 hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
47 hwif_ct.ioc_map_port = bfa_ioc_ct_map_port; 61 hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
48 hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set; 62 hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
49 hwif_ct.ioc_notify_hbfail = bfa_ioc_ct_notify_hbfail; 63 hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
50 hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset; 64 hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
65 hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
66 hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
67 hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
68 hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
51 69
52 ioc->ioc_hwif = &hwif_ct; 70 ioc->ioc_hwif = &hwif_ct;
53} 71}
@@ -83,7 +101,8 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
83 */ 101 */
84 if (usecnt == 0) { 102 if (usecnt == 0) {
85 writel(1, ioc->ioc_regs.ioc_usage_reg); 103 writel(1, ioc->ioc_regs.ioc_usage_reg);
86 bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); 104 writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
105 writel(0, ioc->ioc_regs.ioc_fail_sync);
87 bfa_trc(ioc, usecnt); 106 bfa_trc(ioc, usecnt);
88 return BFA_TRUE; 107 return BFA_TRUE;
89 } 108 }
@@ -94,14 +113,14 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
94 /* 113 /*
95 * Use count cannot be non-zero and chip in uninitialized state. 114 * Use count cannot be non-zero and chip in uninitialized state.
96 */ 115 */
97 bfa_assert(ioc_fwstate != BFI_IOC_UNINIT); 116 WARN_ON(ioc_fwstate == BFI_IOC_UNINIT);
98 117
99 /* 118 /*
100 * Check if another driver with a different firmware is active 119 * Check if another driver with a different firmware is active
101 */ 120 */
102 bfa_ioc_fwver_get(ioc, &fwhdr); 121 bfa_ioc_fwver_get(ioc, &fwhdr);
103 if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) { 122 if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
104 bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); 123 writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
105 bfa_trc(ioc, usecnt); 124 bfa_trc(ioc, usecnt);
106 return BFA_FALSE; 125 return BFA_FALSE;
107 } 126 }
@@ -111,7 +130,7 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
111 */ 130 */
112 usecnt++; 131 usecnt++;
113 writel(usecnt, ioc->ioc_regs.ioc_usage_reg); 132 writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
114 bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); 133 writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
115 bfa_trc(ioc, usecnt); 134 bfa_trc(ioc, usecnt);
116 return BFA_TRUE; 135 return BFA_TRUE;
117} 136}
@@ -139,25 +158,27 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
139 */ 158 */
140 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); 159 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
141 usecnt = readl(ioc->ioc_regs.ioc_usage_reg); 160 usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
142 bfa_assert(usecnt > 0); 161 WARN_ON(usecnt <= 0);
143 162
144 usecnt--; 163 usecnt--;
145 writel(usecnt, ioc->ioc_regs.ioc_usage_reg); 164 writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
146 bfa_trc(ioc, usecnt); 165 bfa_trc(ioc, usecnt);
147 166
148 bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); 167 writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
149} 168}
150 169
151/* 170/*
152 * Notify other functions on HB failure. 171 * Notify other functions on HB failure.
153 */ 172 */
154static void 173static void
155bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc) 174bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc)
156{ 175{
157 if (ioc->cna) { 176 if (ioc->cna) {
158 writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt); 177 writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
178 writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
159 /* Wait for halt to take effect */ 179 /* Wait for halt to take effect */
160 readl(ioc->ioc_regs.ll_halt); 180 readl(ioc->ioc_regs.ll_halt);
181 readl(ioc->ioc_regs.alt_ll_halt);
161 } else { 182 } else {
162 writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set); 183 writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
163 readl(ioc->ioc_regs.err_set); 184 readl(ioc->ioc_regs.err_set);
@@ -209,15 +230,19 @@ bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
209 if (ioc->port_id == 0) { 230 if (ioc->port_id == 0) {
210 ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG; 231 ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
211 ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG; 232 ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
233 ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
212 ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn; 234 ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn;
213 ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu; 235 ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu;
214 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0; 236 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
237 ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
215 } else { 238 } else {
216 ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG); 239 ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
217 ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG); 240 ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
241 ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
218 ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn; 242 ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn;
219 ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu; 243 ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu;
220 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1; 244 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
245 ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
221 } 246 }
222 247
223 /* 248 /*
@@ -235,6 +260,7 @@ bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
235 ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG); 260 ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
236 ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG); 261 ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
237 ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT); 262 ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
263 ioc->ioc_regs.ioc_fail_sync = (rb + BFA_IOC_FAIL_SYNC);
238 264
239 /* 265 /*
240 * sram memory access 266 * sram memory access
@@ -313,7 +339,7 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
313 if (ioc->cna) { 339 if (ioc->cna) {
314 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); 340 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
315 writel(0, ioc->ioc_regs.ioc_usage_reg); 341 writel(0, ioc->ioc_regs.ioc_usage_reg);
316 bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); 342 writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
317 } 343 }
318 344
319 /* 345 /*
@@ -322,10 +348,80 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
322 * will lock it instead of clearing it. 348 * will lock it instead of clearing it.
323 */ 349 */
324 readl(ioc->ioc_regs.ioc_sem_reg); 350 readl(ioc->ioc_regs.ioc_sem_reg);
325 bfa_ioc_hw_sem_release(ioc); 351 writel(1, ioc->ioc_regs.ioc_sem_reg);
352}
353
354/*
355 * Synchronized IOC failure processing routines
356 */
357static void
358bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc)
359{
360 uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
361 uint32_t sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);
362
363 writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
364}
365
366static void
367bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc)
368{
369 uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
370 uint32_t sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
371 bfa_ioc_ct_sync_pos(ioc);
372
373 writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
374}
375
376static void
377bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc)
378{
379 uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
380
381 writel((r32 | bfa_ioc_ct_sync_pos(ioc)),
382 ioc->ioc_regs.ioc_fail_sync);
326} 383}
327 384
385static bfa_boolean_t
386bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc)
387{
388 uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
389 uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
390 uint32_t sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
391 uint32_t tmp_ackd;
392
393 if (sync_ackd == 0)
394 return BFA_TRUE;
395
396 /*
397 * The check below is to see whether any other PCI fn
398 * has reinitialized the ASIC (reset sync_ackd bits)
399 * and failed again while this IOC was waiting for hw
400 * semaphore (in bfa_iocpf_sm_semwait()).
401 */
402 tmp_ackd = sync_ackd;
403 if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) &&
404 !(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
405 sync_ackd |= bfa_ioc_ct_sync_pos(ioc);
406
407 if (sync_reqd == sync_ackd) {
408 writel(bfa_ioc_ct_clear_sync_ackd(r32),
409 ioc->ioc_regs.ioc_fail_sync);
410 writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
411 writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
412 return BFA_TRUE;
413 }
414
415 /*
416 * If another PCI fn reinitialized and failed again while
417 * this IOC was waiting for hw sem, the sync_ackd bit for
418 * this IOC need to be set again to allow reinitialization.
419 */
420 if (tmp_ackd != sync_ackd)
421 writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);
328 422
423 return BFA_FALSE;
424}
329 425
330/* 426/*
331 * Check the firmware state to know if pll_init has been completed already 427 * Check the firmware state to know if pll_init has been completed already
diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
index 15407ab39e77..ab79ff6fdeea 100644
--- a/drivers/scsi/bfa/bfa_modules.h
+++ b/drivers/scsi/bfa/bfa_modules.h
@@ -99,7 +99,6 @@ struct bfa_module_s {
99 void (*iocdisable) (struct bfa_s *bfa); 99 void (*iocdisable) (struct bfa_s *bfa);
100}; 100};
101 101
102extern struct bfa_module_s *hal_mods[];
103 102
104struct bfa_s { 103struct bfa_s {
105 void *bfad; /* BFA driver instance */ 104 void *bfad; /* BFA driver instance */
@@ -116,8 +115,6 @@ struct bfa_s {
116 struct bfa_msix_s msix; 115 struct bfa_msix_s msix;
117}; 116};
118 117
119extern bfa_isr_func_t bfa_isrs[BFI_MC_MAX];
120extern bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[];
121extern bfa_boolean_t bfa_auto_recover; 118extern bfa_boolean_t bfa_auto_recover;
122extern struct bfa_module_s hal_mod_sgpg; 119extern struct bfa_module_s hal_mod_sgpg;
123extern struct bfa_module_s hal_mod_fcport; 120extern struct bfa_module_s hal_mod_fcport;
diff --git a/drivers/scsi/bfa/bfa_os_inc.h b/drivers/scsi/bfa/bfa_os_inc.h
deleted file mode 100644
index 65df62ef437f..000000000000
--- a/drivers/scsi/bfa/bfa_os_inc.h
+++ /dev/null
@@ -1,143 +0,0 @@
1/*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_OS_INC_H__
19#define __BFA_OS_INC_H__
20
21#include <linux/types.h>
22#include <linux/version.h>
23#include <linux/pci.h>
24#include <linux/dma-mapping.h>
25#include <linux/idr.h>
26#include <linux/interrupt.h>
27#include <linux/cdev.h>
28#include <linux/fs.h>
29#include <linux/delay.h>
30#include <linux/vmalloc.h>
31#include <linux/workqueue.h>
32#include <linux/bitops.h>
33#include <scsi/scsi.h>
34#include <scsi/scsi_host.h>
35#include <scsi/scsi_tcq.h>
36#include <scsi/scsi_transport_fc.h>
37#include <scsi/scsi_transport.h>
38
39#ifdef __BIG_ENDIAN
40#define __BIGENDIAN
41#endif
42
43static inline u64 bfa_os_get_log_time(void)
44{
45 u64 system_time = 0;
46 struct timeval tv;
47 do_gettimeofday(&tv);
48
49 /* We are interested in seconds only. */
50 system_time = tv.tv_sec;
51 return system_time;
52}
53
54#define bfa_io_lat_clock_res_div HZ
55#define bfa_io_lat_clock_res_mul 1000
56
57#define BFA_LOG(level, bfad, mask, fmt, arg...) \
58do { \
59 if (((mask) == 4) || (level[1] <= '4')) \
60 dev_printk(level, &((bfad)->pcidev)->dev, fmt, ##arg); \
61} while (0)
62
63#define bfa_swap_3b(_x) \
64 ((((_x) & 0xff) << 16) | \
65 ((_x) & 0x00ff00) | \
66 (((_x) & 0xff0000) >> 16))
67
68#define bfa_os_swap_sgaddr(_x) ((u64)( \
69 (((u64)(_x) & (u64)0x00000000000000ffull) << 32) | \
70 (((u64)(_x) & (u64)0x000000000000ff00ull) << 32) | \
71 (((u64)(_x) & (u64)0x0000000000ff0000ull) << 32) | \
72 (((u64)(_x) & (u64)0x00000000ff000000ull) << 32) | \
73 (((u64)(_x) & (u64)0x000000ff00000000ull) >> 32) | \
74 (((u64)(_x) & (u64)0x0000ff0000000000ull) >> 32) | \
75 (((u64)(_x) & (u64)0x00ff000000000000ull) >> 32) | \
76 (((u64)(_x) & (u64)0xff00000000000000ull) >> 32)))
77
78#ifndef __BIGENDIAN
79#define bfa_os_hton3b(_x) bfa_swap_3b(_x)
80#define bfa_os_sgaddr(_x) (_x)
81#else
82#define bfa_os_hton3b(_x) (_x)
83#define bfa_os_sgaddr(_x) bfa_os_swap_sgaddr(_x)
84#endif
85
86#define bfa_os_ntoh3b(_x) bfa_os_hton3b(_x)
87#define bfa_os_u32(__pa64) ((__pa64) >> 32)
88
89#define BFA_TRC_TS(_trcm) \
90 ({ \
91 struct timeval tv; \
92 \
93 do_gettimeofday(&tv); \
94 (tv.tv_sec*1000000+tv.tv_usec); \
95 })
96
97#define boolean_t int
98
99/*
100 * For current time stamp, OS API will fill-in
101 */
102struct bfa_timeval_s {
103 u32 tv_sec; /* seconds */
104 u32 tv_usec; /* microseconds */
105};
106
107static inline void
108bfa_os_gettimeofday(struct bfa_timeval_s *tv)
109{
110 struct timeval tmp_tv;
111
112 do_gettimeofday(&tmp_tv);
113 tv->tv_sec = (u32) tmp_tv.tv_sec;
114 tv->tv_usec = (u32) tmp_tv.tv_usec;
115}
116
117static inline void
118wwn2str(char *wwn_str, u64 wwn)
119{
120 union {
121 u64 wwn;
122 u8 byte[8];
123 } w;
124
125 w.wwn = wwn;
126 sprintf(wwn_str, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x", w.byte[0],
127 w.byte[1], w.byte[2], w.byte[3], w.byte[4], w.byte[5],
128 w.byte[6], w.byte[7]);
129}
130
131static inline void
132fcid2str(char *fcid_str, u32 fcid)
133{
134 union {
135 u32 fcid;
136 u8 byte[4];
137 } f;
138
139 f.fcid = fcid;
140 sprintf(fcid_str, "%02x:%02x:%02x", f.byte[1], f.byte[2], f.byte[3]);
141}
142
143#endif /* __BFA_OS_INC_H__ */
diff --git a/drivers/scsi/bfa/bfa_plog.h b/drivers/scsi/bfa/bfa_plog.h
index 501f0ed35cf0..1c9baa68339b 100644
--- a/drivers/scsi/bfa/bfa_plog.h
+++ b/drivers/scsi/bfa/bfa_plog.h
@@ -151,9 +151,5 @@ void bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
151void bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid, 151void bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
152 enum bfa_plog_eid event, u16 misc, 152 enum bfa_plog_eid event, u16 misc,
153 struct fchs_s *fchdr, u32 pld_w0); 153 struct fchs_s *fchdr, u32 pld_w0);
154void bfa_plog_clear(struct bfa_plog_s *plog);
155void bfa_plog_enable(struct bfa_plog_s *plog);
156void bfa_plog_disable(struct bfa_plog_s *plog);
157bfa_boolean_t bfa_plog_get_setting(struct bfa_plog_s *plog);
158 154
159#endif /* __BFA_PORTLOG_H__ */ 155#endif /* __BFA_PORTLOG_H__ */
diff --git a/drivers/scsi/bfa/bfa_port.c b/drivers/scsi/bfa/bfa_port.c
index fff96226a383..3f8e9d6066ec 100644
--- a/drivers/scsi/bfa/bfa_port.c
+++ b/drivers/scsi/bfa/bfa_port.c
@@ -15,6 +15,7 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18#include "bfad_drv.h"
18#include "bfa_defs_svc.h" 19#include "bfa_defs_svc.h"
19#include "bfa_port.h" 20#include "bfa_port.h"
20#include "bfi.h" 21#include "bfi.h"
@@ -29,14 +30,14 @@ static void
29bfa_port_stats_swap(struct bfa_port_s *port, union bfa_port_stats_u *stats) 30bfa_port_stats_swap(struct bfa_port_s *port, union bfa_port_stats_u *stats)
30{ 31{
31 u32 *dip = (u32 *) stats; 32 u32 *dip = (u32 *) stats;
32 u32 t0, t1; 33 __be32 t0, t1;
33 int i; 34 int i;
34 35
35 for (i = 0; i < sizeof(union bfa_port_stats_u)/sizeof(u32); 36 for (i = 0; i < sizeof(union bfa_port_stats_u)/sizeof(u32);
36 i += 2) { 37 i += 2) {
37 t0 = dip[i]; 38 t0 = dip[i];
38 t1 = dip[i + 1]; 39 t1 = dip[i + 1];
39#ifdef __BIGENDIAN 40#ifdef __BIG_ENDIAN
40 dip[i] = be32_to_cpu(t0); 41 dip[i] = be32_to_cpu(t0);
41 dip[i + 1] = be32_to_cpu(t1); 42 dip[i + 1] = be32_to_cpu(t1);
42#else 43#else
@@ -96,13 +97,13 @@ bfa_port_get_stats_isr(struct bfa_port_s *port, bfa_status_t status)
96 port->stats_busy = BFA_FALSE; 97 port->stats_busy = BFA_FALSE;
97 98
98 if (status == BFA_STATUS_OK) { 99 if (status == BFA_STATUS_OK) {
99 struct bfa_timeval_s tv; 100 struct timeval tv;
100 101
101 memcpy(port->stats, port->stats_dma.kva, 102 memcpy(port->stats, port->stats_dma.kva,
102 sizeof(union bfa_port_stats_u)); 103 sizeof(union bfa_port_stats_u));
103 bfa_port_stats_swap(port, port->stats); 104 bfa_port_stats_swap(port, port->stats);
104 105
105 bfa_os_gettimeofday(&tv); 106 do_gettimeofday(&tv);
106 port->stats->fc.secs_reset = tv.tv_sec - port->stats_reset_time; 107 port->stats->fc.secs_reset = tv.tv_sec - port->stats_reset_time;
107 } 108 }
108 109
@@ -124,7 +125,7 @@ bfa_port_get_stats_isr(struct bfa_port_s *port, bfa_status_t status)
124static void 125static void
125bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status) 126bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status)
126{ 127{
127 struct bfa_timeval_s tv; 128 struct timeval tv;
128 129
129 port->stats_status = status; 130 port->stats_status = status;
130 port->stats_busy = BFA_FALSE; 131 port->stats_busy = BFA_FALSE;
@@ -132,7 +133,7 @@ bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status)
132 /* 133 /*
133 * re-initialize time stamp for stats reset 134 * re-initialize time stamp for stats reset
134 */ 135 */
135 bfa_os_gettimeofday(&tv); 136 do_gettimeofday(&tv);
136 port->stats_reset_time = tv.tv_sec; 137 port->stats_reset_time = tv.tv_sec;
137 138
138 if (port->stats_cbfn) { 139 if (port->stats_cbfn) {
@@ -185,7 +186,7 @@ bfa_port_isr(void *cbarg, struct bfi_mbmsg_s *m)
185 break; 186 break;
186 187
187 default: 188 default:
188 bfa_assert(0); 189 WARN_ON(1);
189 } 190 }
190} 191}
191 192
@@ -432,9 +433,9 @@ void
432bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc, 433bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
433 void *dev, struct bfa_trc_mod_s *trcmod) 434 void *dev, struct bfa_trc_mod_s *trcmod)
434{ 435{
435 struct bfa_timeval_s tv; 436 struct timeval tv;
436 437
437 bfa_assert(port); 438 WARN_ON(!port);
438 439
439 port->dev = dev; 440 port->dev = dev;
440 port->ioc = ioc; 441 port->ioc = ioc;
@@ -447,27 +448,13 @@ bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
447 448
448 bfa_ioc_mbox_regisr(port->ioc, BFI_MC_PORT, bfa_port_isr, port); 449 bfa_ioc_mbox_regisr(port->ioc, BFI_MC_PORT, bfa_port_isr, port);
449 bfa_ioc_hbfail_init(&port->hbfail, bfa_port_hbfail, port); 450 bfa_ioc_hbfail_init(&port->hbfail, bfa_port_hbfail, port);
450 bfa_ioc_hbfail_register(port->ioc, &port->hbfail); 451 list_add_tail(&port->hbfail.qe, &port->ioc->hb_notify_q);
451 452
452 /* 453 /*
453 * initialize time stamp for stats reset 454 * initialize time stamp for stats reset
454 */ 455 */
455 bfa_os_gettimeofday(&tv); 456 do_gettimeofday(&tv);
456 port->stats_reset_time = tv.tv_sec; 457 port->stats_reset_time = tv.tv_sec;
457 458
458 bfa_trc(port, 0); 459 bfa_trc(port, 0);
459} 460}
460
461/*
462 * bfa_port_detach()
463 *
464 *
465 * @param[in] port - Pointer to the Port module data structure
466 *
467 * @return void
468 */
469void
470bfa_port_detach(struct bfa_port_s *port)
471{
472 bfa_trc(port, 0);
473}
diff --git a/drivers/scsi/bfa/bfa_port.h b/drivers/scsi/bfa/bfa_port.h
index dbce9dfd056b..c4ee9db6b470 100644
--- a/drivers/scsi/bfa/bfa_port.h
+++ b/drivers/scsi/bfa/bfa_port.h
@@ -48,7 +48,6 @@ struct bfa_port_s {
48 48
49void bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc, 49void bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
50 void *dev, struct bfa_trc_mod_s *trcmod); 50 void *dev, struct bfa_trc_mod_s *trcmod);
51void bfa_port_detach(struct bfa_port_s *port);
52void bfa_port_hbfail(void *arg); 51void bfa_port_hbfail(void *arg);
53 52
54bfa_status_t bfa_port_get_stats(struct bfa_port_s *port, 53bfa_status_t bfa_port_get_stats(struct bfa_port_s *port,
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
index 37e16ac8f249..1d34921f88bf 100644
--- a/drivers/scsi/bfa/bfa_svc.c
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -15,11 +15,10 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18#include "bfa_os_inc.h" 18#include "bfad_drv.h"
19#include "bfa_plog.h" 19#include "bfa_plog.h"
20#include "bfa_cs.h" 20#include "bfa_cs.h"
21#include "bfa_modules.h" 21#include "bfa_modules.h"
22#include "bfad_drv.h"
23 22
24BFA_TRC_FILE(HAL, FCXP); 23BFA_TRC_FILE(HAL, FCXP);
25BFA_MODULE(fcxp); 24BFA_MODULE(fcxp);
@@ -41,19 +40,6 @@ BFA_MODULE(uf);
41#define BFA_LPS_MAX_VPORTS_SUPP_CB 255 40#define BFA_LPS_MAX_VPORTS_SUPP_CB 255
42#define BFA_LPS_MAX_VPORTS_SUPP_CT 190 41#define BFA_LPS_MAX_VPORTS_SUPP_CT 190
43 42
44/*
45 * lps_pvt BFA LPS private functions
46 */
47
48enum bfa_lps_event {
49 BFA_LPS_SM_LOGIN = 1, /* login request from user */
50 BFA_LPS_SM_LOGOUT = 2, /* logout request from user */
51 BFA_LPS_SM_FWRSP = 3, /* f/w response to login/logout */
52 BFA_LPS_SM_RESUME = 4, /* space present in reqq queue */
53 BFA_LPS_SM_DELETE = 5, /* lps delete from user */
54 BFA_LPS_SM_OFFLINE = 6, /* Link is offline */
55 BFA_LPS_SM_RX_CVL = 7, /* Rx clear virtual link */
56};
57 43
58/* 44/*
59 * FC PORT related definitions 45 * FC PORT related definitions
@@ -66,7 +52,6 @@ enum bfa_lps_event {
66 ((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \ 52 ((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
67 (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE)) 53 (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
68 54
69
70/* 55/*
71 * BFA port state machine events 56 * BFA port state machine events
72 */ 57 */
@@ -113,19 +98,6 @@ enum bfa_fcport_ln_sm_event {
113 } \ 98 } \
114} while (0) 99} while (0)
115 100
116
117enum bfa_rport_event {
118 BFA_RPORT_SM_CREATE = 1, /* rport create event */
119 BFA_RPORT_SM_DELETE = 2, /* deleting an existing rport */
120 BFA_RPORT_SM_ONLINE = 3, /* rport is online */
121 BFA_RPORT_SM_OFFLINE = 4, /* rport is offline */
122 BFA_RPORT_SM_FWRSP = 5, /* firmware response */
123 BFA_RPORT_SM_HWFAIL = 6, /* IOC h/w failure */
124 BFA_RPORT_SM_QOS_SCN = 7, /* QoS SCN from firmware */
125 BFA_RPORT_SM_SET_SPEED = 8, /* Set Rport Speed */
126 BFA_RPORT_SM_QRESUME = 9, /* space in requeue queue */
127};
128
129/* 101/*
130 * forward declarations FCXP related functions 102 * forward declarations FCXP related functions
131 */ 103 */
@@ -159,6 +131,7 @@ static void bfa_lps_reqq_resume(void *lps_arg);
159static void bfa_lps_free(struct bfa_lps_s *lps); 131static void bfa_lps_free(struct bfa_lps_s *lps);
160static void bfa_lps_send_login(struct bfa_lps_s *lps); 132static void bfa_lps_send_login(struct bfa_lps_s *lps);
161static void bfa_lps_send_logout(struct bfa_lps_s *lps); 133static void bfa_lps_send_logout(struct bfa_lps_s *lps);
134static void bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps);
162static void bfa_lps_login_comp(struct bfa_lps_s *lps); 135static void bfa_lps_login_comp(struct bfa_lps_s *lps);
163static void bfa_lps_logout_comp(struct bfa_lps_s *lps); 136static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
164static void bfa_lps_cvl_event(struct bfa_lps_s *lps); 137static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
@@ -171,6 +144,8 @@ static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event);
171static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event 144static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event
172 event); 145 event);
173static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event); 146static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event);
147static void bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps,
148 enum bfa_lps_event event);
174static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event); 149static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
175static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event 150static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event
176 event); 151 event);
@@ -312,6 +287,18 @@ plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
312 return 0; 287 return 0;
313} 288}
314 289
290static u64
291bfa_get_log_time(void)
292{
293 u64 system_time = 0;
294 struct timeval tv;
295 do_gettimeofday(&tv);
296
297 /* We are interested in seconds only. */
298 system_time = tv.tv_sec;
299 return system_time;
300}
301
315static void 302static void
316bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec) 303bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
317{ 304{
@@ -322,7 +309,7 @@ bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
322 return; 309 return;
323 310
324 if (plkd_validate_logrec(pl_rec)) { 311 if (plkd_validate_logrec(pl_rec)) {
325 bfa_assert(0); 312 WARN_ON(1);
326 return; 313 return;
327 } 314 }
328 315
@@ -332,7 +319,7 @@ bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
332 319
333 memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s)); 320 memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));
334 321
335 pl_recp->tv = bfa_os_get_log_time(); 322 pl_recp->tv = bfa_get_log_time();
336 BFA_PL_LOG_REC_INCR(plog->tail); 323 BFA_PL_LOG_REC_INCR(plog->tail);
337 324
338 if (plog->head == plog->tail) 325 if (plog->head == plog->tail)
@@ -437,29 +424,6 @@ bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
437 } 424 }
438} 425}
439 426
440void
441bfa_plog_clear(struct bfa_plog_s *plog)
442{
443 plog->head = plog->tail = 0;
444}
445
446void
447bfa_plog_enable(struct bfa_plog_s *plog)
448{
449 plog->plog_enabled = 1;
450}
451
452void
453bfa_plog_disable(struct bfa_plog_s *plog)
454{
455 plog->plog_enabled = 0;
456}
457
458bfa_boolean_t
459bfa_plog_get_setting(struct bfa_plog_s *plog)
460{
461 return (bfa_boolean_t)plog->plog_enabled;
462}
463 427
464/* 428/*
465 * fcxp_pvt BFA FCXP private functions 429 * fcxp_pvt BFA FCXP private functions
@@ -637,15 +601,15 @@ bfa_fcxp_init_reqrsp(struct bfa_fcxp_s *fcxp,
637 bfa_fcxp_get_sglen_t sglen_cbfn) 601 bfa_fcxp_get_sglen_t sglen_cbfn)
638{ 602{
639 603
640 bfa_assert(bfa != NULL); 604 WARN_ON(bfa == NULL);
641 605
642 bfa_trc(bfa, fcxp->fcxp_tag); 606 bfa_trc(bfa, fcxp->fcxp_tag);
643 607
644 if (n_sgles == 0) { 608 if (n_sgles == 0) {
645 *use_ibuf = 1; 609 *use_ibuf = 1;
646 } else { 610 } else {
647 bfa_assert(*sga_cbfn != NULL); 611 WARN_ON(*sga_cbfn == NULL);
648 bfa_assert(*sglen_cbfn != NULL); 612 WARN_ON(*sglen_cbfn == NULL);
649 613
650 *use_ibuf = 0; 614 *use_ibuf = 0;
651 *r_sga_cbfn = sga_cbfn; 615 *r_sga_cbfn = sga_cbfn;
@@ -657,7 +621,7 @@ bfa_fcxp_init_reqrsp(struct bfa_fcxp_s *fcxp,
657 * alloc required sgpgs 621 * alloc required sgpgs
658 */ 622 */
659 if (n_sgles > BFI_SGE_INLINE) 623 if (n_sgles > BFI_SGE_INLINE)
660 bfa_assert(0); 624 WARN_ON(1);
661 } 625 }
662 626
663} 627}
@@ -671,7 +635,7 @@ bfa_fcxp_init(struct bfa_fcxp_s *fcxp,
671 bfa_fcxp_get_sglen_t rsp_sglen_cbfn) 635 bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
672{ 636{
673 637
674 bfa_assert(bfa != NULL); 638 WARN_ON(bfa == NULL);
675 639
676 bfa_trc(bfa, fcxp->fcxp_tag); 640 bfa_trc(bfa, fcxp->fcxp_tag);
677 641
@@ -708,7 +672,7 @@ bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
708 return; 672 return;
709 } 673 }
710 674
711 bfa_assert(bfa_q_is_on_q(&mod->fcxp_active_q, fcxp)); 675 WARN_ON(!bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
712 list_del(&fcxp->qe); 676 list_del(&fcxp->qe);
713 list_add_tail(&fcxp->qe, &mod->fcxp_free_q); 677 list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
714} 678}
@@ -757,7 +721,7 @@ hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
757 721
758 fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag); 722 fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);
759 723
760 bfa_assert(fcxp->send_cbfn != NULL); 724 WARN_ON(fcxp->send_cbfn == NULL);
761 725
762 hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp); 726 hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp);
763 727
@@ -913,13 +877,13 @@ bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
913 BFA_FCXP_REQ_PLD_PA(fcxp)); 877 BFA_FCXP_REQ_PLD_PA(fcxp));
914 } else { 878 } else {
915 if (fcxp->nreq_sgles > 0) { 879 if (fcxp->nreq_sgles > 0) {
916 bfa_assert(fcxp->nreq_sgles == 1); 880 WARN_ON(fcxp->nreq_sgles != 1);
917 hal_fcxp_set_local_sges(send_req->req_sge, 881 hal_fcxp_set_local_sges(send_req->req_sge,
918 reqi->req_tot_len, 882 reqi->req_tot_len,
919 fcxp->req_sga_cbfn(fcxp->caller, 883 fcxp->req_sga_cbfn(fcxp->caller,
920 0)); 884 0));
921 } else { 885 } else {
922 bfa_assert(reqi->req_tot_len == 0); 886 WARN_ON(reqi->req_tot_len != 0);
923 hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0); 887 hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
924 } 888 }
925 } 889 }
@@ -928,20 +892,20 @@ bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
928 * setup rsp sgles 892 * setup rsp sgles
929 */ 893 */
930 if (fcxp->use_irspbuf == 1) { 894 if (fcxp->use_irspbuf == 1) {
931 bfa_assert(rspi->rsp_maxlen <= BFA_FCXP_MAX_LBUF_SZ); 895 WARN_ON(rspi->rsp_maxlen > BFA_FCXP_MAX_LBUF_SZ);
932 896
933 hal_fcxp_set_local_sges(send_req->rsp_sge, rspi->rsp_maxlen, 897 hal_fcxp_set_local_sges(send_req->rsp_sge, rspi->rsp_maxlen,
934 BFA_FCXP_RSP_PLD_PA(fcxp)); 898 BFA_FCXP_RSP_PLD_PA(fcxp));
935 899
936 } else { 900 } else {
937 if (fcxp->nrsp_sgles > 0) { 901 if (fcxp->nrsp_sgles > 0) {
938 bfa_assert(fcxp->nrsp_sgles == 1); 902 WARN_ON(fcxp->nrsp_sgles != 1);
939 hal_fcxp_set_local_sges(send_req->rsp_sge, 903 hal_fcxp_set_local_sges(send_req->rsp_sge,
940 rspi->rsp_maxlen, 904 rspi->rsp_maxlen,
941 fcxp->rsp_sga_cbfn(fcxp->caller, 905 fcxp->rsp_sga_cbfn(fcxp->caller,
942 0)); 906 0));
943 } else { 907 } else {
944 bfa_assert(rspi->rsp_maxlen == 0); 908 WARN_ON(rspi->rsp_maxlen != 0);
945 hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0); 909 hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
946 } 910 }
947 } 911 }
@@ -955,10 +919,6 @@ bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
955} 919}
956 920
957/* 921/*
958 * hal_fcxp_api BFA FCXP API
959 */
960
961/*
962 * Allocate an FCXP instance to send a response or to send a request 922 * Allocate an FCXP instance to send a response or to send a request
963 * that has a response. Request/response buffers are allocated by caller. 923 * that has a response. Request/response buffers are allocated by caller.
964 * 924 *
@@ -990,7 +950,7 @@ bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
990{ 950{
991 struct bfa_fcxp_s *fcxp = NULL; 951 struct bfa_fcxp_s *fcxp = NULL;
992 952
993 bfa_assert(bfa != NULL); 953 WARN_ON(bfa == NULL);
994 954
995 fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa)); 955 fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa));
996 if (fcxp == NULL) 956 if (fcxp == NULL)
@@ -1017,7 +977,7 @@ bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
1017 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod; 977 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
1018 void *reqbuf; 978 void *reqbuf;
1019 979
1020 bfa_assert(fcxp->use_ireqbuf == 1); 980 WARN_ON(fcxp->use_ireqbuf != 1);
1021 reqbuf = ((u8 *)mod->req_pld_list_kva) + 981 reqbuf = ((u8 *)mod->req_pld_list_kva) +
1022 fcxp->fcxp_tag * mod->req_pld_sz; 982 fcxp->fcxp_tag * mod->req_pld_sz;
1023 return reqbuf; 983 return reqbuf;
@@ -1044,7 +1004,7 @@ bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
1044 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod; 1004 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
1045 void *rspbuf; 1005 void *rspbuf;
1046 1006
1047 bfa_assert(fcxp->use_irspbuf == 1); 1007 WARN_ON(fcxp->use_irspbuf != 1);
1048 1008
1049 rspbuf = ((u8 *)mod->rsp_pld_list_kva) + 1009 rspbuf = ((u8 *)mod->rsp_pld_list_kva) +
1050 fcxp->fcxp_tag * mod->rsp_pld_sz; 1010 fcxp->fcxp_tag * mod->rsp_pld_sz;
@@ -1052,7 +1012,7 @@ bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
1052} 1012}
1053 1013
1054/* 1014/*
1055 * Free the BFA FCXP 1015 * Free the BFA FCXP
1056 * 1016 *
1057 * @param[in] fcxp BFA fcxp pointer 1017 * @param[in] fcxp BFA fcxp pointer
1058 * 1018 *
@@ -1063,7 +1023,7 @@ bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
1063{ 1023{
1064 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod; 1024 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
1065 1025
1066 bfa_assert(fcxp != NULL); 1026 WARN_ON(fcxp == NULL);
1067 bfa_trc(mod->bfa, fcxp->fcxp_tag); 1027 bfa_trc(mod->bfa, fcxp->fcxp_tag);
1068 bfa_fcxp_put(fcxp); 1028 bfa_fcxp_put(fcxp);
1069} 1029}
@@ -1142,7 +1102,7 @@ bfa_status_t
1142bfa_fcxp_abort(struct bfa_fcxp_s *fcxp) 1102bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
1143{ 1103{
1144 bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag); 1104 bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag);
1145 bfa_assert(0); 1105 WARN_ON(1);
1146 return BFA_STATUS_OK; 1106 return BFA_STATUS_OK;
1147} 1107}
1148 1108
@@ -1157,7 +1117,7 @@ bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
1157{ 1117{
1158 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); 1118 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1159 1119
1160 bfa_assert(list_empty(&mod->fcxp_free_q)); 1120 WARN_ON(!list_empty(&mod->fcxp_free_q));
1161 1121
1162 wqe->alloc_cbfn = alloc_cbfn; 1122 wqe->alloc_cbfn = alloc_cbfn;
1163 wqe->alloc_cbarg = alloc_cbarg; 1123 wqe->alloc_cbarg = alloc_cbarg;
@@ -1178,7 +1138,7 @@ bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
1178{ 1138{
1179 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); 1139 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1180 1140
1181 bfa_assert(bfa_q_is_on_q(&mod->wait_q, wqe)); 1141 WARN_ON(!bfa_q_is_on_q(&mod->wait_q, wqe));
1182 list_del(&wqe->qe); 1142 list_del(&wqe->qe);
1183} 1143}
1184 1144
@@ -1199,12 +1159,6 @@ bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
1199 fcxp->send_cbfn = bfa_fcxp_null_comp; 1159 fcxp->send_cbfn = bfa_fcxp_null_comp;
1200} 1160}
1201 1161
1202
1203
1204/*
1205 * hal_fcxp_public BFA FCXP public functions
1206 */
1207
1208void 1162void
1209bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg) 1163bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
1210{ 1164{
@@ -1215,7 +1169,7 @@ bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
1215 1169
1216 default: 1170 default:
1217 bfa_trc(bfa, msg->mhdr.msg_id); 1171 bfa_trc(bfa, msg->mhdr.msg_id);
1218 bfa_assert(0); 1172 WARN_ON(1);
1219 } 1173 }
1220} 1174}
1221 1175
@@ -1303,6 +1257,12 @@ bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
1303 else 1257 else
1304 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, 1258 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1305 BFA_PL_EID_LOGIN, 0, "FLOGI Accept"); 1259 BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
1260 /* If N2N, send the assigned PID to FW */
1261 bfa_trc(lps->bfa, lps->fport);
1262 bfa_trc(lps->bfa, lps->lp_pid);
1263
1264 if (!lps->fport && lps->lp_pid)
1265 bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
1306 } else { 1266 } else {
1307 bfa_sm_set_state(lps, bfa_lps_sm_init); 1267 bfa_sm_set_state(lps, bfa_lps_sm_init);
1308 if (lps->fdisc) 1268 if (lps->fdisc)
@@ -1321,6 +1281,11 @@ bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
1321 bfa_sm_set_state(lps, bfa_lps_sm_init); 1281 bfa_sm_set_state(lps, bfa_lps_sm_init);
1322 break; 1282 break;
1323 1283
1284 case BFA_LPS_SM_SET_N2N_PID:
1285 bfa_trc(lps->bfa, lps->fport);
1286 bfa_trc(lps->bfa, lps->lp_pid);
1287 break;
1288
1324 default: 1289 default:
1325 bfa_sm_fault(lps->bfa, event); 1290 bfa_sm_fault(lps->bfa, event);
1326 } 1291 }
@@ -1389,9 +1354,59 @@ bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
1389 BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx"); 1354 BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
1390 break; 1355 break;
1391 1356
1357 case BFA_LPS_SM_SET_N2N_PID:
1358 if (bfa_reqq_full(lps->bfa, lps->reqq)) {
1359 bfa_sm_set_state(lps, bfa_lps_sm_online_n2n_pid_wait);
1360 bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
1361 } else
1362 bfa_lps_send_set_n2n_pid(lps);
1363 break;
1364
1365 case BFA_LPS_SM_OFFLINE:
1366 case BFA_LPS_SM_DELETE:
1367 bfa_sm_set_state(lps, bfa_lps_sm_init);
1368 break;
1369
1370 default:
1371 bfa_sm_fault(lps->bfa, event);
1372 }
1373}
1374
1375/*
1376 * login complete
1377 */
1378static void
1379bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps, enum bfa_lps_event event)
1380{
1381 bfa_trc(lps->bfa, lps->lp_tag);
1382 bfa_trc(lps->bfa, event);
1383
1384 switch (event) {
1385 case BFA_LPS_SM_RESUME:
1386 bfa_sm_set_state(lps, bfa_lps_sm_online);
1387 bfa_lps_send_set_n2n_pid(lps);
1388 break;
1389
1390 case BFA_LPS_SM_LOGOUT:
1391 bfa_sm_set_state(lps, bfa_lps_sm_logowait);
1392 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1393 BFA_PL_EID_LOGO, 0, "Logout");
1394 break;
1395
1396 case BFA_LPS_SM_RX_CVL:
1397 bfa_sm_set_state(lps, bfa_lps_sm_init);
1398 bfa_reqq_wcancel(&lps->wqe);
1399
1400 /* Let the vport module know about this event */
1401 bfa_lps_cvl_event(lps);
1402 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1403 BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
1404 break;
1405
1392 case BFA_LPS_SM_OFFLINE: 1406 case BFA_LPS_SM_OFFLINE:
1393 case BFA_LPS_SM_DELETE: 1407 case BFA_LPS_SM_DELETE:
1394 bfa_sm_set_state(lps, bfa_lps_sm_init); 1408 bfa_sm_set_state(lps, bfa_lps_sm_init);
1409 bfa_reqq_wcancel(&lps->wqe);
1395 break; 1410 break;
1396 1411
1397 default: 1412 default:
@@ -1540,15 +1555,16 @@ bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
1540 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); 1555 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1541 struct bfa_lps_s *lps; 1556 struct bfa_lps_s *lps;
1542 1557
1543 bfa_assert(rsp->lp_tag < mod->num_lps); 1558 WARN_ON(rsp->lp_tag >= mod->num_lps);
1544 lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag); 1559 lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);
1545 1560
1546 lps->status = rsp->status; 1561 lps->status = rsp->status;
1547 switch (rsp->status) { 1562 switch (rsp->status) {
1548 case BFA_STATUS_OK: 1563 case BFA_STATUS_OK:
1549 lps->fport = rsp->f_port; 1564 lps->fport = rsp->f_port;
1565 if (lps->fport)
1566 lps->lp_pid = rsp->lp_pid;
1550 lps->npiv_en = rsp->npiv_en; 1567 lps->npiv_en = rsp->npiv_en;
1551 lps->lp_pid = rsp->lp_pid;
1552 lps->pr_bbcred = be16_to_cpu(rsp->bb_credit); 1568 lps->pr_bbcred = be16_to_cpu(rsp->bb_credit);
1553 lps->pr_pwwn = rsp->port_name; 1569 lps->pr_pwwn = rsp->port_name;
1554 lps->pr_nwwn = rsp->node_name; 1570 lps->pr_nwwn = rsp->node_name;
@@ -1587,7 +1603,7 @@ bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
1587 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); 1603 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1588 struct bfa_lps_s *lps; 1604 struct bfa_lps_s *lps;
1589 1605
1590 bfa_assert(rsp->lp_tag < mod->num_lps); 1606 WARN_ON(rsp->lp_tag >= mod->num_lps);
1591 lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag); 1607 lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);
1592 1608
1593 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP); 1609 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
@@ -1640,7 +1656,7 @@ bfa_lps_send_login(struct bfa_lps_s *lps)
1640 struct bfi_lps_login_req_s *m; 1656 struct bfi_lps_login_req_s *m;
1641 1657
1642 m = bfa_reqq_next(lps->bfa, lps->reqq); 1658 m = bfa_reqq_next(lps->bfa, lps->reqq);
1643 bfa_assert(m); 1659 WARN_ON(!m);
1644 1660
1645 bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ, 1661 bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
1646 bfa_lpuid(lps->bfa)); 1662 bfa_lpuid(lps->bfa));
@@ -1665,7 +1681,7 @@ bfa_lps_send_logout(struct bfa_lps_s *lps)
1665 struct bfi_lps_logout_req_s *m; 1681 struct bfi_lps_logout_req_s *m;
1666 1682
1667 m = bfa_reqq_next(lps->bfa, lps->reqq); 1683 m = bfa_reqq_next(lps->bfa, lps->reqq);
1668 bfa_assert(m); 1684 WARN_ON(!m);
1669 1685
1670 bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ, 1686 bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
1671 bfa_lpuid(lps->bfa)); 1687 bfa_lpuid(lps->bfa));
@@ -1676,6 +1692,25 @@ bfa_lps_send_logout(struct bfa_lps_s *lps)
1676} 1692}
1677 1693
1678/* 1694/*
1695 * send n2n pid set request to firmware
1696 */
1697static void
1698bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps)
1699{
1700 struct bfi_lps_n2n_pid_req_s *m;
1701
1702 m = bfa_reqq_next(lps->bfa, lps->reqq);
1703 WARN_ON(!m);
1704
1705 bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_N2N_PID_REQ,
1706 bfa_lpuid(lps->bfa));
1707
1708 m->lp_tag = lps->lp_tag;
1709 m->lp_pid = lps->lp_pid;
1710 bfa_reqq_produce(lps->bfa, lps->reqq);
1711}
1712
1713/*
1679 * Indirect login completion handler for non-fcs 1714 * Indirect login completion handler for non-fcs
1680 */ 1715 */
1681static void 1716static void
@@ -1853,14 +1888,6 @@ bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
1853 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN); 1888 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1854} 1889}
1855 1890
1856/*
1857 * Initiate a lport logout (flogi).
1858 */
1859void
1860bfa_lps_flogo(struct bfa_lps_s *lps)
1861{
1862 bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
1863}
1864 1891
1865/* 1892/*
1866 * Initiate a lport FDSIC logout. 1893 * Initiate a lport FDSIC logout.
@@ -1871,24 +1898,6 @@ bfa_lps_fdisclogo(struct bfa_lps_s *lps)
1871 bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT); 1898 bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
1872} 1899}
1873 1900
1874/*
1875 * Discard a pending login request -- should be called only for
1876 * link down handling.
1877 */
1878void
1879bfa_lps_discard(struct bfa_lps_s *lps)
1880{
1881 bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
1882}
1883
1884/*
1885 * Return lport services tag
1886 */
1887u8
1888bfa_lps_get_tag(struct bfa_lps_s *lps)
1889{
1890 return lps->lp_tag;
1891}
1892 1901
1893/* 1902/*
1894 * Return lport services tag given the pid 1903 * Return lport services tag given the pid
@@ -1909,55 +1918,6 @@ bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
1909 return 0; 1918 return 0;
1910} 1919}
1911 1920
1912/*
1913 * return if fabric login indicates support for NPIV
1914 */
1915bfa_boolean_t
1916bfa_lps_is_npiv_en(struct bfa_lps_s *lps)
1917{
1918 return lps->npiv_en;
1919}
1920
1921/*
1922 * Return TRUE if attached to F-Port, else return FALSE
1923 */
1924bfa_boolean_t
1925bfa_lps_is_fport(struct bfa_lps_s *lps)
1926{
1927 return lps->fport;
1928}
1929
1930/*
1931 * Return TRUE if attached to a Brocade Fabric
1932 */
1933bfa_boolean_t
1934bfa_lps_is_brcd_fabric(struct bfa_lps_s *lps)
1935{
1936 return lps->brcd_switch;
1937}
1938/*
1939 * return TRUE if authentication is required
1940 */
1941bfa_boolean_t
1942bfa_lps_is_authreq(struct bfa_lps_s *lps)
1943{
1944 return lps->auth_req;
1945}
1946
1947bfa_eproto_status_t
1948bfa_lps_get_extstatus(struct bfa_lps_s *lps)
1949{
1950 return lps->ext_status;
1951}
1952
1953/*
1954 * return port id assigned to the lport
1955 */
1956u32
1957bfa_lps_get_pid(struct bfa_lps_s *lps)
1958{
1959 return lps->lp_pid;
1960}
1961 1921
1962/* 1922/*
1963 * return port id assigned to the base lport 1923 * return port id assigned to the base lport
@@ -1971,57 +1931,16 @@ bfa_lps_get_base_pid(struct bfa_s *bfa)
1971} 1931}
1972 1932
1973/* 1933/*
1974 * Return bb_credit assigned in FLOGI response 1934 * Set PID in case of n2n (which is assigned during PLOGI)
1975 */
1976u16
1977bfa_lps_get_peer_bbcredit(struct bfa_lps_s *lps)
1978{
1979 return lps->pr_bbcred;
1980}
1981
1982/*
1983 * Return peer port name
1984 */
1985wwn_t
1986bfa_lps_get_peer_pwwn(struct bfa_lps_s *lps)
1987{
1988 return lps->pr_pwwn;
1989}
1990
1991/*
1992 * Return peer node name
1993 */
1994wwn_t
1995bfa_lps_get_peer_nwwn(struct bfa_lps_s *lps)
1996{
1997 return lps->pr_nwwn;
1998}
1999
2000/*
2001 * return reason code if login request is rejected
2002 */
2003u8
2004bfa_lps_get_lsrjt_rsn(struct bfa_lps_s *lps)
2005{
2006 return lps->lsrjt_rsn;
2007}
2008
2009/*
2010 * return explanation code if login request is rejected
2011 */ 1935 */
2012u8 1936void
2013bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps) 1937bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, uint32_t n2n_pid)
2014{ 1938{
2015 return lps->lsrjt_expl; 1939 bfa_trc(lps->bfa, lps->lp_tag);
2016} 1940 bfa_trc(lps->bfa, n2n_pid);
2017 1941
2018/* 1942 lps->lp_pid = n2n_pid;
2019 * Return fpma/spma MAC for lport 1943 bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
2020 */
2021mac_t
2022bfa_lps_get_lp_mac(struct bfa_lps_s *lps)
2023{
2024 return lps->lp_mac;
2025} 1944}
2026 1945
2027/* 1946/*
@@ -2050,7 +1969,7 @@ bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2050 1969
2051 default: 1970 default:
2052 bfa_trc(bfa, m->mhdr.msg_id); 1971 bfa_trc(bfa, m->mhdr.msg_id);
2053 bfa_assert(0); 1972 WARN_ON(1);
2054 } 1973 }
2055} 1974}
2056 1975
@@ -2068,6 +1987,8 @@ bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
2068 /* 1987 /*
2069 * Start event after IOC is configured and BFA is started. 1988 * Start event after IOC is configured and BFA is started.
2070 */ 1989 */
1990 fcport->use_flash_cfg = BFA_TRUE;
1991
2071 if (bfa_fcport_send_enable(fcport)) { 1992 if (bfa_fcport_send_enable(fcport)) {
2072 bfa_trc(fcport->bfa, BFA_TRUE); 1993 bfa_trc(fcport->bfa, BFA_TRUE);
2073 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); 1994 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
@@ -2178,7 +2099,7 @@ bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
2178 bfa_fcport_update_linkinfo(fcport); 2099 bfa_fcport_update_linkinfo(fcport);
2179 bfa_sm_set_state(fcport, bfa_fcport_sm_linkup); 2100 bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
2180 2101
2181 bfa_assert(fcport->event_cbfn); 2102 WARN_ON(!fcport->event_cbfn);
2182 bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE); 2103 bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
2183 break; 2104 break;
2184 2105
@@ -2229,7 +2150,7 @@ bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
2229 case BFA_FCPORT_SM_LINKUP: 2150 case BFA_FCPORT_SM_LINKUP:
2230 bfa_fcport_update_linkinfo(fcport); 2151 bfa_fcport_update_linkinfo(fcport);
2231 bfa_sm_set_state(fcport, bfa_fcport_sm_linkup); 2152 bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
2232 bfa_assert(fcport->event_cbfn); 2153 WARN_ON(!fcport->event_cbfn);
2233 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, 2154 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2234 BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup"); 2155 BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
2235 if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) { 2156 if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
@@ -2803,12 +2724,6 @@ bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
2803 } 2724 }
2804} 2725}
2805 2726
2806
2807
2808/*
2809 * hal_port_private
2810 */
2811
2812static void 2727static void
2813__bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete) 2728__bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
2814{ 2729{
@@ -2839,7 +2754,7 @@ bfa_fcport_scn(struct bfa_fcport_s *fcport, enum bfa_port_linkstate event,
2839 bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN); 2754 bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN);
2840 break; 2755 break;
2841 default: 2756 default:
2842 bfa_assert(0); 2757 WARN_ON(1);
2843 } 2758 }
2844} 2759}
2845 2760
@@ -2906,7 +2821,7 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
2906 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 2821 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
2907 struct bfa_port_cfg_s *port_cfg = &fcport->cfg; 2822 struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
2908 struct bfa_fcport_ln_s *ln = &fcport->ln; 2823 struct bfa_fcport_ln_s *ln = &fcport->ln;
2909 struct bfa_timeval_s tv; 2824 struct timeval tv;
2910 2825
2911 memset(fcport, 0, sizeof(struct bfa_fcport_s)); 2826 memset(fcport, 0, sizeof(struct bfa_fcport_s));
2912 fcport->bfa = bfa; 2827 fcport->bfa = bfa;
@@ -2920,7 +2835,7 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
2920 /* 2835 /*
2921 * initialize time stamp for stats reset 2836 * initialize time stamp for stats reset
2922 */ 2837 */
2923 bfa_os_gettimeofday(&tv); 2838 do_gettimeofday(&tv);
2924 fcport->stats_reset_time = tv.tv_sec; 2839 fcport->stats_reset_time = tv.tv_sec;
2925 2840
2926 /* 2841 /*
@@ -3039,6 +2954,7 @@ bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
3039 m->port_cfg = fcport->cfg; 2954 m->port_cfg = fcport->cfg;
3040 m->msgtag = fcport->msgtag; 2955 m->msgtag = fcport->msgtag;
3041 m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize); 2956 m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize);
2957 m->use_flash_cfg = fcport->use_flash_cfg;
3042 bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa); 2958 bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
3043 bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo); 2959 bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
3044 bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi); 2960 bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);
@@ -3089,8 +3005,8 @@ bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
3089static void 3005static void
3090bfa_fcport_set_wwns(struct bfa_fcport_s *fcport) 3006bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
3091{ 3007{
3092 fcport->pwwn = bfa_ioc_get_pwwn(&fcport->bfa->ioc); 3008 fcport->pwwn = fcport->bfa->ioc.attr->pwwn;
3093 fcport->nwwn = bfa_ioc_get_nwwn(&fcport->bfa->ioc); 3009 fcport->nwwn = fcport->bfa->ioc.attr->nwwn;
3094 3010
3095 bfa_trc(fcport->bfa, fcport->pwwn); 3011 bfa_trc(fcport->bfa, fcport->pwwn);
3096 bfa_trc(fcport->bfa, fcport->nwwn); 3012 bfa_trc(fcport->bfa, fcport->nwwn);
@@ -3127,7 +3043,7 @@ bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
3127 struct bfa_qos_stats_s *s) 3043 struct bfa_qos_stats_s *s)
3128{ 3044{
3129 u32 *dip = (u32 *) d; 3045 u32 *dip = (u32 *) d;
3130 u32 *sip = (u32 *) s; 3046 __be32 *sip = (__be32 *) s;
3131 int i; 3047 int i;
3132 3048
3133 /* Now swap the 32 bit fields */ 3049 /* Now swap the 32 bit fields */
@@ -3140,12 +3056,12 @@ bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
3140 struct bfa_fcoe_stats_s *s) 3056 struct bfa_fcoe_stats_s *s)
3141{ 3057{
3142 u32 *dip = (u32 *) d; 3058 u32 *dip = (u32 *) d;
3143 u32 *sip = (u32 *) s; 3059 __be32 *sip = (__be32 *) s;
3144 int i; 3060 int i;
3145 3061
3146 for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32)); 3062 for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
3147 i = i + 2) { 3063 i = i + 2) {
3148#ifdef __BIGENDIAN 3064#ifdef __BIG_ENDIAN
3149 dip[i] = be32_to_cpu(sip[i]); 3065 dip[i] = be32_to_cpu(sip[i]);
3150 dip[i + 1] = be32_to_cpu(sip[i + 1]); 3066 dip[i + 1] = be32_to_cpu(sip[i + 1]);
3151#else 3067#else
@@ -3162,7 +3078,7 @@ __bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
3162 3078
3163 if (complete) { 3079 if (complete) {
3164 if (fcport->stats_status == BFA_STATUS_OK) { 3080 if (fcport->stats_status == BFA_STATUS_OK) {
3165 struct bfa_timeval_s tv; 3081 struct timeval tv;
3166 3082
3167 /* Swap FC QoS or FCoE stats */ 3083 /* Swap FC QoS or FCoE stats */
3168 if (bfa_ioc_get_fcmode(&fcport->bfa->ioc)) { 3084 if (bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
@@ -3174,7 +3090,7 @@ __bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
3174 &fcport->stats_ret->fcoe, 3090 &fcport->stats_ret->fcoe,
3175 &fcport->stats->fcoe); 3091 &fcport->stats->fcoe);
3176 3092
3177 bfa_os_gettimeofday(&tv); 3093 do_gettimeofday(&tv);
3178 fcport->stats_ret->fcoe.secs_reset = 3094 fcport->stats_ret->fcoe.secs_reset =
3179 tv.tv_sec - fcport->stats_reset_time; 3095 tv.tv_sec - fcport->stats_reset_time;
3180 } 3096 }
@@ -3233,12 +3149,12 @@ __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
3233 struct bfa_fcport_s *fcport = cbarg; 3149 struct bfa_fcport_s *fcport = cbarg;
3234 3150
3235 if (complete) { 3151 if (complete) {
3236 struct bfa_timeval_s tv; 3152 struct timeval tv;
3237 3153
3238 /* 3154 /*
3239 * re-initialize time stamp for stats reset 3155 * re-initialize time stamp for stats reset
3240 */ 3156 */
3241 bfa_os_gettimeofday(&tv); 3157 do_gettimeofday(&tv);
3242 fcport->stats_reset_time = tv.tv_sec; 3158 fcport->stats_reset_time = tv.tv_sec;
3243 3159
3244 fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status); 3160 fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
@@ -3303,8 +3219,8 @@ bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
3303 int link_bm = 0; 3219 int link_bm = 0;
3304 3220
3305 bfa_trc(fcport->bfa, fcport->cfg.trunked); 3221 bfa_trc(fcport->bfa, fcport->cfg.trunked);
3306 bfa_assert(scn->trunk_state == BFA_TRUNK_ONLINE || 3222 WARN_ON(scn->trunk_state != BFA_TRUNK_ONLINE &&
3307 scn->trunk_state == BFA_TRUNK_OFFLINE); 3223 scn->trunk_state != BFA_TRUNK_OFFLINE);
3308 3224
3309 bfa_trc(fcport->bfa, trunk->attr.state); 3225 bfa_trc(fcport->bfa, trunk->attr.state);
3310 bfa_trc(fcport->bfa, scn->trunk_state); 3226 bfa_trc(fcport->bfa, scn->trunk_state);
@@ -3396,12 +3312,6 @@ bfa_trunk_iocdisable(struct bfa_s *bfa)
3396 } 3312 }
3397} 3313}
3398 3314
3399
3400
3401/*
3402 * hal_port_public
3403 */
3404
3405/* 3315/*
3406 * Called to initialize port attributes 3316 * Called to initialize port attributes
3407 */ 3317 */
@@ -3419,9 +3329,9 @@ bfa_fcport_init(struct bfa_s *bfa)
3419 fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc); 3329 fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
3420 fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc); 3330 fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);
3421 3331
3422 bfa_assert(fcport->cfg.maxfrsize); 3332 WARN_ON(!fcport->cfg.maxfrsize);
3423 bfa_assert(fcport->cfg.rx_bbcredit); 3333 WARN_ON(!fcport->cfg.rx_bbcredit);
3424 bfa_assert(fcport->speed_sup); 3334 WARN_ON(!fcport->speed_sup);
3425} 3335}
3426 3336
3427/* 3337/*
@@ -3441,8 +3351,28 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
3441 3351
3442 switch (msg->mhdr.msg_id) { 3352 switch (msg->mhdr.msg_id) {
3443 case BFI_FCPORT_I2H_ENABLE_RSP: 3353 case BFI_FCPORT_I2H_ENABLE_RSP:
3444 if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) 3354 if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) {
3355
3356 if (fcport->use_flash_cfg) {
3357 fcport->cfg = i2hmsg.penable_rsp->port_cfg;
3358 fcport->cfg.maxfrsize =
3359 cpu_to_be16(fcport->cfg.maxfrsize);
3360 fcport->cfg.path_tov =
3361 cpu_to_be16(fcport->cfg.path_tov);
3362 fcport->cfg.q_depth =
3363 cpu_to_be16(fcport->cfg.q_depth);
3364
3365 if (fcport->cfg.trunked)
3366 fcport->trunk.attr.state =
3367 BFA_TRUNK_OFFLINE;
3368 else
3369 fcport->trunk.attr.state =
3370 BFA_TRUNK_DISABLED;
3371 fcport->use_flash_cfg = BFA_FALSE;
3372 }
3373
3445 bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP); 3374 bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
3375 }
3446 break; 3376 break;
3447 3377
3448 case BFI_FCPORT_I2H_DISABLE_RSP: 3378 case BFI_FCPORT_I2H_DISABLE_RSP:
@@ -3498,17 +3428,11 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
3498 break; 3428 break;
3499 3429
3500 default: 3430 default:
3501 bfa_assert(0); 3431 WARN_ON(1);
3502 break; 3432 break;
3503 } 3433 }
3504} 3434}
3505 3435
3506
3507
3508/*
3509 * hal_port_api
3510 */
3511
3512/* 3436/*
3513 * Registered callback for port events. 3437 * Registered callback for port events.
3514 */ 3438 */
@@ -3732,8 +3656,8 @@ bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
3732 attr->nwwn = fcport->nwwn; 3656 attr->nwwn = fcport->nwwn;
3733 attr->pwwn = fcport->pwwn; 3657 attr->pwwn = fcport->pwwn;
3734 3658
3735 attr->factorypwwn = bfa_ioc_get_mfg_pwwn(&bfa->ioc); 3659 attr->factorypwwn = bfa->ioc.attr->mfg_pwwn;
3736 attr->factorynwwn = bfa_ioc_get_mfg_nwwn(&bfa->ioc); 3660 attr->factorynwwn = bfa->ioc.attr->mfg_nwwn;
3737 3661
3738 memcpy(&attr->pport_cfg, &fcport->cfg, 3662 memcpy(&attr->pport_cfg, &fcport->cfg,
3739 sizeof(struct bfa_port_cfg_s)); 3663 sizeof(struct bfa_port_cfg_s));
@@ -3751,7 +3675,7 @@ bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
3751 /* beacon attributes */ 3675 /* beacon attributes */
3752 attr->beacon = fcport->beacon; 3676 attr->beacon = fcport->beacon;
3753 attr->link_e2e_beacon = fcport->link_e2e_beacon; 3677 attr->link_e2e_beacon = fcport->link_e2e_beacon;
3754 attr->plog_enabled = bfa_plog_get_setting(fcport->bfa->plog); 3678 attr->plog_enabled = (bfa_boolean_t)fcport->bfa->plog->plog_enabled;
3755 attr->io_profile = bfa_fcpim_get_io_profile(fcport->bfa); 3679 attr->io_profile = bfa_fcpim_get_io_profile(fcport->bfa);
3756 3680
3757 attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa); 3681 attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa);
@@ -3818,89 +3742,6 @@ bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
3818 return BFA_STATUS_OK; 3742 return BFA_STATUS_OK;
3819} 3743}
3820 3744
3821/*
3822 * Fetch FCQoS port statistics
3823 */
3824bfa_status_t
3825bfa_fcport_get_qos_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
3826 bfa_cb_port_t cbfn, void *cbarg)
3827{
3828 /* Meaningful only for FC mode */
3829 bfa_assert(bfa_ioc_get_fcmode(&bfa->ioc));
3830
3831 return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg);
3832}
3833
3834/*
3835 * Reset FCoE port statistics
3836 */
3837bfa_status_t
3838bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
3839{
3840 /* Meaningful only for FC mode */
3841 bfa_assert(bfa_ioc_get_fcmode(&bfa->ioc));
3842
3843 return bfa_fcport_clear_stats(bfa, cbfn, cbarg);
3844}
3845
3846/*
3847 * Fetch FCQoS port statistics
3848 */
3849bfa_status_t
3850bfa_fcport_get_fcoe_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
3851 bfa_cb_port_t cbfn, void *cbarg)
3852{
3853 /* Meaningful only for FCoE mode */
3854 bfa_assert(!bfa_ioc_get_fcmode(&bfa->ioc));
3855
3856 return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg);
3857}
3858
3859/*
3860 * Reset FCoE port statistics
3861 */
3862bfa_status_t
3863bfa_fcport_clear_fcoe_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
3864{
3865 /* Meaningful only for FCoE mode */
3866 bfa_assert(!bfa_ioc_get_fcmode(&bfa->ioc));
3867
3868 return bfa_fcport_clear_stats(bfa, cbfn, cbarg);
3869}
3870
3871void
3872bfa_fcport_qos_get_attr(struct bfa_s *bfa, struct bfa_qos_attr_s *qos_attr)
3873{
3874 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3875
3876 qos_attr->state = fcport->qos_attr.state;
3877 qos_attr->total_bb_cr = be32_to_cpu(fcport->qos_attr.total_bb_cr);
3878}
3879
3880void
3881bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa,
3882 struct bfa_qos_vc_attr_s *qos_vc_attr)
3883{
3884 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3885 struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
3886 u32 i = 0;
3887
3888 qos_vc_attr->total_vc_count = be16_to_cpu(bfa_vc_attr->total_vc_count);
3889 qos_vc_attr->shared_credit = be16_to_cpu(bfa_vc_attr->shared_credit);
3890 qos_vc_attr->elp_opmode_flags =
3891 be32_to_cpu(bfa_vc_attr->elp_opmode_flags);
3892
3893 /* Individual VC info */
3894 while (i < qos_vc_attr->total_vc_count) {
3895 qos_vc_attr->vc_info[i].vc_credit =
3896 bfa_vc_attr->vc_info[i].vc_credit;
3897 qos_vc_attr->vc_info[i].borrow_credit =
3898 bfa_vc_attr->vc_info[i].borrow_credit;
3899 qos_vc_attr->vc_info[i].priority =
3900 bfa_vc_attr->vc_info[i].priority;
3901 ++i;
3902 }
3903}
3904 3745
3905/* 3746/*
3906 * Fetch port attributes. 3747 * Fetch port attributes.
@@ -3924,60 +3765,6 @@ bfa_fcport_is_ratelim(struct bfa_s *bfa)
3924 3765
3925} 3766}
3926 3767
3927void
3928bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off)
3929{
3930 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3931 enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa);
3932
3933 bfa_trc(bfa, on_off);
3934 bfa_trc(bfa, fcport->cfg.qos_enabled);
3935
3936 bfa_trc(bfa, ioc_type);
3937
3938 if (ioc_type == BFA_IOC_TYPE_FC) {
3939 fcport->cfg.qos_enabled = on_off;
3940 /*
3941 * Notify fcpim of the change in QoS state
3942 */
3943 bfa_fcpim_update_ioredirect(bfa);
3944 }
3945}
3946
3947void
3948bfa_fcport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off)
3949{
3950 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3951
3952 bfa_trc(bfa, on_off);
3953 bfa_trc(bfa, fcport->cfg.ratelimit);
3954
3955 fcport->cfg.ratelimit = on_off;
3956 if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN)
3957 fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS;
3958}
3959
3960/*
3961 * Configure default minimum ratelim speed
3962 */
3963bfa_status_t
3964bfa_fcport_cfg_ratelim_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
3965{
3966 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3967
3968 bfa_trc(bfa, speed);
3969
3970 /* Auto and speeds greater than the supported speed, are invalid */
3971 if ((speed == BFA_PORT_SPEED_AUTO) || (speed > fcport->speed_sup)) {
3972 bfa_trc(bfa, fcport->speed_sup);
3973 return BFA_STATUS_UNSUPP_SPEED;
3974 }
3975
3976 fcport->cfg.trl_def_speed = speed;
3977
3978 return BFA_STATUS_OK;
3979}
3980
3981/* 3768/*
3982 * Get default minimum ratelim speed 3769 * Get default minimum ratelim speed
3983 */ 3770 */
@@ -3990,32 +3777,6 @@ bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
3990 return fcport->cfg.trl_def_speed; 3777 return fcport->cfg.trl_def_speed;
3991 3778
3992} 3779}
3993void
3994bfa_fcport_busy(struct bfa_s *bfa, bfa_boolean_t status)
3995{
3996 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3997
3998 bfa_trc(bfa, status);
3999 bfa_trc(bfa, fcport->diag_busy);
4000
4001 fcport->diag_busy = status;
4002}
4003
4004void
4005bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
4006 bfa_boolean_t link_e2e_beacon)
4007{
4008 struct bfa_s *bfa = dev;
4009 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4010
4011 bfa_trc(bfa, beacon);
4012 bfa_trc(bfa, link_e2e_beacon);
4013 bfa_trc(bfa, fcport->beacon);
4014 bfa_trc(bfa, fcport->link_e2e_beacon);
4015
4016 fcport->beacon = beacon;
4017 fcport->link_e2e_beacon = link_e2e_beacon;
4018}
4019 3780
4020bfa_boolean_t 3781bfa_boolean_t
4021bfa_fcport_is_linkup(struct bfa_s *bfa) 3782bfa_fcport_is_linkup(struct bfa_s *bfa)
@@ -4036,63 +3797,6 @@ bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
4036 return fcport->cfg.qos_enabled; 3797 return fcport->cfg.qos_enabled;
4037} 3798}
4038 3799
4039bfa_status_t
4040bfa_trunk_get_attr(struct bfa_s *bfa, struct bfa_trunk_attr_s *attr)
4041
4042{
4043 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4044 struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
4045
4046 bfa_trc(bfa, fcport->cfg.trunked);
4047 bfa_trc(bfa, trunk->attr.state);
4048 *attr = trunk->attr;
4049 attr->port_id = bfa_lps_get_base_pid(bfa);
4050
4051 return BFA_STATUS_OK;
4052}
4053
4054void
4055bfa_trunk_enable_cfg(struct bfa_s *bfa)
4056{
4057 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4058 struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
4059
4060 bfa_trc(bfa, 1);
4061 trunk->attr.state = BFA_TRUNK_OFFLINE;
4062 fcport->cfg.trunked = BFA_TRUE;
4063}
4064
4065bfa_status_t
4066bfa_trunk_enable(struct bfa_s *bfa)
4067{
4068 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4069 struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
4070
4071 bfa_trc(bfa, 1);
4072
4073 trunk->attr.state = BFA_TRUNK_OFFLINE;
4074 bfa_fcport_disable(bfa);
4075 fcport->cfg.trunked = BFA_TRUE;
4076 bfa_fcport_enable(bfa);
4077
4078 return BFA_STATUS_OK;
4079}
4080
4081bfa_status_t
4082bfa_trunk_disable(struct bfa_s *bfa)
4083{
4084 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4085 struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
4086
4087 bfa_trc(bfa, 0);
4088 trunk->attr.state = BFA_TRUNK_DISABLED;
4089 bfa_fcport_disable(bfa);
4090 fcport->cfg.trunked = BFA_FALSE;
4091 bfa_fcport_enable(bfa);
4092 return BFA_STATUS_OK;
4093}
4094
4095
4096/* 3800/*
4097 * Rport State machine functions 3801 * Rport State machine functions
4098 */ 3802 */
@@ -4606,8 +4310,8 @@ bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
4606 mod->rps_list = rp; 4310 mod->rps_list = rp;
4607 mod->num_rports = cfg->fwcfg.num_rports; 4311 mod->num_rports = cfg->fwcfg.num_rports;
4608 4312
4609 bfa_assert(mod->num_rports && 4313 WARN_ON(!mod->num_rports ||
4610 !(mod->num_rports & (mod->num_rports - 1))); 4314 (mod->num_rports & (mod->num_rports - 1)));
4611 4315
4612 for (i = 0; i < mod->num_rports; i++, rp++) { 4316 for (i = 0; i < mod->num_rports; i++, rp++) {
4613 memset(rp, 0, sizeof(struct bfa_rport_s)); 4317 memset(rp, 0, sizeof(struct bfa_rport_s));
@@ -4675,7 +4379,7 @@ bfa_rport_free(struct bfa_rport_s *rport)
4675{ 4379{
4676 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa); 4380 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);
4677 4381
4678 bfa_assert(bfa_q_is_on_q(&mod->rp_active_q, rport)); 4382 WARN_ON(!bfa_q_is_on_q(&mod->rp_active_q, rport));
4679 list_del(&rport->qe); 4383 list_del(&rport->qe);
4680 list_add_tail(&rport->qe, &mod->rp_free_q); 4384 list_add_tail(&rport->qe, &mod->rp_free_q);
4681} 4385}
@@ -4788,13 +4492,13 @@ bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
4788 rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle); 4492 rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
4789 rp->fw_handle = msg.create_rsp->fw_handle; 4493 rp->fw_handle = msg.create_rsp->fw_handle;
4790 rp->qos_attr = msg.create_rsp->qos_attr; 4494 rp->qos_attr = msg.create_rsp->qos_attr;
4791 bfa_assert(msg.create_rsp->status == BFA_STATUS_OK); 4495 WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
4792 bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP); 4496 bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
4793 break; 4497 break;
4794 4498
4795 case BFI_RPORT_I2H_DELETE_RSP: 4499 case BFI_RPORT_I2H_DELETE_RSP:
4796 rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle); 4500 rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
4797 bfa_assert(msg.delete_rsp->status == BFA_STATUS_OK); 4501 WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
4798 bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP); 4502 bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
4799 break; 4503 break;
4800 4504
@@ -4806,7 +4510,7 @@ bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
4806 4510
4807 default: 4511 default:
4808 bfa_trc(bfa, m->mhdr.msg_id); 4512 bfa_trc(bfa, m->mhdr.msg_id);
4809 bfa_assert(0); 4513 WARN_ON(1);
4810 } 4514 }
4811} 4515}
4812 4516
@@ -4828,24 +4532,18 @@ bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
4828 4532
4829 rp->bfa = bfa; 4533 rp->bfa = bfa;
4830 rp->rport_drv = rport_drv; 4534 rp->rport_drv = rport_drv;
4831 bfa_rport_clear_stats(rp); 4535 memset(&rp->stats, 0, sizeof(rp->stats));
4832 4536
4833 bfa_assert(bfa_sm_cmp_state(rp, bfa_rport_sm_uninit)); 4537 WARN_ON(!bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
4834 bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE); 4538 bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);
4835 4539
4836 return rp; 4540 return rp;
4837} 4541}
4838 4542
4839void 4543void
4840bfa_rport_delete(struct bfa_rport_s *rport)
4841{
4842 bfa_sm_send_event(rport, BFA_RPORT_SM_DELETE);
4843}
4844
4845void
4846bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info) 4544bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
4847{ 4545{
4848 bfa_assert(rport_info->max_frmsz != 0); 4546 WARN_ON(rport_info->max_frmsz == 0);
4849 4547
4850 /* 4548 /*
4851 * Some JBODs are seen to be not setting PDU size correctly in PLOGI 4549 * Some JBODs are seen to be not setting PDU size correctly in PLOGI
@@ -4861,43 +4559,15 @@ bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
4861} 4559}
4862 4560
4863void 4561void
4864bfa_rport_offline(struct bfa_rport_s *rport)
4865{
4866 bfa_sm_send_event(rport, BFA_RPORT_SM_OFFLINE);
4867}
4868
4869void
4870bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed) 4562bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
4871{ 4563{
4872 bfa_assert(speed != 0); 4564 WARN_ON(speed == 0);
4873 bfa_assert(speed != BFA_PORT_SPEED_AUTO); 4565 WARN_ON(speed == BFA_PORT_SPEED_AUTO);
4874 4566
4875 rport->rport_info.speed = speed; 4567 rport->rport_info.speed = speed;
4876 bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED); 4568 bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
4877} 4569}
4878 4570
4879void
4880bfa_rport_get_stats(struct bfa_rport_s *rport,
4881 struct bfa_rport_hal_stats_s *stats)
4882{
4883 *stats = rport->stats;
4884}
4885
4886void
4887bfa_rport_get_qos_attr(struct bfa_rport_s *rport,
4888 struct bfa_rport_qos_attr_s *qos_attr)
4889{
4890 qos_attr->qos_priority = rport->qos_attr.qos_priority;
4891 qos_attr->qos_flow_id = be32_to_cpu(rport->qos_attr.qos_flow_id);
4892
4893}
4894
4895void
4896bfa_rport_clear_stats(struct bfa_rport_s *rport)
4897{
4898 memset(&rport->stats, 0, sizeof(rport->stats));
4899}
4900
4901 4571
4902/* 4572/*
4903 * SGPG related functions 4573 * SGPG related functions
@@ -4952,7 +4622,7 @@ bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
4952 sgpg_pa.pa = mod->sgpg_arr_pa; 4622 sgpg_pa.pa = mod->sgpg_arr_pa;
4953 mod->free_sgpgs = mod->num_sgpgs; 4623 mod->free_sgpgs = mod->num_sgpgs;
4954 4624
4955 bfa_assert(!(sgpg_pa.pa & (sizeof(struct bfi_sgpg_s) - 1))); 4625 WARN_ON(sgpg_pa.pa & (sizeof(struct bfi_sgpg_s) - 1));
4956 4626
4957 for (i = 0; i < mod->num_sgpgs; i++) { 4627 for (i = 0; i < mod->num_sgpgs; i++) {
4958 memset(hsgpg, 0, sizeof(*hsgpg)); 4628 memset(hsgpg, 0, sizeof(*hsgpg));
@@ -4993,12 +4663,6 @@ bfa_sgpg_iocdisable(struct bfa_s *bfa)
4993{ 4663{
4994} 4664}
4995 4665
4996
4997
4998/*
4999 * hal_sgpg_public BFA SGPG public functions
5000 */
5001
5002bfa_status_t 4666bfa_status_t
5003bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs) 4667bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs)
5004{ 4668{
@@ -5006,14 +4670,12 @@ bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs)
5006 struct bfa_sgpg_s *hsgpg; 4670 struct bfa_sgpg_s *hsgpg;
5007 int i; 4671 int i;
5008 4672
5009 bfa_trc_fp(bfa, nsgpgs);
5010
5011 if (mod->free_sgpgs < nsgpgs) 4673 if (mod->free_sgpgs < nsgpgs)
5012 return BFA_STATUS_ENOMEM; 4674 return BFA_STATUS_ENOMEM;
5013 4675
5014 for (i = 0; i < nsgpgs; i++) { 4676 for (i = 0; i < nsgpgs; i++) {
5015 bfa_q_deq(&mod->sgpg_q, &hsgpg); 4677 bfa_q_deq(&mod->sgpg_q, &hsgpg);
5016 bfa_assert(hsgpg); 4678 WARN_ON(!hsgpg);
5017 list_add_tail(&hsgpg->qe, sgpg_q); 4679 list_add_tail(&hsgpg->qe, sgpg_q);
5018 } 4680 }
5019 4681
@@ -5027,10 +4689,8 @@ bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
5027 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa); 4689 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
5028 struct bfa_sgpg_wqe_s *wqe; 4690 struct bfa_sgpg_wqe_s *wqe;
5029 4691
5030 bfa_trc_fp(bfa, nsgpg);
5031
5032 mod->free_sgpgs += nsgpg; 4692 mod->free_sgpgs += nsgpg;
5033 bfa_assert(mod->free_sgpgs <= mod->num_sgpgs); 4693 WARN_ON(mod->free_sgpgs > mod->num_sgpgs);
5034 4694
5035 list_splice_tail_init(sgpg_q, &mod->sgpg_q); 4695 list_splice_tail_init(sgpg_q, &mod->sgpg_q);
5036 4696
@@ -5060,8 +4720,8 @@ bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
5060{ 4720{
5061 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa); 4721 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
5062 4722
5063 bfa_assert(nsgpg > 0); 4723 WARN_ON(nsgpg <= 0);
5064 bfa_assert(nsgpg > mod->free_sgpgs); 4724 WARN_ON(nsgpg <= mod->free_sgpgs);
5065 4725
5066 wqe->nsgpg_total = wqe->nsgpg = nsgpg; 4726 wqe->nsgpg_total = wqe->nsgpg = nsgpg;
5067 4727
@@ -5072,7 +4732,7 @@ bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
5072 /* 4732 /*
5073 * no one else is waiting for SGPG 4733 * no one else is waiting for SGPG
5074 */ 4734 */
5075 bfa_assert(list_empty(&mod->sgpg_wait_q)); 4735 WARN_ON(!list_empty(&mod->sgpg_wait_q));
5076 list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q); 4736 list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q);
5077 wqe->nsgpg -= mod->free_sgpgs; 4737 wqe->nsgpg -= mod->free_sgpgs;
5078 mod->free_sgpgs = 0; 4738 mod->free_sgpgs = 0;
@@ -5086,7 +4746,7 @@ bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe)
5086{ 4746{
5087 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa); 4747 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
5088 4748
5089 bfa_assert(bfa_q_is_on_q(&mod->sgpg_wait_q, wqe)); 4749 WARN_ON(!bfa_q_is_on_q(&mod->sgpg_wait_q, wqe));
5090 list_del(&wqe->qe); 4750 list_del(&wqe->qe);
5091 4751
5092 if (wqe->nsgpg_total != wqe->nsgpg) 4752 if (wqe->nsgpg_total != wqe->nsgpg)
@@ -5318,7 +4978,7 @@ uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
5318 uf->data_ptr = buf; 4978 uf->data_ptr = buf;
5319 uf->data_len = m->xfr_len; 4979 uf->data_len = m->xfr_len;
5320 4980
5321 bfa_assert(uf->data_len >= sizeof(struct fchs_s)); 4981 WARN_ON(uf->data_len < sizeof(struct fchs_s));
5322 4982
5323 if (uf->data_len == sizeof(struct fchs_s)) { 4983 if (uf->data_len == sizeof(struct fchs_s)) {
5324 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX, 4984 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX,
@@ -5361,12 +5021,6 @@ bfa_uf_start(struct bfa_s *bfa)
5361 bfa_uf_post_all(BFA_UF_MOD(bfa)); 5021 bfa_uf_post_all(BFA_UF_MOD(bfa));
5362} 5022}
5363 5023
5364
5365
5366/*
5367 * hal_uf_api
5368 */
5369
5370/* 5024/*
5371 * Register handler for all unsolicted recieve frames. 5025 * Register handler for all unsolicted recieve frames.
5372 * 5026 *
@@ -5414,7 +5068,7 @@ bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
5414 5068
5415 default: 5069 default:
5416 bfa_trc(bfa, msg->mhdr.msg_id); 5070 bfa_trc(bfa, msg->mhdr.msg_id);
5417 bfa_assert(0); 5071 WARN_ON(1);
5418 } 5072 }
5419} 5073}
5420 5074
diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h
index e2349d5cdb93..331ad992a581 100644
--- a/drivers/scsi/bfa/bfa_svc.h
+++ b/drivers/scsi/bfa/bfa_svc.h
@@ -220,6 +220,18 @@ void bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
220/* 220/*
221 * RPORT related defines 221 * RPORT related defines
222 */ 222 */
223enum bfa_rport_event {
224 BFA_RPORT_SM_CREATE = 1, /* rport create event */
225 BFA_RPORT_SM_DELETE = 2, /* deleting an existing rport */
226 BFA_RPORT_SM_ONLINE = 3, /* rport is online */
227 BFA_RPORT_SM_OFFLINE = 4, /* rport is offline */
228 BFA_RPORT_SM_FWRSP = 5, /* firmware response */
229 BFA_RPORT_SM_HWFAIL = 6, /* IOC h/w failure */
230 BFA_RPORT_SM_QOS_SCN = 7, /* QoS SCN from firmware */
231 BFA_RPORT_SM_SET_SPEED = 8, /* Set Rport Speed */
232 BFA_RPORT_SM_QRESUME = 9, /* space in requeue queue */
233};
234
223#define BFA_RPORT_MIN 4 235#define BFA_RPORT_MIN 4
224 236
225struct bfa_rport_mod_s { 237struct bfa_rport_mod_s {
@@ -432,6 +444,7 @@ struct bfa_fcport_s {
432 u8 myalpa; /* my ALPA in LOOP topology */ 444 u8 myalpa; /* my ALPA in LOOP topology */
433 u8 rsvd[3]; 445 u8 rsvd[3];
434 struct bfa_port_cfg_s cfg; /* current port configuration */ 446 struct bfa_port_cfg_s cfg; /* current port configuration */
447 bfa_boolean_t use_flash_cfg; /* get port cfg from flash */
435 struct bfa_qos_attr_s qos_attr; /* QoS Attributes */ 448 struct bfa_qos_attr_s qos_attr; /* QoS Attributes */
436 struct bfa_qos_vc_attr_s qos_vc_attr; /* VC info from ELP */ 449 struct bfa_qos_vc_attr_s qos_vc_attr; /* VC info from ELP */
437 struct bfa_reqq_wait_s reqq_wait; 450 struct bfa_reqq_wait_s reqq_wait;
@@ -500,30 +513,9 @@ void bfa_fcport_event_register(struct bfa_s *bfa,
500 void (*event_cbfn) (void *cbarg, 513 void (*event_cbfn) (void *cbarg,
501 enum bfa_port_linkstate event), void *event_cbarg); 514 enum bfa_port_linkstate event), void *event_cbarg);
502bfa_boolean_t bfa_fcport_is_disabled(struct bfa_s *bfa); 515bfa_boolean_t bfa_fcport_is_disabled(struct bfa_s *bfa);
503void bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off);
504void bfa_fcport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off);
505bfa_status_t bfa_fcport_cfg_ratelim_speed(struct bfa_s *bfa,
506 enum bfa_port_speed speed);
507enum bfa_port_speed bfa_fcport_get_ratelim_speed(struct bfa_s *bfa); 516enum bfa_port_speed bfa_fcport_get_ratelim_speed(struct bfa_s *bfa);
508 517
509void bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit); 518void bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit);
510void bfa_fcport_busy(struct bfa_s *bfa, bfa_boolean_t status);
511void bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
512 bfa_boolean_t link_e2e_beacon);
513void bfa_fcport_qos_get_attr(struct bfa_s *bfa,
514 struct bfa_qos_attr_s *qos_attr);
515void bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa,
516 struct bfa_qos_vc_attr_s *qos_vc_attr);
517bfa_status_t bfa_fcport_get_qos_stats(struct bfa_s *bfa,
518 union bfa_fcport_stats_u *stats,
519 bfa_cb_port_t cbfn, void *cbarg);
520bfa_status_t bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn,
521 void *cbarg);
522bfa_status_t bfa_fcport_get_fcoe_stats(struct bfa_s *bfa,
523 union bfa_fcport_stats_u *stats,
524 bfa_cb_port_t cbfn, void *cbarg);
525bfa_status_t bfa_fcport_clear_fcoe_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn,
526 void *cbarg);
527bfa_boolean_t bfa_fcport_is_ratelim(struct bfa_s *bfa); 519bfa_boolean_t bfa_fcport_is_ratelim(struct bfa_s *bfa);
528bfa_boolean_t bfa_fcport_is_linkup(struct bfa_s *bfa); 520bfa_boolean_t bfa_fcport_is_linkup(struct bfa_s *bfa);
529bfa_status_t bfa_fcport_get_stats(struct bfa_s *bfa, 521bfa_status_t bfa_fcport_get_stats(struct bfa_s *bfa,
@@ -537,14 +529,9 @@ bfa_boolean_t bfa_fcport_is_qos_enabled(struct bfa_s *bfa);
537 * bfa rport API functions 529 * bfa rport API functions
538 */ 530 */
539struct bfa_rport_s *bfa_rport_create(struct bfa_s *bfa, void *rport_drv); 531struct bfa_rport_s *bfa_rport_create(struct bfa_s *bfa, void *rport_drv);
540void bfa_rport_delete(struct bfa_rport_s *rport);
541void bfa_rport_online(struct bfa_rport_s *rport, 532void bfa_rport_online(struct bfa_rport_s *rport,
542 struct bfa_rport_info_s *rport_info); 533 struct bfa_rport_info_s *rport_info);
543void bfa_rport_offline(struct bfa_rport_s *rport);
544void bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed); 534void bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed);
545void bfa_rport_get_stats(struct bfa_rport_s *rport,
546 struct bfa_rport_hal_stats_s *stats);
547void bfa_rport_clear_stats(struct bfa_rport_s *rport);
548void bfa_cb_rport_online(void *rport); 535void bfa_cb_rport_online(void *rport);
549void bfa_cb_rport_offline(void *rport); 536void bfa_cb_rport_offline(void *rport);
550void bfa_cb_rport_qos_scn_flowid(void *rport, 537void bfa_cb_rport_qos_scn_flowid(void *rport,
@@ -553,8 +540,6 @@ void bfa_cb_rport_qos_scn_flowid(void *rport,
553void bfa_cb_rport_qos_scn_prio(void *rport, 540void bfa_cb_rport_qos_scn_prio(void *rport,
554 struct bfa_rport_qos_attr_s old_qos_attr, 541 struct bfa_rport_qos_attr_s old_qos_attr,
555 struct bfa_rport_qos_attr_s new_qos_attr); 542 struct bfa_rport_qos_attr_s new_qos_attr);
556void bfa_rport_get_qos_attr(struct bfa_rport_s *rport,
557 struct bfa_rport_qos_attr_s *qos_attr);
558 543
559/* 544/*
560 * bfa fcxp API functions 545 * bfa fcxp API functions
@@ -619,38 +604,18 @@ void bfa_uf_free(struct bfa_uf_s *uf);
619u32 bfa_lps_get_max_vport(struct bfa_s *bfa); 604u32 bfa_lps_get_max_vport(struct bfa_s *bfa);
620struct bfa_lps_s *bfa_lps_alloc(struct bfa_s *bfa); 605struct bfa_lps_s *bfa_lps_alloc(struct bfa_s *bfa);
621void bfa_lps_delete(struct bfa_lps_s *lps); 606void bfa_lps_delete(struct bfa_lps_s *lps);
622void bfa_lps_discard(struct bfa_lps_s *lps);
623void bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, 607void bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa,
624 u16 pdusz, wwn_t pwwn, wwn_t nwwn, 608 u16 pdusz, wwn_t pwwn, wwn_t nwwn,
625 bfa_boolean_t auth_en); 609 bfa_boolean_t auth_en);
626void bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, 610void bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz,
627 wwn_t pwwn, wwn_t nwwn); 611 wwn_t pwwn, wwn_t nwwn);
628void bfa_lps_flogo(struct bfa_lps_s *lps);
629void bfa_lps_fdisclogo(struct bfa_lps_s *lps); 612void bfa_lps_fdisclogo(struct bfa_lps_s *lps);
630u8 bfa_lps_get_tag(struct bfa_lps_s *lps); 613void bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, u32 n2n_pid);
631bfa_boolean_t bfa_lps_is_npiv_en(struct bfa_lps_s *lps);
632bfa_boolean_t bfa_lps_is_fport(struct bfa_lps_s *lps);
633bfa_boolean_t bfa_lps_is_brcd_fabric(struct bfa_lps_s *lps);
634bfa_boolean_t bfa_lps_is_authreq(struct bfa_lps_s *lps);
635bfa_eproto_status_t bfa_lps_get_extstatus(struct bfa_lps_s *lps);
636u32 bfa_lps_get_pid(struct bfa_lps_s *lps);
637u32 bfa_lps_get_base_pid(struct bfa_s *bfa); 614u32 bfa_lps_get_base_pid(struct bfa_s *bfa);
638u8 bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid); 615u8 bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid);
639u16 bfa_lps_get_peer_bbcredit(struct bfa_lps_s *lps);
640wwn_t bfa_lps_get_peer_pwwn(struct bfa_lps_s *lps);
641wwn_t bfa_lps_get_peer_nwwn(struct bfa_lps_s *lps);
642u8 bfa_lps_get_lsrjt_rsn(struct bfa_lps_s *lps);
643u8 bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps);
644mac_t bfa_lps_get_lp_mac(struct bfa_lps_s *lps);
645void bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status); 616void bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status);
646void bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status); 617void bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status);
647void bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg); 618void bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg);
648void bfa_cb_lps_cvl_event(void *bfad, void *uarg); 619void bfa_cb_lps_cvl_event(void *bfad, void *uarg);
649 620
650void bfa_trunk_enable_cfg(struct bfa_s *bfa);
651bfa_status_t bfa_trunk_enable(struct bfa_s *bfa);
652bfa_status_t bfa_trunk_disable(struct bfa_s *bfa);
653bfa_status_t bfa_trunk_get_attr(struct bfa_s *bfa,
654 struct bfa_trunk_attr_s *attr);
655
656#endif /* __BFA_SVC_H__ */ 621#endif /* __BFA_SVC_H__ */
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 6797720213b2..44524cf55d33 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -32,7 +32,6 @@
32#include "bfad_drv.h" 32#include "bfad_drv.h"
33#include "bfad_im.h" 33#include "bfad_im.h"
34#include "bfa_fcs.h" 34#include "bfa_fcs.h"
35#include "bfa_os_inc.h"
36#include "bfa_defs.h" 35#include "bfa_defs.h"
37#include "bfa.h" 36#include "bfa.h"
38 37
@@ -61,12 +60,12 @@ int msix_disable_cb = 0, msix_disable_ct = 0;
61u32 bfi_image_ct_fc_size, bfi_image_ct_cna_size, bfi_image_cb_fc_size; 60u32 bfi_image_ct_fc_size, bfi_image_ct_cna_size, bfi_image_cb_fc_size;
62u32 *bfi_image_ct_fc, *bfi_image_ct_cna, *bfi_image_cb_fc; 61u32 *bfi_image_ct_fc, *bfi_image_ct_cna, *bfi_image_cb_fc;
63 62
64const char *msix_name_ct[] = { 63static const char *msix_name_ct[] = {
65 "cpe0", "cpe1", "cpe2", "cpe3", 64 "cpe0", "cpe1", "cpe2", "cpe3",
66 "rme0", "rme1", "rme2", "rme3", 65 "rme0", "rme1", "rme2", "rme3",
67 "ctrl" }; 66 "ctrl" };
68 67
69const char *msix_name_cb[] = { 68static const char *msix_name_cb[] = {
70 "cpe0", "cpe1", "cpe2", "cpe3", 69 "cpe0", "cpe1", "cpe2", "cpe3",
71 "rme0", "rme1", "rme2", "rme3", 70 "rme0", "rme1", "rme2", "rme3",
72 "eemc", "elpu0", "elpu1", "epss", "mlpu" }; 71 "eemc", "elpu0", "elpu1", "epss", "mlpu" };
@@ -206,7 +205,7 @@ bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event)
206 } 205 }
207 206
208 spin_lock_irqsave(&bfad->bfad_lock, flags); 207 spin_lock_irqsave(&bfad->bfad_lock, flags);
209 bfa_init(&bfad->bfa); 208 bfa_iocfc_init(&bfad->bfa);
210 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 209 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
211 210
212 /* Set up interrupt handler for each vectors */ 211 /* Set up interrupt handler for each vectors */
@@ -533,7 +532,7 @@ bfad_hal_mem_release(struct bfad_s *bfad)
533 (dma_addr_t) meminfo_elem->dma); 532 (dma_addr_t) meminfo_elem->dma);
534 break; 533 break;
535 default: 534 default:
536 bfa_assert(0); 535 WARN_ON(1);
537 break; 536 break;
538 } 537 }
539 } 538 }
@@ -725,7 +724,7 @@ bfad_bfa_tmo(unsigned long data)
725 724
726 spin_lock_irqsave(&bfad->bfad_lock, flags); 725 spin_lock_irqsave(&bfad->bfad_lock, flags);
727 726
728 bfa_timer_tick(&bfad->bfa); 727 bfa_timer_beat(&bfad->bfa.timer_mod);
729 728
730 bfa_comp_deq(&bfad->bfa, &doneq); 729 bfa_comp_deq(&bfad->bfa, &doneq);
731 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 730 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
@@ -882,8 +881,8 @@ bfad_drv_init(struct bfad_s *bfad)
882 goto out_hal_mem_alloc_failure; 881 goto out_hal_mem_alloc_failure;
883 } 882 }
884 883
885 bfa_init_trc(&bfad->bfa, bfad->trcmod); 884 bfad->bfa.trcmod = bfad->trcmod;
886 bfa_init_plog(&bfad->bfa, &bfad->plog_buf); 885 bfad->bfa.plog = &bfad->plog_buf;
887 bfa_plog_init(&bfad->plog_buf); 886 bfa_plog_init(&bfad->plog_buf);
888 bfa_plog_str(&bfad->plog_buf, BFA_PL_MID_DRVR, BFA_PL_EID_DRIVER_START, 887 bfa_plog_str(&bfad->plog_buf, BFA_PL_MID_DRVR, BFA_PL_EID_DRIVER_START,
889 0, "Driver Attach"); 888 0, "Driver Attach");
@@ -893,9 +892,9 @@ bfad_drv_init(struct bfad_s *bfad)
893 892
894 /* FCS INIT */ 893 /* FCS INIT */
895 spin_lock_irqsave(&bfad->bfad_lock, flags); 894 spin_lock_irqsave(&bfad->bfad_lock, flags);
896 bfa_fcs_trc_init(&bfad->bfa_fcs, bfad->trcmod); 895 bfad->bfa_fcs.trcmod = bfad->trcmod;
897 bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE); 896 bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE);
898 bfa_fcs_set_fdmi_param(&bfad->bfa_fcs, fdmi_enable); 897 bfad->bfa_fcs.fdmi_enabled = fdmi_enable;
899 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 898 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
900 899
901 bfad->bfad_flags |= BFAD_DRV_INIT_DONE; 900 bfad->bfad_flags |= BFAD_DRV_INIT_DONE;
@@ -913,7 +912,7 @@ bfad_drv_uninit(struct bfad_s *bfad)
913 912
914 spin_lock_irqsave(&bfad->bfad_lock, flags); 913 spin_lock_irqsave(&bfad->bfad_lock, flags);
915 init_completion(&bfad->comp); 914 init_completion(&bfad->comp);
916 bfa_stop(&bfad->bfa); 915 bfa_iocfc_stop(&bfad->bfa);
917 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 916 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
918 wait_for_completion(&bfad->comp); 917 wait_for_completion(&bfad->comp);
919 918
@@ -932,8 +931,8 @@ bfad_drv_start(struct bfad_s *bfad)
932 unsigned long flags; 931 unsigned long flags;
933 932
934 spin_lock_irqsave(&bfad->bfad_lock, flags); 933 spin_lock_irqsave(&bfad->bfad_lock, flags);
935 bfa_start(&bfad->bfa); 934 bfa_iocfc_start(&bfad->bfa);
936 bfa_fcs_start(&bfad->bfa_fcs); 935 bfa_fcs_fabric_modstart(&bfad->bfa_fcs);
937 bfad->bfad_flags |= BFAD_HAL_START_DONE; 936 bfad->bfad_flags |= BFAD_HAL_START_DONE;
938 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 937 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
939 938
@@ -963,7 +962,7 @@ bfad_stop(struct bfad_s *bfad)
963 962
964 spin_lock_irqsave(&bfad->bfad_lock, flags); 963 spin_lock_irqsave(&bfad->bfad_lock, flags);
965 init_completion(&bfad->comp); 964 init_completion(&bfad->comp);
966 bfa_stop(&bfad->bfa); 965 bfa_iocfc_stop(&bfad->bfa);
967 bfad->bfad_flags &= ~BFAD_HAL_START_DONE; 966 bfad->bfad_flags &= ~BFAD_HAL_START_DONE;
968 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 967 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
969 wait_for_completion(&bfad->comp); 968 wait_for_completion(&bfad->comp);
@@ -1102,15 +1101,15 @@ bfad_start_ops(struct bfad_s *bfad) {
1102 1101
1103 /* 1102 /*
1104 * If bfa_linkup_delay is set to -1 default; try to retrive the 1103 * If bfa_linkup_delay is set to -1 default; try to retrive the
1105 * value using the bfad_os_get_linkup_delay(); else use the 1104 * value using the bfad_get_linkup_delay(); else use the
1106 * passed in module param value as the bfa_linkup_delay. 1105 * passed in module param value as the bfa_linkup_delay.
1107 */ 1106 */
1108 if (bfa_linkup_delay < 0) { 1107 if (bfa_linkup_delay < 0) {
1109 bfa_linkup_delay = bfad_os_get_linkup_delay(bfad); 1108 bfa_linkup_delay = bfad_get_linkup_delay(bfad);
1110 bfad_os_rport_online_wait(bfad); 1109 bfad_rport_online_wait(bfad);
1111 bfa_linkup_delay = -1; 1110 bfa_linkup_delay = -1;
1112 } else 1111 } else
1113 bfad_os_rport_online_wait(bfad); 1112 bfad_rport_online_wait(bfad);
1114 1113
1115 BFA_LOG(KERN_INFO, bfad, bfa_log_level, "bfa device claimed\n"); 1114 BFA_LOG(KERN_INFO, bfad, bfa_log_level, "bfa device claimed\n");
1116 1115
@@ -1167,7 +1166,6 @@ bfad_intx(int irq, void *dev_id)
1167 spin_lock_irqsave(&bfad->bfad_lock, flags); 1166 spin_lock_irqsave(&bfad->bfad_lock, flags);
1168 bfa_comp_free(&bfad->bfa, &doneq); 1167 bfa_comp_free(&bfad->bfa, &doneq);
1169 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1168 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1170 bfa_trc_fp(bfad, irq);
1171 } 1169 }
1172 1170
1173 return IRQ_HANDLED; 1171 return IRQ_HANDLED;
@@ -1524,7 +1522,7 @@ bfad_init(void)
1524 if (strcmp(FCPI_NAME, " fcpim") == 0) 1522 if (strcmp(FCPI_NAME, " fcpim") == 0)
1525 supported_fc4s |= BFA_LPORT_ROLE_FCP_IM; 1523 supported_fc4s |= BFA_LPORT_ROLE_FCP_IM;
1526 1524
1527 bfa_ioc_auto_recover(ioc_auto_recover); 1525 bfa_auto_recover = ioc_auto_recover;
1528 bfa_fcs_rport_set_del_timeout(rport_del_timeout); 1526 bfa_fcs_rport_set_del_timeout(rport_del_timeout);
1529 1527
1530 error = pci_register_driver(&bfad_pci_driver); 1528 error = pci_register_driver(&bfad_pci_driver);
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index ed9fff440b5c..a94ea4235433 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -25,7 +25,7 @@
25/* 25/*
26 * FC transport template entry, get SCSI target port ID. 26 * FC transport template entry, get SCSI target port ID.
27 */ 27 */
28void 28static void
29bfad_im_get_starget_port_id(struct scsi_target *starget) 29bfad_im_get_starget_port_id(struct scsi_target *starget)
30{ 30{
31 struct Scsi_Host *shost; 31 struct Scsi_Host *shost;
@@ -40,7 +40,7 @@ bfad_im_get_starget_port_id(struct scsi_target *starget)
40 bfad = im_port->bfad; 40 bfad = im_port->bfad;
41 spin_lock_irqsave(&bfad->bfad_lock, flags); 41 spin_lock_irqsave(&bfad->bfad_lock, flags);
42 42
43 itnim = bfad_os_get_itnim(im_port, starget->id); 43 itnim = bfad_get_itnim(im_port, starget->id);
44 if (itnim) 44 if (itnim)
45 fc_id = bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim); 45 fc_id = bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim);
46 46
@@ -51,7 +51,7 @@ bfad_im_get_starget_port_id(struct scsi_target *starget)
51/* 51/*
52 * FC transport template entry, get SCSI target nwwn. 52 * FC transport template entry, get SCSI target nwwn.
53 */ 53 */
54void 54static void
55bfad_im_get_starget_node_name(struct scsi_target *starget) 55bfad_im_get_starget_node_name(struct scsi_target *starget)
56{ 56{
57 struct Scsi_Host *shost; 57 struct Scsi_Host *shost;
@@ -66,7 +66,7 @@ bfad_im_get_starget_node_name(struct scsi_target *starget)
66 bfad = im_port->bfad; 66 bfad = im_port->bfad;
67 spin_lock_irqsave(&bfad->bfad_lock, flags); 67 spin_lock_irqsave(&bfad->bfad_lock, flags);
68 68
69 itnim = bfad_os_get_itnim(im_port, starget->id); 69 itnim = bfad_get_itnim(im_port, starget->id);
70 if (itnim) 70 if (itnim)
71 node_name = bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim); 71 node_name = bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim);
72 72
@@ -77,7 +77,7 @@ bfad_im_get_starget_node_name(struct scsi_target *starget)
77/* 77/*
78 * FC transport template entry, get SCSI target pwwn. 78 * FC transport template entry, get SCSI target pwwn.
79 */ 79 */
80void 80static void
81bfad_im_get_starget_port_name(struct scsi_target *starget) 81bfad_im_get_starget_port_name(struct scsi_target *starget)
82{ 82{
83 struct Scsi_Host *shost; 83 struct Scsi_Host *shost;
@@ -92,7 +92,7 @@ bfad_im_get_starget_port_name(struct scsi_target *starget)
92 bfad = im_port->bfad; 92 bfad = im_port->bfad;
93 spin_lock_irqsave(&bfad->bfad_lock, flags); 93 spin_lock_irqsave(&bfad->bfad_lock, flags);
94 94
95 itnim = bfad_os_get_itnim(im_port, starget->id); 95 itnim = bfad_get_itnim(im_port, starget->id);
96 if (itnim) 96 if (itnim)
97 port_name = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim); 97 port_name = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim);
98 98
@@ -103,7 +103,7 @@ bfad_im_get_starget_port_name(struct scsi_target *starget)
103/* 103/*
104 * FC transport template entry, get SCSI host port ID. 104 * FC transport template entry, get SCSI host port ID.
105 */ 105 */
106void 106static void
107bfad_im_get_host_port_id(struct Scsi_Host *shost) 107bfad_im_get_host_port_id(struct Scsi_Host *shost)
108{ 108{
109 struct bfad_im_port_s *im_port = 109 struct bfad_im_port_s *im_port =
@@ -111,7 +111,7 @@ bfad_im_get_host_port_id(struct Scsi_Host *shost)
111 struct bfad_port_s *port = im_port->port; 111 struct bfad_port_s *port = im_port->port;
112 112
113 fc_host_port_id(shost) = 113 fc_host_port_id(shost) =
114 bfa_os_hton3b(bfa_fcs_lport_get_fcid(port->fcs_port)); 114 bfa_hton3b(bfa_fcs_lport_get_fcid(port->fcs_port));
115} 115}
116 116
117/* 117/*
@@ -487,7 +487,7 @@ bfad_im_vport_delete(struct fc_vport *fc_vport)
487 wait_for_completion(vport->comp_del); 487 wait_for_completion(vport->comp_del);
488 488
489free_scsi_host: 489free_scsi_host:
490 bfad_os_scsi_host_free(bfad, im_port); 490 bfad_scsi_host_free(bfad, im_port);
491 491
492 kfree(vport); 492 kfree(vport);
493 493
diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
index 1fedeeb4ac1f..c66e32eced7b 100644
--- a/drivers/scsi/bfa/bfad_debugfs.c
+++ b/drivers/scsi/bfa/bfad_debugfs.c
@@ -90,7 +90,7 @@ bfad_debugfs_open_fwtrc(struct inode *inode, struct file *file)
90 memset(fw_debug->debug_buffer, 0, fw_debug->buffer_len); 90 memset(fw_debug->debug_buffer, 0, fw_debug->buffer_len);
91 91
92 spin_lock_irqsave(&bfad->bfad_lock, flags); 92 spin_lock_irqsave(&bfad->bfad_lock, flags);
93 rc = bfa_debug_fwtrc(&bfad->bfa, 93 rc = bfa_ioc_debug_fwtrc(&bfad->bfa.ioc,
94 fw_debug->debug_buffer, 94 fw_debug->debug_buffer,
95 &fw_debug->buffer_len); 95 &fw_debug->buffer_len);
96 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 96 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
@@ -134,7 +134,7 @@ bfad_debugfs_open_fwsave(struct inode *inode, struct file *file)
134 memset(fw_debug->debug_buffer, 0, fw_debug->buffer_len); 134 memset(fw_debug->debug_buffer, 0, fw_debug->buffer_len);
135 135
136 spin_lock_irqsave(&bfad->bfad_lock, flags); 136 spin_lock_irqsave(&bfad->bfad_lock, flags);
137 rc = bfa_debug_fwsave(&bfad->bfa, 137 rc = bfa_ioc_debug_fwsave(&bfad->bfa.ioc,
138 fw_debug->debug_buffer, 138 fw_debug->debug_buffer,
139 &fw_debug->buffer_len); 139 &fw_debug->buffer_len);
140 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 140 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
@@ -208,7 +208,7 @@ bfad_debugfs_read(struct file *file, char __user *buf,
208 if (!debug || !debug->debug_buffer) 208 if (!debug || !debug->debug_buffer)
209 return 0; 209 return 0;
210 210
211 return memory_read_from_buffer(buf, nbytes, pos, 211 return simple_read_from_buffer(buf, nbytes, pos,
212 debug->debug_buffer, debug->buffer_len); 212 debug->debug_buffer, debug->buffer_len);
213} 213}
214 214
@@ -254,7 +254,7 @@ bfad_debugfs_read_regrd(struct file *file, char __user *buf,
254 if (!bfad->regdata) 254 if (!bfad->regdata)
255 return 0; 255 return 0;
256 256
257 rc = memory_read_from_buffer(buf, nbytes, pos, 257 rc = simple_read_from_buffer(buf, nbytes, pos,
258 bfad->regdata, bfad->reglen); 258 bfad->regdata, bfad->reglen);
259 259
260 if ((*pos + nbytes) >= bfad->reglen) { 260 if ((*pos + nbytes) >= bfad->reglen) {
@@ -279,15 +279,31 @@ bfad_debugfs_write_regrd(struct file *file, const char __user *buf,
279 u32 *regbuf; 279 u32 *regbuf;
280 void __iomem *rb, *reg_addr; 280 void __iomem *rb, *reg_addr;
281 unsigned long flags; 281 unsigned long flags;
282 void *kern_buf;
282 283
283 rc = sscanf(buf, "%x:%x", &addr, &len); 284 kern_buf = kzalloc(nbytes, GFP_KERNEL);
285
286 if (!kern_buf) {
287 printk(KERN_INFO "bfad[%d]: Failed to allocate buffer\n",
288 bfad->inst_no);
289 return -ENOMEM;
290 }
291
292 if (copy_from_user(kern_buf, (void __user *)buf, nbytes)) {
293 kfree(kern_buf);
294 return -ENOMEM;
295 }
296
297 rc = sscanf(kern_buf, "%x:%x", &addr, &len);
284 if (rc < 2) { 298 if (rc < 2) {
285 printk(KERN_INFO 299 printk(KERN_INFO
286 "bfad[%d]: %s failed to read user buf\n", 300 "bfad[%d]: %s failed to read user buf\n",
287 bfad->inst_no, __func__); 301 bfad->inst_no, __func__);
302 kfree(kern_buf);
288 return -EINVAL; 303 return -EINVAL;
289 } 304 }
290 305
306 kfree(kern_buf);
291 kfree(bfad->regdata); 307 kfree(bfad->regdata);
292 bfad->regdata = NULL; 308 bfad->regdata = NULL;
293 bfad->reglen = 0; 309 bfad->reglen = 0;
@@ -339,14 +355,30 @@ bfad_debugfs_write_regwr(struct file *file, const char __user *buf,
339 int addr, val, rc; 355 int addr, val, rc;
340 void __iomem *reg_addr; 356 void __iomem *reg_addr;
341 unsigned long flags; 357 unsigned long flags;
358 void *kern_buf;
359
360 kern_buf = kzalloc(nbytes, GFP_KERNEL);
361
362 if (!kern_buf) {
363 printk(KERN_INFO "bfad[%d]: Failed to allocate buffer\n",
364 bfad->inst_no);
365 return -ENOMEM;
366 }
367
368 if (copy_from_user(kern_buf, (void __user *)buf, nbytes)) {
369 kfree(kern_buf);
370 return -ENOMEM;
371 }
342 372
343 rc = sscanf(buf, "%x:%x", &addr, &val); 373 rc = sscanf(kern_buf, "%x:%x", &addr, &val);
344 if (rc < 2) { 374 if (rc < 2) {
345 printk(KERN_INFO 375 printk(KERN_INFO
346 "bfad[%d]: %s failed to read user buf\n", 376 "bfad[%d]: %s failed to read user buf\n",
347 bfad->inst_no, __func__); 377 bfad->inst_no, __func__);
378 kfree(kern_buf);
348 return -EINVAL; 379 return -EINVAL;
349 } 380 }
381 kfree(kern_buf);
350 382
351 addr &= BFA_REG_ADDRMSK(bfa); /* offset only 17 bit and word align */ 383 addr &= BFA_REG_ADDRMSK(bfa); /* offset only 17 bit and word align */
352 384
@@ -359,7 +391,7 @@ bfad_debugfs_write_regwr(struct file *file, const char __user *buf,
359 return -EINVAL; 391 return -EINVAL;
360 } 392 }
361 393
362 reg_addr = (u32 *) ((u8 *) bfa_ioc_bar0(ioc) + addr); 394 reg_addr = (bfa_ioc_bar0(ioc)) + addr;
363 spin_lock_irqsave(&bfad->bfad_lock, flags); 395 spin_lock_irqsave(&bfad->bfad_lock, flags);
364 writel(val, reg_addr); 396 writel(val, reg_addr);
365 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 397 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index d5ce2349ac59..7f9ea90254cd 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -26,7 +26,23 @@
26#ifndef __BFAD_DRV_H__ 26#ifndef __BFAD_DRV_H__
27#define __BFAD_DRV_H__ 27#define __BFAD_DRV_H__
28 28
29#include "bfa_os_inc.h" 29#include <linux/types.h>
30#include <linux/version.h>
31#include <linux/pci.h>
32#include <linux/dma-mapping.h>
33#include <linux/idr.h>
34#include <linux/interrupt.h>
35#include <linux/cdev.h>
36#include <linux/fs.h>
37#include <linux/delay.h>
38#include <linux/vmalloc.h>
39#include <linux/workqueue.h>
40#include <linux/bitops.h>
41#include <scsi/scsi.h>
42#include <scsi/scsi_host.h>
43#include <scsi/scsi_tcq.h>
44#include <scsi/scsi_transport_fc.h>
45#include <scsi/scsi_transport.h>
30 46
31#include "bfa_modules.h" 47#include "bfa_modules.h"
32#include "bfa_fcs.h" 48#include "bfa_fcs.h"
@@ -39,7 +55,7 @@
39#ifdef BFA_DRIVER_VERSION 55#ifdef BFA_DRIVER_VERSION
40#define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION 56#define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION
41#else 57#else
42#define BFAD_DRIVER_VERSION "2.3.2.0" 58#define BFAD_DRIVER_VERSION "2.3.2.3"
43#endif 59#endif
44 60
45#define BFAD_PROTO_NAME FCPI_NAME 61#define BFAD_PROTO_NAME FCPI_NAME
@@ -263,28 +279,21 @@ struct bfad_hal_comp {
263 */ 279 */
264#define nextLowerInt(x) \ 280#define nextLowerInt(x) \
265do { \ 281do { \
266 int i; \ 282 int __i; \
267 (*x)--; \ 283 (*x)--; \
268 for (i = 1; i < (sizeof(int)*8); i <<= 1) \ 284 for (__i = 1; __i < (sizeof(int)*8); __i <<= 1) \
269 (*x) = (*x) | (*x) >> i; \ 285 (*x) = (*x) | (*x) >> __i; \
270 (*x)++; \ 286 (*x)++; \
271 (*x) = (*x) >> 1; \ 287 (*x) = (*x) >> 1; \
272} while (0) 288} while (0)
273 289
274 290
275#define list_remove_head(list, entry, type, member) \ 291#define BFA_LOG(level, bfad, mask, fmt, arg...) \
276do { \ 292do { \
277 entry = NULL; \ 293 if (((mask) == 4) || (level[1] <= '4')) \
278 if (!list_empty(list)) { \ 294 dev_printk(level, &((bfad)->pcidev)->dev, fmt, ##arg); \
279 entry = list_entry((list)->next, type, member); \
280 list_del_init(&entry->member); \
281 } \
282} while (0) 295} while (0)
283 296
284#define list_get_first(list, type, member) \
285((list_empty(list)) ? NULL : \
286 list_entry((list)->next, type, member))
287
288bfa_status_t bfad_vport_create(struct bfad_s *bfad, u16 vf_id, 297bfa_status_t bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
289 struct bfa_lport_cfg_s *port_cfg, 298 struct bfa_lport_cfg_s *port_cfg,
290 struct device *dev); 299 struct device *dev);
@@ -316,8 +325,8 @@ void bfad_debugfs_exit(struct bfad_port_s *port);
316 325
317void bfad_pci_remove(struct pci_dev *pdev); 326void bfad_pci_remove(struct pci_dev *pdev);
318int bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid); 327int bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid);
319void bfad_os_rport_online_wait(struct bfad_s *bfad); 328void bfad_rport_online_wait(struct bfad_s *bfad);
320int bfad_os_get_linkup_delay(struct bfad_s *bfad); 329int bfad_get_linkup_delay(struct bfad_s *bfad);
321int bfad_install_msix_handler(struct bfad_s *bfad); 330int bfad_install_msix_handler(struct bfad_s *bfad);
322 331
323extern struct idr bfad_im_port_index; 332extern struct idr bfad_im_port_index;
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index fbad5e9b2402..c2b36179e8e8 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -21,7 +21,6 @@
21 21
22#include "bfad_drv.h" 22#include "bfad_drv.h"
23#include "bfad_im.h" 23#include "bfad_im.h"
24#include "bfa_cb_ioim.h"
25#include "bfa_fcs.h" 24#include "bfa_fcs.h"
26 25
27BFA_TRC_FILE(LDRV, IM); 26BFA_TRC_FILE(LDRV, IM);
@@ -93,10 +92,10 @@ bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio,
93 if (!cmnd->result && itnim && 92 if (!cmnd->result && itnim &&
94 (bfa_lun_queue_depth > cmnd->device->queue_depth)) { 93 (bfa_lun_queue_depth > cmnd->device->queue_depth)) {
95 /* Queue depth adjustment for good status completion */ 94 /* Queue depth adjustment for good status completion */
96 bfad_os_ramp_up_qdepth(itnim, cmnd->device); 95 bfad_ramp_up_qdepth(itnim, cmnd->device);
97 } else if (cmnd->result == SAM_STAT_TASK_SET_FULL && itnim) { 96 } else if (cmnd->result == SAM_STAT_TASK_SET_FULL && itnim) {
98 /* qfull handling */ 97 /* qfull handling */
99 bfad_os_handle_qfull(itnim, cmnd->device); 98 bfad_handle_qfull(itnim, cmnd->device);
100 } 99 }
101 } 100 }
102 101
@@ -124,7 +123,7 @@ bfa_cb_ioim_good_comp(void *drv, struct bfad_ioim_s *dio)
124 if (itnim_data) { 123 if (itnim_data) {
125 itnim = itnim_data->itnim; 124 itnim = itnim_data->itnim;
126 if (itnim) 125 if (itnim)
127 bfad_os_ramp_up_qdepth(itnim, cmnd->device); 126 bfad_ramp_up_qdepth(itnim, cmnd->device);
128 } 127 }
129 } 128 }
130 129
@@ -183,7 +182,7 @@ bfad_im_info(struct Scsi_Host *shost)
183 bfa_get_adapter_model(bfa, model); 182 bfa_get_adapter_model(bfa, model);
184 183
185 memset(bfa_buf, 0, sizeof(bfa_buf)); 184 memset(bfa_buf, 0, sizeof(bfa_buf));
186 if (ioc->ctdev) 185 if (ioc->ctdev && !ioc->fcmode)
187 snprintf(bfa_buf, sizeof(bfa_buf), 186 snprintf(bfa_buf, sizeof(bfa_buf),
188 "Brocade FCOE Adapter, " "model: %s hwpath: %s driver: %s", 187 "Brocade FCOE Adapter, " "model: %s hwpath: %s driver: %s",
189 model, bfad->pci_name, BFAD_DRIVER_VERSION); 188 model, bfad->pci_name, BFAD_DRIVER_VERSION);
@@ -258,6 +257,7 @@ bfad_im_target_reset_send(struct bfad_s *bfad, struct scsi_cmnd *cmnd,
258 struct bfa_tskim_s *tskim; 257 struct bfa_tskim_s *tskim;
259 struct bfa_itnim_s *bfa_itnim; 258 struct bfa_itnim_s *bfa_itnim;
260 bfa_status_t rc = BFA_STATUS_OK; 259 bfa_status_t rc = BFA_STATUS_OK;
260 struct scsi_lun scsilun;
261 261
262 tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd); 262 tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd);
263 if (!tskim) { 263 if (!tskim) {
@@ -274,7 +274,8 @@ bfad_im_target_reset_send(struct bfad_s *bfad, struct scsi_cmnd *cmnd,
274 cmnd->host_scribble = NULL; 274 cmnd->host_scribble = NULL;
275 cmnd->SCp.Status = 0; 275 cmnd->SCp.Status = 0;
276 bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim); 276 bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim);
277 bfa_tskim_start(tskim, bfa_itnim, (lun_t)0, 277 memset(&scsilun, 0, sizeof(scsilun));
278 bfa_tskim_start(tskim, bfa_itnim, scsilun,
278 FCP_TM_TARGET_RESET, BFAD_TARGET_RESET_TMO); 279 FCP_TM_TARGET_RESET, BFAD_TARGET_RESET_TMO);
279out: 280out:
280 return rc; 281 return rc;
@@ -301,6 +302,7 @@ bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd)
301 int rc = SUCCESS; 302 int rc = SUCCESS;
302 unsigned long flags; 303 unsigned long flags;
303 enum bfi_tskim_status task_status; 304 enum bfi_tskim_status task_status;
305 struct scsi_lun scsilun;
304 306
305 spin_lock_irqsave(&bfad->bfad_lock, flags); 307 spin_lock_irqsave(&bfad->bfad_lock, flags);
306 itnim = itnim_data->itnim; 308 itnim = itnim_data->itnim;
@@ -327,8 +329,8 @@ bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd)
327 cmnd->SCp.ptr = (char *)&wq; 329 cmnd->SCp.ptr = (char *)&wq;
328 cmnd->SCp.Status = 0; 330 cmnd->SCp.Status = 0;
329 bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim); 331 bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim);
330 bfa_tskim_start(tskim, bfa_itnim, 332 int_to_scsilun(cmnd->device->lun, &scsilun);
331 bfad_int_to_lun(cmnd->device->lun), 333 bfa_tskim_start(tskim, bfa_itnim, scsilun,
332 FCP_TM_LUN_RESET, BFAD_LUN_RESET_TMO); 334 FCP_TM_LUN_RESET, BFAD_LUN_RESET_TMO);
333 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 335 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
334 336
@@ -364,7 +366,7 @@ bfad_im_reset_bus_handler(struct scsi_cmnd *cmnd)
364 366
365 spin_lock_irqsave(&bfad->bfad_lock, flags); 367 spin_lock_irqsave(&bfad->bfad_lock, flags);
366 for (i = 0; i < MAX_FCP_TARGET; i++) { 368 for (i = 0; i < MAX_FCP_TARGET; i++) {
367 itnim = bfad_os_get_itnim(im_port, i); 369 itnim = bfad_get_itnim(im_port, i);
368 if (itnim) { 370 if (itnim) {
369 cmnd->SCp.ptr = (char *)&wq; 371 cmnd->SCp.ptr = (char *)&wq;
370 rc = bfad_im_target_reset_send(bfad, cmnd, itnim); 372 rc = bfad_im_target_reset_send(bfad, cmnd, itnim);
@@ -447,7 +449,7 @@ bfa_fcb_itnim_free(struct bfad_s *bfad, struct bfad_itnim_s *itnim_drv)
447 struct bfad_im_s *im = itnim_drv->im; 449 struct bfad_im_s *im = itnim_drv->im;
448 450
449 /* online to free state transtion should not happen */ 451 /* online to free state transtion should not happen */
450 bfa_assert(itnim_drv->state != ITNIM_STATE_ONLINE); 452 WARN_ON(itnim_drv->state == ITNIM_STATE_ONLINE);
451 453
452 itnim_drv->queue_work = 1; 454 itnim_drv->queue_work = 1;
453 /* offline request is not yet done, use the same request to free */ 455 /* offline request is not yet done, use the same request to free */
@@ -545,7 +547,7 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
545 547
546 mutex_unlock(&bfad_mutex); 548 mutex_unlock(&bfad_mutex);
547 549
548 im_port->shost = bfad_os_scsi_host_alloc(im_port, bfad); 550 im_port->shost = bfad_scsi_host_alloc(im_port, bfad);
549 if (!im_port->shost) { 551 if (!im_port->shost) {
550 error = 1; 552 error = 1;
551 goto out_free_idr; 553 goto out_free_idr;
@@ -571,7 +573,7 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
571 } 573 }
572 574
573 /* setup host fixed attribute if the lk supports */ 575 /* setup host fixed attribute if the lk supports */
574 bfad_os_fc_host_init(im_port); 576 bfad_fc_host_init(im_port);
575 577
576 return 0; 578 return 0;
577 579
@@ -662,7 +664,7 @@ bfad_im_port_clean(struct bfad_im_port_s *im_port)
662 } 664 }
663 665
664 /* the itnim_mapped_list must be empty at this time */ 666 /* the itnim_mapped_list must be empty at this time */
665 bfa_assert(list_empty(&im_port->itnim_mapped_list)); 667 WARN_ON(!list_empty(&im_port->itnim_mapped_list));
666 668
667 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 669 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
668} 670}
@@ -682,7 +684,7 @@ bfad_im_probe(struct bfad_s *bfad)
682 bfad->im = im; 684 bfad->im = im;
683 im->bfad = bfad; 685 im->bfad = bfad;
684 686
685 if (bfad_os_thread_workq(bfad) != BFA_STATUS_OK) { 687 if (bfad_thread_workq(bfad) != BFA_STATUS_OK) {
686 kfree(im); 688 kfree(im);
687 rc = BFA_STATUS_FAILED; 689 rc = BFA_STATUS_FAILED;
688 } 690 }
@@ -695,14 +697,14 @@ void
695bfad_im_probe_undo(struct bfad_s *bfad) 697bfad_im_probe_undo(struct bfad_s *bfad)
696{ 698{
697 if (bfad->im) { 699 if (bfad->im) {
698 bfad_os_destroy_workq(bfad->im); 700 bfad_destroy_workq(bfad->im);
699 kfree(bfad->im); 701 kfree(bfad->im);
700 bfad->im = NULL; 702 bfad->im = NULL;
701 } 703 }
702} 704}
703 705
704struct Scsi_Host * 706struct Scsi_Host *
705bfad_os_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad) 707bfad_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
706{ 708{
707 struct scsi_host_template *sht; 709 struct scsi_host_template *sht;
708 710
@@ -717,7 +719,7 @@ bfad_os_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
717} 719}
718 720
719void 721void
720bfad_os_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port) 722bfad_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
721{ 723{
722 if (!(im_port->flags & BFAD_PORT_DELETE)) 724 if (!(im_port->flags & BFAD_PORT_DELETE))
723 flush_workqueue(bfad->im->drv_workq); 725 flush_workqueue(bfad->im->drv_workq);
@@ -727,7 +729,7 @@ bfad_os_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
727} 729}
728 730
729void 731void
730bfad_os_destroy_workq(struct bfad_im_s *im) 732bfad_destroy_workq(struct bfad_im_s *im)
731{ 733{
732 if (im && im->drv_workq) { 734 if (im && im->drv_workq) {
733 flush_workqueue(im->drv_workq); 735 flush_workqueue(im->drv_workq);
@@ -737,7 +739,7 @@ bfad_os_destroy_workq(struct bfad_im_s *im)
737} 739}
738 740
739bfa_status_t 741bfa_status_t
740bfad_os_thread_workq(struct bfad_s *bfad) 742bfad_thread_workq(struct bfad_s *bfad)
741{ 743{
742 struct bfad_im_s *im = bfad->im; 744 struct bfad_im_s *im = bfad->im;
743 745
@@ -841,7 +843,7 @@ bfad_im_module_exit(void)
841} 843}
842 844
843void 845void
844bfad_os_ramp_up_qdepth(struct bfad_itnim_s *itnim, struct scsi_device *sdev) 846bfad_ramp_up_qdepth(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
845{ 847{
846 struct scsi_device *tmp_sdev; 848 struct scsi_device *tmp_sdev;
847 849
@@ -869,7 +871,7 @@ bfad_os_ramp_up_qdepth(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
869} 871}
870 872
871void 873void
872bfad_os_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev) 874bfad_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
873{ 875{
874 struct scsi_device *tmp_sdev; 876 struct scsi_device *tmp_sdev;
875 877
@@ -883,7 +885,7 @@ bfad_os_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
883} 885}
884 886
885struct bfad_itnim_s * 887struct bfad_itnim_s *
886bfad_os_get_itnim(struct bfad_im_port_s *im_port, int id) 888bfad_get_itnim(struct bfad_im_port_s *im_port, int id)
887{ 889{
888 struct bfad_itnim_s *itnim = NULL; 890 struct bfad_itnim_s *itnim = NULL;
889 891
@@ -922,7 +924,7 @@ bfad_im_supported_speeds(struct bfa_s *bfa)
922 if (!ioc_attr) 924 if (!ioc_attr)
923 return 0; 925 return 0;
924 926
925 bfa_get_attr(bfa, ioc_attr); 927 bfa_ioc_get_attr(&bfa->ioc, ioc_attr);
926 if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_8GBPS) { 928 if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_8GBPS) {
927 if (ioc_attr->adapter_attr.is_mezz) { 929 if (ioc_attr->adapter_attr.is_mezz) {
928 supported_speed |= FC_PORTSPEED_8GBIT | 930 supported_speed |= FC_PORTSPEED_8GBIT |
@@ -944,7 +946,7 @@ bfad_im_supported_speeds(struct bfa_s *bfa)
944} 946}
945 947
946void 948void
947bfad_os_fc_host_init(struct bfad_im_port_s *im_port) 949bfad_fc_host_init(struct bfad_im_port_s *im_port)
948{ 950{
949 struct Scsi_Host *host = im_port->shost; 951 struct Scsi_Host *host = im_port->shost;
950 struct bfad_s *bfad = im_port->bfad; 952 struct bfad_s *bfad = im_port->bfad;
@@ -988,7 +990,7 @@ bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, struct bfad_itnim_s *itnim)
988 rport_ids.port_name = 990 rport_ids.port_name =
989 cpu_to_be64(bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim)); 991 cpu_to_be64(bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim));
990 rport_ids.port_id = 992 rport_ids.port_id =
991 bfa_os_hton3b(bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim)); 993 bfa_hton3b(bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim));
992 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; 994 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
993 995
994 itnim->fc_rport = fc_rport = 996 itnim->fc_rport = fc_rport =
@@ -1109,7 +1111,7 @@ bfad_im_itnim_work_handler(struct work_struct *work)
1109 kfree(itnim); 1111 kfree(itnim);
1110 break; 1112 break;
1111 default: 1113 default:
1112 bfa_assert(0); 1114 WARN_ON(1);
1113 break; 1115 break;
1114 } 1116 }
1115 1117
@@ -1172,7 +1174,6 @@ bfad_im_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd
1172 } 1174 }
1173 1175
1174 cmnd->host_scribble = (char *)hal_io; 1176 cmnd->host_scribble = (char *)hal_io;
1175 bfa_trc_fp(bfad, hal_io->iotag);
1176 bfa_ioim_start(hal_io); 1177 bfa_ioim_start(hal_io);
1177 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1178 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1178 1179
@@ -1190,7 +1191,7 @@ out_fail_cmd:
1190static DEF_SCSI_QCMD(bfad_im_queuecommand) 1191static DEF_SCSI_QCMD(bfad_im_queuecommand)
1191 1192
1192void 1193void
1193bfad_os_rport_online_wait(struct bfad_s *bfad) 1194bfad_rport_online_wait(struct bfad_s *bfad)
1194{ 1195{
1195 int i; 1196 int i;
1196 int rport_delay = 10; 1197 int rport_delay = 10;
@@ -1218,7 +1219,7 @@ bfad_os_rport_online_wait(struct bfad_s *bfad)
1218} 1219}
1219 1220
1220int 1221int
1221bfad_os_get_linkup_delay(struct bfad_s *bfad) 1222bfad_get_linkup_delay(struct bfad_s *bfad)
1222{ 1223{
1223 u8 nwwns = 0; 1224 u8 nwwns = 0;
1224 wwn_t wwns[BFA_PREBOOT_BOOTLUN_MAX]; 1225 wwn_t wwns[BFA_PREBOOT_BOOTLUN_MAX];
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index b038c0e08921..bfee63b16fa9 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -117,17 +117,17 @@ struct bfad_im_s {
117 char drv_workq_name[KOBJ_NAME_LEN]; 117 char drv_workq_name[KOBJ_NAME_LEN];
118}; 118};
119 119
120struct Scsi_Host *bfad_os_scsi_host_alloc(struct bfad_im_port_s *im_port, 120struct Scsi_Host *bfad_scsi_host_alloc(struct bfad_im_port_s *im_port,
121 struct bfad_s *); 121 struct bfad_s *);
122bfa_status_t bfad_os_thread_workq(struct bfad_s *bfad); 122bfa_status_t bfad_thread_workq(struct bfad_s *bfad);
123void bfad_os_destroy_workq(struct bfad_im_s *im); 123void bfad_destroy_workq(struct bfad_im_s *im);
124void bfad_os_fc_host_init(struct bfad_im_port_s *im_port); 124void bfad_fc_host_init(struct bfad_im_port_s *im_port);
125void bfad_os_scsi_host_free(struct bfad_s *bfad, 125void bfad_scsi_host_free(struct bfad_s *bfad,
126 struct bfad_im_port_s *im_port); 126 struct bfad_im_port_s *im_port);
127void bfad_os_ramp_up_qdepth(struct bfad_itnim_s *itnim, 127void bfad_ramp_up_qdepth(struct bfad_itnim_s *itnim,
128 struct scsi_device *sdev); 128 struct scsi_device *sdev);
129void bfad_os_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev); 129void bfad_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev);
130struct bfad_itnim_s *bfad_os_get_itnim(struct bfad_im_port_s *im_port, int id); 130struct bfad_itnim_s *bfad_get_itnim(struct bfad_im_port_s *im_port, int id);
131 131
132extern struct scsi_host_template bfad_im_scsi_host_template; 132extern struct scsi_host_template bfad_im_scsi_host_template;
133extern struct scsi_host_template bfad_im_vport_template; 133extern struct scsi_host_template bfad_im_vport_template;
diff --git a/drivers/scsi/bfa/bfi.h b/drivers/scsi/bfa/bfi.h
index 58796d1284b7..72b69a0c3b51 100644
--- a/drivers/scsi/bfa/bfi.h
+++ b/drivers/scsi/bfa/bfi.h
@@ -95,8 +95,8 @@ enum {
95 */ 95 */
96union bfi_addr_u { 96union bfi_addr_u {
97 struct { 97 struct {
98 u32 addr_lo; 98 __be32 addr_lo;
99 u32 addr_hi; 99 __be32 addr_hi;
100 } a32; 100 } a32;
101}; 101};
102 102
@@ -104,7 +104,7 @@ union bfi_addr_u {
104 * Scatter Gather Element 104 * Scatter Gather Element
105 */ 105 */
106struct bfi_sge_s { 106struct bfi_sge_s {
107#ifdef __BIGENDIAN 107#ifdef __BIG_ENDIAN
108 u32 flags:2, 108 u32 flags:2,
109 rsvd:2, 109 rsvd:2,
110 sg_len:28; 110 sg_len:28;
@@ -399,7 +399,7 @@ union bfi_ioc_i2h_msg_u {
399 */ 399 */
400struct bfi_pbc_blun_s { 400struct bfi_pbc_blun_s {
401 wwn_t tgt_pwwn; 401 wwn_t tgt_pwwn;
402 lun_t tgt_lun; 402 struct scsi_lun tgt_lun;
403}; 403};
404 404
405/* 405/*
diff --git a/drivers/scsi/bfa/bfi_cbreg.h b/drivers/scsi/bfa/bfi_cbreg.h
index 6f03ed382c69..39ad42b66b5b 100644
--- a/drivers/scsi/bfa/bfi_cbreg.h
+++ b/drivers/scsi/bfa/bfi_cbreg.h
@@ -208,6 +208,7 @@
208#define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG 208#define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG
209#define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG 209#define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG
210#define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG 210#define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG
211#define BFA_IOC_FAIL_SYNC HOST_SEM5_INFO_REG
211 212
212#define CPE_Q_DEPTH(__n) \ 213#define CPE_Q_DEPTH(__n) \
213 (CPE_Q0_DEPTH + (__n) * (CPE_Q1_DEPTH - CPE_Q0_DEPTH)) 214 (CPE_Q0_DEPTH + (__n) * (CPE_Q1_DEPTH - CPE_Q0_DEPTH))
diff --git a/drivers/scsi/bfa/bfi_ctreg.h b/drivers/scsi/bfa/bfi_ctreg.h
index 62b86a4b0e4b..fc4ce4a5a183 100644
--- a/drivers/scsi/bfa/bfi_ctreg.h
+++ b/drivers/scsi/bfa/bfi_ctreg.h
@@ -522,6 +522,7 @@ enum {
522#define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG 522#define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG
523#define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG 523#define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG
524#define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG 524#define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG
525#define BFA_IOC_FAIL_SYNC HOST_SEM5_INFO_REG
525 526
526#define CPE_DEPTH_Q(__n) \ 527#define CPE_DEPTH_Q(__n) \
527 (CPE_DEPTH_Q0 + (__n) * (CPE_DEPTH_Q1 - CPE_DEPTH_Q0)) 528 (CPE_DEPTH_Q0 + (__n) * (CPE_DEPTH_Q1 - CPE_DEPTH_Q0))
@@ -539,22 +540,30 @@ enum {
539 (RME_PI_PTR_Q0 + (__n) * (RME_PI_PTR_Q1 - RME_PI_PTR_Q0)) 540 (RME_PI_PTR_Q0 + (__n) * (RME_PI_PTR_Q1 - RME_PI_PTR_Q0))
540#define RME_CI_PTR_Q(__n) \ 541#define RME_CI_PTR_Q(__n) \
541 (RME_CI_PTR_Q0 + (__n) * (RME_CI_PTR_Q1 - RME_CI_PTR_Q0)) 542 (RME_CI_PTR_Q0 + (__n) * (RME_CI_PTR_Q1 - RME_CI_PTR_Q0))
542#define HQM_QSET_RXQ_DRBL_P0(__n) (HQM_QSET0_RXQ_DRBL_P0 + (__n) \ 543#define HQM_QSET_RXQ_DRBL_P0(__n) \
543 * (HQM_QSET1_RXQ_DRBL_P0 - HQM_QSET0_RXQ_DRBL_P0)) 544 (HQM_QSET0_RXQ_DRBL_P0 + (__n) * \
544#define HQM_QSET_TXQ_DRBL_P0(__n) (HQM_QSET0_TXQ_DRBL_P0 + (__n) \ 545 (HQM_QSET1_RXQ_DRBL_P0 - HQM_QSET0_RXQ_DRBL_P0))
545 * (HQM_QSET1_TXQ_DRBL_P0 - HQM_QSET0_TXQ_DRBL_P0)) 546#define HQM_QSET_TXQ_DRBL_P0(__n) \
546#define HQM_QSET_IB_DRBL_1_P0(__n) (HQM_QSET0_IB_DRBL_1_P0 + (__n) \ 547 (HQM_QSET0_TXQ_DRBL_P0 + (__n) * \
547 * (HQM_QSET1_IB_DRBL_1_P0 - HQM_QSET0_IB_DRBL_1_P0)) 548 (HQM_QSET1_TXQ_DRBL_P0 - HQM_QSET0_TXQ_DRBL_P0))
548#define HQM_QSET_IB_DRBL_2_P0(__n) (HQM_QSET0_IB_DRBL_2_P0 + (__n) \ 549#define HQM_QSET_IB_DRBL_1_P0(__n) \
549 * (HQM_QSET1_IB_DRBL_2_P0 - HQM_QSET0_IB_DRBL_2_P0)) 550 (HQM_QSET0_IB_DRBL_1_P0 + (__n) * \
550#define HQM_QSET_RXQ_DRBL_P1(__n) (HQM_QSET0_RXQ_DRBL_P1 + (__n) \ 551 (HQM_QSET1_IB_DRBL_1_P0 - HQM_QSET0_IB_DRBL_1_P0))
551 * (HQM_QSET1_RXQ_DRBL_P1 - HQM_QSET0_RXQ_DRBL_P1)) 552#define HQM_QSET_IB_DRBL_2_P0(__n) \
552#define HQM_QSET_TXQ_DRBL_P1(__n) (HQM_QSET0_TXQ_DRBL_P1 + (__n) \ 553 (HQM_QSET0_IB_DRBL_2_P0 + (__n) * \
553 * (HQM_QSET1_TXQ_DRBL_P1 - HQM_QSET0_TXQ_DRBL_P1)) 554 (HQM_QSET1_IB_DRBL_2_P0 - HQM_QSET0_IB_DRBL_2_P0))
554#define HQM_QSET_IB_DRBL_1_P1(__n) (HQM_QSET0_IB_DRBL_1_P1 + (__n) \ 555#define HQM_QSET_RXQ_DRBL_P1(__n) \
555 * (HQM_QSET1_IB_DRBL_1_P1 - HQM_QSET0_IB_DRBL_1_P1)) 556 (HQM_QSET0_RXQ_DRBL_P1 + (__n) * \
556#define HQM_QSET_IB_DRBL_2_P1(__n) (HQM_QSET0_IB_DRBL_2_P1 + (__n) \ 557 (HQM_QSET1_RXQ_DRBL_P1 - HQM_QSET0_RXQ_DRBL_P1))
557 * (HQM_QSET1_IB_DRBL_2_P1 - HQM_QSET0_IB_DRBL_2_P1)) 558#define HQM_QSET_TXQ_DRBL_P1(__n) \
559 (HQM_QSET0_TXQ_DRBL_P1 + (__n) * \
560 (HQM_QSET1_TXQ_DRBL_P1 - HQM_QSET0_TXQ_DRBL_P1))
561#define HQM_QSET_IB_DRBL_1_P1(__n) \
562 (HQM_QSET0_IB_DRBL_1_P1 + (__n) * \
563 (HQM_QSET1_IB_DRBL_1_P1 - HQM_QSET0_IB_DRBL_1_P1))
564#define HQM_QSET_IB_DRBL_2_P1(__n) \
565 (HQM_QSET0_IB_DRBL_2_P1 + (__n) * \
566 (HQM_QSET1_IB_DRBL_2_P1 - HQM_QSET0_IB_DRBL_2_P1))
558 567
559#define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q)) 568#define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
560#define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q)) 569#define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
diff --git a/drivers/scsi/bfa/bfi_ms.h b/drivers/scsi/bfa/bfi_ms.h
index fa9f6fb9d45b..19e888a57555 100644
--- a/drivers/scsi/bfa/bfi_ms.h
+++ b/drivers/scsi/bfa/bfi_ms.h
@@ -47,10 +47,10 @@ struct bfi_iocfc_cfg_s {
47 */ 47 */
48 union bfi_addr_u req_cq_ba[BFI_IOC_MAX_CQS]; 48 union bfi_addr_u req_cq_ba[BFI_IOC_MAX_CQS];
49 union bfi_addr_u req_shadow_ci[BFI_IOC_MAX_CQS]; 49 union bfi_addr_u req_shadow_ci[BFI_IOC_MAX_CQS];
50 u16 req_cq_elems[BFI_IOC_MAX_CQS]; 50 __be16 req_cq_elems[BFI_IOC_MAX_CQS];
51 union bfi_addr_u rsp_cq_ba[BFI_IOC_MAX_CQS]; 51 union bfi_addr_u rsp_cq_ba[BFI_IOC_MAX_CQS];
52 union bfi_addr_u rsp_shadow_pi[BFI_IOC_MAX_CQS]; 52 union bfi_addr_u rsp_shadow_pi[BFI_IOC_MAX_CQS];
53 u16 rsp_cq_elems[BFI_IOC_MAX_CQS]; 53 __be16 rsp_cq_elems[BFI_IOC_MAX_CQS];
54 54
55 union bfi_addr_u stats_addr; /* DMA-able address for stats */ 55 union bfi_addr_u stats_addr; /* DMA-able address for stats */
56 union bfi_addr_u cfgrsp_addr; /* config response dma address */ 56 union bfi_addr_u cfgrsp_addr; /* config response dma address */
@@ -102,8 +102,8 @@ struct bfi_iocfc_set_intr_req_s {
102 struct bfi_mhdr_s mh; /* common msg header */ 102 struct bfi_mhdr_s mh; /* common msg header */
103 u8 coalesce; /* enable intr coalescing */ 103 u8 coalesce; /* enable intr coalescing */
104 u8 rsvd[3]; 104 u8 rsvd[3];
105 u16 delay; /* delay timer 0..1125us */ 105 __be16 delay; /* delay timer 0..1125us */
106 u16 latency; /* latency timer 0..225us */ 106 __be16 latency; /* latency timer 0..225us */
107}; 107};
108 108
109 109
@@ -188,7 +188,8 @@ struct bfi_fcport_rsp_s {
188 struct bfi_mhdr_s mh; /* common msg header */ 188 struct bfi_mhdr_s mh; /* common msg header */
189 u8 status; /* port enable status */ 189 u8 status; /* port enable status */
190 u8 rsvd[3]; 190 u8 rsvd[3];
191 u32 msgtag; /* msgtag for reply */ 191 struct bfa_port_cfg_s port_cfg;/* port configuration */
192 u32 msgtag; /* msgtag for reply */
192}; 193};
193 194
194/* 195/*
@@ -202,7 +203,8 @@ struct bfi_fcport_enable_req_s {
202 struct bfa_port_cfg_s port_cfg; /* port configuration */ 203 struct bfa_port_cfg_s port_cfg; /* port configuration */
203 union bfi_addr_u stats_dma_addr; /* DMA address for stats */ 204 union bfi_addr_u stats_dma_addr; /* DMA address for stats */
204 u32 msgtag; /* msgtag for reply */ 205 u32 msgtag; /* msgtag for reply */
205 u32 rsvd2; 206 u8 use_flash_cfg; /* get prot cfg from flash */
207 u8 rsvd2[3];
206}; 208};
207 209
208/* 210/*
@@ -210,7 +212,7 @@ struct bfi_fcport_enable_req_s {
210 */ 212 */
211struct bfi_fcport_set_svc_params_req_s { 213struct bfi_fcport_set_svc_params_req_s {
212 struct bfi_mhdr_s mh; /* msg header */ 214 struct bfi_mhdr_s mh; /* msg header */
213 u16 tx_bbcredit; /* Tx credits */ 215 __be16 tx_bbcredit; /* Tx credits */
214 u16 rsvd; 216 u16 rsvd;
215}; 217};
216 218
@@ -231,7 +233,7 @@ struct bfi_fcport_trunk_link_s {
231 u8 state; /* bfa_trunk_link_state_t */ 233 u8 state; /* bfa_trunk_link_state_t */
232 u8 speed; /* bfa_port_speed_t */ 234 u8 speed; /* bfa_port_speed_t */
233 u8 rsvd; 235 u8 rsvd;
234 u32 deskew; 236 __be32 deskew;
235}; 237};
236 238
237#define BFI_FCPORT_MAX_LINKS 2 239#define BFI_FCPORT_MAX_LINKS 2
@@ -284,17 +286,17 @@ enum bfi_fcxp_i2h {
284 */ 286 */
285struct bfi_fcxp_send_req_s { 287struct bfi_fcxp_send_req_s {
286 struct bfi_mhdr_s mh; /* Common msg header */ 288 struct bfi_mhdr_s mh; /* Common msg header */
287 u16 fcxp_tag; /* driver request tag */ 289 __be16 fcxp_tag; /* driver request tag */
288 u16 max_frmsz; /* max send frame size */ 290 __be16 max_frmsz; /* max send frame size */
289 u16 vf_id; /* vsan tag if applicable */ 291 __be16 vf_id; /* vsan tag if applicable */
290 u16 rport_fw_hndl; /* FW Handle for the remote port */ 292 u16 rport_fw_hndl; /* FW Handle for the remote port */
291 u8 class; /* FC class used for req/rsp */ 293 u8 class; /* FC class used for req/rsp */
292 u8 rsp_timeout; /* timeout in secs, 0-no response */ 294 u8 rsp_timeout; /* timeout in secs, 0-no response */
293 u8 cts; /* continue sequence */ 295 u8 cts; /* continue sequence */
294 u8 lp_tag; /* lport tag */ 296 u8 lp_tag; /* lport tag */
295 struct fchs_s fchs; /* request FC header structure */ 297 struct fchs_s fchs; /* request FC header structure */
296 u32 req_len; /* request payload length */ 298 __be32 req_len; /* request payload length */
297 u32 rsp_maxlen; /* max response length expected */ 299 __be32 rsp_maxlen; /* max response length expected */
298 struct bfi_sge_s req_sge[BFA_FCXP_MAX_SGES]; /* request buf */ 300 struct bfi_sge_s req_sge[BFA_FCXP_MAX_SGES]; /* request buf */
299 struct bfi_sge_s rsp_sge[BFA_FCXP_MAX_SGES]; /* response buf */ 301 struct bfi_sge_s rsp_sge[BFA_FCXP_MAX_SGES]; /* response buf */
300}; 302};
@@ -304,11 +306,11 @@ struct bfi_fcxp_send_req_s {
304 */ 306 */
305struct bfi_fcxp_send_rsp_s { 307struct bfi_fcxp_send_rsp_s {
306 struct bfi_mhdr_s mh; /* Common msg header */ 308 struct bfi_mhdr_s mh; /* Common msg header */
307 u16 fcxp_tag; /* send request tag */ 309 __be16 fcxp_tag; /* send request tag */
308 u8 req_status; /* request status */ 310 u8 req_status; /* request status */
309 u8 rsvd; 311 u8 rsvd;
310 u32 rsp_len; /* actual response length */ 312 __be32 rsp_len; /* actual response length */
311 u32 residue_len; /* residual response length */ 313 __be32 residue_len; /* residual response length */
312 struct fchs_s fchs; /* response FC header structure */ 314 struct fchs_s fchs; /* response FC header structure */
313}; 315};
314 316
@@ -325,7 +327,7 @@ enum bfi_uf_i2h {
325struct bfi_uf_buf_post_s { 327struct bfi_uf_buf_post_s {
326 struct bfi_mhdr_s mh; /* Common msg header */ 328 struct bfi_mhdr_s mh; /* Common msg header */
327 u16 buf_tag; /* buffer tag */ 329 u16 buf_tag; /* buffer tag */
328 u16 buf_len; /* total buffer length */ 330 __be16 buf_len; /* total buffer length */
329 struct bfi_sge_s sge[BFA_UF_MAX_SGES]; /* buffer DMA SGEs */ 331 struct bfi_sge_s sge[BFA_UF_MAX_SGES]; /* buffer DMA SGEs */
330}; 332};
331 333
@@ -340,6 +342,7 @@ struct bfi_uf_frm_rcvd_s {
340enum bfi_lps_h2i_msgs { 342enum bfi_lps_h2i_msgs {
341 BFI_LPS_H2I_LOGIN_REQ = 1, 343 BFI_LPS_H2I_LOGIN_REQ = 1,
342 BFI_LPS_H2I_LOGOUT_REQ = 2, 344 BFI_LPS_H2I_LOGOUT_REQ = 2,
345 BFI_LPS_H2I_N2N_PID_REQ = 3,
343}; 346};
344 347
345enum bfi_lps_i2h_msgs { 348enum bfi_lps_i2h_msgs {
@@ -352,7 +355,7 @@ struct bfi_lps_login_req_s {
352 struct bfi_mhdr_s mh; /* common msg header */ 355 struct bfi_mhdr_s mh; /* common msg header */
353 u8 lp_tag; 356 u8 lp_tag;
354 u8 alpa; 357 u8 alpa;
355 u16 pdu_size; 358 __be16 pdu_size;
356 wwn_t pwwn; 359 wwn_t pwwn;
357 wwn_t nwwn; 360 wwn_t nwwn;
358 u8 fdisc; 361 u8 fdisc;
@@ -368,7 +371,7 @@ struct bfi_lps_login_rsp_s {
368 u8 lsrjt_expl; 371 u8 lsrjt_expl;
369 wwn_t port_name; 372 wwn_t port_name;
370 wwn_t node_name; 373 wwn_t node_name;
371 u16 bb_credit; 374 __be16 bb_credit;
372 u8 f_port; 375 u8 f_port;
373 u8 npiv_en; 376 u8 npiv_en;
374 u32 lp_pid:24; 377 u32 lp_pid:24;
@@ -399,10 +402,17 @@ struct bfi_lps_cvl_event_s {
399 u8 rsvd[3]; 402 u8 rsvd[3];
400}; 403};
401 404
405struct bfi_lps_n2n_pid_req_s {
406 struct bfi_mhdr_s mh; /* common msg header */
407 u8 lp_tag;
408 u32 lp_pid:24;
409};
410
402union bfi_lps_h2i_msg_u { 411union bfi_lps_h2i_msg_u {
403 struct bfi_mhdr_s *msg; 412 struct bfi_mhdr_s *msg;
404 struct bfi_lps_login_req_s *login_req; 413 struct bfi_lps_login_req_s *login_req;
405 struct bfi_lps_logout_req_s *logout_req; 414 struct bfi_lps_logout_req_s *logout_req;
415 struct bfi_lps_n2n_pid_req_s *n2n_pid_req;
406}; 416};
407 417
408union bfi_lps_i2h_msg_u { 418union bfi_lps_i2h_msg_u {
@@ -427,7 +437,7 @@ enum bfi_rport_i2h_msgs {
427struct bfi_rport_create_req_s { 437struct bfi_rport_create_req_s {
428 struct bfi_mhdr_s mh; /* common msg header */ 438 struct bfi_mhdr_s mh; /* common msg header */
429 u16 bfa_handle; /* host rport handle */ 439 u16 bfa_handle; /* host rport handle */
430 u16 max_frmsz; /* max rcv pdu size */ 440 __be16 max_frmsz; /* max rcv pdu size */
431 u32 pid:24, /* remote port ID */ 441 u32 pid:24, /* remote port ID */
432 lp_tag:8; /* local port tag */ 442 lp_tag:8; /* local port tag */
433 u32 local_pid:24, /* local port ID */ 443 u32 local_pid:24, /* local port ID */
@@ -583,7 +593,7 @@ struct bfi_ioim_dif_s {
583 */ 593 */
584struct bfi_ioim_req_s { 594struct bfi_ioim_req_s {
585 struct bfi_mhdr_s mh; /* Common msg header */ 595 struct bfi_mhdr_s mh; /* Common msg header */
586 u16 io_tag; /* I/O tag */ 596 __be16 io_tag; /* I/O tag */
587 u16 rport_hdl; /* itnim/rport firmware handle */ 597 u16 rport_hdl; /* itnim/rport firmware handle */
588 struct fcp_cmnd_s cmnd; /* IO request info */ 598 struct fcp_cmnd_s cmnd; /* IO request info */
589 599
@@ -689,7 +699,7 @@ enum bfi_ioim_status {
689 */ 699 */
690struct bfi_ioim_rsp_s { 700struct bfi_ioim_rsp_s {
691 struct bfi_mhdr_s mh; /* common msg header */ 701 struct bfi_mhdr_s mh; /* common msg header */
692 u16 io_tag; /* completed IO tag */ 702 __be16 io_tag; /* completed IO tag */
693 u16 bfa_rport_hndl; /* releated rport handle */ 703 u16 bfa_rport_hndl; /* releated rport handle */
694 u8 io_status; /* IO completion status */ 704 u8 io_status; /* IO completion status */
695 u8 reuse_io_tag; /* IO tag can be reused */ 705 u8 reuse_io_tag; /* IO tag can be reused */
@@ -698,13 +708,13 @@ struct bfi_ioim_rsp_s {
698 u8 sns_len; /* scsi sense length */ 708 u8 sns_len; /* scsi sense length */
699 u8 resid_flags; /* IO residue flags */ 709 u8 resid_flags; /* IO residue flags */
700 u8 rsvd_a; 710 u8 rsvd_a;
701 u32 residue; /* IO residual length in bytes */ 711 __be32 residue; /* IO residual length in bytes */
702 u32 rsvd_b[3]; 712 u32 rsvd_b[3];
703}; 713};
704 714
705struct bfi_ioim_abort_req_s { 715struct bfi_ioim_abort_req_s {
706 struct bfi_mhdr_s mh; /* Common msg header */ 716 struct bfi_mhdr_s mh; /* Common msg header */
707 u16 io_tag; /* I/O tag */ 717 __be16 io_tag; /* I/O tag */
708 u16 abort_tag; /* unique request tag */ 718 u16 abort_tag; /* unique request tag */
709}; 719};
710 720
@@ -723,9 +733,9 @@ enum bfi_tskim_i2h {
723 733
724struct bfi_tskim_req_s { 734struct bfi_tskim_req_s {
725 struct bfi_mhdr_s mh; /* Common msg header */ 735 struct bfi_mhdr_s mh; /* Common msg header */
726 u16 tsk_tag; /* task management tag */ 736 __be16 tsk_tag; /* task management tag */
727 u16 itn_fhdl; /* itn firmware handle */ 737 u16 itn_fhdl; /* itn firmware handle */
728 lun_t lun; /* LU number */ 738 struct scsi_lun lun; /* LU number */
729 u8 tm_flags; /* see enum fcp_tm_cmnd */ 739 u8 tm_flags; /* see enum fcp_tm_cmnd */
730 u8 t_secs; /* Timeout value in seconds */ 740 u8 t_secs; /* Timeout value in seconds */
731 u8 rsvd[2]; 741 u8 rsvd[2];
@@ -733,7 +743,7 @@ struct bfi_tskim_req_s {
733 743
734struct bfi_tskim_abortreq_s { 744struct bfi_tskim_abortreq_s {
735 struct bfi_mhdr_s mh; /* Common msg header */ 745 struct bfi_mhdr_s mh; /* Common msg header */
736 u16 tsk_tag; /* task management tag */ 746 __be16 tsk_tag; /* task management tag */
737 u16 rsvd; 747 u16 rsvd;
738}; 748};
739 749
@@ -755,7 +765,7 @@ enum bfi_tskim_status {
755 765
756struct bfi_tskim_rsp_s { 766struct bfi_tskim_rsp_s {
757 struct bfi_mhdr_s mh; /* Common msg header */ 767 struct bfi_mhdr_s mh; /* Common msg header */
758 u16 tsk_tag; /* task mgmt cmnd tag */ 768 __be16 tsk_tag; /* task mgmt cmnd tag */
759 u8 tsk_status; /* @ref bfi_tskim_status */ 769 u8 tsk_status; /* @ref bfi_tskim_status */
760 u8 rsvd; 770 u8 rsvd;
761}; 771};
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_constants.h b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
index 1b6f86b2482d..30e6bdbd65af 100644
--- a/drivers/scsi/bnx2i/57xx_iscsi_constants.h
+++ b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
@@ -1,12 +1,13 @@
1/* 57xx_iscsi_constants.h: Broadcom NetXtreme II iSCSI HSI 1/* 57xx_iscsi_constants.h: Broadcom NetXtreme II iSCSI HSI
2 * 2 *
3 * Copyright (c) 2006 - 2009 Broadcom Corporation 3 * Copyright (c) 2006 - 2010 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation. 7 * the Free Software Foundation.
8 * 8 *
9 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) 9 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
10 * Maintained by: Eddie Wai (eddie.wai@broadcom.com)
10 */ 11 */
11#ifndef __57XX_ISCSI_CONSTANTS_H_ 12#ifndef __57XX_ISCSI_CONSTANTS_H_
12#define __57XX_ISCSI_CONSTANTS_H_ 13#define __57XX_ISCSI_CONSTANTS_H_
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
index 36af1afef9b6..dad6c8a34317 100644
--- a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
+++ b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
@@ -1,12 +1,13 @@
1/* 57xx_iscsi_hsi.h: Broadcom NetXtreme II iSCSI HSI. 1/* 57xx_iscsi_hsi.h: Broadcom NetXtreme II iSCSI HSI.
2 * 2 *
3 * Copyright (c) 2006 - 2009 Broadcom Corporation 3 * Copyright (c) 2006 - 2010 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation. 7 * the Free Software Foundation.
8 * 8 *
9 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) 9 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
10 * Maintained by: Eddie Wai (eddie.wai@broadcom.com)
10 */ 11 */
11#ifndef __57XX_ISCSI_HSI_LINUX_LE__ 12#ifndef __57XX_ISCSI_HSI_LINUX_LE__
12#define __57XX_ISCSI_HSI_LINUX_LE__ 13#define __57XX_ISCSI_HSI_LINUX_LE__
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index a44b1b33fa18..e1ca5fe7e6bb 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -1,6 +1,6 @@
1/* bnx2i.h: Broadcom NetXtreme II iSCSI driver. 1/* bnx2i.h: Broadcom NetXtreme II iSCSI driver.
2 * 2 *
3 * Copyright (c) 2006 - 2009 Broadcom Corporation 3 * Copyright (c) 2006 - 2010 Broadcom Corporation
4 * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. 4 * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
5 * Copyright (c) 2007, 2008 Mike Christie 5 * Copyright (c) 2007, 2008 Mike Christie
6 * 6 *
@@ -9,6 +9,7 @@
9 * the Free Software Foundation. 9 * the Free Software Foundation.
10 * 10 *
11 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) 11 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
12 * Maintained by: Eddie Wai (eddie.wai@broadcom.com)
12 */ 13 */
13 14
14#ifndef _BNX2I_H_ 15#ifndef _BNX2I_H_
@@ -649,6 +650,7 @@ enum {
649 EP_STATE_OFLD_FAILED = 0x8000000, 650 EP_STATE_OFLD_FAILED = 0x8000000,
650 EP_STATE_CONNECT_FAILED = 0x10000000, 651 EP_STATE_CONNECT_FAILED = 0x10000000,
651 EP_STATE_DISCONN_TIMEDOUT = 0x20000000, 652 EP_STATE_DISCONN_TIMEDOUT = 0x20000000,
653 EP_STATE_OFLD_FAILED_CID_BUSY = 0x80000000,
652}; 654};
653 655
654/** 656/**
@@ -717,14 +719,11 @@ extern struct device_attribute *bnx2i_dev_attributes[];
717 * Function Prototypes 719 * Function Prototypes
718 */ 720 */
719extern void bnx2i_identify_device(struct bnx2i_hba *hba); 721extern void bnx2i_identify_device(struct bnx2i_hba *hba);
720extern void bnx2i_register_device(struct bnx2i_hba *hba);
721 722
722extern void bnx2i_ulp_init(struct cnic_dev *dev); 723extern void bnx2i_ulp_init(struct cnic_dev *dev);
723extern void bnx2i_ulp_exit(struct cnic_dev *dev); 724extern void bnx2i_ulp_exit(struct cnic_dev *dev);
724extern void bnx2i_start(void *handle); 725extern void bnx2i_start(void *handle);
725extern void bnx2i_stop(void *handle); 726extern void bnx2i_stop(void *handle);
726extern void bnx2i_reg_dev_all(void);
727extern void bnx2i_unreg_dev_all(void);
728extern struct bnx2i_hba *get_adapter_list_head(void); 727extern struct bnx2i_hba *get_adapter_list_head(void);
729 728
730struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba, 729struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
@@ -761,11 +760,11 @@ extern int bnx2i_send_iscsi_logout(struct bnx2i_conn *conn,
761 struct iscsi_task *mtask); 760 struct iscsi_task *mtask);
762extern void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba, 761extern void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba,
763 struct bnx2i_cmd *cmd); 762 struct bnx2i_cmd *cmd);
764extern void bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba, 763extern int bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba,
765 struct bnx2i_endpoint *ep);
766extern void bnx2i_update_iscsi_conn(struct iscsi_conn *conn);
767extern void bnx2i_send_conn_destroy(struct bnx2i_hba *hba,
768 struct bnx2i_endpoint *ep); 764 struct bnx2i_endpoint *ep);
765extern void bnx2i_update_iscsi_conn(struct iscsi_conn *conn);
766extern int bnx2i_send_conn_destroy(struct bnx2i_hba *hba,
767 struct bnx2i_endpoint *ep);
769 768
770extern int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, 769extern int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba,
771 struct bnx2i_endpoint *ep); 770 struct bnx2i_endpoint *ep);
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 2f9622ebbd84..96505e3ab986 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1,6 +1,6 @@
1/* bnx2i_hwi.c: Broadcom NetXtreme II iSCSI driver. 1/* bnx2i_hwi.c: Broadcom NetXtreme II iSCSI driver.
2 * 2 *
3 * Copyright (c) 2006 - 2009 Broadcom Corporation 3 * Copyright (c) 2006 - 2010 Broadcom Corporation
4 * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. 4 * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
5 * Copyright (c) 2007, 2008 Mike Christie 5 * Copyright (c) 2007, 2008 Mike Christie
6 * 6 *
@@ -9,6 +9,7 @@
9 * the Free Software Foundation. 9 * the Free Software Foundation.
10 * 10 *
11 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) 11 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
12 * Maintained by: Eddie Wai (eddie.wai@broadcom.com)
12 */ 13 */
13 14
14#include <linux/gfp.h> 15#include <linux/gfp.h>
@@ -385,6 +386,7 @@ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
385 struct bnx2i_cmd *bnx2i_cmd; 386 struct bnx2i_cmd *bnx2i_cmd;
386 struct bnx2i_tmf_request *tmfabort_wqe; 387 struct bnx2i_tmf_request *tmfabort_wqe;
387 u32 dword; 388 u32 dword;
389 u32 scsi_lun[2];
388 390
389 bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data; 391 bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data;
390 tmfabort_hdr = (struct iscsi_tm *)mtask->hdr; 392 tmfabort_hdr = (struct iscsi_tm *)mtask->hdr;
@@ -426,7 +428,10 @@ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
426 default: 428 default:
427 tmfabort_wqe->ref_itt = RESERVED_ITT; 429 tmfabort_wqe->ref_itt = RESERVED_ITT;
428 } 430 }
429 memcpy(tmfabort_wqe->lun, tmfabort_hdr->lun, sizeof(struct scsi_lun)); 431 memcpy(scsi_lun, tmfabort_hdr->lun, sizeof(struct scsi_lun));
432 tmfabort_wqe->lun[0] = be32_to_cpu(scsi_lun[0]);
433 tmfabort_wqe->lun[1] = be32_to_cpu(scsi_lun[1]);
434
430 tmfabort_wqe->ref_cmd_sn = be32_to_cpu(tmfabort_hdr->refcmdsn); 435 tmfabort_wqe->ref_cmd_sn = be32_to_cpu(tmfabort_hdr->refcmdsn);
431 436
432 tmfabort_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma; 437 tmfabort_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma;
@@ -697,10 +702,11 @@ void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd)
697 * this routine prepares and posts CONN_OFLD_REQ1/2 KWQE to initiate 702 * this routine prepares and posts CONN_OFLD_REQ1/2 KWQE to initiate
698 * iscsi connection context clean-up process 703 * iscsi connection context clean-up process
699 */ 704 */
700void bnx2i_send_conn_destroy(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) 705int bnx2i_send_conn_destroy(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
701{ 706{
702 struct kwqe *kwqe_arr[2]; 707 struct kwqe *kwqe_arr[2];
703 struct iscsi_kwqe_conn_destroy conn_cleanup; 708 struct iscsi_kwqe_conn_destroy conn_cleanup;
709 int rc = -EINVAL;
704 710
705 memset(&conn_cleanup, 0x00, sizeof(struct iscsi_kwqe_conn_destroy)); 711 memset(&conn_cleanup, 0x00, sizeof(struct iscsi_kwqe_conn_destroy));
706 712
@@ -717,7 +723,9 @@ void bnx2i_send_conn_destroy(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
717 723
718 kwqe_arr[0] = (struct kwqe *) &conn_cleanup; 724 kwqe_arr[0] = (struct kwqe *) &conn_cleanup;
719 if (hba->cnic && hba->cnic->submit_kwqes) 725 if (hba->cnic && hba->cnic->submit_kwqes)
720 hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1); 726 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1);
727
728 return rc;
721} 729}
722 730
723 731
@@ -728,8 +736,8 @@ void bnx2i_send_conn_destroy(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
728 * 736 *
729 * 5706/5708/5709 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE 737 * 5706/5708/5709 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE
730 */ 738 */
731static void bnx2i_570x_send_conn_ofld_req(struct bnx2i_hba *hba, 739static int bnx2i_570x_send_conn_ofld_req(struct bnx2i_hba *hba,
732 struct bnx2i_endpoint *ep) 740 struct bnx2i_endpoint *ep)
733{ 741{
734 struct kwqe *kwqe_arr[2]; 742 struct kwqe *kwqe_arr[2];
735 struct iscsi_kwqe_conn_offload1 ofld_req1; 743 struct iscsi_kwqe_conn_offload1 ofld_req1;
@@ -737,6 +745,7 @@ static void bnx2i_570x_send_conn_ofld_req(struct bnx2i_hba *hba,
737 dma_addr_t dma_addr; 745 dma_addr_t dma_addr;
738 int num_kwqes = 2; 746 int num_kwqes = 2;
739 u32 *ptbl; 747 u32 *ptbl;
748 int rc = -EINVAL;
740 749
741 ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1; 750 ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1;
742 ofld_req1.hdr.flags = 751 ofld_req1.hdr.flags =
@@ -774,7 +783,9 @@ static void bnx2i_570x_send_conn_ofld_req(struct bnx2i_hba *hba,
774 ofld_req2.num_additional_wqes = 0; 783 ofld_req2.num_additional_wqes = 0;
775 784
776 if (hba->cnic && hba->cnic->submit_kwqes) 785 if (hba->cnic && hba->cnic->submit_kwqes)
777 hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); 786 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
787
788 return rc;
778} 789}
779 790
780 791
@@ -785,8 +796,8 @@ static void bnx2i_570x_send_conn_ofld_req(struct bnx2i_hba *hba,
785 * 796 *
786 * 57710 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE 797 * 57710 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE
787 */ 798 */
788static void bnx2i_5771x_send_conn_ofld_req(struct bnx2i_hba *hba, 799static int bnx2i_5771x_send_conn_ofld_req(struct bnx2i_hba *hba,
789 struct bnx2i_endpoint *ep) 800 struct bnx2i_endpoint *ep)
790{ 801{
791 struct kwqe *kwqe_arr[5]; 802 struct kwqe *kwqe_arr[5];
792 struct iscsi_kwqe_conn_offload1 ofld_req1; 803 struct iscsi_kwqe_conn_offload1 ofld_req1;
@@ -795,6 +806,7 @@ static void bnx2i_5771x_send_conn_ofld_req(struct bnx2i_hba *hba,
795 dma_addr_t dma_addr; 806 dma_addr_t dma_addr;
796 int num_kwqes = 2; 807 int num_kwqes = 2;
797 u32 *ptbl; 808 u32 *ptbl;
809 int rc = -EINVAL;
798 810
799 ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1; 811 ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1;
800 ofld_req1.hdr.flags = 812 ofld_req1.hdr.flags =
@@ -840,7 +852,9 @@ static void bnx2i_5771x_send_conn_ofld_req(struct bnx2i_hba *hba,
840 num_kwqes += 1; 852 num_kwqes += 1;
841 853
842 if (hba->cnic && hba->cnic->submit_kwqes) 854 if (hba->cnic && hba->cnic->submit_kwqes)
843 hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); 855 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
856
857 return rc;
844} 858}
845 859
846/** 860/**
@@ -851,12 +865,16 @@ static void bnx2i_5771x_send_conn_ofld_req(struct bnx2i_hba *hba,
851 * 865 *
852 * this routine prepares and posts CONN_OFLD_REQ1/2 KWQE 866 * this routine prepares and posts CONN_OFLD_REQ1/2 KWQE
853 */ 867 */
854void bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) 868int bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
855{ 869{
870 int rc;
871
856 if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) 872 if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
857 bnx2i_5771x_send_conn_ofld_req(hba, ep); 873 rc = bnx2i_5771x_send_conn_ofld_req(hba, ep);
858 else 874 else
859 bnx2i_570x_send_conn_ofld_req(hba, ep); 875 rc = bnx2i_570x_send_conn_ofld_req(hba, ep);
876
877 return rc;
860} 878}
861 879
862 880
@@ -1513,7 +1531,7 @@ static void bnx2i_process_nopin_local_cmpl(struct iscsi_session *session,
1513 task = iscsi_itt_to_task(conn, 1531 task = iscsi_itt_to_task(conn,
1514 nop_in->itt & ISCSI_NOP_IN_MSG_INDEX); 1532 nop_in->itt & ISCSI_NOP_IN_MSG_INDEX);
1515 if (task) 1533 if (task)
1516 iscsi_put_task(task); 1534 __iscsi_put_task(task);
1517 spin_unlock(&session->lock); 1535 spin_unlock(&session->lock);
1518} 1536}
1519 1537
@@ -1549,11 +1567,9 @@ static int bnx2i_process_nopin_mesg(struct iscsi_session *session,
1549 struct iscsi_task *task; 1567 struct iscsi_task *task;
1550 struct bnx2i_nop_in_msg *nop_in; 1568 struct bnx2i_nop_in_msg *nop_in;
1551 struct iscsi_nopin *hdr; 1569 struct iscsi_nopin *hdr;
1552 u32 itt;
1553 int tgt_async_nop = 0; 1570 int tgt_async_nop = 0;
1554 1571
1555 nop_in = (struct bnx2i_nop_in_msg *)cqe; 1572 nop_in = (struct bnx2i_nop_in_msg *)cqe;
1556 itt = nop_in->itt & ISCSI_NOP_IN_MSG_INDEX;
1557 1573
1558 spin_lock(&session->lock); 1574 spin_lock(&session->lock);
1559 hdr = (struct iscsi_nopin *)&bnx2i_conn->gen_pdu.resp_hdr; 1575 hdr = (struct iscsi_nopin *)&bnx2i_conn->gen_pdu.resp_hdr;
@@ -1563,7 +1579,7 @@ static int bnx2i_process_nopin_mesg(struct iscsi_session *session,
1563 hdr->exp_cmdsn = cpu_to_be32(nop_in->exp_cmd_sn); 1579 hdr->exp_cmdsn = cpu_to_be32(nop_in->exp_cmd_sn);
1564 hdr->ttt = cpu_to_be32(nop_in->ttt); 1580 hdr->ttt = cpu_to_be32(nop_in->ttt);
1565 1581
1566 if (itt == (u16) RESERVED_ITT) { 1582 if (nop_in->itt == (u16) RESERVED_ITT) {
1567 bnx2i_unsol_pdu_adjust_rq(bnx2i_conn); 1583 bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
1568 hdr->itt = RESERVED_ITT; 1584 hdr->itt = RESERVED_ITT;
1569 tgt_async_nop = 1; 1585 tgt_async_nop = 1;
@@ -1571,7 +1587,8 @@ static int bnx2i_process_nopin_mesg(struct iscsi_session *session,
1571 } 1587 }
1572 1588
1573 /* this is a response to one of our nop-outs */ 1589 /* this is a response to one of our nop-outs */
1574 task = iscsi_itt_to_task(conn, itt); 1590 task = iscsi_itt_to_task(conn,
1591 (itt_t) (nop_in->itt & ISCSI_NOP_IN_MSG_INDEX));
1575 if (task) { 1592 if (task) {
1576 hdr->flags = ISCSI_FLAG_CMD_FINAL; 1593 hdr->flags = ISCSI_FLAG_CMD_FINAL;
1577 hdr->itt = task->hdr->itt; 1594 hdr->itt = task->hdr->itt;
@@ -1721,9 +1738,18 @@ static void bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
1721 if (nopin->cq_req_sn != qp->cqe_exp_seq_sn) 1738 if (nopin->cq_req_sn != qp->cqe_exp_seq_sn)
1722 break; 1739 break;
1723 1740
1724 if (unlikely(test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx))) 1741 if (unlikely(test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx))) {
1742 if (nopin->op_code == ISCSI_OP_NOOP_IN &&
1743 nopin->itt == (u16) RESERVED_ITT) {
1744 printk(KERN_ALERT "bnx2i: Unsolicited "
1745 "NOP-In detected for suspended "
1746 "connection dev=%s!\n",
1747 bnx2i_conn->hba->netdev->name);
1748 bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
1749 goto cqe_out;
1750 }
1725 break; 1751 break;
1726 1752 }
1727 tgt_async_msg = 0; 1753 tgt_async_msg = 0;
1728 1754
1729 switch (nopin->op_code) { 1755 switch (nopin->op_code) {
@@ -1770,10 +1796,9 @@ static void bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
1770 printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n", 1796 printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n",
1771 nopin->op_code); 1797 nopin->op_code);
1772 } 1798 }
1773
1774 if (!tgt_async_msg) 1799 if (!tgt_async_msg)
1775 bnx2i_conn->ep->num_active_cmds--; 1800 bnx2i_conn->ep->num_active_cmds--;
1776 1801cqe_out:
1777 /* clear out in production version only, till beta keep opcode 1802 /* clear out in production version only, till beta keep opcode
1778 * field intact, will be helpful in debugging (context dump) 1803 * field intact, will be helpful in debugging (context dump)
1779 * nopin->op_code = 0; 1804 * nopin->op_code = 0;
@@ -2154,11 +2179,24 @@ static void bnx2i_process_ofld_cmpl(struct bnx2i_hba *hba,
2154 } 2179 }
2155 2180
2156 if (ofld_kcqe->completion_status) { 2181 if (ofld_kcqe->completion_status) {
2182 ep->state = EP_STATE_OFLD_FAILED;
2157 if (ofld_kcqe->completion_status == 2183 if (ofld_kcqe->completion_status ==
2158 ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE) 2184 ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE)
2159 printk(KERN_ALERT "bnx2i: unable to allocate" 2185 printk(KERN_ALERT "bnx2i (%s): ofld1 cmpl - unable "
2160 " iSCSI context resources\n"); 2186 "to allocate iSCSI context resources\n",
2161 ep->state = EP_STATE_OFLD_FAILED; 2187 hba->netdev->name);
2188 else if (ofld_kcqe->completion_status ==
2189 ISCSI_KCQE_COMPLETION_STATUS_INVALID_OPCODE)
2190 printk(KERN_ALERT "bnx2i (%s): ofld1 cmpl - invalid "
2191 "opcode\n", hba->netdev->name);
2192 else if (ofld_kcqe->completion_status ==
2193 ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY)
2194 /* error status code valid only for 5771x chipset */
2195 ep->state = EP_STATE_OFLD_FAILED_CID_BUSY;
2196 else
2197 printk(KERN_ALERT "bnx2i (%s): ofld1 cmpl - invalid "
2198 "error code %d\n", hba->netdev->name,
2199 ofld_kcqe->completion_status);
2162 } else { 2200 } else {
2163 ep->state = EP_STATE_OFLD_COMPL; 2201 ep->state = EP_STATE_OFLD_COMPL;
2164 cid_addr = ofld_kcqe->iscsi_conn_context_id; 2202 cid_addr = ofld_kcqe->iscsi_conn_context_id;
@@ -2339,10 +2377,14 @@ static void bnx2i_cm_remote_close(struct cnic_sock *cm_sk)
2339static void bnx2i_cm_remote_abort(struct cnic_sock *cm_sk) 2377static void bnx2i_cm_remote_abort(struct cnic_sock *cm_sk)
2340{ 2378{
2341 struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context; 2379 struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
2380 u32 old_state = ep->state;
2342 2381
2343 ep->state = EP_STATE_TCP_RST_RCVD; 2382 ep->state = EP_STATE_TCP_RST_RCVD;
2344 if (ep->conn) 2383 if (old_state == EP_STATE_DISCONN_START)
2345 bnx2i_recovery_que_add_conn(ep->hba, ep->conn); 2384 wake_up_interruptible(&ep->ofld_wait);
2385 else
2386 if (ep->conn)
2387 bnx2i_recovery_que_add_conn(ep->hba, ep->conn);
2346} 2388}
2347 2389
2348 2390
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 50c2aa3b8eb1..72a7b2d4a439 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -1,6 +1,6 @@
1/* bnx2i.c: Broadcom NetXtreme II iSCSI driver. 1/* bnx2i.c: Broadcom NetXtreme II iSCSI driver.
2 * 2 *
3 * Copyright (c) 2006 - 2009 Broadcom Corporation 3 * Copyright (c) 2006 - 2010 Broadcom Corporation
4 * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. 4 * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
5 * Copyright (c) 2007, 2008 Mike Christie 5 * Copyright (c) 2007, 2008 Mike Christie
6 * 6 *
@@ -9,6 +9,7 @@
9 * the Free Software Foundation. 9 * the Free Software Foundation.
10 * 10 *
11 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) 11 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
12 * Maintained by: Eddie Wai (eddie.wai@broadcom.com)
12 */ 13 */
13 14
14#include "bnx2i.h" 15#include "bnx2i.h"
@@ -17,8 +18,8 @@ static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
17static u32 adapter_count; 18static u32 adapter_count;
18 19
19#define DRV_MODULE_NAME "bnx2i" 20#define DRV_MODULE_NAME "bnx2i"
20#define DRV_MODULE_VERSION "2.1.3" 21#define DRV_MODULE_VERSION "2.6.2.2"
21#define DRV_MODULE_RELDATE "Aug 10, 2010" 22#define DRV_MODULE_RELDATE "Nov 23, 2010"
22 23
23static char version[] __devinitdata = 24static char version[] __devinitdata =
24 "Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \ 25 "Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
@@ -65,8 +66,6 @@ MODULE_PARM_DESC(rq_size, "Configure RQ size");
65 66
66u64 iscsi_error_mask = 0x00; 67u64 iscsi_error_mask = 0x00;
67 68
68static void bnx2i_unreg_one_device(struct bnx2i_hba *hba) ;
69
70 69
71/** 70/**
72 * bnx2i_identify_device - identifies NetXtreme II device type 71 * bnx2i_identify_device - identifies NetXtreme II device type
@@ -211,13 +210,24 @@ void bnx2i_stop(void *handle)
211{ 210{
212 struct bnx2i_hba *hba = handle; 211 struct bnx2i_hba *hba = handle;
213 int conns_active; 212 int conns_active;
213 int wait_delay = 1 * HZ;
214 214
215 /* check if cleanup happened in GOING_DOWN context */ 215 /* check if cleanup happened in GOING_DOWN context */
216 if (!test_and_clear_bit(ADAPTER_STATE_GOING_DOWN, 216 if (!test_and_set_bit(ADAPTER_STATE_GOING_DOWN,
217 &hba->adapter_state)) 217 &hba->adapter_state)) {
218 iscsi_host_for_each_session(hba->shost, 218 iscsi_host_for_each_session(hba->shost,
219 bnx2i_drop_session); 219 bnx2i_drop_session);
220 220 wait_delay = hba->hba_shutdown_tmo;
221 }
222 /* Wait for inflight offload connection tasks to complete before
223 * proceeding. Forcefully terminate all connection recovery in
224 * progress at the earliest, either in bind(), send_pdu(LOGIN),
225 * or conn_start()
226 */
227 wait_event_interruptible_timeout(hba->eh_wait,
228 (list_empty(&hba->ep_ofld_list) &&
229 list_empty(&hba->ep_destroy_list)),
230 10 * HZ);
221 /* Wait for all endpoints to be torn down, Chip will be reset once 231 /* Wait for all endpoints to be torn down, Chip will be reset once
222 * control returns to network driver. So it is required to cleanup and 232 * control returns to network driver. So it is required to cleanup and
223 * release all connection resources before returning from this routine. 233 * release all connection resources before returning from this routine.
@@ -226,7 +236,7 @@ void bnx2i_stop(void *handle)
226 conns_active = hba->ofld_conns_active; 236 conns_active = hba->ofld_conns_active;
227 wait_event_interruptible_timeout(hba->eh_wait, 237 wait_event_interruptible_timeout(hba->eh_wait,
228 (hba->ofld_conns_active != conns_active), 238 (hba->ofld_conns_active != conns_active),
229 hba->hba_shutdown_tmo); 239 wait_delay);
230 if (hba->ofld_conns_active == conns_active) 240 if (hba->ofld_conns_active == conns_active)
231 break; 241 break;
232 } 242 }
@@ -235,88 +245,10 @@ void bnx2i_stop(void *handle)
235 /* This flag should be cleared last so that ep_disconnect() gracefully 245 /* This flag should be cleared last so that ep_disconnect() gracefully
236 * cleans up connection context 246 * cleans up connection context
237 */ 247 */
248 clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
238 clear_bit(ADAPTER_STATE_UP, &hba->adapter_state); 249 clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
239} 250}
240 251
241/**
242 * bnx2i_register_device - register bnx2i adapter instance with the cnic driver
243 * @hba: Adapter instance to register
244 *
245 * registers bnx2i adapter instance with the cnic driver while holding the
246 * adapter structure lock
247 */
248void bnx2i_register_device(struct bnx2i_hba *hba)
249{
250 int rc;
251
252 if (test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) ||
253 test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
254 return;
255 }
256
257 rc = hba->cnic->register_device(hba->cnic, CNIC_ULP_ISCSI, hba);
258
259 if (!rc)
260 set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
261}
262
263
264/**
265 * bnx2i_reg_dev_all - registers all adapter instances with the cnic driver
266 *
267 * registers all bnx2i adapter instances with the cnic driver while holding
268 * the global resource lock
269 */
270void bnx2i_reg_dev_all(void)
271{
272 struct bnx2i_hba *hba, *temp;
273
274 mutex_lock(&bnx2i_dev_lock);
275 list_for_each_entry_safe(hba, temp, &adapter_list, link)
276 bnx2i_register_device(hba);
277 mutex_unlock(&bnx2i_dev_lock);
278}
279
280
281/**
282 * bnx2i_unreg_one_device - unregister adapter instance with the cnic driver
283 * @hba: Adapter instance to unregister
284 *
285 * registers bnx2i adapter instance with the cnic driver while holding
286 * the adapter structure lock
287 */
288static void bnx2i_unreg_one_device(struct bnx2i_hba *hba)
289{
290 if (hba->ofld_conns_active ||
291 !test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic) ||
292 test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state))
293 return;
294
295 hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
296
297 /* ep_disconnect could come before NETDEV_DOWN, driver won't
298 * see NETDEV_DOWN as it already unregistered itself.
299 */
300 hba->adapter_state = 0;
301 clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
302}
303
304/**
305 * bnx2i_unreg_dev_all - unregisters all bnx2i instances with the cnic driver
306 *
307 * unregisters all bnx2i adapter instances with the cnic driver while holding
308 * the global resource lock
309 */
310void bnx2i_unreg_dev_all(void)
311{
312 struct bnx2i_hba *hba, *temp;
313
314 mutex_lock(&bnx2i_dev_lock);
315 list_for_each_entry_safe(hba, temp, &adapter_list, link)
316 bnx2i_unreg_one_device(hba);
317 mutex_unlock(&bnx2i_dev_lock);
318}
319
320 252
321/** 253/**
322 * bnx2i_init_one - initialize an adapter instance and allocate memory resources 254 * bnx2i_init_one - initialize an adapter instance and allocate memory resources
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index fb50efbce087..f0dce26593eb 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * bnx2i_iscsi.c: Broadcom NetXtreme II iSCSI driver. 2 * bnx2i_iscsi.c: Broadcom NetXtreme II iSCSI driver.
3 * 3 *
4 * Copyright (c) 2006 - 2009 Broadcom Corporation 4 * Copyright (c) 2006 - 2010 Broadcom Corporation
5 * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. 5 * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
6 * Copyright (c) 2007, 2008 Mike Christie 6 * Copyright (c) 2007, 2008 Mike Christie
7 * 7 *
@@ -10,6 +10,7 @@
10 * the Free Software Foundation. 10 * the Free Software Foundation.
11 * 11 *
12 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) 12 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
13 * Maintained by: Eddie Wai (eddie.wai@broadcom.com)
13 */ 14 */
14 15
15#include <linux/slab.h> 16#include <linux/slab.h>
@@ -411,7 +412,9 @@ static void bnx2i_free_ep(struct iscsi_endpoint *ep)
411 bnx2i_ep->state = EP_STATE_IDLE; 412 bnx2i_ep->state = EP_STATE_IDLE;
412 bnx2i_ep->hba->ofld_conns_active--; 413 bnx2i_ep->hba->ofld_conns_active--;
413 414
414 bnx2i_free_iscsi_cid(bnx2i_ep->hba, bnx2i_ep->ep_iscsi_cid); 415 if (bnx2i_ep->ep_iscsi_cid != (u16) -1)
416 bnx2i_free_iscsi_cid(bnx2i_ep->hba, bnx2i_ep->ep_iscsi_cid);
417
415 if (bnx2i_ep->conn) { 418 if (bnx2i_ep->conn) {
416 bnx2i_ep->conn->ep = NULL; 419 bnx2i_ep->conn->ep = NULL;
417 bnx2i_ep->conn = NULL; 420 bnx2i_ep->conn = NULL;
@@ -1383,6 +1386,12 @@ static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
1383 ep = iscsi_lookup_endpoint(transport_fd); 1386 ep = iscsi_lookup_endpoint(transport_fd);
1384 if (!ep) 1387 if (!ep)
1385 return -EINVAL; 1388 return -EINVAL;
1389 /*
1390 * Forcefully terminate all in progress connection recovery at the
1391 * earliest, either in bind(), send_pdu(LOGIN), or conn_start()
1392 */
1393 if (bnx2i_adapter_ready(hba))
1394 return -EIO;
1386 1395
1387 bnx2i_ep = ep->dd_data; 1396 bnx2i_ep = ep->dd_data;
1388 if ((bnx2i_ep->state == EP_STATE_TCP_FIN_RCVD) || 1397 if ((bnx2i_ep->state == EP_STATE_TCP_FIN_RCVD) ||
@@ -1404,7 +1413,6 @@ static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
1404 hba->netdev->name); 1413 hba->netdev->name);
1405 return -EEXIST; 1414 return -EEXIST;
1406 } 1415 }
1407
1408 bnx2i_ep->conn = bnx2i_conn; 1416 bnx2i_ep->conn = bnx2i_conn;
1409 bnx2i_conn->ep = bnx2i_ep; 1417 bnx2i_conn->ep = bnx2i_ep;
1410 bnx2i_conn->iscsi_conn_cid = bnx2i_ep->ep_iscsi_cid; 1418 bnx2i_conn->iscsi_conn_cid = bnx2i_ep->ep_iscsi_cid;
@@ -1461,21 +1469,28 @@ static int bnx2i_conn_get_param(struct iscsi_cls_conn *cls_conn,
1461 struct bnx2i_conn *bnx2i_conn = conn->dd_data; 1469 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1462 int len = 0; 1470 int len = 0;
1463 1471
1472 if (!(bnx2i_conn && bnx2i_conn->ep && bnx2i_conn->ep->hba))
1473 goto out;
1474
1464 switch (param) { 1475 switch (param) {
1465 case ISCSI_PARAM_CONN_PORT: 1476 case ISCSI_PARAM_CONN_PORT:
1466 if (bnx2i_conn->ep) 1477 mutex_lock(&bnx2i_conn->ep->hba->net_dev_lock);
1478 if (bnx2i_conn->ep->cm_sk)
1467 len = sprintf(buf, "%hu\n", 1479 len = sprintf(buf, "%hu\n",
1468 bnx2i_conn->ep->cm_sk->dst_port); 1480 bnx2i_conn->ep->cm_sk->dst_port);
1481 mutex_unlock(&bnx2i_conn->ep->hba->net_dev_lock);
1469 break; 1482 break;
1470 case ISCSI_PARAM_CONN_ADDRESS: 1483 case ISCSI_PARAM_CONN_ADDRESS:
1471 if (bnx2i_conn->ep) 1484 mutex_lock(&bnx2i_conn->ep->hba->net_dev_lock);
1485 if (bnx2i_conn->ep->cm_sk)
1472 len = sprintf(buf, "%pI4\n", 1486 len = sprintf(buf, "%pI4\n",
1473 &bnx2i_conn->ep->cm_sk->dst_ip); 1487 &bnx2i_conn->ep->cm_sk->dst_ip);
1488 mutex_unlock(&bnx2i_conn->ep->hba->net_dev_lock);
1474 break; 1489 break;
1475 default: 1490 default:
1476 return iscsi_conn_get_param(cls_conn, param, buf); 1491 return iscsi_conn_get_param(cls_conn, param, buf);
1477 } 1492 }
1478 1493out:
1479 return len; 1494 return len;
1480} 1495}
1481 1496
@@ -1599,8 +1614,6 @@ static struct bnx2i_hba *bnx2i_check_route(struct sockaddr *dst_addr)
1599 struct bnx2i_hba *hba; 1614 struct bnx2i_hba *hba;
1600 struct cnic_dev *cnic = NULL; 1615 struct cnic_dev *cnic = NULL;
1601 1616
1602 bnx2i_reg_dev_all();
1603
1604 hba = get_adapter_list_head(); 1617 hba = get_adapter_list_head();
1605 if (hba && hba->cnic) 1618 if (hba && hba->cnic)
1606 cnic = hba->cnic->cm_select_dev(desti, CNIC_ULP_ISCSI); 1619 cnic = hba->cnic->cm_select_dev(desti, CNIC_ULP_ISCSI);
@@ -1640,18 +1653,26 @@ no_nx2_route:
1640static int bnx2i_tear_down_conn(struct bnx2i_hba *hba, 1653static int bnx2i_tear_down_conn(struct bnx2i_hba *hba,
1641 struct bnx2i_endpoint *ep) 1654 struct bnx2i_endpoint *ep)
1642{ 1655{
1643 if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) 1656 if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic) && ep->cm_sk)
1644 hba->cnic->cm_destroy(ep->cm_sk); 1657 hba->cnic->cm_destroy(ep->cm_sk);
1645 1658
1646 if (test_bit(ADAPTER_STATE_GOING_DOWN, &ep->hba->adapter_state))
1647 ep->state = EP_STATE_DISCONN_COMPL;
1648
1649 if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type) && 1659 if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type) &&
1650 ep->state == EP_STATE_DISCONN_TIMEDOUT) { 1660 ep->state == EP_STATE_DISCONN_TIMEDOUT) {
1651 printk(KERN_ALERT "bnx2i - ERROR - please submit GRC Dump," 1661 if (ep->conn && ep->conn->cls_conn &&
1652 " NW/PCIe trace, driver msgs to developers" 1662 ep->conn->cls_conn->dd_data) {
1653 " for analysis\n"); 1663 struct iscsi_conn *conn = ep->conn->cls_conn->dd_data;
1654 return 1; 1664
1665 /* Must suspend all rx queue activity for this ep */
1666 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
1667 }
1668 /* CONN_DISCONNECT timeout may or may not be an issue depending
1669 * on what transcribed in TCP layer, different targets behave
1670 * differently
1671 */
1672 printk(KERN_ALERT "bnx2i (%s): - WARN - CONN_DISCON timed out, "
1673 "please submit GRC Dump, NW/PCIe trace, "
1674 "driver msgs to developers for analysis\n",
1675 hba->netdev->name);
1655 } 1676 }
1656 1677
1657 ep->state = EP_STATE_CLEANUP_START; 1678 ep->state = EP_STATE_CLEANUP_START;
@@ -1664,7 +1685,9 @@ static int bnx2i_tear_down_conn(struct bnx2i_hba *hba,
1664 bnx2i_ep_destroy_list_add(hba, ep); 1685 bnx2i_ep_destroy_list_add(hba, ep);
1665 1686
1666 /* destroy iSCSI context, wait for it to complete */ 1687 /* destroy iSCSI context, wait for it to complete */
1667 bnx2i_send_conn_destroy(hba, ep); 1688 if (bnx2i_send_conn_destroy(hba, ep))
1689 ep->state = EP_STATE_CLEANUP_CMPL;
1690
1668 wait_event_interruptible(ep->ofld_wait, 1691 wait_event_interruptible(ep->ofld_wait,
1669 (ep->state != EP_STATE_CLEANUP_START)); 1692 (ep->state != EP_STATE_CLEANUP_START));
1670 1693
@@ -1711,8 +1734,6 @@ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
1711 if (shost) { 1734 if (shost) {
1712 /* driver is given scsi host to work with */ 1735 /* driver is given scsi host to work with */
1713 hba = iscsi_host_priv(shost); 1736 hba = iscsi_host_priv(shost);
1714 /* Register the device with cnic if not already done so */
1715 bnx2i_register_device(hba);
1716 } else 1737 } else
1717 /* 1738 /*
1718 * check if the given destination can be reached through 1739 * check if the given destination can be reached through
@@ -1720,13 +1741,17 @@ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
1720 */ 1741 */
1721 hba = bnx2i_check_route(dst_addr); 1742 hba = bnx2i_check_route(dst_addr);
1722 1743
1723 if (!hba || test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state)) { 1744 if (!hba) {
1724 rc = -EINVAL; 1745 rc = -EINVAL;
1725 goto nohba; 1746 goto nohba;
1726 } 1747 }
1748 mutex_lock(&hba->net_dev_lock);
1727 1749
1750 if (bnx2i_adapter_ready(hba) || !hba->cid_que.cid_free_cnt) {
1751 rc = -EPERM;
1752 goto check_busy;
1753 }
1728 cnic = hba->cnic; 1754 cnic = hba->cnic;
1729 mutex_lock(&hba->net_dev_lock);
1730 ep = bnx2i_alloc_ep(hba); 1755 ep = bnx2i_alloc_ep(hba);
1731 if (!ep) { 1756 if (!ep) {
1732 rc = -ENOMEM; 1757 rc = -ENOMEM;
@@ -1734,23 +1759,21 @@ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
1734 } 1759 }
1735 bnx2i_ep = ep->dd_data; 1760 bnx2i_ep = ep->dd_data;
1736 1761
1737 if (bnx2i_adapter_ready(hba)) {
1738 rc = -EPERM;
1739 goto net_if_down;
1740 }
1741
1742 bnx2i_ep->num_active_cmds = 0; 1762 bnx2i_ep->num_active_cmds = 0;
1743 iscsi_cid = bnx2i_alloc_iscsi_cid(hba); 1763 iscsi_cid = bnx2i_alloc_iscsi_cid(hba);
1744 if (iscsi_cid == -1) { 1764 if (iscsi_cid == -1) {
1745 printk(KERN_ALERT "alloc_ep: unable to allocate iscsi cid\n"); 1765 printk(KERN_ALERT "bnx2i (%s): alloc_ep - unable to allocate "
1766 "iscsi cid\n", hba->netdev->name);
1746 rc = -ENOMEM; 1767 rc = -ENOMEM;
1747 goto iscsi_cid_err; 1768 bnx2i_free_ep(ep);
1769 goto check_busy;
1748 } 1770 }
1749 bnx2i_ep->hba_age = hba->age; 1771 bnx2i_ep->hba_age = hba->age;
1750 1772
1751 rc = bnx2i_alloc_qp_resc(hba, bnx2i_ep); 1773 rc = bnx2i_alloc_qp_resc(hba, bnx2i_ep);
1752 if (rc != 0) { 1774 if (rc != 0) {
1753 printk(KERN_ALERT "bnx2i: ep_conn, alloc QP resc error\n"); 1775 printk(KERN_ALERT "bnx2i (%s): ep_conn - alloc QP resc error"
1776 "\n", hba->netdev->name);
1754 rc = -ENOMEM; 1777 rc = -ENOMEM;
1755 goto qp_resc_err; 1778 goto qp_resc_err;
1756 } 1779 }
@@ -1765,7 +1788,18 @@ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
1765 bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep; 1788 bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep;
1766 add_timer(&bnx2i_ep->ofld_timer); 1789 add_timer(&bnx2i_ep->ofld_timer);
1767 1790
1768 bnx2i_send_conn_ofld_req(hba, bnx2i_ep); 1791 if (bnx2i_send_conn_ofld_req(hba, bnx2i_ep)) {
1792 if (bnx2i_ep->state == EP_STATE_OFLD_FAILED_CID_BUSY) {
1793 printk(KERN_ALERT "bnx2i (%s): iscsi cid %d is busy\n",
1794 hba->netdev->name, bnx2i_ep->ep_iscsi_cid);
1795 rc = -EBUSY;
1796 } else
1797 rc = -ENOSPC;
1798 printk(KERN_ALERT "bnx2i (%s): unable to send conn offld kwqe"
1799 "\n", hba->netdev->name);
1800 bnx2i_ep_ofld_list_del(hba, bnx2i_ep);
1801 goto conn_failed;
1802 }
1769 1803
1770 /* Wait for CNIC hardware to setup conn context and return 'cid' */ 1804 /* Wait for CNIC hardware to setup conn context and return 'cid' */
1771 wait_event_interruptible(bnx2i_ep->ofld_wait, 1805 wait_event_interruptible(bnx2i_ep->ofld_wait,
@@ -1778,7 +1812,12 @@ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
1778 bnx2i_ep_ofld_list_del(hba, bnx2i_ep); 1812 bnx2i_ep_ofld_list_del(hba, bnx2i_ep);
1779 1813
1780 if (bnx2i_ep->state != EP_STATE_OFLD_COMPL) { 1814 if (bnx2i_ep->state != EP_STATE_OFLD_COMPL) {
1781 rc = -ENOSPC; 1815 if (bnx2i_ep->state == EP_STATE_OFLD_FAILED_CID_BUSY) {
1816 printk(KERN_ALERT "bnx2i (%s): iscsi cid %d is busy\n",
1817 hba->netdev->name, bnx2i_ep->ep_iscsi_cid);
1818 rc = -EBUSY;
1819 } else
1820 rc = -ENOSPC;
1782 goto conn_failed; 1821 goto conn_failed;
1783 } 1822 }
1784 1823
@@ -1786,7 +1825,8 @@ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
1786 iscsi_cid, &bnx2i_ep->cm_sk, bnx2i_ep); 1825 iscsi_cid, &bnx2i_ep->cm_sk, bnx2i_ep);
1787 if (rc) { 1826 if (rc) {
1788 rc = -EINVAL; 1827 rc = -EINVAL;
1789 goto conn_failed; 1828 /* Need to terminate and cleanup the connection */
1829 goto release_ep;
1790 } 1830 }
1791 1831
1792 bnx2i_ep->cm_sk->rcv_buf = 256 * 1024; 1832 bnx2i_ep->cm_sk->rcv_buf = 256 * 1024;
@@ -1830,15 +1870,12 @@ release_ep:
1830 return ERR_PTR(rc); 1870 return ERR_PTR(rc);
1831 } 1871 }
1832conn_failed: 1872conn_failed:
1833net_if_down:
1834iscsi_cid_err:
1835 bnx2i_free_qp_resc(hba, bnx2i_ep); 1873 bnx2i_free_qp_resc(hba, bnx2i_ep);
1836qp_resc_err: 1874qp_resc_err:
1837 bnx2i_free_ep(ep); 1875 bnx2i_free_ep(ep);
1838check_busy: 1876check_busy:
1839 mutex_unlock(&hba->net_dev_lock); 1877 mutex_unlock(&hba->net_dev_lock);
1840nohba: 1878nohba:
1841 bnx2i_unreg_dev_all();
1842 return ERR_PTR(rc); 1879 return ERR_PTR(rc);
1843} 1880}
1844 1881
@@ -1898,12 +1935,13 @@ static int bnx2i_ep_tcp_conn_active(struct bnx2i_endpoint *bnx2i_ep)
1898 cnic_dev_10g = 1; 1935 cnic_dev_10g = 1;
1899 1936
1900 switch (bnx2i_ep->state) { 1937 switch (bnx2i_ep->state) {
1901 case EP_STATE_CONNECT_START: 1938 case EP_STATE_CONNECT_FAILED:
1902 case EP_STATE_CLEANUP_FAILED: 1939 case EP_STATE_CLEANUP_FAILED:
1903 case EP_STATE_OFLD_FAILED: 1940 case EP_STATE_OFLD_FAILED:
1904 case EP_STATE_DISCONN_TIMEDOUT: 1941 case EP_STATE_DISCONN_TIMEDOUT:
1905 ret = 0; 1942 ret = 0;
1906 break; 1943 break;
1944 case EP_STATE_CONNECT_START:
1907 case EP_STATE_CONNECT_COMPL: 1945 case EP_STATE_CONNECT_COMPL:
1908 case EP_STATE_ULP_UPDATE_START: 1946 case EP_STATE_ULP_UPDATE_START:
1909 case EP_STATE_ULP_UPDATE_COMPL: 1947 case EP_STATE_ULP_UPDATE_COMPL:
@@ -1914,13 +1952,10 @@ static int bnx2i_ep_tcp_conn_active(struct bnx2i_endpoint *bnx2i_ep)
1914 ret = 1; 1952 ret = 1;
1915 break; 1953 break;
1916 case EP_STATE_TCP_RST_RCVD: 1954 case EP_STATE_TCP_RST_RCVD:
1917 ret = 0;
1918 break;
1919 case EP_STATE_CONNECT_FAILED:
1920 if (cnic_dev_10g) 1955 if (cnic_dev_10g)
1921 ret = 1;
1922 else
1923 ret = 0; 1956 ret = 0;
1957 else
1958 ret = 1;
1924 break; 1959 break;
1925 default: 1960 default:
1926 ret = 0; 1961 ret = 0;
@@ -1953,7 +1988,8 @@ int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep)
1953 if (!cnic) 1988 if (!cnic)
1954 return 0; 1989 return 0;
1955 1990
1956 if (bnx2i_ep->state == EP_STATE_IDLE) 1991 if (bnx2i_ep->state == EP_STATE_IDLE ||
1992 bnx2i_ep->state == EP_STATE_DISCONN_TIMEDOUT)
1957 return 0; 1993 return 0;
1958 1994
1959 if (!bnx2i_ep_tcp_conn_active(bnx2i_ep)) 1995 if (!bnx2i_ep_tcp_conn_active(bnx2i_ep))
@@ -1979,9 +2015,10 @@ int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep)
1979 if (session->state == ISCSI_STATE_LOGGING_OUT) { 2015 if (session->state == ISCSI_STATE_LOGGING_OUT) {
1980 if (bnx2i_ep->state == EP_STATE_LOGOUT_SENT) { 2016 if (bnx2i_ep->state == EP_STATE_LOGOUT_SENT) {
1981 /* Logout sent, but no resp */ 2017 /* Logout sent, but no resp */
1982 printk(KERN_ALERT "bnx2i - WARNING " 2018 printk(KERN_ALERT "bnx2i (%s): WARNING"
1983 "logout response was not " 2019 " logout response was not "
1984 "received!\n"); 2020 "received!\n",
2021 bnx2i_ep->hba->netdev->name);
1985 } else if (bnx2i_ep->state == 2022 } else if (bnx2i_ep->state ==
1986 EP_STATE_LOGOUT_RESP_RCVD) 2023 EP_STATE_LOGOUT_RESP_RCVD)
1987 close = 1; 2024 close = 1;
@@ -1999,9 +2036,8 @@ int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep)
1999 else 2036 else
2000 close_ret = cnic->cm_abort(bnx2i_ep->cm_sk); 2037 close_ret = cnic->cm_abort(bnx2i_ep->cm_sk);
2001 2038
2002 /* No longer allow CFC delete if cm_close/abort fails the request */
2003 if (close_ret) 2039 if (close_ret)
2004 printk(KERN_ALERT "bnx2i: %s close/abort(%d) returned %d\n", 2040 printk(KERN_ALERT "bnx2i (%s): close/abort(%d) returned %d\n",
2005 bnx2i_ep->hba->netdev->name, close, close_ret); 2041 bnx2i_ep->hba->netdev->name, close, close_ret);
2006 else 2042 else
2007 /* wait for option-2 conn teardown */ 2043 /* wait for option-2 conn teardown */
@@ -2015,7 +2051,7 @@ int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep)
2015destroy_conn: 2051destroy_conn:
2016 bnx2i_ep_active_list_del(hba, bnx2i_ep); 2052 bnx2i_ep_active_list_del(hba, bnx2i_ep);
2017 if (bnx2i_tear_down_conn(hba, bnx2i_ep)) 2053 if (bnx2i_tear_down_conn(hba, bnx2i_ep))
2018 ret = -EINVAL; 2054 return -EINVAL;
2019out: 2055out:
2020 bnx2i_ep->state = EP_STATE_IDLE; 2056 bnx2i_ep->state = EP_STATE_IDLE;
2021 return ret; 2057 return ret;
@@ -2054,14 +2090,17 @@ static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep)
2054 2090
2055 mutex_lock(&hba->net_dev_lock); 2091 mutex_lock(&hba->net_dev_lock);
2056 2092
2057 if (bnx2i_ep->state == EP_STATE_IDLE) 2093 if (bnx2i_ep->state == EP_STATE_DISCONN_TIMEDOUT)
2058 goto return_bnx2i_ep; 2094 goto out;
2059 2095
2060 if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) 2096 if (bnx2i_ep->state == EP_STATE_IDLE)
2061 goto free_resc; 2097 goto free_resc;
2062 2098
2063 if (bnx2i_ep->hba_age != hba->age) 2099 if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) ||
2100 (bnx2i_ep->hba_age != hba->age)) {
2101 bnx2i_ep_active_list_del(hba, bnx2i_ep);
2064 goto free_resc; 2102 goto free_resc;
2103 }
2065 2104
2066 /* Do all chip cleanup here */ 2105 /* Do all chip cleanup here */
2067 if (bnx2i_hw_ep_disconnect(bnx2i_ep)) { 2106 if (bnx2i_hw_ep_disconnect(bnx2i_ep)) {
@@ -2070,14 +2109,13 @@ static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep)
2070 } 2109 }
2071free_resc: 2110free_resc:
2072 bnx2i_free_qp_resc(hba, bnx2i_ep); 2111 bnx2i_free_qp_resc(hba, bnx2i_ep);
2073return_bnx2i_ep: 2112
2074 if (bnx2i_conn) 2113 if (bnx2i_conn)
2075 bnx2i_conn->ep = NULL; 2114 bnx2i_conn->ep = NULL;
2076 2115
2077 bnx2i_free_ep(ep); 2116 bnx2i_free_ep(ep);
2117out:
2078 mutex_unlock(&hba->net_dev_lock); 2118 mutex_unlock(&hba->net_dev_lock);
2079 if (!hba->ofld_conns_active)
2080 bnx2i_unreg_dev_all();
2081 2119
2082 wake_up_interruptible(&hba->eh_wait); 2120 wake_up_interruptible(&hba->eh_wait);
2083} 2121}
diff --git a/drivers/scsi/bnx2i/bnx2i_sysfs.c b/drivers/scsi/bnx2i/bnx2i_sysfs.c
index 96426b751eb2..9174196d9033 100644
--- a/drivers/scsi/bnx2i/bnx2i_sysfs.c
+++ b/drivers/scsi/bnx2i/bnx2i_sysfs.c
@@ -1,12 +1,13 @@
1/* bnx2i_sysfs.c: Broadcom NetXtreme II iSCSI driver. 1/* bnx2i_sysfs.c: Broadcom NetXtreme II iSCSI driver.
2 * 2 *
3 * Copyright (c) 2004 - 2009 Broadcom Corporation 3 * Copyright (c) 2004 - 2010 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation. 7 * the Free Software Foundation.
8 * 8 *
9 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) 9 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
10 * Maintained by: Eddie Wai (eddie.wai@broadcom.com)
10 */ 11 */
11 12
12#include "bnx2i.h" 13#include "bnx2i.h"
diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c
index 6fae3d285ae7..b837c5b3c8f9 100644
--- a/drivers/scsi/device_handler/scsi_dh.c
+++ b/drivers/scsi/device_handler/scsi_dh.c
@@ -442,12 +442,19 @@ int scsi_dh_activate(struct request_queue *q, activate_complete fn, void *data)
442 sdev = q->queuedata; 442 sdev = q->queuedata;
443 if (sdev && sdev->scsi_dh_data) 443 if (sdev && sdev->scsi_dh_data)
444 scsi_dh = sdev->scsi_dh_data->scsi_dh; 444 scsi_dh = sdev->scsi_dh_data->scsi_dh;
445 if (!scsi_dh || !get_device(&sdev->sdev_gendev)) 445 if (!scsi_dh || !get_device(&sdev->sdev_gendev) ||
446 sdev->sdev_state == SDEV_CANCEL ||
447 sdev->sdev_state == SDEV_DEL)
446 err = SCSI_DH_NOSYS; 448 err = SCSI_DH_NOSYS;
449 if (sdev->sdev_state == SDEV_OFFLINE)
450 err = SCSI_DH_DEV_OFFLINED;
447 spin_unlock_irqrestore(q->queue_lock, flags); 451 spin_unlock_irqrestore(q->queue_lock, flags);
448 452
449 if (err) 453 if (err) {
454 if (fn)
455 fn(data, err);
450 return err; 456 return err;
457 }
451 458
452 if (scsi_dh->activate) 459 if (scsi_dh->activate)
453 err = scsi_dh->activate(sdev, fn, data); 460 err = scsi_dh->activate(sdev, fn, data);
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index d23a538a9dfc..9f9600b67001 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -854,7 +854,6 @@ static void fcoe_if_destroy(struct fc_lport *lport)
854 854
855 /* Cleanup the fc_lport */ 855 /* Cleanup the fc_lport */
856 fc_lport_destroy(lport); 856 fc_lport_destroy(lport);
857 fc_fcp_destroy(lport);
858 857
859 /* Stop the transmit retry timer */ 858 /* Stop the transmit retry timer */
860 del_timer_sync(&port->timer); 859 del_timer_sync(&port->timer);
@@ -876,6 +875,9 @@ static void fcoe_if_destroy(struct fc_lport *lport)
876 fc_remove_host(lport->host); 875 fc_remove_host(lport->host);
877 scsi_remove_host(lport->host); 876 scsi_remove_host(lport->host);
878 877
878 /* Destroy lport scsi_priv */
879 fc_fcp_destroy(lport);
880
879 /* There are no more rports or I/O, free the EM */ 881 /* There are no more rports or I/O, free the EM */
880 fc_exch_mgr_free(lport); 882 fc_exch_mgr_free(lport);
881 883
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
index bc17c7123202..625c6be25396 100644
--- a/drivers/scsi/fcoe/libfcoe.c
+++ b/drivers/scsi/fcoe/libfcoe.c
@@ -54,6 +54,7 @@ MODULE_LICENSE("GPL v2");
54static void fcoe_ctlr_timeout(unsigned long); 54static void fcoe_ctlr_timeout(unsigned long);
55static void fcoe_ctlr_timer_work(struct work_struct *); 55static void fcoe_ctlr_timer_work(struct work_struct *);
56static void fcoe_ctlr_recv_work(struct work_struct *); 56static void fcoe_ctlr_recv_work(struct work_struct *);
57static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *);
57 58
58static void fcoe_ctlr_vn_start(struct fcoe_ctlr *); 59static void fcoe_ctlr_vn_start(struct fcoe_ctlr *);
59static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *, struct sk_buff *); 60static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *, struct sk_buff *);
@@ -176,6 +177,7 @@ void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_state mode)
176 fip->mode = mode; 177 fip->mode = mode;
177 INIT_LIST_HEAD(&fip->fcfs); 178 INIT_LIST_HEAD(&fip->fcfs);
178 mutex_init(&fip->ctlr_mutex); 179 mutex_init(&fip->ctlr_mutex);
180 spin_lock_init(&fip->ctlr_lock);
179 fip->flogi_oxid = FC_XID_UNKNOWN; 181 fip->flogi_oxid = FC_XID_UNKNOWN;
180 setup_timer(&fip->timer, fcoe_ctlr_timeout, (unsigned long)fip); 182 setup_timer(&fip->timer, fcoe_ctlr_timeout, (unsigned long)fip);
181 INIT_WORK(&fip->timer_work, fcoe_ctlr_timer_work); 183 INIT_WORK(&fip->timer_work, fcoe_ctlr_timer_work);
@@ -231,6 +233,49 @@ void fcoe_ctlr_destroy(struct fcoe_ctlr *fip)
231EXPORT_SYMBOL(fcoe_ctlr_destroy); 233EXPORT_SYMBOL(fcoe_ctlr_destroy);
232 234
233/** 235/**
236 * fcoe_ctlr_announce() - announce new FCF selection
237 * @fip: The FCoE controller
238 *
239 * Also sets the destination MAC for FCoE and control packets
240 *
241 * Called with neither ctlr_mutex nor ctlr_lock held.
242 */
243static void fcoe_ctlr_announce(struct fcoe_ctlr *fip)
244{
245 struct fcoe_fcf *sel;
246 struct fcoe_fcf *fcf;
247
248 mutex_lock(&fip->ctlr_mutex);
249 spin_lock_bh(&fip->ctlr_lock);
250
251 kfree_skb(fip->flogi_req);
252 fip->flogi_req = NULL;
253 list_for_each_entry(fcf, &fip->fcfs, list)
254 fcf->flogi_sent = 0;
255
256 spin_unlock_bh(&fip->ctlr_lock);
257 sel = fip->sel_fcf;
258
259 if (sel && !compare_ether_addr(sel->fcf_mac, fip->dest_addr))
260 goto unlock;
261 if (!is_zero_ether_addr(fip->dest_addr)) {
262 printk(KERN_NOTICE "libfcoe: host%d: "
263 "FIP Fibre-Channel Forwarder MAC %pM deselected\n",
264 fip->lp->host->host_no, fip->dest_addr);
265 memset(fip->dest_addr, 0, ETH_ALEN);
266 }
267 if (sel) {
268 printk(KERN_INFO "libfcoe: host%d: FIP selected "
269 "Fibre-Channel Forwarder MAC %pM\n",
270 fip->lp->host->host_no, sel->fcf_mac);
271 memcpy(fip->dest_addr, sel->fcf_mac, ETH_ALEN);
272 fip->map_dest = 0;
273 }
274unlock:
275 mutex_unlock(&fip->ctlr_mutex);
276}
277
278/**
234 * fcoe_ctlr_fcoe_size() - Return the maximum FCoE size required for VN_Port 279 * fcoe_ctlr_fcoe_size() - Return the maximum FCoE size required for VN_Port
235 * @fip: The FCoE controller to get the maximum FCoE size from 280 * @fip: The FCoE controller to get the maximum FCoE size from
236 * 281 *
@@ -564,6 +609,9 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, struct fc_lport *lport,
564 * The caller must check that the length is a multiple of 4. 609 * The caller must check that the length is a multiple of 4.
565 * The SKB must have enough headroom (28 bytes) and tailroom (8 bytes). 610 * The SKB must have enough headroom (28 bytes) and tailroom (8 bytes).
566 * The the skb must also be an fc_frame. 611 * The the skb must also be an fc_frame.
612 *
613 * This is called from the lower-level driver with spinlocks held,
614 * so we must not take a mutex here.
567 */ 615 */
568int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport, 616int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
569 struct sk_buff *skb) 617 struct sk_buff *skb)
@@ -601,7 +649,15 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
601 switch (op) { 649 switch (op) {
602 case ELS_FLOGI: 650 case ELS_FLOGI:
603 op = FIP_DT_FLOGI; 651 op = FIP_DT_FLOGI;
604 break; 652 if (fip->mode == FIP_MODE_VN2VN)
653 break;
654 spin_lock_bh(&fip->ctlr_lock);
655 kfree_skb(fip->flogi_req);
656 fip->flogi_req = skb;
657 fip->flogi_req_send = 1;
658 spin_unlock_bh(&fip->ctlr_lock);
659 schedule_work(&fip->timer_work);
660 return -EINPROGRESS;
605 case ELS_FDISC: 661 case ELS_FDISC:
606 if (ntoh24(fh->fh_s_id)) 662 if (ntoh24(fh->fh_s_id))
607 return 0; 663 return 0;
@@ -922,11 +978,9 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
922 } 978 }
923 mtu_valid = fcoe_ctlr_mtu_valid(fcf); 979 mtu_valid = fcoe_ctlr_mtu_valid(fcf);
924 fcf->time = jiffies; 980 fcf->time = jiffies;
925 if (!found) { 981 if (!found)
926 LIBFCOE_FIP_DBG(fip, "New FCF for fab %16.16llx " 982 LIBFCOE_FIP_DBG(fip, "New FCF fab %16.16llx mac %pM\n",
927 "map %x val %d\n", 983 fcf->fabric_name, fcf->fcf_mac);
928 fcf->fabric_name, fcf->fc_map, mtu_valid);
929 }
930 984
931 /* 985 /*
932 * If this advertisement is not solicited and our max receive size 986 * If this advertisement is not solicited and our max receive size
@@ -945,6 +999,17 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
945 fcoe_ctlr_solicit(fip, NULL); 999 fcoe_ctlr_solicit(fip, NULL);
946 1000
947 /* 1001 /*
1002 * Put this FCF at the head of the list for priority among equals.
1003 * This helps in the case of an NPV switch which insists we use
1004 * the FCF that answers multicast solicitations, not the others that
1005 * are sending periodic multicast advertisements.
1006 */
1007 if (mtu_valid) {
1008 list_del(&fcf->list);
1009 list_add(&fcf->list, &fip->fcfs);
1010 }
1011
1012 /*
948 * If this is the first validated FCF, note the time and 1013 * If this is the first validated FCF, note the time and
949 * set a timer to trigger selection. 1014 * set a timer to trigger selection.
950 */ 1015 */
@@ -1061,18 +1126,24 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
1061 els_op = *(u8 *)(fh + 1); 1126 els_op = *(u8 *)(fh + 1);
1062 1127
1063 if ((els_dtype == FIP_DT_FLOGI || els_dtype == FIP_DT_FDISC) && 1128 if ((els_dtype == FIP_DT_FLOGI || els_dtype == FIP_DT_FDISC) &&
1064 sub == FIP_SC_REP && els_op == ELS_LS_ACC && 1129 sub == FIP_SC_REP && fip->mode != FIP_MODE_VN2VN) {
1065 fip->mode != FIP_MODE_VN2VN) { 1130 if (els_op == ELS_LS_ACC) {
1066 if (!is_valid_ether_addr(granted_mac)) { 1131 if (!is_valid_ether_addr(granted_mac)) {
1067 LIBFCOE_FIP_DBG(fip, 1132 LIBFCOE_FIP_DBG(fip,
1068 "Invalid MAC address %pM in FIP ELS\n", 1133 "Invalid MAC address %pM in FIP ELS\n",
1069 granted_mac); 1134 granted_mac);
1070 goto drop; 1135 goto drop;
1071 } 1136 }
1072 memcpy(fr_cb(fp)->granted_mac, granted_mac, ETH_ALEN); 1137 memcpy(fr_cb(fp)->granted_mac, granted_mac, ETH_ALEN);
1073 1138
1074 if (fip->flogi_oxid == ntohs(fh->fh_ox_id)) 1139 if (fip->flogi_oxid == ntohs(fh->fh_ox_id)) {
1075 fip->flogi_oxid = FC_XID_UNKNOWN; 1140 fip->flogi_oxid = FC_XID_UNKNOWN;
1141 if (els_dtype == FIP_DT_FLOGI)
1142 fcoe_ctlr_announce(fip);
1143 }
1144 } else if (els_dtype == FIP_DT_FLOGI &&
1145 !fcoe_ctlr_flogi_retry(fip))
1146 goto drop; /* retrying FLOGI so drop reject */
1076 } 1147 }
1077 1148
1078 if ((desc_cnt == 0) || ((els_op != ELS_LS_RJT) && 1149 if ((desc_cnt == 0) || ((els_op != ELS_LS_RJT) &&
@@ -1326,20 +1397,39 @@ drop:
1326 * fcoe_ctlr_select() - Select the best FCF (if possible) 1397 * fcoe_ctlr_select() - Select the best FCF (if possible)
1327 * @fip: The FCoE controller 1398 * @fip: The FCoE controller
1328 * 1399 *
1400 * Returns the selected FCF, or NULL if none are usable.
1401 *
1329 * If there are conflicting advertisements, no FCF can be chosen. 1402 * If there are conflicting advertisements, no FCF can be chosen.
1330 * 1403 *
1404 * If there is already a selected FCF, this will choose a better one or
1405 * an equivalent one that hasn't already been sent a FLOGI.
1406 *
1331 * Called with lock held. 1407 * Called with lock held.
1332 */ 1408 */
1333static void fcoe_ctlr_select(struct fcoe_ctlr *fip) 1409static struct fcoe_fcf *fcoe_ctlr_select(struct fcoe_ctlr *fip)
1334{ 1410{
1335 struct fcoe_fcf *fcf; 1411 struct fcoe_fcf *fcf;
1336 struct fcoe_fcf *best = NULL; 1412 struct fcoe_fcf *best = fip->sel_fcf;
1413 struct fcoe_fcf *first;
1414
1415 first = list_first_entry(&fip->fcfs, struct fcoe_fcf, list);
1337 1416
1338 list_for_each_entry(fcf, &fip->fcfs, list) { 1417 list_for_each_entry(fcf, &fip->fcfs, list) {
1339 LIBFCOE_FIP_DBG(fip, "consider FCF for fab %16.16llx " 1418 LIBFCOE_FIP_DBG(fip, "consider FCF fab %16.16llx "
1340 "VFID %d map %x val %d\n", 1419 "VFID %d mac %pM map %x val %d "
1341 fcf->fabric_name, fcf->vfid, 1420 "sent %u pri %u\n",
1342 fcf->fc_map, fcoe_ctlr_mtu_valid(fcf)); 1421 fcf->fabric_name, fcf->vfid, fcf->fcf_mac,
1422 fcf->fc_map, fcoe_ctlr_mtu_valid(fcf),
1423 fcf->flogi_sent, fcf->pri);
1424 if (fcf->fabric_name != first->fabric_name ||
1425 fcf->vfid != first->vfid ||
1426 fcf->fc_map != first->fc_map) {
1427 LIBFCOE_FIP_DBG(fip, "Conflicting fabric, VFID, "
1428 "or FC-MAP\n");
1429 return NULL;
1430 }
1431 if (fcf->flogi_sent)
1432 continue;
1343 if (!fcoe_ctlr_fcf_usable(fcf)) { 1433 if (!fcoe_ctlr_fcf_usable(fcf)) {
1344 LIBFCOE_FIP_DBG(fip, "FCF for fab %16.16llx " 1434 LIBFCOE_FIP_DBG(fip, "FCF for fab %16.16llx "
1345 "map %x %svalid %savailable\n", 1435 "map %x %svalid %savailable\n",
@@ -1349,21 +1439,131 @@ static void fcoe_ctlr_select(struct fcoe_ctlr *fip)
1349 "" : "un"); 1439 "" : "un");
1350 continue; 1440 continue;
1351 } 1441 }
1352 if (!best) { 1442 if (!best || fcf->pri < best->pri || best->flogi_sent)
1353 best = fcf;
1354 continue;
1355 }
1356 if (fcf->fabric_name != best->fabric_name ||
1357 fcf->vfid != best->vfid ||
1358 fcf->fc_map != best->fc_map) {
1359 LIBFCOE_FIP_DBG(fip, "Conflicting fabric, VFID, "
1360 "or FC-MAP\n");
1361 return;
1362 }
1363 if (fcf->pri < best->pri)
1364 best = fcf; 1443 best = fcf;
1365 } 1444 }
1366 fip->sel_fcf = best; 1445 fip->sel_fcf = best;
1446 if (best) {
1447 LIBFCOE_FIP_DBG(fip, "using FCF mac %pM\n", best->fcf_mac);
1448 fip->port_ka_time = jiffies +
1449 msecs_to_jiffies(FIP_VN_KA_PERIOD);
1450 fip->ctlr_ka_time = jiffies + best->fka_period;
1451 if (time_before(fip->ctlr_ka_time, fip->timer.expires))
1452 mod_timer(&fip->timer, fip->ctlr_ka_time);
1453 }
1454 return best;
1455}
1456
1457/**
1458 * fcoe_ctlr_flogi_send_locked() - send FIP-encapsulated FLOGI to current FCF
1459 * @fip: The FCoE controller
1460 *
1461 * Returns non-zero error if it could not be sent.
1462 *
1463 * Called with ctlr_mutex and ctlr_lock held.
1464 * Caller must verify that fip->sel_fcf is not NULL.
1465 */
1466static int fcoe_ctlr_flogi_send_locked(struct fcoe_ctlr *fip)
1467{
1468 struct sk_buff *skb;
1469 struct sk_buff *skb_orig;
1470 struct fc_frame_header *fh;
1471 int error;
1472
1473 skb_orig = fip->flogi_req;
1474 if (!skb_orig)
1475 return -EINVAL;
1476
1477 /*
1478 * Clone and send the FLOGI request. If clone fails, use original.
1479 */
1480 skb = skb_clone(skb_orig, GFP_ATOMIC);
1481 if (!skb) {
1482 skb = skb_orig;
1483 fip->flogi_req = NULL;
1484 }
1485 fh = (struct fc_frame_header *)skb->data;
1486 error = fcoe_ctlr_encaps(fip, fip->lp, FIP_DT_FLOGI, skb,
1487 ntoh24(fh->fh_d_id));
1488 if (error) {
1489 kfree_skb(skb);
1490 return error;
1491 }
1492 fip->send(fip, skb);
1493 fip->sel_fcf->flogi_sent = 1;
1494 return 0;
1495}
1496
1497/**
1498 * fcoe_ctlr_flogi_retry() - resend FLOGI request to a new FCF if possible
1499 * @fip: The FCoE controller
1500 *
1501 * Returns non-zero error code if there's no FLOGI request to retry or
1502 * no alternate FCF available.
1503 */
1504static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip)
1505{
1506 struct fcoe_fcf *fcf;
1507 int error;
1508
1509 mutex_lock(&fip->ctlr_mutex);
1510 spin_lock_bh(&fip->ctlr_lock);
1511 LIBFCOE_FIP_DBG(fip, "re-sending FLOGI - reselect\n");
1512 fcf = fcoe_ctlr_select(fip);
1513 if (!fcf || fcf->flogi_sent) {
1514 kfree_skb(fip->flogi_req);
1515 fip->flogi_req = NULL;
1516 error = -ENOENT;
1517 } else {
1518 fcoe_ctlr_solicit(fip, NULL);
1519 error = fcoe_ctlr_flogi_send_locked(fip);
1520 }
1521 spin_unlock_bh(&fip->ctlr_lock);
1522 mutex_unlock(&fip->ctlr_mutex);
1523 return error;
1524}
1525
1526
1527/**
1528 * fcoe_ctlr_flogi_send() - Handle sending of FIP FLOGI.
1529 * @fip: The FCoE controller that timed out
1530 *
1531 * Done here because fcoe_ctlr_els_send() can't get mutex.
1532 *
1533 * Called with ctlr_mutex held. The caller must not hold ctlr_lock.
1534 */
1535static void fcoe_ctlr_flogi_send(struct fcoe_ctlr *fip)
1536{
1537 struct fcoe_fcf *fcf;
1538
1539 spin_lock_bh(&fip->ctlr_lock);
1540 fcf = fip->sel_fcf;
1541 if (!fcf || !fip->flogi_req_send)
1542 goto unlock;
1543
1544 LIBFCOE_FIP_DBG(fip, "sending FLOGI\n");
1545
1546 /*
1547 * If this FLOGI is being sent due to a timeout retry
1548 * to the same FCF as before, select a different FCF if possible.
1549 */
1550 if (fcf->flogi_sent) {
1551 LIBFCOE_FIP_DBG(fip, "sending FLOGI - reselect\n");
1552 fcf = fcoe_ctlr_select(fip);
1553 if (!fcf || fcf->flogi_sent) {
1554 LIBFCOE_FIP_DBG(fip, "sending FLOGI - clearing\n");
1555 list_for_each_entry(fcf, &fip->fcfs, list)
1556 fcf->flogi_sent = 0;
1557 fcf = fcoe_ctlr_select(fip);
1558 }
1559 }
1560 if (fcf) {
1561 fcoe_ctlr_flogi_send_locked(fip);
1562 fip->flogi_req_send = 0;
1563 } else /* XXX */
1564 LIBFCOE_FIP_DBG(fip, "No FCF selected - defer send\n");
1565unlock:
1566 spin_unlock_bh(&fip->ctlr_lock);
1367} 1567}
1368 1568
1369/** 1569/**
@@ -1411,34 +1611,16 @@ static void fcoe_ctlr_timer_work(struct work_struct *work)
1411 sel = fip->sel_fcf; 1611 sel = fip->sel_fcf;
1412 if (!sel && fip->sel_time) { 1612 if (!sel && fip->sel_time) {
1413 if (time_after_eq(jiffies, fip->sel_time)) { 1613 if (time_after_eq(jiffies, fip->sel_time)) {
1414 fcoe_ctlr_select(fip); 1614 sel = fcoe_ctlr_select(fip);
1415 sel = fip->sel_fcf;
1416 fip->sel_time = 0; 1615 fip->sel_time = 0;
1417 } else if (time_after(next_timer, fip->sel_time)) 1616 } else if (time_after(next_timer, fip->sel_time))
1418 next_timer = fip->sel_time; 1617 next_timer = fip->sel_time;
1419 } 1618 }
1420 1619
1421 if (sel != fcf) { 1620 if (sel && fip->flogi_req_send)
1422 fcf = sel; /* the old FCF may have been freed */ 1621 fcoe_ctlr_flogi_send(fip);
1423 if (sel) { 1622 else if (!sel && fcf)
1424 printk(KERN_INFO "libfcoe: host%d: FIP selected " 1623 reset = 1;
1425 "Fibre-Channel Forwarder MAC %pM\n",
1426 fip->lp->host->host_no, sel->fcf_mac);
1427 memcpy(fip->dest_addr, sel->fcf_mac, ETH_ALEN);
1428 fip->map_dest = 0;
1429 fip->port_ka_time = jiffies +
1430 msecs_to_jiffies(FIP_VN_KA_PERIOD);
1431 fip->ctlr_ka_time = jiffies + sel->fka_period;
1432 if (time_after(next_timer, fip->ctlr_ka_time))
1433 next_timer = fip->ctlr_ka_time;
1434 } else {
1435 printk(KERN_NOTICE "libfcoe: host%d: "
1436 "FIP Fibre-Channel Forwarder timed out. "
1437 "Starting FCF discovery.\n",
1438 fip->lp->host->host_no);
1439 reset = 1;
1440 }
1441 }
1442 1624
1443 if (sel && !sel->fd_flags) { 1625 if (sel && !sel->fd_flags) {
1444 if (time_after_eq(jiffies, fip->ctlr_ka_time)) { 1626 if (time_after_eq(jiffies, fip->ctlr_ka_time)) {
@@ -2475,7 +2657,7 @@ static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *fip)
2475 case FIP_ST_LINK_WAIT: 2657 case FIP_ST_LINK_WAIT:
2476 goto unlock; 2658 goto unlock;
2477 default: 2659 default:
2478 WARN(1, "unexpected state %d", fip->state); 2660 WARN(1, "unexpected state %d\n", fip->state);
2479 goto unlock; 2661 goto unlock;
2480 } 2662 }
2481 mod_timer(&fip->timer, next_time); 2663 mod_timer(&fip->timer, next_time);
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 76365700e2d5..3242bcabad97 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -4273,8 +4273,10 @@ static int ioc_general(void __user *arg, char *cmnd)
4273 } 4273 }
4274 4274
4275 rval = __gdth_execute(ha->sdev, &gen.command, cmnd, gen.timeout, &gen.info); 4275 rval = __gdth_execute(ha->sdev, &gen.command, cmnd, gen.timeout, &gen.info);
4276 if (rval < 0) 4276 if (rval < 0) {
4277 gdth_ioctl_free(ha, gen.data_len+gen.sense_len, buf, paddr);
4277 return rval; 4278 return rval;
4279 }
4278 gen.status = rval; 4280 gen.status = rval;
4279 4281
4280 if (copy_to_user(arg + sizeof(gdth_ioctl_general), buf, 4282 if (copy_to_user(arg + sizeof(gdth_ioctl_general), buf,
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
index 0572b9bf4bd6..652754319a4b 100644
--- a/drivers/scsi/gdth_proc.c
+++ b/drivers/scsi/gdth_proc.c
@@ -365,8 +365,10 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
365 len = 0; 365 len = 0;
366 begin = pos; 366 begin = pos;
367 } 367 }
368 if (pos > offset + length) 368 if (pos > offset + length) {
369 gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);
369 goto stop_output; 370 goto stop_output;
371 }
370 } 372 }
371 } 373 }
372 gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr); 374 gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);
@@ -450,8 +452,10 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
450 len = 0; 452 len = 0;
451 begin = pos; 453 begin = pos;
452 } 454 }
453 if (pos > offset + length) 455 if (pos > offset + length) {
456 gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);
454 goto stop_output; 457 goto stop_output;
458 }
455 } while (drv_no != -1); 459 } while (drv_no != -1);
456 460
457 if (is_mirr) { 461 if (is_mirr) {
@@ -472,8 +476,10 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
472 len = 0; 476 len = 0;
473 begin = pos; 477 begin = pos;
474 } 478 }
475 if (pos > offset + length) 479 if (pos > offset + length) {
480 gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);
476 goto stop_output; 481 goto stop_output;
482 }
477 } 483 }
478 gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr); 484 gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);
479 485
@@ -542,8 +548,10 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
542 len = 0; 548 len = 0;
543 begin = pos; 549 begin = pos;
544 } 550 }
545 if (pos > offset + length) 551 if (pos > offset + length) {
552 gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);
546 goto stop_output; 553 goto stop_output;
554 }
547 } 555 }
548 } 556 }
549 gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr); 557 gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index a6dea08664fc..12deffccb8da 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -641,11 +641,6 @@ static void fixup_botched_add(struct ctlr_info *h,
641static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1, 641static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
642 struct hpsa_scsi_dev_t *dev2) 642 struct hpsa_scsi_dev_t *dev2)
643{ 643{
644 if ((is_logical_dev_addr_mode(dev1->scsi3addr) ||
645 (dev1->lun != -1 && dev2->lun != -1)) &&
646 dev1->devtype != 0x0C)
647 return (memcmp(dev1, dev2, sizeof(*dev1)) == 0);
648
649 /* we compare everything except lun and target as these 644 /* we compare everything except lun and target as these
650 * are not yet assigned. Compare parts likely 645 * are not yet assigned. Compare parts likely
651 * to differ first 646 * to differ first
@@ -660,12 +655,8 @@ static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
660 return 0; 655 return 0;
661 if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0) 656 if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
662 return 0; 657 return 0;
663 if (memcmp(dev1->revision, dev2->revision, sizeof(dev1->revision)) != 0)
664 return 0;
665 if (dev1->devtype != dev2->devtype) 658 if (dev1->devtype != dev2->devtype)
666 return 0; 659 return 0;
667 if (dev1->raid_level != dev2->raid_level)
668 return 0;
669 if (dev1->bus != dev2->bus) 660 if (dev1->bus != dev2->bus)
670 return 0; 661 return 0;
671 return 1; 662 return 1;
@@ -1477,8 +1468,6 @@ static int hpsa_update_device_info(struct ctlr_info *h,
1477 sizeof(this_device->vendor)); 1468 sizeof(this_device->vendor));
1478 memcpy(this_device->model, &inq_buff[16], 1469 memcpy(this_device->model, &inq_buff[16],
1479 sizeof(this_device->model)); 1470 sizeof(this_device->model));
1480 memcpy(this_device->revision, &inq_buff[32],
1481 sizeof(this_device->revision));
1482 memset(this_device->device_id, 0, 1471 memset(this_device->device_id, 0,
1483 sizeof(this_device->device_id)); 1472 sizeof(this_device->device_id));
1484 hpsa_get_device_id(h, scsi3addr, this_device->device_id, 1473 hpsa_get_device_id(h, scsi3addr, this_device->device_id,
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index a203ef65cb50..19586e189f0f 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -45,7 +45,6 @@ struct hpsa_scsi_dev_t {
45 unsigned char device_id[16]; /* from inquiry pg. 0x83 */ 45 unsigned char device_id[16]; /* from inquiry pg. 0x83 */
46 unsigned char vendor[8]; /* bytes 8-15 of inquiry data */ 46 unsigned char vendor[8]; /* bytes 8-15 of inquiry data */
47 unsigned char model[16]; /* bytes 16-31 of inquiry data */ 47 unsigned char model[16]; /* bytes 16-31 of inquiry data */
48 unsigned char revision[4]; /* bytes 32-35 of inquiry data */
49 unsigned char raid_level; /* from inquiry page 0xC1 */ 48 unsigned char raid_level; /* from inquiry page 0xC1 */
50}; 49};
51 50
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 57cad7e20caa..b7650613b8c2 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -2493,23 +2493,23 @@ static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
2493} 2493}
2494 2494
2495static const struct ibmvfc_async_desc ae_desc [] = { 2495static const struct ibmvfc_async_desc ae_desc [] = {
2496 { IBMVFC_AE_ELS_PLOGI, "PLOGI", IBMVFC_DEFAULT_LOG_LEVEL + 1 }, 2496 { "PLOGI", IBMVFC_AE_ELS_PLOGI, IBMVFC_DEFAULT_LOG_LEVEL + 1 },
2497 { IBMVFC_AE_ELS_LOGO, "LOGO", IBMVFC_DEFAULT_LOG_LEVEL + 1 }, 2497 { "LOGO", IBMVFC_AE_ELS_LOGO, IBMVFC_DEFAULT_LOG_LEVEL + 1 },
2498 { IBMVFC_AE_ELS_PRLO, "PRLO", IBMVFC_DEFAULT_LOG_LEVEL + 1 }, 2498 { "PRLO", IBMVFC_AE_ELS_PRLO, IBMVFC_DEFAULT_LOG_LEVEL + 1 },
2499 { IBMVFC_AE_SCN_NPORT, "N-Port SCN", IBMVFC_DEFAULT_LOG_LEVEL + 1 }, 2499 { "N-Port SCN", IBMVFC_AE_SCN_NPORT, IBMVFC_DEFAULT_LOG_LEVEL + 1 },
2500 { IBMVFC_AE_SCN_GROUP, "Group SCN", IBMVFC_DEFAULT_LOG_LEVEL + 1 }, 2500 { "Group SCN", IBMVFC_AE_SCN_GROUP, IBMVFC_DEFAULT_LOG_LEVEL + 1 },
2501 { IBMVFC_AE_SCN_DOMAIN, "Domain SCN", IBMVFC_DEFAULT_LOG_LEVEL }, 2501 { "Domain SCN", IBMVFC_AE_SCN_DOMAIN, IBMVFC_DEFAULT_LOG_LEVEL },
2502 { IBMVFC_AE_SCN_FABRIC, "Fabric SCN", IBMVFC_DEFAULT_LOG_LEVEL }, 2502 { "Fabric SCN", IBMVFC_AE_SCN_FABRIC, IBMVFC_DEFAULT_LOG_LEVEL },
2503 { IBMVFC_AE_LINK_UP, "Link Up", IBMVFC_DEFAULT_LOG_LEVEL }, 2503 { "Link Up", IBMVFC_AE_LINK_UP, IBMVFC_DEFAULT_LOG_LEVEL },
2504 { IBMVFC_AE_LINK_DOWN, "Link Down", IBMVFC_DEFAULT_LOG_LEVEL }, 2504 { "Link Down", IBMVFC_AE_LINK_DOWN, IBMVFC_DEFAULT_LOG_LEVEL },
2505 { IBMVFC_AE_LINK_DEAD, "Link Dead", IBMVFC_DEFAULT_LOG_LEVEL }, 2505 { "Link Dead", IBMVFC_AE_LINK_DEAD, IBMVFC_DEFAULT_LOG_LEVEL },
2506 { IBMVFC_AE_HALT, "Halt", IBMVFC_DEFAULT_LOG_LEVEL }, 2506 { "Halt", IBMVFC_AE_HALT, IBMVFC_DEFAULT_LOG_LEVEL },
2507 { IBMVFC_AE_RESUME, "Resume", IBMVFC_DEFAULT_LOG_LEVEL }, 2507 { "Resume", IBMVFC_AE_RESUME, IBMVFC_DEFAULT_LOG_LEVEL },
2508 { IBMVFC_AE_ADAPTER_FAILED, "Adapter Failed", IBMVFC_DEFAULT_LOG_LEVEL }, 2508 { "Adapter Failed", IBMVFC_AE_ADAPTER_FAILED, IBMVFC_DEFAULT_LOG_LEVEL },
2509}; 2509};
2510 2510
2511static const struct ibmvfc_async_desc unknown_ae = { 2511static const struct ibmvfc_async_desc unknown_ae = {
2512 0, "Unknown async", IBMVFC_DEFAULT_LOG_LEVEL 2512 "Unknown async", 0, IBMVFC_DEFAULT_LOG_LEVEL
2513}; 2513};
2514 2514
2515/** 2515/**
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index ef663e7c9bbc..834c37fc7ce9 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -542,8 +542,8 @@ enum ibmvfc_async_event {
542}; 542};
543 543
544struct ibmvfc_async_desc { 544struct ibmvfc_async_desc {
545 enum ibmvfc_async_event ae;
546 const char *desc; 545 const char *desc;
546 enum ibmvfc_async_event ae;
547 int log_level; 547 int log_level;
548}; 548};
549 549
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 5bbaee597e88..de2e09e49a3e 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -146,7 +146,7 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
146 } 146 }
147 }, 147 },
148 { /* CRoC */ 148 { /* CRoC */
149 .mailbox = 0x00040, 149 .mailbox = 0x00044,
150 .cache_line_size = 0x20, 150 .cache_line_size = 0x20,
151 { 151 {
152 .set_interrupt_mask_reg = 0x00010, 152 .set_interrupt_mask_reg = 0x00010,
@@ -1048,6 +1048,8 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res,
1048 sizeof(res->res_path)); 1048 sizeof(res->res_path));
1049 1049
1050 res->bus = 0; 1050 res->bus = 0;
1051 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1052 sizeof(res->dev_lun.scsi_lun));
1051 res->lun = scsilun_to_int(&res->dev_lun); 1053 res->lun = scsilun_to_int(&res->dev_lun);
1052 1054
1053 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) { 1055 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
@@ -1063,9 +1065,6 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res,
1063 ioa_cfg->max_devs_supported); 1065 ioa_cfg->max_devs_supported);
1064 set_bit(res->target, ioa_cfg->target_ids); 1066 set_bit(res->target, ioa_cfg->target_ids);
1065 } 1067 }
1066
1067 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1068 sizeof(res->dev_lun.scsi_lun));
1069 } else if (res->type == IPR_RES_TYPE_IOAFP) { 1068 } else if (res->type == IPR_RES_TYPE_IOAFP) {
1070 res->bus = IPR_IOAFP_VIRTUAL_BUS; 1069 res->bus = IPR_IOAFP_VIRTUAL_BUS;
1071 res->target = 0; 1070 res->target = 0;
@@ -1116,7 +1115,7 @@ static int ipr_is_same_device(struct ipr_resource_entry *res,
1116 if (res->ioa_cfg->sis64) { 1115 if (res->ioa_cfg->sis64) {
1117 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id, 1116 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1118 sizeof(cfgtew->u.cfgte64->dev_id)) && 1117 sizeof(cfgtew->u.cfgte64->dev_id)) &&
1119 !memcmp(&res->lun, &cfgtew->u.cfgte64->lun, 1118 !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1120 sizeof(cfgtew->u.cfgte64->lun))) { 1119 sizeof(cfgtew->u.cfgte64->lun))) {
1121 return 1; 1120 return 1;
1122 } 1121 }
@@ -2901,6 +2900,12 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2901 return; 2900 return;
2902 } 2901 }
2903 2902
2903 if (ioa_cfg->sis64) {
2904 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2905 ssleep(IPR_DUMP_DELAY_SECONDS);
2906 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2907 }
2908
2904 start_addr = readl(ioa_cfg->ioa_mailbox); 2909 start_addr = readl(ioa_cfg->ioa_mailbox);
2905 2910
2906 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) { 2911 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
@@ -7473,6 +7478,29 @@ static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
7473} 7478}
7474 7479
7475/** 7480/**
7481 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
7482 * @ipr_cmd: ipr command struct
7483 *
7484 * Description: This function will call to get the unit check buffer.
7485 *
7486 * Return value:
7487 * IPR_RC_JOB_RETURN
7488 **/
7489static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
7490{
7491 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7492
7493 ENTER;
7494 ioa_cfg->ioa_unit_checked = 0;
7495 ipr_get_unit_check_buffer(ioa_cfg);
7496 ipr_cmd->job_step = ipr_reset_alert;
7497 ipr_reset_start_timer(ipr_cmd, 0);
7498
7499 LEAVE;
7500 return IPR_RC_JOB_RETURN;
7501}
7502
7503/**
7476 * ipr_reset_restore_cfg_space - Restore PCI config space. 7504 * ipr_reset_restore_cfg_space - Restore PCI config space.
7477 * @ipr_cmd: ipr command struct 7505 * @ipr_cmd: ipr command struct
7478 * 7506 *
@@ -7512,11 +7540,17 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
7512 } 7540 }
7513 7541
7514 if (ioa_cfg->ioa_unit_checked) { 7542 if (ioa_cfg->ioa_unit_checked) {
7515 ioa_cfg->ioa_unit_checked = 0; 7543 if (ioa_cfg->sis64) {
7516 ipr_get_unit_check_buffer(ioa_cfg); 7544 ipr_cmd->job_step = ipr_reset_get_unit_check_job;
7517 ipr_cmd->job_step = ipr_reset_alert; 7545 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
7518 ipr_reset_start_timer(ipr_cmd, 0); 7546 return IPR_RC_JOB_RETURN;
7519 return IPR_RC_JOB_RETURN; 7547 } else {
7548 ioa_cfg->ioa_unit_checked = 0;
7549 ipr_get_unit_check_buffer(ioa_cfg);
7550 ipr_cmd->job_step = ipr_reset_alert;
7551 ipr_reset_start_timer(ipr_cmd, 0);
7552 return IPR_RC_JOB_RETURN;
7553 }
7520 } 7554 }
7521 7555
7522 if (ioa_cfg->in_ioa_bringdown) { 7556 if (ioa_cfg->in_ioa_bringdown) {
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index b28a00f1082c..13f425fb8851 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -218,6 +218,8 @@
218#define IPR_WAIT_FOR_BIST_TIMEOUT (2 * HZ) 218#define IPR_WAIT_FOR_BIST_TIMEOUT (2 * HZ)
219#define IPR_PCI_RESET_TIMEOUT (HZ / 2) 219#define IPR_PCI_RESET_TIMEOUT (HZ / 2)
220#define IPR_DUMP_TIMEOUT (15 * HZ) 220#define IPR_DUMP_TIMEOUT (15 * HZ)
221#define IPR_DUMP_DELAY_SECONDS 4
222#define IPR_DUMP_DELAY_TIMEOUT (IPR_DUMP_DELAY_SECONDS * HZ)
221 223
222/* 224/*
223 * SCSI Literals 225 * SCSI Literals
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index ec2a1aec2350..d21367d3305f 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -67,6 +67,11 @@ struct workqueue_struct *fc_exch_workqueue;
67struct fc_exch_pool { 67struct fc_exch_pool {
68 u16 next_index; 68 u16 next_index;
69 u16 total_exches; 69 u16 total_exches;
70
71 /* two cache of free slot in exch array */
72 u16 left;
73 u16 right;
74
70 spinlock_t lock; 75 spinlock_t lock;
71 struct list_head ex_list; 76 struct list_head ex_list;
72}; 77};
@@ -108,7 +113,6 @@ struct fc_exch_mgr {
108 atomic_t non_bls_resp; 113 atomic_t non_bls_resp;
109 } stats; 114 } stats;
110}; 115};
111#define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
112 116
113/** 117/**
114 * struct fc_exch_mgr_anchor - primary structure for list of EMs 118 * struct fc_exch_mgr_anchor - primary structure for list of EMs
@@ -397,13 +401,23 @@ static inline void fc_exch_ptr_set(struct fc_exch_pool *pool, u16 index,
397static void fc_exch_delete(struct fc_exch *ep) 401static void fc_exch_delete(struct fc_exch *ep)
398{ 402{
399 struct fc_exch_pool *pool; 403 struct fc_exch_pool *pool;
404 u16 index;
400 405
401 pool = ep->pool; 406 pool = ep->pool;
402 spin_lock_bh(&pool->lock); 407 spin_lock_bh(&pool->lock);
403 WARN_ON(pool->total_exches <= 0); 408 WARN_ON(pool->total_exches <= 0);
404 pool->total_exches--; 409 pool->total_exches--;
405 fc_exch_ptr_set(pool, (ep->xid - ep->em->min_xid) >> fc_cpu_order, 410
406 NULL); 411 /* update cache of free slot */
412 index = (ep->xid - ep->em->min_xid) >> fc_cpu_order;
413 if (pool->left == FC_XID_UNKNOWN)
414 pool->left = index;
415 else if (pool->right == FC_XID_UNKNOWN)
416 pool->right = index;
417 else
418 pool->next_index = index;
419
420 fc_exch_ptr_set(pool, index, NULL);
407 list_del(&ep->ex_list); 421 list_del(&ep->ex_list);
408 spin_unlock_bh(&pool->lock); 422 spin_unlock_bh(&pool->lock);
409 fc_exch_release(ep); /* drop hold for exch in mp */ 423 fc_exch_release(ep); /* drop hold for exch in mp */
@@ -636,10 +650,13 @@ static void fc_exch_timeout(struct work_struct *work)
636 if (e_stat & ESB_ST_ABNORMAL) 650 if (e_stat & ESB_ST_ABNORMAL)
637 rc = fc_exch_done_locked(ep); 651 rc = fc_exch_done_locked(ep);
638 spin_unlock_bh(&ep->ex_lock); 652 spin_unlock_bh(&ep->ex_lock);
639 if (!rc)
640 fc_exch_delete(ep);
641 if (resp) 653 if (resp)
642 resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg); 654 resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg);
655 if (!rc) {
656 /* delete the exchange if it's already being aborted */
657 fc_exch_delete(ep);
658 return;
659 }
643 fc_seq_exch_abort(sp, 2 * ep->r_a_tov); 660 fc_seq_exch_abort(sp, 2 * ep->r_a_tov);
644 goto done; 661 goto done;
645 } 662 }
@@ -679,6 +696,19 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
679 pool = per_cpu_ptr(mp->pool, cpu); 696 pool = per_cpu_ptr(mp->pool, cpu);
680 spin_lock_bh(&pool->lock); 697 spin_lock_bh(&pool->lock);
681 put_cpu(); 698 put_cpu();
699
700 /* peek cache of free slot */
701 if (pool->left != FC_XID_UNKNOWN) {
702 index = pool->left;
703 pool->left = FC_XID_UNKNOWN;
704 goto hit;
705 }
706 if (pool->right != FC_XID_UNKNOWN) {
707 index = pool->right;
708 pool->right = FC_XID_UNKNOWN;
709 goto hit;
710 }
711
682 index = pool->next_index; 712 index = pool->next_index;
683 /* allocate new exch from pool */ 713 /* allocate new exch from pool */
684 while (fc_exch_ptr_get(pool, index)) { 714 while (fc_exch_ptr_get(pool, index)) {
@@ -687,7 +717,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
687 goto err; 717 goto err;
688 } 718 }
689 pool->next_index = index == mp->pool_max_index ? 0 : index + 1; 719 pool->next_index = index == mp->pool_max_index ? 0 : index + 1;
690 720hit:
691 fc_exch_hold(ep); /* hold for exch in mp */ 721 fc_exch_hold(ep); /* hold for exch in mp */
692 spin_lock_init(&ep->ex_lock); 722 spin_lock_init(&ep->ex_lock);
693 /* 723 /*
@@ -1247,7 +1277,7 @@ static struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp)
1247 1277
1248 list_for_each_entry(ema, &lport->ema_list, ema_list) 1278 list_for_each_entry(ema, &lport->ema_list, ema_list)
1249 if ((!ema->match || ema->match(fp)) && 1279 if ((!ema->match || ema->match(fp)) &&
1250 fc_seq_lookup_recip(lport, ema->mp, fp) != FC_RJT_NONE) 1280 fc_seq_lookup_recip(lport, ema->mp, fp) == FC_RJT_NONE)
1251 break; 1281 break;
1252 return fr_seq(fp); 1282 return fr_seq(fp);
1253} 1283}
@@ -1343,7 +1373,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1343 } 1373 }
1344 if (ep->esb_stat & ESB_ST_COMPLETE) { 1374 if (ep->esb_stat & ESB_ST_COMPLETE) {
1345 atomic_inc(&mp->stats.xid_not_found); 1375 atomic_inc(&mp->stats.xid_not_found);
1346 goto out; 1376 goto rel;
1347 } 1377 }
1348 if (ep->rxid == FC_XID_UNKNOWN) 1378 if (ep->rxid == FC_XID_UNKNOWN)
1349 ep->rxid = ntohs(fh->fh_rx_id); 1379 ep->rxid = ntohs(fh->fh_rx_id);
@@ -2181,6 +2211,8 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lport,
2181 goto free_mempool; 2211 goto free_mempool;
2182 for_each_possible_cpu(cpu) { 2212 for_each_possible_cpu(cpu) {
2183 pool = per_cpu_ptr(mp->pool, cpu); 2213 pool = per_cpu_ptr(mp->pool, cpu);
2214 pool->left = FC_XID_UNKNOWN;
2215 pool->right = FC_XID_UNKNOWN;
2184 spin_lock_init(&pool->lock); 2216 spin_lock_init(&pool->lock);
2185 INIT_LIST_HEAD(&pool->ex_list); 2217 INIT_LIST_HEAD(&pool->ex_list);
2186 } 2218 }
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 2924363d142b..cdc06cda76e5 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -57,6 +57,9 @@ struct kmem_cache *scsi_pkt_cachep;
57#define FC_SRB_READ (1 << 1) 57#define FC_SRB_READ (1 << 1)
58#define FC_SRB_WRITE (1 << 0) 58#define FC_SRB_WRITE (1 << 0)
59 59
60/* constant added to e_d_tov timeout to get rec_tov value */
61#define REC_TOV_CONST 1
62
60/* 63/*
61 * The SCp.ptr should be tested and set under the scsi_pkt_queue lock 64 * The SCp.ptr should be tested and set under the scsi_pkt_queue lock
62 */ 65 */
@@ -96,7 +99,7 @@ static void fc_fcp_resp(struct fc_fcp_pkt *, struct fc_frame *);
96static void fc_fcp_complete_locked(struct fc_fcp_pkt *); 99static void fc_fcp_complete_locked(struct fc_fcp_pkt *);
97static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *); 100static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *);
98static void fc_fcp_error(struct fc_fcp_pkt *, struct fc_frame *); 101static void fc_fcp_error(struct fc_fcp_pkt *, struct fc_frame *);
99static void fc_fcp_recovery(struct fc_fcp_pkt *); 102static void fc_fcp_recovery(struct fc_fcp_pkt *, u8 code);
100static void fc_fcp_timeout(unsigned long); 103static void fc_fcp_timeout(unsigned long);
101static void fc_fcp_rec(struct fc_fcp_pkt *); 104static void fc_fcp_rec(struct fc_fcp_pkt *);
102static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *); 105static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *);
@@ -120,14 +123,13 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *);
120#define FC_DATA_UNDRUN 7 123#define FC_DATA_UNDRUN 7
121#define FC_ERROR 8 124#define FC_ERROR 8
122#define FC_HRD_ERROR 9 125#define FC_HRD_ERROR 9
123#define FC_CMD_RECOVERY 10 126#define FC_CRC_ERROR 10
127#define FC_TIMED_OUT 11
124 128
125/* 129/*
126 * Error recovery timeout values. 130 * Error recovery timeout values.
127 */ 131 */
128#define FC_SCSI_ER_TIMEOUT (10 * HZ)
129#define FC_SCSI_TM_TOV (10 * HZ) 132#define FC_SCSI_TM_TOV (10 * HZ)
130#define FC_SCSI_REC_TOV (2 * HZ)
131#define FC_HOST_RESET_TIMEOUT (30 * HZ) 133#define FC_HOST_RESET_TIMEOUT (30 * HZ)
132#define FC_CAN_QUEUE_PERIOD (60 * HZ) 134#define FC_CAN_QUEUE_PERIOD (60 * HZ)
133 135
@@ -438,6 +440,7 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
438 void *buf; 440 void *buf;
439 struct scatterlist *sg; 441 struct scatterlist *sg;
440 u32 nents; 442 u32 nents;
443 u8 host_bcode = FC_COMPLETE;
441 444
442 fh = fc_frame_header_get(fp); 445 fh = fc_frame_header_get(fp);
443 offset = ntohl(fh->fh_parm_offset); 446 offset = ntohl(fh->fh_parm_offset);
@@ -446,13 +449,16 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
446 buf = fc_frame_payload_get(fp, 0); 449 buf = fc_frame_payload_get(fp, 0);
447 450
448 /* 451 /*
449 * if this I/O is ddped then clear it 452 * if this I/O is ddped then clear it and initiate recovery since data
450 * and initiate recovery since data 453 * frames are expected to be placed directly in that case.
451 * frames are expected to be placed 454 *
452 * directly in that case. 455 * Indicate error to scsi-ml because something went wrong with the
456 * ddp handling to get us here.
453 */ 457 */
454 if (fsp->xfer_ddp != FC_XID_UNKNOWN) { 458 if (fsp->xfer_ddp != FC_XID_UNKNOWN) {
455 fc_fcp_ddp_done(fsp); 459 fc_fcp_ddp_done(fsp);
460 FC_FCP_DBG(fsp, "DDP I/O in fc_fcp_recv_data set ERROR\n");
461 host_bcode = FC_ERROR;
456 goto err; 462 goto err;
457 } 463 }
458 if (offset + len > fsp->data_len) { 464 if (offset + len > fsp->data_len) {
@@ -462,6 +468,9 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
462 goto crc_err; 468 goto crc_err;
463 FC_FCP_DBG(fsp, "data received past end. len %zx offset %zx " 469 FC_FCP_DBG(fsp, "data received past end. len %zx offset %zx "
464 "data_len %x\n", len, offset, fsp->data_len); 470 "data_len %x\n", len, offset, fsp->data_len);
471
472 /* Data is corrupted indicate scsi-ml should retry */
473 host_bcode = FC_DATA_OVRRUN;
465 goto err; 474 goto err;
466 } 475 }
467 if (offset != fsp->xfer_len) 476 if (offset != fsp->xfer_len)
@@ -498,8 +507,10 @@ crc_err:
498 * If so, we need to retry the entire operation. 507 * If so, we need to retry the entire operation.
499 * Otherwise, ignore it. 508 * Otherwise, ignore it.
500 */ 509 */
501 if (fsp->state & FC_SRB_DISCONTIG) 510 if (fsp->state & FC_SRB_DISCONTIG) {
511 host_bcode = FC_CRC_ERROR;
502 goto err; 512 goto err;
513 }
503 return; 514 return;
504 } 515 }
505 } 516 }
@@ -517,7 +528,7 @@ crc_err:
517 fc_fcp_complete_locked(fsp); 528 fc_fcp_complete_locked(fsp);
518 return; 529 return;
519err: 530err:
520 fc_fcp_recovery(fsp); 531 fc_fcp_recovery(fsp, host_bcode);
521} 532}
522 533
523/** 534/**
@@ -962,7 +973,13 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
962 } 973 }
963 lport->tt.exch_done(seq); 974 lport->tt.exch_done(seq);
964 } 975 }
965 fc_io_compl(fsp); 976 /*
977 * Some resets driven by SCSI are not I/Os and do not have
978 * SCSI commands associated with the requests. We should not
979 * call I/O completion if we do not have a SCSI command.
980 */
981 if (fsp->cmd)
982 fc_io_compl(fsp);
966} 983}
967 984
968/** 985/**
@@ -1073,6 +1090,21 @@ static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
1073} 1090}
1074 1091
1075/** 1092/**
1093 * get_fsp_rec_tov() - Helper function to get REC_TOV
1094 * @fsp: the FCP packet
1095 */
1096static inline unsigned int get_fsp_rec_tov(struct fc_fcp_pkt *fsp)
1097{
1098 struct fc_rport *rport;
1099 struct fc_rport_libfc_priv *rpriv;
1100
1101 rport = fsp->rport;
1102 rpriv = rport->dd_data;
1103
1104 return rpriv->e_d_tov + REC_TOV_CONST;
1105}
1106
1107/**
1076 * fc_fcp_cmd_send() - Send a FCP command 1108 * fc_fcp_cmd_send() - Send a FCP command
1077 * @lport: The local port to send the command on 1109 * @lport: The local port to send the command on
1078 * @fsp: The FCP packet the command is on 1110 * @fsp: The FCP packet the command is on
@@ -1089,6 +1121,7 @@ static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
1089 struct fc_rport_libfc_priv *rpriv; 1121 struct fc_rport_libfc_priv *rpriv;
1090 const size_t len = sizeof(fsp->cdb_cmd); 1122 const size_t len = sizeof(fsp->cdb_cmd);
1091 int rc = 0; 1123 int rc = 0;
1124 unsigned int rec_tov;
1092 1125
1093 if (fc_fcp_lock_pkt(fsp)) 1126 if (fc_fcp_lock_pkt(fsp))
1094 return 0; 1127 return 0;
@@ -1119,10 +1152,13 @@ static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
1119 fsp->seq_ptr = seq; 1152 fsp->seq_ptr = seq;
1120 fc_fcp_pkt_hold(fsp); /* hold for fc_fcp_pkt_destroy */ 1153 fc_fcp_pkt_hold(fsp); /* hold for fc_fcp_pkt_destroy */
1121 1154
1155 rec_tov = get_fsp_rec_tov(fsp);
1156
1122 setup_timer(&fsp->timer, fc_fcp_timeout, (unsigned long)fsp); 1157 setup_timer(&fsp->timer, fc_fcp_timeout, (unsigned long)fsp);
1123 fc_fcp_timer_set(fsp, 1158
1124 (fsp->tgt_flags & FC_RP_FLAGS_REC_SUPPORTED) ? 1159 if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
1125 FC_SCSI_REC_TOV : FC_SCSI_ER_TIMEOUT); 1160 fc_fcp_timer_set(fsp, rec_tov);
1161
1126unlock: 1162unlock:
1127 fc_fcp_unlock_pkt(fsp); 1163 fc_fcp_unlock_pkt(fsp);
1128 return rc; 1164 return rc;
@@ -1197,13 +1233,16 @@ static void fc_lun_reset_send(unsigned long data)
1197{ 1233{
1198 struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data; 1234 struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
1199 struct fc_lport *lport = fsp->lp; 1235 struct fc_lport *lport = fsp->lp;
1236 unsigned int rec_tov;
1237
1200 if (lport->tt.fcp_cmd_send(lport, fsp, fc_tm_done)) { 1238 if (lport->tt.fcp_cmd_send(lport, fsp, fc_tm_done)) {
1201 if (fsp->recov_retry++ >= FC_MAX_RECOV_RETRY) 1239 if (fsp->recov_retry++ >= FC_MAX_RECOV_RETRY)
1202 return; 1240 return;
1203 if (fc_fcp_lock_pkt(fsp)) 1241 if (fc_fcp_lock_pkt(fsp))
1204 return; 1242 return;
1243 rec_tov = get_fsp_rec_tov(fsp);
1205 setup_timer(&fsp->timer, fc_lun_reset_send, (unsigned long)fsp); 1244 setup_timer(&fsp->timer, fc_lun_reset_send, (unsigned long)fsp);
1206 fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV); 1245 fc_fcp_timer_set(fsp, rec_tov);
1207 fc_fcp_unlock_pkt(fsp); 1246 fc_fcp_unlock_pkt(fsp);
1208 } 1247 }
1209} 1248}
@@ -1282,27 +1321,27 @@ static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg)
1282 * 1321 *
1283 * scsi-eh will escalate for when either happens. 1322 * scsi-eh will escalate for when either happens.
1284 */ 1323 */
1285 return; 1324 goto out;
1286 } 1325 }
1287 1326
1288 if (fc_fcp_lock_pkt(fsp)) 1327 if (fc_fcp_lock_pkt(fsp))
1289 return; 1328 goto out;
1290 1329
1291 /* 1330 /*
1292 * raced with eh timeout handler. 1331 * raced with eh timeout handler.
1293 */ 1332 */
1294 if (!fsp->seq_ptr || !fsp->wait_for_comp) { 1333 if (!fsp->seq_ptr || !fsp->wait_for_comp)
1295 spin_unlock_bh(&fsp->scsi_pkt_lock); 1334 goto out_unlock;
1296 return;
1297 }
1298 1335
1299 fh = fc_frame_header_get(fp); 1336 fh = fc_frame_header_get(fp);
1300 if (fh->fh_type != FC_TYPE_BLS) 1337 if (fh->fh_type != FC_TYPE_BLS)
1301 fc_fcp_resp(fsp, fp); 1338 fc_fcp_resp(fsp, fp);
1302 fsp->seq_ptr = NULL; 1339 fsp->seq_ptr = NULL;
1303 fsp->lp->tt.exch_done(seq); 1340 fsp->lp->tt.exch_done(seq);
1304 fc_frame_free(fp); 1341out_unlock:
1305 fc_fcp_unlock_pkt(fsp); 1342 fc_fcp_unlock_pkt(fsp);
1343out:
1344 fc_frame_free(fp);
1306} 1345}
1307 1346
1308/** 1347/**
@@ -1341,13 +1380,10 @@ static void fc_fcp_timeout(unsigned long data)
1341 1380
1342 if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED) 1381 if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
1343 fc_fcp_rec(fsp); 1382 fc_fcp_rec(fsp);
1344 else if (time_after_eq(fsp->last_pkt_time + (FC_SCSI_ER_TIMEOUT / 2),
1345 jiffies))
1346 fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT);
1347 else if (fsp->state & FC_SRB_RCV_STATUS) 1383 else if (fsp->state & FC_SRB_RCV_STATUS)
1348 fc_fcp_complete_locked(fsp); 1384 fc_fcp_complete_locked(fsp);
1349 else 1385 else
1350 fc_fcp_recovery(fsp); 1386 fc_fcp_recovery(fsp, FC_TIMED_OUT);
1351 fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO; 1387 fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO;
1352unlock: 1388unlock:
1353 fc_fcp_unlock_pkt(fsp); 1389 fc_fcp_unlock_pkt(fsp);
@@ -1373,6 +1409,7 @@ static void fc_fcp_rec(struct fc_fcp_pkt *fsp)
1373 fc_fcp_complete_locked(fsp); 1409 fc_fcp_complete_locked(fsp);
1374 return; 1410 return;
1375 } 1411 }
1412
1376 fp = fc_fcp_frame_alloc(lport, sizeof(struct fc_els_rec)); 1413 fp = fc_fcp_frame_alloc(lport, sizeof(struct fc_els_rec));
1377 if (!fp) 1414 if (!fp)
1378 goto retry; 1415 goto retry;
@@ -1383,15 +1420,15 @@ static void fc_fcp_rec(struct fc_fcp_pkt *fsp)
1383 FC_FCTL_REQ, 0); 1420 FC_FCTL_REQ, 0);
1384 if (lport->tt.elsct_send(lport, rport->port_id, fp, ELS_REC, 1421 if (lport->tt.elsct_send(lport, rport->port_id, fp, ELS_REC,
1385 fc_fcp_rec_resp, fsp, 1422 fc_fcp_rec_resp, fsp,
1386 jiffies_to_msecs(FC_SCSI_REC_TOV))) { 1423 2 * lport->r_a_tov)) {
1387 fc_fcp_pkt_hold(fsp); /* hold while REC outstanding */ 1424 fc_fcp_pkt_hold(fsp); /* hold while REC outstanding */
1388 return; 1425 return;
1389 } 1426 }
1390retry: 1427retry:
1391 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY) 1428 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
1392 fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV); 1429 fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
1393 else 1430 else
1394 fc_fcp_recovery(fsp); 1431 fc_fcp_recovery(fsp, FC_TIMED_OUT);
1395} 1432}
1396 1433
1397/** 1434/**
@@ -1445,7 +1482,6 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
1445 * making progress. 1482 * making progress.
1446 */ 1483 */
1447 rpriv->flags &= ~FC_RP_FLAGS_REC_SUPPORTED; 1484 rpriv->flags &= ~FC_RP_FLAGS_REC_SUPPORTED;
1448 fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT);
1449 break; 1485 break;
1450 case ELS_RJT_LOGIC: 1486 case ELS_RJT_LOGIC:
1451 case ELS_RJT_UNAB: 1487 case ELS_RJT_UNAB:
@@ -1460,7 +1496,7 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
1460 fc_fcp_retry_cmd(fsp); 1496 fc_fcp_retry_cmd(fsp);
1461 break; 1497 break;
1462 } 1498 }
1463 fc_fcp_recovery(fsp); 1499 fc_fcp_recovery(fsp, FC_ERROR);
1464 break; 1500 break;
1465 } 1501 }
1466 } else if (opcode == ELS_LS_ACC) { 1502 } else if (opcode == ELS_LS_ACC) {
@@ -1498,12 +1534,12 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
1498 } 1534 }
1499 fc_fcp_srr(fsp, r_ctl, offset); 1535 fc_fcp_srr(fsp, r_ctl, offset);
1500 } else if (e_stat & ESB_ST_SEQ_INIT) { 1536 } else if (e_stat & ESB_ST_SEQ_INIT) {
1501 1537 unsigned int rec_tov = get_fsp_rec_tov(fsp);
1502 /* 1538 /*
1503 * The remote port has the initiative, so just 1539 * The remote port has the initiative, so just
1504 * keep waiting for it to complete. 1540 * keep waiting for it to complete.
1505 */ 1541 */
1506 fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV); 1542 fc_fcp_timer_set(fsp, rec_tov);
1507 } else { 1543 } else {
1508 1544
1509 /* 1545 /*
@@ -1575,7 +1611,7 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
1575 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY) 1611 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
1576 fc_fcp_rec(fsp); 1612 fc_fcp_rec(fsp);
1577 else 1613 else
1578 fc_fcp_recovery(fsp); 1614 fc_fcp_recovery(fsp, FC_ERROR);
1579 break; 1615 break;
1580 } 1616 }
1581 fc_fcp_unlock_pkt(fsp); 1617 fc_fcp_unlock_pkt(fsp);
@@ -1587,9 +1623,9 @@ out:
1587 * fc_fcp_recovery() - Handler for fcp_pkt recovery 1623 * fc_fcp_recovery() - Handler for fcp_pkt recovery
1588 * @fsp: The FCP pkt that needs to be aborted 1624 * @fsp: The FCP pkt that needs to be aborted
1589 */ 1625 */
1590static void fc_fcp_recovery(struct fc_fcp_pkt *fsp) 1626static void fc_fcp_recovery(struct fc_fcp_pkt *fsp, u8 code)
1591{ 1627{
1592 fsp->status_code = FC_CMD_RECOVERY; 1628 fsp->status_code = code;
1593 fsp->cdb_status = 0; 1629 fsp->cdb_status = 0;
1594 fsp->io_status = 0; 1630 fsp->io_status = 0;
1595 /* 1631 /*
@@ -1616,6 +1652,7 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
1616 struct fcp_srr *srr; 1652 struct fcp_srr *srr;
1617 struct fc_frame *fp; 1653 struct fc_frame *fp;
1618 u8 cdb_op; 1654 u8 cdb_op;
1655 unsigned int rec_tov;
1619 1656
1620 rport = fsp->rport; 1657 rport = fsp->rport;
1621 rpriv = rport->dd_data; 1658 rpriv = rport->dd_data;
@@ -1640,8 +1677,9 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
1640 rpriv->local_port->port_id, FC_TYPE_FCP, 1677 rpriv->local_port->port_id, FC_TYPE_FCP,
1641 FC_FCTL_REQ, 0); 1678 FC_FCTL_REQ, 0);
1642 1679
1680 rec_tov = get_fsp_rec_tov(fsp);
1643 seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp, NULL, 1681 seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp, NULL,
1644 fsp, jiffies_to_msecs(FC_SCSI_REC_TOV)); 1682 fsp, jiffies_to_msecs(rec_tov));
1645 if (!seq) 1683 if (!seq)
1646 goto retry; 1684 goto retry;
1647 1685
@@ -1665,6 +1703,7 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
1665{ 1703{
1666 struct fc_fcp_pkt *fsp = arg; 1704 struct fc_fcp_pkt *fsp = arg;
1667 struct fc_frame_header *fh; 1705 struct fc_frame_header *fh;
1706 unsigned int rec_tov;
1668 1707
1669 if (IS_ERR(fp)) { 1708 if (IS_ERR(fp)) {
1670 fc_fcp_srr_error(fsp, fp); 1709 fc_fcp_srr_error(fsp, fp);
@@ -1691,11 +1730,12 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
1691 switch (fc_frame_payload_op(fp)) { 1730 switch (fc_frame_payload_op(fp)) {
1692 case ELS_LS_ACC: 1731 case ELS_LS_ACC:
1693 fsp->recov_retry = 0; 1732 fsp->recov_retry = 0;
1694 fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV); 1733 rec_tov = get_fsp_rec_tov(fsp);
1734 fc_fcp_timer_set(fsp, rec_tov);
1695 break; 1735 break;
1696 case ELS_LS_RJT: 1736 case ELS_LS_RJT:
1697 default: 1737 default:
1698 fc_fcp_recovery(fsp); 1738 fc_fcp_recovery(fsp, FC_ERROR);
1699 break; 1739 break;
1700 } 1740 }
1701 fc_fcp_unlock_pkt(fsp); 1741 fc_fcp_unlock_pkt(fsp);
@@ -1721,7 +1761,7 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
1721 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY) 1761 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
1722 fc_fcp_rec(fsp); 1762 fc_fcp_rec(fsp);
1723 else 1763 else
1724 fc_fcp_recovery(fsp); 1764 fc_fcp_recovery(fsp, FC_TIMED_OUT);
1725 break; 1765 break;
1726 case -FC_EX_CLOSED: /* e.g., link failure */ 1766 case -FC_EX_CLOSED: /* e.g., link failure */
1727 /* fall through */ 1767 /* fall through */
@@ -1820,19 +1860,17 @@ static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scs
1820 if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) { 1860 if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
1821 fsp->req_flags = FC_SRB_READ; 1861 fsp->req_flags = FC_SRB_READ;
1822 stats->InputRequests++; 1862 stats->InputRequests++;
1823 stats->InputMegabytes = fsp->data_len; 1863 stats->InputBytes += fsp->data_len;
1824 } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) { 1864 } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
1825 fsp->req_flags = FC_SRB_WRITE; 1865 fsp->req_flags = FC_SRB_WRITE;
1826 stats->OutputRequests++; 1866 stats->OutputRequests++;
1827 stats->OutputMegabytes = fsp->data_len; 1867 stats->OutputBytes += fsp->data_len;
1828 } else { 1868 } else {
1829 fsp->req_flags = 0; 1869 fsp->req_flags = 0;
1830 stats->ControlRequests++; 1870 stats->ControlRequests++;
1831 } 1871 }
1832 put_cpu(); 1872 put_cpu();
1833 1873
1834 fsp->tgt_flags = rpriv->flags;
1835
1836 init_timer(&fsp->timer); 1874 init_timer(&fsp->timer);
1837 fsp->timer.data = (unsigned long)fsp; 1875 fsp->timer.data = (unsigned long)fsp;
1838 1876
@@ -1946,18 +1984,29 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
1946 break; 1984 break;
1947 case FC_CMD_ABORTED: 1985 case FC_CMD_ABORTED:
1948 FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml " 1986 FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
1949 "due to FC_CMD_ABORTED\n"); 1987 "due to FC_CMD_ABORTED\n");
1950 sc_cmd->result = (DID_ERROR << 16) | fsp->io_status; 1988 sc_cmd->result = (DID_ERROR << 16) | fsp->io_status;
1951 break; 1989 break;
1952 case FC_CMD_RECOVERY:
1953 sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status;
1954 break;
1955 case FC_CMD_RESET: 1990 case FC_CMD_RESET:
1991 FC_FCP_DBG(fsp, "Returning DID_RESET to scsi-ml "
1992 "due to FC_CMD_RESET\n");
1956 sc_cmd->result = (DID_RESET << 16); 1993 sc_cmd->result = (DID_RESET << 16);
1957 break; 1994 break;
1958 case FC_HRD_ERROR: 1995 case FC_HRD_ERROR:
1996 FC_FCP_DBG(fsp, "Returning DID_NO_CONNECT to scsi-ml "
1997 "due to FC_HRD_ERROR\n");
1959 sc_cmd->result = (DID_NO_CONNECT << 16); 1998 sc_cmd->result = (DID_NO_CONNECT << 16);
1960 break; 1999 break;
2000 case FC_CRC_ERROR:
2001 FC_FCP_DBG(fsp, "Returning DID_PARITY to scsi-ml "
2002 "due to FC_CRC_ERROR\n");
2003 sc_cmd->result = (DID_PARITY << 16);
2004 break;
2005 case FC_TIMED_OUT:
2006 FC_FCP_DBG(fsp, "Returning DID_BUS_BUSY to scsi-ml "
2007 "due to FC_TIMED_OUT\n");
2008 sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status;
2009 break;
1961 default: 2010 default:
1962 FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml " 2011 FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
1963 "due to unknown error\n"); 2012 "due to unknown error\n");
@@ -2004,7 +2053,7 @@ int fc_eh_abort(struct scsi_cmnd *sc_cmd)
2004 fsp = CMD_SP(sc_cmd); 2053 fsp = CMD_SP(sc_cmd);
2005 if (!fsp) { 2054 if (!fsp) {
2006 /* command completed while scsi eh was setting up */ 2055 /* command completed while scsi eh was setting up */
2007 spin_unlock_irqrestore(lport->host->host_lock, flags); 2056 spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
2008 return SUCCESS; 2057 return SUCCESS;
2009 } 2058 }
2010 /* grab a ref so the fsp and sc_cmd cannot be relased from under us */ 2059 /* grab a ref so the fsp and sc_cmd cannot be relased from under us */
diff --git a/drivers/scsi/libfc/fc_libfc.h b/drivers/scsi/libfc/fc_libfc.h
index 16d2162dda1f..eea0c3541b71 100644
--- a/drivers/scsi/libfc/fc_libfc.h
+++ b/drivers/scsi/libfc/fc_libfc.h
@@ -66,9 +66,21 @@ extern unsigned int fc_debug_logging;
66 66
67#define FC_FCP_DBG(pkt, fmt, args...) \ 67#define FC_FCP_DBG(pkt, fmt, args...) \
68 FC_CHECK_LOGGING(FC_FCP_LOGGING, \ 68 FC_CHECK_LOGGING(FC_FCP_LOGGING, \
69 printk(KERN_INFO "host%u: fcp: %6.6x: " fmt, \ 69 { \
70 if ((pkt)->seq_ptr) { \
71 struct fc_exch *_ep = NULL; \
72 _ep = fc_seq_exch((pkt)->seq_ptr); \
73 printk(KERN_INFO "host%u: fcp: %6.6x: " \
74 "xid %04x-%04x: " fmt, \
70 (pkt)->lp->host->host_no, \ 75 (pkt)->lp->host->host_no, \
71 pkt->rport->port_id, ##args)) 76 (pkt)->rport->port_id, \
77 (_ep)->oxid, (_ep)->rxid, ##args); \
78 } else { \
79 printk(KERN_INFO "host%u: fcp: %6.6x: " fmt, \
80 (pkt)->lp->host->host_no, \
81 (pkt)->rport->port_id, ##args); \
82 } \
83 })
72 84
73#define FC_EXCH_DBG(exch, fmt, args...) \ 85#define FC_EXCH_DBG(exch, fmt, args...) \
74 FC_CHECK_LOGGING(FC_EXCH_LOGGING, \ 86 FC_CHECK_LOGGING(FC_EXCH_LOGGING, \
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 9be63edbf8fb..c5a10f94f845 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -288,6 +288,8 @@ struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
288 struct fc_lport *lport = shost_priv(shost); 288 struct fc_lport *lport = shost_priv(shost);
289 struct timespec v0, v1; 289 struct timespec v0, v1;
290 unsigned int cpu; 290 unsigned int cpu;
291 u64 fcp_in_bytes = 0;
292 u64 fcp_out_bytes = 0;
291 293
292 fcoe_stats = &lport->host_stats; 294 fcoe_stats = &lport->host_stats;
293 memset(fcoe_stats, 0, sizeof(struct fc_host_statistics)); 295 memset(fcoe_stats, 0, sizeof(struct fc_host_statistics));
@@ -310,10 +312,12 @@ struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
310 fcoe_stats->fcp_input_requests += stats->InputRequests; 312 fcoe_stats->fcp_input_requests += stats->InputRequests;
311 fcoe_stats->fcp_output_requests += stats->OutputRequests; 313 fcoe_stats->fcp_output_requests += stats->OutputRequests;
312 fcoe_stats->fcp_control_requests += stats->ControlRequests; 314 fcoe_stats->fcp_control_requests += stats->ControlRequests;
313 fcoe_stats->fcp_input_megabytes += stats->InputMegabytes; 315 fcp_in_bytes += stats->InputBytes;
314 fcoe_stats->fcp_output_megabytes += stats->OutputMegabytes; 316 fcp_out_bytes += stats->OutputBytes;
315 fcoe_stats->link_failure_count += stats->LinkFailureCount; 317 fcoe_stats->link_failure_count += stats->LinkFailureCount;
316 } 318 }
319 fcoe_stats->fcp_input_megabytes = div_u64(fcp_in_bytes, 1000000);
320 fcoe_stats->fcp_output_megabytes = div_u64(fcp_out_bytes, 1000000);
317 fcoe_stats->lip_count = -1; 321 fcoe_stats->lip_count = -1;
318 fcoe_stats->nos_count = -1; 322 fcoe_stats->nos_count = -1;
319 fcoe_stats->loss_of_sync_count = -1; 323 fcoe_stats->loss_of_sync_count = -1;
@@ -1703,8 +1707,10 @@ static int fc_lport_els_request(struct fc_bsg_job *job,
1703 info->sg = job->reply_payload.sg_list; 1707 info->sg = job->reply_payload.sg_list;
1704 1708
1705 if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp, 1709 if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
1706 NULL, info, tov)) 1710 NULL, info, tov)) {
1711 kfree(info);
1707 return -ECOMM; 1712 return -ECOMM;
1713 }
1708 return 0; 1714 return 0;
1709} 1715}
1710 1716
@@ -1762,8 +1768,10 @@ static int fc_lport_ct_request(struct fc_bsg_job *job,
1762 info->sg = job->reply_payload.sg_list; 1768 info->sg = job->reply_payload.sg_list;
1763 1769
1764 if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp, 1770 if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
1765 NULL, info, tov)) 1771 NULL, info, tov)) {
1772 kfree(info);
1766 return -ECOMM; 1773 return -ECOMM;
1774 }
1767 return 0; 1775 return 0;
1768} 1776}
1769 1777
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index a84ef13ed74a..a7175adab32d 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -652,7 +652,7 @@ void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
652 FC_RPORT_DBG(rdata, "Received a FLOGI %s\n", fc_els_resp_type(fp)); 652 FC_RPORT_DBG(rdata, "Received a FLOGI %s\n", fc_els_resp_type(fp));
653 653
654 if (fp == ERR_PTR(-FC_EX_CLOSED)) 654 if (fp == ERR_PTR(-FC_EX_CLOSED))
655 return; 655 goto put;
656 656
657 mutex_lock(&rdata->rp_mutex); 657 mutex_lock(&rdata->rp_mutex);
658 658
@@ -689,6 +689,7 @@ out:
689 fc_frame_free(fp); 689 fc_frame_free(fp);
690err: 690err:
691 mutex_unlock(&rdata->rp_mutex); 691 mutex_unlock(&rdata->rp_mutex);
692put:
692 kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy); 693 kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
693 return; 694 return;
694bad: 695bad:
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index c15fde808c33..da8b61543ee4 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -505,6 +505,7 @@ static void iscsi_free_task(struct iscsi_task *task)
505 struct iscsi_conn *conn = task->conn; 505 struct iscsi_conn *conn = task->conn;
506 struct iscsi_session *session = conn->session; 506 struct iscsi_session *session = conn->session;
507 struct scsi_cmnd *sc = task->sc; 507 struct scsi_cmnd *sc = task->sc;
508 int oldstate = task->state;
508 509
509 ISCSI_DBG_SESSION(session, "freeing task itt 0x%x state %d sc %p\n", 510 ISCSI_DBG_SESSION(session, "freeing task itt 0x%x state %d sc %p\n",
510 task->itt, task->state, task->sc); 511 task->itt, task->state, task->sc);
@@ -525,10 +526,10 @@ static void iscsi_free_task(struct iscsi_task *task)
525 /* SCSI eh reuses commands to verify us */ 526 /* SCSI eh reuses commands to verify us */
526 sc->SCp.ptr = NULL; 527 sc->SCp.ptr = NULL;
527 /* 528 /*
528 * queue command may call this to free the task, but 529 * queue command may call this to free the task, so
529 * not have setup the sc callback 530 * it will decide how to return sc to scsi-ml.
530 */ 531 */
531 if (sc->scsi_done) 532 if (oldstate != ISCSI_TASK_REQUEUE_SCSIQ)
532 sc->scsi_done(sc); 533 sc->scsi_done(sc);
533 } 534 }
534} 535}
@@ -539,11 +540,12 @@ void __iscsi_get_task(struct iscsi_task *task)
539} 540}
540EXPORT_SYMBOL_GPL(__iscsi_get_task); 541EXPORT_SYMBOL_GPL(__iscsi_get_task);
541 542
542static void __iscsi_put_task(struct iscsi_task *task) 543void __iscsi_put_task(struct iscsi_task *task)
543{ 544{
544 if (atomic_dec_and_test(&task->refcount)) 545 if (atomic_dec_and_test(&task->refcount))
545 iscsi_free_task(task); 546 iscsi_free_task(task);
546} 547}
548EXPORT_SYMBOL_GPL(__iscsi_put_task);
547 549
548void iscsi_put_task(struct iscsi_task *task) 550void iscsi_put_task(struct iscsi_task *task)
549{ 551{
@@ -571,7 +573,8 @@ static void iscsi_complete_task(struct iscsi_task *task, int state)
571 task->itt, task->state, task->sc); 573 task->itt, task->state, task->sc);
572 if (task->state == ISCSI_TASK_COMPLETED || 574 if (task->state == ISCSI_TASK_COMPLETED ||
573 task->state == ISCSI_TASK_ABRT_TMF || 575 task->state == ISCSI_TASK_ABRT_TMF ||
574 task->state == ISCSI_TASK_ABRT_SESS_RECOV) 576 task->state == ISCSI_TASK_ABRT_SESS_RECOV ||
577 task->state == ISCSI_TASK_REQUEUE_SCSIQ)
575 return; 578 return;
576 WARN_ON_ONCE(task->state == ISCSI_TASK_FREE); 579 WARN_ON_ONCE(task->state == ISCSI_TASK_FREE);
577 task->state = state; 580 task->state = state;
@@ -1335,17 +1338,16 @@ void iscsi_session_failure(struct iscsi_session *session,
1335{ 1338{
1336 struct iscsi_conn *conn; 1339 struct iscsi_conn *conn;
1337 struct device *dev; 1340 struct device *dev;
1338 unsigned long flags;
1339 1341
1340 spin_lock_irqsave(&session->lock, flags); 1342 spin_lock_bh(&session->lock);
1341 conn = session->leadconn; 1343 conn = session->leadconn;
1342 if (session->state == ISCSI_STATE_TERMINATE || !conn) { 1344 if (session->state == ISCSI_STATE_TERMINATE || !conn) {
1343 spin_unlock_irqrestore(&session->lock, flags); 1345 spin_unlock_bh(&session->lock);
1344 return; 1346 return;
1345 } 1347 }
1346 1348
1347 dev = get_device(&conn->cls_conn->dev); 1349 dev = get_device(&conn->cls_conn->dev);
1348 spin_unlock_irqrestore(&session->lock, flags); 1350 spin_unlock_bh(&session->lock);
1349 if (!dev) 1351 if (!dev)
1350 return; 1352 return;
1351 /* 1353 /*
@@ -1364,17 +1366,16 @@ EXPORT_SYMBOL_GPL(iscsi_session_failure);
1364void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err) 1366void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
1365{ 1367{
1366 struct iscsi_session *session = conn->session; 1368 struct iscsi_session *session = conn->session;
1367 unsigned long flags;
1368 1369
1369 spin_lock_irqsave(&session->lock, flags); 1370 spin_lock_bh(&session->lock);
1370 if (session->state == ISCSI_STATE_FAILED) { 1371 if (session->state == ISCSI_STATE_FAILED) {
1371 spin_unlock_irqrestore(&session->lock, flags); 1372 spin_unlock_bh(&session->lock);
1372 return; 1373 return;
1373 } 1374 }
1374 1375
1375 if (conn->stop_stage == 0) 1376 if (conn->stop_stage == 0)
1376 session->state = ISCSI_STATE_FAILED; 1377 session->state = ISCSI_STATE_FAILED;
1377 spin_unlock_irqrestore(&session->lock, flags); 1378 spin_unlock_bh(&session->lock);
1378 1379
1379 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); 1380 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1380 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); 1381 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
@@ -1599,27 +1600,23 @@ enum {
1599 FAILURE_SESSION_NOT_READY, 1600 FAILURE_SESSION_NOT_READY,
1600}; 1601};
1601 1602
1602static int iscsi_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) 1603int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
1603{ 1604{
1604 struct iscsi_cls_session *cls_session; 1605 struct iscsi_cls_session *cls_session;
1605 struct Scsi_Host *host;
1606 struct iscsi_host *ihost; 1606 struct iscsi_host *ihost;
1607 int reason = 0; 1607 int reason = 0;
1608 struct iscsi_session *session; 1608 struct iscsi_session *session;
1609 struct iscsi_conn *conn; 1609 struct iscsi_conn *conn;
1610 struct iscsi_task *task = NULL; 1610 struct iscsi_task *task = NULL;
1611 1611
1612 sc->scsi_done = done;
1613 sc->result = 0; 1612 sc->result = 0;
1614 sc->SCp.ptr = NULL; 1613 sc->SCp.ptr = NULL;
1615 1614
1616 host = sc->device->host;
1617 ihost = shost_priv(host); 1615 ihost = shost_priv(host);
1618 spin_unlock(host->host_lock);
1619 1616
1620 cls_session = starget_to_session(scsi_target(sc->device)); 1617 cls_session = starget_to_session(scsi_target(sc->device));
1621 session = cls_session->dd_data; 1618 session = cls_session->dd_data;
1622 spin_lock(&session->lock); 1619 spin_lock_bh(&session->lock);
1623 1620
1624 reason = iscsi_session_chkready(cls_session); 1621 reason = iscsi_session_chkready(cls_session);
1625 if (reason) { 1622 if (reason) {
@@ -1705,25 +1702,21 @@ static int iscsi_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi
1705 } 1702 }
1706 1703
1707 session->queued_cmdsn++; 1704 session->queued_cmdsn++;
1708 spin_unlock(&session->lock); 1705 spin_unlock_bh(&session->lock);
1709 spin_lock(host->host_lock);
1710 return 0; 1706 return 0;
1711 1707
1712prepd_reject: 1708prepd_reject:
1713 sc->scsi_done = NULL; 1709 iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ);
1714 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
1715reject: 1710reject:
1716 spin_unlock(&session->lock); 1711 spin_unlock_bh(&session->lock);
1717 ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n", 1712 ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n",
1718 sc->cmnd[0], reason); 1713 sc->cmnd[0], reason);
1719 spin_lock(host->host_lock);
1720 return SCSI_MLQUEUE_TARGET_BUSY; 1714 return SCSI_MLQUEUE_TARGET_BUSY;
1721 1715
1722prepd_fault: 1716prepd_fault:
1723 sc->scsi_done = NULL; 1717 iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ);
1724 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
1725fault: 1718fault:
1726 spin_unlock(&session->lock); 1719 spin_unlock_bh(&session->lock);
1727 ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n", 1720 ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n",
1728 sc->cmnd[0], reason); 1721 sc->cmnd[0], reason);
1729 if (!scsi_bidi_cmnd(sc)) 1722 if (!scsi_bidi_cmnd(sc))
@@ -1732,12 +1725,9 @@ fault:
1732 scsi_out(sc)->resid = scsi_out(sc)->length; 1725 scsi_out(sc)->resid = scsi_out(sc)->length;
1733 scsi_in(sc)->resid = scsi_in(sc)->length; 1726 scsi_in(sc)->resid = scsi_in(sc)->length;
1734 } 1727 }
1735 done(sc); 1728 sc->scsi_done(sc);
1736 spin_lock(host->host_lock);
1737 return 0; 1729 return 0;
1738} 1730}
1739
1740DEF_SCSI_QCMD(iscsi_queuecommand)
1741EXPORT_SYMBOL_GPL(iscsi_queuecommand); 1731EXPORT_SYMBOL_GPL(iscsi_queuecommand);
1742 1732
1743int iscsi_change_queue_depth(struct scsi_device *sdev, int depth, int reason) 1733int iscsi_change_queue_depth(struct scsi_device *sdev, int depth, int reason)
@@ -1795,9 +1785,9 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
1795 NULL, 0); 1785 NULL, 0);
1796 if (!task) { 1786 if (!task) {
1797 spin_unlock_bh(&session->lock); 1787 spin_unlock_bh(&session->lock);
1788 iscsi_conn_printk(KERN_ERR, conn, "Could not send TMF.\n");
1798 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 1789 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1799 spin_lock_bh(&session->lock); 1790 spin_lock_bh(&session->lock);
1800 ISCSI_DBG_EH(session, "tmf exec failure\n");
1801 return -EPERM; 1791 return -EPERM;
1802 } 1792 }
1803 conn->tmfcmd_pdus_cnt++; 1793 conn->tmfcmd_pdus_cnt++;
@@ -2202,7 +2192,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
2202 goto success_unlocked; 2192 goto success_unlocked;
2203 case TMF_TIMEDOUT: 2193 case TMF_TIMEDOUT:
2204 spin_unlock_bh(&session->lock); 2194 spin_unlock_bh(&session->lock);
2205 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 2195 iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
2206 goto failed_unlocked; 2196 goto failed_unlocked;
2207 case TMF_NOT_FOUND: 2197 case TMF_NOT_FOUND:
2208 if (!sc->SCp.ptr) { 2198 if (!sc->SCp.ptr) {
@@ -2289,7 +2279,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
2289 break; 2279 break;
2290 case TMF_TIMEDOUT: 2280 case TMF_TIMEDOUT:
2291 spin_unlock_bh(&session->lock); 2281 spin_unlock_bh(&session->lock);
2292 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 2282 iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
2293 goto done; 2283 goto done;
2294 default: 2284 default:
2295 conn->tmf_state = TMF_INITIAL; 2285 conn->tmf_state = TMF_INITIAL;
@@ -2370,7 +2360,7 @@ failed:
2370 * we drop the lock here but the leadconn cannot be destoyed while 2360 * we drop the lock here but the leadconn cannot be destoyed while
2371 * we are in the scsi eh 2361 * we are in the scsi eh
2372 */ 2362 */
2373 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 2363 iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
2374 2364
2375 ISCSI_DBG_EH(session, "wait for relogin\n"); 2365 ISCSI_DBG_EH(session, "wait for relogin\n");
2376 wait_event_interruptible(conn->ehwait, 2366 wait_event_interruptible(conn->ehwait,
@@ -2452,7 +2442,7 @@ int iscsi_eh_target_reset(struct scsi_cmnd *sc)
2452 break; 2442 break;
2453 case TMF_TIMEDOUT: 2443 case TMF_TIMEDOUT:
2454 spin_unlock_bh(&session->lock); 2444 spin_unlock_bh(&session->lock);
2455 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 2445 iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
2456 goto done; 2446 goto done;
2457 default: 2447 default:
2458 conn->tmf_state = TMF_INITIAL; 2448 conn->tmf_state = TMF_INITIAL;
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index fe8b74c706d2..5257fdfe699a 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -28,6 +28,17 @@
28#include <scsi/scsi_transport_sas.h> 28#include <scsi/scsi_transport_sas.h>
29#include "../scsi_sas_internal.h" 29#include "../scsi_sas_internal.h"
30 30
31static bool phy_is_wideport_member(struct asd_sas_port *port, struct asd_sas_phy *phy)
32{
33 struct sas_ha_struct *sas_ha = phy->ha;
34
35 if (memcmp(port->attached_sas_addr, phy->attached_sas_addr,
36 SAS_ADDR_SIZE) != 0 || (sas_ha->strict_wide_ports &&
37 memcmp(port->sas_addr, phy->sas_addr, SAS_ADDR_SIZE) != 0))
38 return false;
39 return true;
40}
41
31/** 42/**
32 * sas_form_port -- add this phy to a port 43 * sas_form_port -- add this phy to a port
33 * @phy: the phy of interest 44 * @phy: the phy of interest
@@ -45,8 +56,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
45 unsigned long flags; 56 unsigned long flags;
46 57
47 if (port) { 58 if (port) {
48 if (memcmp(port->attached_sas_addr, phy->attached_sas_addr, 59 if (!phy_is_wideport_member(port, phy))
49 SAS_ADDR_SIZE) != 0)
50 sas_deform_port(phy); 60 sas_deform_port(phy);
51 else { 61 else {
52 SAS_DPRINTK("%s: phy%d belongs to port%d already(%d)!\n", 62 SAS_DPRINTK("%s: phy%d belongs to port%d already(%d)!\n",
@@ -62,9 +72,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
62 port = sas_ha->sas_port[i]; 72 port = sas_ha->sas_port[i];
63 spin_lock(&port->phy_list_lock); 73 spin_lock(&port->phy_list_lock);
64 if (*(u64 *) port->sas_addr && 74 if (*(u64 *) port->sas_addr &&
65 memcmp(port->attached_sas_addr, 75 phy_is_wideport_member(port, phy) && port->num_phys > 0) {
66 phy->attached_sas_addr, SAS_ADDR_SIZE) == 0 &&
67 port->num_phys > 0) {
68 /* wide port */ 76 /* wide port */
69 SAS_DPRINTK("phy%d matched wide port%d\n", phy->id, 77 SAS_DPRINTK("phy%d matched wide port%d\n", phy->id,
70 port->id); 78 port->id);
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 196de40b906c..746dd3d7a092 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -464,12 +464,29 @@ struct unsol_rcv_ct_ctx {
464#define UNSOL_VALID 0x00000001 464#define UNSOL_VALID 0x00000001
465}; 465};
466 466
467#define LPFC_USER_LINK_SPEED_AUTO 0 /* auto select (default)*/
468#define LPFC_USER_LINK_SPEED_1G 1 /* 1 Gigabaud */
469#define LPFC_USER_LINK_SPEED_2G 2 /* 2 Gigabaud */
470#define LPFC_USER_LINK_SPEED_4G 4 /* 4 Gigabaud */
471#define LPFC_USER_LINK_SPEED_8G 8 /* 8 Gigabaud */
472#define LPFC_USER_LINK_SPEED_10G 10 /* 10 Gigabaud */
473#define LPFC_USER_LINK_SPEED_16G 16 /* 16 Gigabaud */
474#define LPFC_USER_LINK_SPEED_MAX LPFC_USER_LINK_SPEED_16G
475#define LPFC_USER_LINK_SPEED_BITMAP ((1 << LPFC_USER_LINK_SPEED_16G) | \
476 (1 << LPFC_USER_LINK_SPEED_10G) | \
477 (1 << LPFC_USER_LINK_SPEED_8G) | \
478 (1 << LPFC_USER_LINK_SPEED_4G) | \
479 (1 << LPFC_USER_LINK_SPEED_2G) | \
480 (1 << LPFC_USER_LINK_SPEED_1G) | \
481 (1 << LPFC_USER_LINK_SPEED_AUTO))
482#define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8, 10, 16"
483
467struct lpfc_hba { 484struct lpfc_hba {
468 /* SCSI interface function jump table entries */ 485 /* SCSI interface function jump table entries */
469 int (*lpfc_new_scsi_buf) 486 int (*lpfc_new_scsi_buf)
470 (struct lpfc_vport *, int); 487 (struct lpfc_vport *, int);
471 struct lpfc_scsi_buf * (*lpfc_get_scsi_buf) 488 struct lpfc_scsi_buf * (*lpfc_get_scsi_buf)
472 (struct lpfc_hba *); 489 (struct lpfc_hba *, struct lpfc_nodelist *);
473 int (*lpfc_scsi_prep_dma_buf) 490 int (*lpfc_scsi_prep_dma_buf)
474 (struct lpfc_hba *, struct lpfc_scsi_buf *); 491 (struct lpfc_hba *, struct lpfc_scsi_buf *);
475 void (*lpfc_scsi_unprep_dma_buf) 492 void (*lpfc_scsi_unprep_dma_buf)
@@ -545,7 +562,7 @@ struct lpfc_hba {
545 uint32_t hba_flag; /* hba generic flags */ 562 uint32_t hba_flag; /* hba generic flags */
546#define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */ 563#define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */
547#define DEFER_ERATT 0x2 /* Deferred error attention in progress */ 564#define DEFER_ERATT 0x2 /* Deferred error attention in progress */
548#define HBA_FCOE_SUPPORT 0x4 /* HBA function supports FCOE */ 565#define HBA_FCOE_MODE 0x4 /* HBA function in FCoE Mode */
549#define HBA_SP_QUEUE_EVT 0x8 /* Slow-path qevt posted to worker thread*/ 566#define HBA_SP_QUEUE_EVT 0x8 /* Slow-path qevt posted to worker thread*/
550#define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */ 567#define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */
551#define FCP_XRI_ABORT_EVENT 0x20 568#define FCP_XRI_ABORT_EVENT 0x20
@@ -557,6 +574,7 @@ struct lpfc_hba {
557#define HBA_FIP_SUPPORT 0x800 /* FIP support in HBA */ 574#define HBA_FIP_SUPPORT 0x800 /* FIP support in HBA */
558#define HBA_AER_ENABLED 0x1000 /* AER enabled with HBA */ 575#define HBA_AER_ENABLED 0x1000 /* AER enabled with HBA */
559#define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */ 576#define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */
577#define HBA_RRQ_ACTIVE 0x4000 /* process the rrq active list */
560 uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/ 578 uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
561 struct lpfc_dmabuf slim2p; 579 struct lpfc_dmabuf slim2p;
562 580
@@ -606,6 +624,7 @@ struct lpfc_hba {
606 /* HBA Config Parameters */ 624 /* HBA Config Parameters */
607 uint32_t cfg_ack0; 625 uint32_t cfg_ack0;
608 uint32_t cfg_enable_npiv; 626 uint32_t cfg_enable_npiv;
627 uint32_t cfg_enable_rrq;
609 uint32_t cfg_topology; 628 uint32_t cfg_topology;
610 uint32_t cfg_link_speed; 629 uint32_t cfg_link_speed;
611 uint32_t cfg_cr_delay; 630 uint32_t cfg_cr_delay;
@@ -716,6 +735,7 @@ struct lpfc_hba {
716 uint32_t total_scsi_bufs; 735 uint32_t total_scsi_bufs;
717 struct list_head lpfc_iocb_list; 736 struct list_head lpfc_iocb_list;
718 uint32_t total_iocbq_bufs; 737 uint32_t total_iocbq_bufs;
738 struct list_head active_rrq_list;
719 spinlock_t hbalock; 739 spinlock_t hbalock;
720 740
721 /* pci_mem_pools */ 741 /* pci_mem_pools */
@@ -728,6 +748,7 @@ struct lpfc_hba {
728 748
729 mempool_t *mbox_mem_pool; 749 mempool_t *mbox_mem_pool;
730 mempool_t *nlp_mem_pool; 750 mempool_t *nlp_mem_pool;
751 mempool_t *rrq_pool;
731 752
732 struct fc_host_statistics link_stats; 753 struct fc_host_statistics link_stats;
733 enum intr_type_t intr_type; 754 enum intr_type_t intr_type;
@@ -784,6 +805,7 @@ struct lpfc_hba {
784 unsigned long skipped_hb; 805 unsigned long skipped_hb;
785 struct timer_list hb_tmofunc; 806 struct timer_list hb_tmofunc;
786 uint8_t hb_outstanding; 807 uint8_t hb_outstanding;
808 struct timer_list rrq_tmr;
787 enum hba_temp_state over_temp_state; 809 enum hba_temp_state over_temp_state;
788 /* ndlp reference management */ 810 /* ndlp reference management */
789 spinlock_t ndlp_lock; 811 spinlock_t ndlp_lock;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index c1cbec01345d..c06491b5862f 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -52,10 +52,6 @@
52#define LPFC_MIN_DEVLOSS_TMO 1 52#define LPFC_MIN_DEVLOSS_TMO 1
53#define LPFC_MAX_DEVLOSS_TMO 255 53#define LPFC_MAX_DEVLOSS_TMO 255
54 54
55#define LPFC_MAX_LINK_SPEED 8
56#define LPFC_LINK_SPEED_BITMAP 0x00000117
57#define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8"
58
59/** 55/**
60 * lpfc_jedec_to_ascii - Hex to ascii convertor according to JEDEC rules 56 * lpfc_jedec_to_ascii - Hex to ascii convertor according to JEDEC rules
61 * @incr: integer to convert. 57 * @incr: integer to convert.
@@ -463,7 +459,7 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
463 if (phba->sli.sli_flag & LPFC_MENLO_MAINT) 459 if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
464 len += snprintf(buf + len, PAGE_SIZE-len, 460 len += snprintf(buf + len, PAGE_SIZE-len,
465 " Menlo Maint Mode\n"); 461 " Menlo Maint Mode\n");
466 else if (phba->fc_topology == TOPOLOGY_LOOP) { 462 else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
467 if (vport->fc_flag & FC_PUBLIC_LOOP) 463 if (vport->fc_flag & FC_PUBLIC_LOOP)
468 len += snprintf(buf + len, PAGE_SIZE-len, 464 len += snprintf(buf + len, PAGE_SIZE-len,
469 " Public Loop\n"); 465 " Public Loop\n");
@@ -1981,6 +1977,13 @@ lpfc_param_show(enable_npiv);
1981lpfc_param_init(enable_npiv, 1, 0, 1); 1977lpfc_param_init(enable_npiv, 1, 0, 1);
1982static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL); 1978static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL);
1983 1979
1980int lpfc_enable_rrq;
1981module_param(lpfc_enable_rrq, int, 0);
1982MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality");
1983lpfc_param_show(enable_rrq);
1984lpfc_param_init(enable_rrq, 0, 0, 1);
1985static DEVICE_ATTR(lpfc_enable_rrq, S_IRUGO, lpfc_enable_rrq_show, NULL);
1986
1984/* 1987/*
1985# lpfc_suppress_link_up: Bring link up at initialization 1988# lpfc_suppress_link_up: Bring link up at initialization
1986# 0x0 = bring link up (issue MBX_INIT_LINK) 1989# 0x0 = bring link up (issue MBX_INIT_LINK)
@@ -2837,14 +2840,8 @@ static struct bin_attribute sysfs_drvr_stat_data_attr = {
2837/* 2840/*
2838# lpfc_link_speed: Link speed selection for initializing the Fibre Channel 2841# lpfc_link_speed: Link speed selection for initializing the Fibre Channel
2839# connection. 2842# connection.
2840# 0 = auto select (default) 2843# Value range is [0,16]. Default value is 0.
2841# 1 = 1 Gigabaud
2842# 2 = 2 Gigabaud
2843# 4 = 4 Gigabaud
2844# 8 = 8 Gigabaud
2845# Value range is [0,8]. Default value is 0.
2846*/ 2844*/
2847
2848/** 2845/**
2849 * lpfc_link_speed_set - Set the adapters link speed 2846 * lpfc_link_speed_set - Set the adapters link speed
2850 * @phba: lpfc_hba pointer. 2847 * @phba: lpfc_hba pointer.
@@ -2869,7 +2866,7 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
2869 struct Scsi_Host *shost = class_to_shost(dev); 2866 struct Scsi_Host *shost = class_to_shost(dev);
2870 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2867 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2871 struct lpfc_hba *phba = vport->phba; 2868 struct lpfc_hba *phba = vport->phba;
2872 int val = 0; 2869 int val = LPFC_USER_LINK_SPEED_AUTO;
2873 int nolip = 0; 2870 int nolip = 0;
2874 const char *val_buf = buf; 2871 const char *val_buf = buf;
2875 int err; 2872 int err;
@@ -2885,15 +2882,20 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
2885 if (sscanf(val_buf, "%i", &val) != 1) 2882 if (sscanf(val_buf, "%i", &val) != 1)
2886 return -EINVAL; 2883 return -EINVAL;
2887 2884
2888 if (((val == LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) || 2885 if (((val == LPFC_USER_LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) ||
2889 ((val == LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) || 2886 ((val == LPFC_USER_LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) ||
2890 ((val == LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) || 2887 ((val == LPFC_USER_LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) ||
2891 ((val == LINK_SPEED_8G) && !(phba->lmt & LMT_8Gb)) || 2888 ((val == LPFC_USER_LINK_SPEED_8G) && !(phba->lmt & LMT_8Gb)) ||
2892 ((val == LINK_SPEED_10G) && !(phba->lmt & LMT_10Gb))) 2889 ((val == LPFC_USER_LINK_SPEED_10G) && !(phba->lmt & LMT_10Gb)) ||
2890 ((val == LPFC_USER_LINK_SPEED_16G) && !(phba->lmt & LMT_16Gb))) {
2891 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2892 "2879 lpfc_link_speed attribute cannot be set "
2893 "to %d. Speed is not supported by this port.\n",
2894 val);
2893 return -EINVAL; 2895 return -EINVAL;
2894 2896 }
2895 if ((val >= 0 && val <= 8) 2897 if ((val >= 0) && (val <= LPFC_USER_LINK_SPEED_MAX) &&
2896 && (LPFC_LINK_SPEED_BITMAP & (1 << val))) { 2898 (LPFC_USER_LINK_SPEED_BITMAP & (1 << val))) {
2897 prev_val = phba->cfg_link_speed; 2899 prev_val = phba->cfg_link_speed;
2898 phba->cfg_link_speed = val; 2900 phba->cfg_link_speed = val;
2899 if (nolip) 2901 if (nolip)
@@ -2906,11 +2908,9 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
2906 } else 2908 } else
2907 return strlen(buf); 2909 return strlen(buf);
2908 } 2910 }
2909
2910 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2911 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2911 "%d:0469 lpfc_link_speed attribute cannot be set to %d, " 2912 "0469 lpfc_link_speed attribute cannot be set to %d, "
2912 "allowed range is [0, 8]\n", 2913 "allowed values are ["LPFC_LINK_SPEED_STRING"]\n", val);
2913 phba->brd_no, val);
2914 return -EINVAL; 2914 return -EINVAL;
2915} 2915}
2916 2916
@@ -2938,8 +2938,8 @@ lpfc_param_show(link_speed)
2938static int 2938static int
2939lpfc_link_speed_init(struct lpfc_hba *phba, int val) 2939lpfc_link_speed_init(struct lpfc_hba *phba, int val)
2940{ 2940{
2941 if ((val >= 0 && val <= LPFC_MAX_LINK_SPEED) 2941 if ((val >= 0) && (val <= LPFC_USER_LINK_SPEED_MAX) &&
2942 && (LPFC_LINK_SPEED_BITMAP & (1 << val))) { 2942 (LPFC_USER_LINK_SPEED_BITMAP & (1 << val))) {
2943 phba->cfg_link_speed = val; 2943 phba->cfg_link_speed = val;
2944 return 0; 2944 return 0;
2945 } 2945 }
@@ -2947,12 +2947,12 @@ lpfc_link_speed_init(struct lpfc_hba *phba, int val)
2947 "0405 lpfc_link_speed attribute cannot " 2947 "0405 lpfc_link_speed attribute cannot "
2948 "be set to %d, allowed values are " 2948 "be set to %d, allowed values are "
2949 "["LPFC_LINK_SPEED_STRING"]\n", val); 2949 "["LPFC_LINK_SPEED_STRING"]\n", val);
2950 phba->cfg_link_speed = 0; 2950 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
2951 return -EINVAL; 2951 return -EINVAL;
2952} 2952}
2953 2953
2954static DEVICE_ATTR(lpfc_link_speed, S_IRUGO | S_IWUSR, 2954static DEVICE_ATTR(lpfc_link_speed, S_IRUGO | S_IWUSR,
2955 lpfc_link_speed_show, lpfc_link_speed_store); 2955 lpfc_link_speed_show, lpfc_link_speed_store);
2956 2956
2957/* 2957/*
2958# lpfc_aer_support: Support PCIe device Advanced Error Reporting (AER) 2958# lpfc_aer_support: Support PCIe device Advanced Error Reporting (AER)
@@ -3305,12 +3305,12 @@ LPFC_ATTR_R(fcp_eq_count, LPFC_FP_EQN_DEF, LPFC_FP_EQN_MIN, LPFC_FP_EQN_MAX,
3305LPFC_ATTR_R(enable_hba_reset, 1, 0, 1, "Enable HBA resets from the driver."); 3305LPFC_ATTR_R(enable_hba_reset, 1, 0, 1, "Enable HBA resets from the driver.");
3306 3306
3307/* 3307/*
3308# lpfc_enable_hba_heartbeat: Enable HBA heartbeat timer.. 3308# lpfc_enable_hba_heartbeat: Disable HBA heartbeat timer..
3309# 0 = HBA Heartbeat disabled 3309# 0 = HBA Heartbeat disabled
3310# 1 = HBA Heartbeat enabled (default) 3310# 1 = HBA Heartbeat enabled (default)
3311# Value range is [0,1]. Default value is 1. 3311# Value range is [0,1]. Default value is 1.
3312*/ 3312*/
3313LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat."); 3313LPFC_ATTR_R(enable_hba_heartbeat, 0, 0, 1, "Enable HBA Heartbeat.");
3314 3314
3315/* 3315/*
3316# lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF) 3316# lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF)
@@ -3401,6 +3401,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
3401 &dev_attr_lpfc_fdmi_on, 3401 &dev_attr_lpfc_fdmi_on,
3402 &dev_attr_lpfc_max_luns, 3402 &dev_attr_lpfc_max_luns,
3403 &dev_attr_lpfc_enable_npiv, 3403 &dev_attr_lpfc_enable_npiv,
3404 &dev_attr_lpfc_enable_rrq,
3404 &dev_attr_nport_evt_cnt, 3405 &dev_attr_nport_evt_cnt,
3405 &dev_attr_board_mode, 3406 &dev_attr_board_mode,
3406 &dev_attr_max_vpi, 3407 &dev_attr_max_vpi,
@@ -3798,8 +3799,7 @@ sysfs_mbox_read(struct file *filp, struct kobject *kobj,
3798 } 3799 }
3799 break; 3800 break;
3800 case MBX_READ_SPARM64: 3801 case MBX_READ_SPARM64:
3801 case MBX_READ_LA: 3802 case MBX_READ_TOPOLOGY:
3802 case MBX_READ_LA64:
3803 case MBX_REG_LOGIN: 3803 case MBX_REG_LOGIN:
3804 case MBX_REG_LOGIN64: 3804 case MBX_REG_LOGIN64:
3805 case MBX_CONFIG_PORT: 3805 case MBX_CONFIG_PORT:
@@ -3989,7 +3989,7 @@ lpfc_get_host_port_type(struct Scsi_Host *shost)
3989 if (vport->port_type == LPFC_NPIV_PORT) { 3989 if (vport->port_type == LPFC_NPIV_PORT) {
3990 fc_host_port_type(shost) = FC_PORTTYPE_NPIV; 3990 fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
3991 } else if (lpfc_is_link_up(phba)) { 3991 } else if (lpfc_is_link_up(phba)) {
3992 if (phba->fc_topology == TOPOLOGY_LOOP) { 3992 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
3993 if (vport->fc_flag & FC_PUBLIC_LOOP) 3993 if (vport->fc_flag & FC_PUBLIC_LOOP)
3994 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT; 3994 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
3995 else 3995 else
@@ -4058,23 +4058,26 @@ lpfc_get_host_speed(struct Scsi_Host *shost)
4058 4058
4059 if (lpfc_is_link_up(phba)) { 4059 if (lpfc_is_link_up(phba)) {
4060 switch(phba->fc_linkspeed) { 4060 switch(phba->fc_linkspeed) {
4061 case LA_1GHZ_LINK: 4061 case LPFC_LINK_SPEED_1GHZ:
4062 fc_host_speed(shost) = FC_PORTSPEED_1GBIT; 4062 fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
4063 break; 4063 break;
4064 case LA_2GHZ_LINK: 4064 case LPFC_LINK_SPEED_2GHZ:
4065 fc_host_speed(shost) = FC_PORTSPEED_2GBIT; 4065 fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
4066 break; 4066 break;
4067 case LA_4GHZ_LINK: 4067 case LPFC_LINK_SPEED_4GHZ:
4068 fc_host_speed(shost) = FC_PORTSPEED_4GBIT; 4068 fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
4069 break; 4069 break;
4070 case LA_8GHZ_LINK: 4070 case LPFC_LINK_SPEED_8GHZ:
4071 fc_host_speed(shost) = FC_PORTSPEED_8GBIT; 4071 fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
4072 break; 4072 break;
4073 case LA_10GHZ_LINK: 4073 case LPFC_LINK_SPEED_10GHZ:
4074 fc_host_speed(shost) = FC_PORTSPEED_10GBIT; 4074 fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
4075 break; 4075 break;
4076 default: 4076 case LPFC_LINK_SPEED_16GHZ:
4077 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; 4077 fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
4078 break;
4079 default:
4080 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
4078 break; 4081 break;
4079 } 4082 }
4080 } else 4083 } else
@@ -4097,7 +4100,7 @@ lpfc_get_host_fabric_name (struct Scsi_Host *shost)
4097 spin_lock_irq(shost->host_lock); 4100 spin_lock_irq(shost->host_lock);
4098 4101
4099 if ((vport->fc_flag & FC_FABRIC) || 4102 if ((vport->fc_flag & FC_FABRIC) ||
4100 ((phba->fc_topology == TOPOLOGY_LOOP) && 4103 ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
4101 (vport->fc_flag & FC_PUBLIC_LOOP))) 4104 (vport->fc_flag & FC_PUBLIC_LOOP)))
4102 node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn); 4105 node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn);
4103 else 4106 else
@@ -4208,11 +4211,11 @@ lpfc_get_stats(struct Scsi_Host *shost)
4208 hs->invalid_crc_count -= lso->invalid_crc_count; 4211 hs->invalid_crc_count -= lso->invalid_crc_count;
4209 hs->error_frames -= lso->error_frames; 4212 hs->error_frames -= lso->error_frames;
4210 4213
4211 if (phba->hba_flag & HBA_FCOE_SUPPORT) { 4214 if (phba->hba_flag & HBA_FCOE_MODE) {
4212 hs->lip_count = -1; 4215 hs->lip_count = -1;
4213 hs->nos_count = (phba->link_events >> 1); 4216 hs->nos_count = (phba->link_events >> 1);
4214 hs->nos_count -= lso->link_events; 4217 hs->nos_count -= lso->link_events;
4215 } else if (phba->fc_topology == TOPOLOGY_LOOP) { 4218 } else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
4216 hs->lip_count = (phba->fc_eventTag >> 1); 4219 hs->lip_count = (phba->fc_eventTag >> 1);
4217 hs->lip_count -= lso->link_events; 4220 hs->lip_count -= lso->link_events;
4218 hs->nos_count = -1; 4221 hs->nos_count = -1;
@@ -4303,7 +4306,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
4303 lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord; 4306 lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
4304 lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt; 4307 lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
4305 lso->error_frames = pmb->un.varRdLnk.crcCnt; 4308 lso->error_frames = pmb->un.varRdLnk.crcCnt;
4306 if (phba->hba_flag & HBA_FCOE_SUPPORT) 4309 if (phba->hba_flag & HBA_FCOE_MODE)
4307 lso->link_events = (phba->link_events >> 1); 4310 lso->link_events = (phba->link_events >> 1);
4308 else 4311 else
4309 lso->link_events = (phba->fc_eventTag >> 1); 4312 lso->link_events = (phba->fc_eventTag >> 1);
@@ -4615,6 +4618,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
4615 lpfc_link_speed_init(phba, lpfc_link_speed); 4618 lpfc_link_speed_init(phba, lpfc_link_speed);
4616 lpfc_poll_tmo_init(phba, lpfc_poll_tmo); 4619 lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
4617 lpfc_enable_npiv_init(phba, lpfc_enable_npiv); 4620 lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
4621 lpfc_enable_rrq_init(phba, lpfc_enable_rrq);
4618 lpfc_use_msi_init(phba, lpfc_use_msi); 4622 lpfc_use_msi_init(phba, lpfc_use_msi);
4619 lpfc_fcp_imax_init(phba, lpfc_fcp_imax); 4623 lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
4620 lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count); 4624 lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count);
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 7260c3af555a..0dd43bb91618 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -162,7 +162,6 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
162 struct lpfc_iocbq *cmdiocbq, 162 struct lpfc_iocbq *cmdiocbq,
163 struct lpfc_iocbq *rspiocbq) 163 struct lpfc_iocbq *rspiocbq)
164{ 164{
165 unsigned long iflags;
166 struct bsg_job_data *dd_data; 165 struct bsg_job_data *dd_data;
167 struct fc_bsg_job *job; 166 struct fc_bsg_job *job;
168 IOCB_t *rsp; 167 IOCB_t *rsp;
@@ -173,9 +172,10 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
173 int rc = 0; 172 int rc = 0;
174 173
175 spin_lock_irqsave(&phba->ct_ev_lock, flags); 174 spin_lock_irqsave(&phba->ct_ev_lock, flags);
176 dd_data = cmdiocbq->context1; 175 dd_data = cmdiocbq->context2;
177 if (!dd_data) { 176 if (!dd_data) {
178 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 177 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
178 lpfc_sli_release_iocbq(phba, cmdiocbq);
179 return; 179 return;
180 } 180 }
181 181
@@ -183,17 +183,9 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
183 job = iocb->set_job; 183 job = iocb->set_job;
184 job->dd_data = NULL; /* so timeout handler does not reply */ 184 job->dd_data = NULL; /* so timeout handler does not reply */
185 185
186 spin_lock_irqsave(&phba->hbalock, iflags);
187 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
188 if (cmdiocbq->context2 && rspiocbq)
189 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
190 &rspiocbq->iocb, sizeof(IOCB_t));
191 spin_unlock_irqrestore(&phba->hbalock, iflags);
192
193 bmp = iocb->bmp; 186 bmp = iocb->bmp;
194 rspiocbq = iocb->rspiocbq;
195 rsp = &rspiocbq->iocb; 187 rsp = &rspiocbq->iocb;
196 ndlp = iocb->ndlp; 188 ndlp = cmdiocbq->context1;
197 189
198 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, 190 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
199 job->request_payload.sg_cnt, DMA_TO_DEVICE); 191 job->request_payload.sg_cnt, DMA_TO_DEVICE);
@@ -220,7 +212,6 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
220 rsp->un.genreq64.bdl.bdeSize; 212 rsp->un.genreq64.bdl.bdeSize;
221 213
222 lpfc_mbuf_free(phba, bmp->virt, bmp->phys); 214 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
223 lpfc_sli_release_iocbq(phba, rspiocbq);
224 lpfc_sli_release_iocbq(phba, cmdiocbq); 215 lpfc_sli_release_iocbq(phba, cmdiocbq);
225 lpfc_nlp_put(ndlp); 216 lpfc_nlp_put(ndlp);
226 kfree(bmp); 217 kfree(bmp);
@@ -247,9 +238,7 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
247 struct ulp_bde64 *bpl = NULL; 238 struct ulp_bde64 *bpl = NULL;
248 uint32_t timeout; 239 uint32_t timeout;
249 struct lpfc_iocbq *cmdiocbq = NULL; 240 struct lpfc_iocbq *cmdiocbq = NULL;
250 struct lpfc_iocbq *rspiocbq = NULL;
251 IOCB_t *cmd; 241 IOCB_t *cmd;
252 IOCB_t *rsp;
253 struct lpfc_dmabuf *bmp = NULL; 242 struct lpfc_dmabuf *bmp = NULL;
254 int request_nseg; 243 int request_nseg;
255 int reply_nseg; 244 int reply_nseg;
@@ -296,17 +285,10 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
296 } 285 }
297 286
298 cmd = &cmdiocbq->iocb; 287 cmd = &cmdiocbq->iocb;
299 rspiocbq = lpfc_sli_get_iocbq(phba);
300 if (!rspiocbq) {
301 rc = -ENOMEM;
302 goto free_cmdiocbq;
303 }
304
305 rsp = &rspiocbq->iocb;
306 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys); 288 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
307 if (!bmp->virt) { 289 if (!bmp->virt) {
308 rc = -ENOMEM; 290 rc = -ENOMEM;
309 goto free_rspiocbq; 291 goto free_cmdiocbq;
310 } 292 }
311 293
312 INIT_LIST_HEAD(&bmp->list); 294 INIT_LIST_HEAD(&bmp->list);
@@ -358,14 +340,12 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
358 cmd->ulpTimeout = timeout; 340 cmd->ulpTimeout = timeout;
359 341
360 cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp; 342 cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
361 cmdiocbq->context1 = dd_data; 343 cmdiocbq->context1 = ndlp;
362 cmdiocbq->context2 = rspiocbq; 344 cmdiocbq->context2 = dd_data;
363 dd_data->type = TYPE_IOCB; 345 dd_data->type = TYPE_IOCB;
364 dd_data->context_un.iocb.cmdiocbq = cmdiocbq; 346 dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
365 dd_data->context_un.iocb.rspiocbq = rspiocbq;
366 dd_data->context_un.iocb.set_job = job; 347 dd_data->context_un.iocb.set_job = job;
367 dd_data->context_un.iocb.bmp = bmp; 348 dd_data->context_un.iocb.bmp = bmp;
368 dd_data->context_un.iocb.ndlp = ndlp;
369 349
370 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 350 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
371 creg_val = readl(phba->HCregaddr); 351 creg_val = readl(phba->HCregaddr);
@@ -391,8 +371,6 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
391 371
392 lpfc_mbuf_free(phba, bmp->virt, bmp->phys); 372 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
393 373
394free_rspiocbq:
395 lpfc_sli_release_iocbq(phba, rspiocbq);
396free_cmdiocbq: 374free_cmdiocbq:
397 lpfc_sli_release_iocbq(phba, cmdiocbq); 375 lpfc_sli_release_iocbq(phba, cmdiocbq);
398free_bmp: 376free_bmp:
@@ -1220,7 +1198,7 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
1220 int rc = 0; 1198 int rc = 0;
1221 1199
1222 spin_lock_irqsave(&phba->ct_ev_lock, flags); 1200 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1223 dd_data = cmdiocbq->context1; 1201 dd_data = cmdiocbq->context2;
1224 /* normal completion and timeout crossed paths, already done */ 1202 /* normal completion and timeout crossed paths, already done */
1225 if (!dd_data) { 1203 if (!dd_data) {
1226 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 1204 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
@@ -1369,8 +1347,8 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
1369 ctiocb->context3 = bmp; 1347 ctiocb->context3 = bmp;
1370 1348
1371 ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp; 1349 ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
1372 ctiocb->context1 = dd_data; 1350 ctiocb->context2 = dd_data;
1373 ctiocb->context2 = NULL; 1351 ctiocb->context1 = ndlp;
1374 dd_data->type = TYPE_IOCB; 1352 dd_data->type = TYPE_IOCB;
1375 dd_data->context_un.iocb.cmdiocbq = ctiocb; 1353 dd_data->context_un.iocb.cmdiocbq = ctiocb;
1376 dd_data->context_un.iocb.rspiocbq = NULL; 1354 dd_data->context_un.iocb.rspiocbq = NULL;
@@ -1641,7 +1619,7 @@ job_error:
1641 * This function obtains a remote port login id so the diag loopback test 1619 * This function obtains a remote port login id so the diag loopback test
1642 * can send and receive its own unsolicited CT command. 1620 * can send and receive its own unsolicited CT command.
1643 **/ 1621 **/
1644static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t * rpi) 1622static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
1645{ 1623{
1646 LPFC_MBOXQ_t *mbox; 1624 LPFC_MBOXQ_t *mbox;
1647 struct lpfc_dmabuf *dmabuff; 1625 struct lpfc_dmabuf *dmabuff;
@@ -1651,10 +1629,14 @@ static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t * rpi)
1651 if (!mbox) 1629 if (!mbox)
1652 return -ENOMEM; 1630 return -ENOMEM;
1653 1631
1632 if (phba->sli_rev == LPFC_SLI_REV4)
1633 *rpi = lpfc_sli4_alloc_rpi(phba);
1654 status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID, 1634 status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
1655 (uint8_t *)&phba->pport->fc_sparam, mbox, 0); 1635 (uint8_t *)&phba->pport->fc_sparam, mbox, *rpi);
1656 if (status) { 1636 if (status) {
1657 mempool_free(mbox, phba->mbox_mem_pool); 1637 mempool_free(mbox, phba->mbox_mem_pool);
1638 if (phba->sli_rev == LPFC_SLI_REV4)
1639 lpfc_sli4_free_rpi(phba, *rpi);
1658 return -ENOMEM; 1640 return -ENOMEM;
1659 } 1641 }
1660 1642
@@ -1668,6 +1650,8 @@ static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t * rpi)
1668 kfree(dmabuff); 1650 kfree(dmabuff);
1669 if (status != MBX_TIMEOUT) 1651 if (status != MBX_TIMEOUT)
1670 mempool_free(mbox, phba->mbox_mem_pool); 1652 mempool_free(mbox, phba->mbox_mem_pool);
1653 if (phba->sli_rev == LPFC_SLI_REV4)
1654 lpfc_sli4_free_rpi(phba, *rpi);
1671 return -ENODEV; 1655 return -ENODEV;
1672 } 1656 }
1673 1657
@@ -1704,8 +1688,9 @@ static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
1704 mempool_free(mbox, phba->mbox_mem_pool); 1688 mempool_free(mbox, phba->mbox_mem_pool);
1705 return -EIO; 1689 return -EIO;
1706 } 1690 }
1707
1708 mempool_free(mbox, phba->mbox_mem_pool); 1691 mempool_free(mbox, phba->mbox_mem_pool);
1692 if (phba->sli_rev == LPFC_SLI_REV4)
1693 lpfc_sli4_free_rpi(phba, rpi);
1709 return 0; 1694 return 0;
1710} 1695}
1711 1696
@@ -2102,7 +2087,7 @@ lpfc_bsg_diag_test(struct fc_bsg_job *job)
2102 uint32_t size; 2087 uint32_t size;
2103 uint32_t full_size; 2088 uint32_t full_size;
2104 size_t segment_len = 0, segment_offset = 0, current_offset = 0; 2089 size_t segment_len = 0, segment_offset = 0, current_offset = 0;
2105 uint16_t rpi; 2090 uint16_t rpi = 0;
2106 struct lpfc_iocbq *cmdiocbq, *rspiocbq; 2091 struct lpfc_iocbq *cmdiocbq, *rspiocbq;
2107 IOCB_t *cmd, *rsp; 2092 IOCB_t *cmd, *rsp;
2108 struct lpfc_sli_ct_request *ctreq; 2093 struct lpfc_sli_ct_request *ctreq;
@@ -2162,7 +2147,7 @@ lpfc_bsg_diag_test(struct fc_bsg_job *job)
2162 goto loopback_test_exit; 2147 goto loopback_test_exit;
2163 } 2148 }
2164 2149
2165 if (size >= BUF_SZ_4K) { 2150 if (full_size >= BUF_SZ_4K) {
2166 /* 2151 /*
2167 * Allocate memory for ioctl data. If buffer is bigger than 64k, 2152 * Allocate memory for ioctl data. If buffer is bigger than 64k,
2168 * then we allocate 64k and re-use that buffer over and over to 2153 * then we allocate 64k and re-use that buffer over and over to
@@ -2171,7 +2156,7 @@ lpfc_bsg_diag_test(struct fc_bsg_job *job)
2171 * problem with GET_FCPTARGETMAPPING... 2156 * problem with GET_FCPTARGETMAPPING...
2172 */ 2157 */
2173 if (size <= (64 * 1024)) 2158 if (size <= (64 * 1024))
2174 total_mem = size; 2159 total_mem = full_size;
2175 else 2160 else
2176 total_mem = 64 * 1024; 2161 total_mem = 64 * 1024;
2177 } else 2162 } else
@@ -2189,7 +2174,6 @@ lpfc_bsg_diag_test(struct fc_bsg_job *job)
2189 sg_copy_to_buffer(job->request_payload.sg_list, 2174 sg_copy_to_buffer(job->request_payload.sg_list,
2190 job->request_payload.sg_cnt, 2175 job->request_payload.sg_cnt,
2191 ptr, size); 2176 ptr, size);
2192
2193 rc = lpfcdiag_loop_self_reg(phba, &rpi); 2177 rc = lpfcdiag_loop_self_reg(phba, &rpi);
2194 if (rc) 2178 if (rc)
2195 goto loopback_test_exit; 2179 goto loopback_test_exit;
@@ -2601,12 +2585,11 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
2601 phba->wait_4_mlo_maint_flg = 1; 2585 phba->wait_4_mlo_maint_flg = 1;
2602 } else if (mb->un.varWords[0] == SETVAR_MLORST) { 2586 } else if (mb->un.varWords[0] == SETVAR_MLORST) {
2603 phba->link_flag &= ~LS_LOOPBACK_MODE; 2587 phba->link_flag &= ~LS_LOOPBACK_MODE;
2604 phba->fc_topology = TOPOLOGY_PT_PT; 2588 phba->fc_topology = LPFC_TOPOLOGY_PT_PT;
2605 } 2589 }
2606 break; 2590 break;
2607 case MBX_READ_SPARM64: 2591 case MBX_READ_SPARM64:
2608 case MBX_READ_LA: 2592 case MBX_READ_TOPOLOGY:
2609 case MBX_READ_LA64:
2610 case MBX_REG_LOGIN: 2593 case MBX_REG_LOGIN:
2611 case MBX_REG_LOGIN64: 2594 case MBX_REG_LOGIN64:
2612 case MBX_CONFIG_PORT: 2595 case MBX_CONFIG_PORT:
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index a5f5a093a8a4..17fde522c84a 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -31,7 +31,7 @@ void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
31void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); 31void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
32 32
33void lpfc_heart_beat(struct lpfc_hba *, LPFC_MBOXQ_t *); 33void lpfc_heart_beat(struct lpfc_hba *, LPFC_MBOXQ_t *);
34int lpfc_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *, struct lpfc_dmabuf *); 34int lpfc_read_topology(struct lpfc_hba *, LPFC_MBOXQ_t *, struct lpfc_dmabuf *);
35void lpfc_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *); 35void lpfc_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
36void lpfc_issue_clear_la(struct lpfc_hba *, struct lpfc_vport *); 36void lpfc_issue_clear_la(struct lpfc_hba *, struct lpfc_vport *);
37void lpfc_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *); 37void lpfc_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -40,7 +40,7 @@ int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *, int);
40void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *); 40void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *);
41void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *); 41void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *);
42int lpfc_reg_rpi(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *, 42int lpfc_reg_rpi(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *,
43 LPFC_MBOXQ_t *, uint32_t); 43 LPFC_MBOXQ_t *, uint16_t);
44void lpfc_set_var(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t); 44void lpfc_set_var(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
45void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); 45void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
46void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); 46void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
@@ -64,7 +64,7 @@ void lpfc_cleanup_pending_mbox(struct lpfc_vport *);
64int lpfc_linkdown(struct lpfc_hba *); 64int lpfc_linkdown(struct lpfc_hba *);
65void lpfc_linkdown_port(struct lpfc_vport *); 65void lpfc_linkdown_port(struct lpfc_vport *);
66void lpfc_port_link_failure(struct lpfc_vport *); 66void lpfc_port_link_failure(struct lpfc_vport *);
67void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *); 67void lpfc_mbx_cmpl_read_topology(struct lpfc_hba *, LPFC_MBOXQ_t *);
68void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); 68void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
69void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *); 69void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *);
70void lpfc_retry_pport_discovery(struct lpfc_hba *); 70void lpfc_retry_pport_discovery(struct lpfc_hba *);
@@ -121,6 +121,7 @@ void lpfc_end_rscn(struct lpfc_vport *);
121int lpfc_els_chk_latt(struct lpfc_vport *); 121int lpfc_els_chk_latt(struct lpfc_vport *);
122int lpfc_els_abort_flogi(struct lpfc_hba *); 122int lpfc_els_abort_flogi(struct lpfc_hba *);
123int lpfc_initial_flogi(struct lpfc_vport *); 123int lpfc_initial_flogi(struct lpfc_vport *);
124void lpfc_issue_init_vfi(struct lpfc_vport *);
124int lpfc_initial_fdisc(struct lpfc_vport *); 125int lpfc_initial_fdisc(struct lpfc_vport *);
125int lpfc_issue_els_plogi(struct lpfc_vport *, uint32_t, uint8_t); 126int lpfc_issue_els_plogi(struct lpfc_vport *, uint32_t, uint8_t);
126int lpfc_issue_els_prli(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t); 127int lpfc_issue_els_prli(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
@@ -415,5 +416,13 @@ struct lpfc_iocbq *lpfc_sli_ringtx_get(struct lpfc_hba *,
415int __lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t, 416int __lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
416 struct lpfc_iocbq *, uint32_t); 417 struct lpfc_iocbq *, uint32_t);
417uint32_t lpfc_drain_txq(struct lpfc_hba *); 418uint32_t lpfc_drain_txq(struct lpfc_hba *);
418 419void lpfc_clr_rrq_active(struct lpfc_hba *, uint16_t, struct lpfc_node_rrq *);
419 420int lpfc_test_rrq_active(struct lpfc_hba *, struct lpfc_nodelist *, uint16_t);
421void lpfc_handle_rrq_active(struct lpfc_hba *);
422int lpfc_send_rrq(struct lpfc_hba *, struct lpfc_node_rrq *);
423int lpfc_set_rrq_active(struct lpfc_hba *, struct lpfc_nodelist *,
424 uint16_t, uint16_t, uint16_t);
425void lpfc_cleanup_wt_rrqs(struct lpfc_hba *);
426void lpfc_cleanup_vports_rrqs(struct lpfc_vport *);
427struct lpfc_node_rrq *lpfc_get_active_rrq(struct lpfc_vport *, uint16_t,
428 uint32_t);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 463b74902ac4..c004fa9a681e 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -48,14 +48,14 @@
48#include "lpfc_vport.h" 48#include "lpfc_vport.h"
49#include "lpfc_debugfs.h" 49#include "lpfc_debugfs.h"
50 50
51#define HBA_PORTSPEED_UNKNOWN 0 /* Unknown - transceiver 51/* FDMI Port Speed definitions */
52 * incapable of reporting */ 52#define HBA_PORTSPEED_1GBIT 0x0001 /* 1 GBit/sec */
53#define HBA_PORTSPEED_1GBIT 1 /* 1 GBit/sec */ 53#define HBA_PORTSPEED_2GBIT 0x0002 /* 2 GBit/sec */
54#define HBA_PORTSPEED_2GBIT 2 /* 2 GBit/sec */ 54#define HBA_PORTSPEED_4GBIT 0x0008 /* 4 GBit/sec */
55#define HBA_PORTSPEED_4GBIT 8 /* 4 GBit/sec */ 55#define HBA_PORTSPEED_10GBIT 0x0004 /* 10 GBit/sec */
56#define HBA_PORTSPEED_8GBIT 16 /* 8 GBit/sec */ 56#define HBA_PORTSPEED_8GBIT 0x0010 /* 8 GBit/sec */
57#define HBA_PORTSPEED_10GBIT 4 /* 10 GBit/sec */ 57#define HBA_PORTSPEED_16GBIT 0x0020 /* 16 GBit/sec */
58#define HBA_PORTSPEED_NOT_NEGOTIATED 5 /* Speed not established */ 58#define HBA_PORTSPEED_UNKNOWN 0x0800 /* Unknown */
59 59
60#define FOURBYTES 4 60#define FOURBYTES 4
61 61
@@ -1593,8 +1593,10 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
1593 ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4); 1593 ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4);
1594 1594
1595 ae->un.SupportSpeed = 0; 1595 ae->un.SupportSpeed = 0;
1596 if (phba->lmt & LMT_16Gb)
1597 ae->un.SupportSpeed |= HBA_PORTSPEED_16GBIT;
1596 if (phba->lmt & LMT_10Gb) 1598 if (phba->lmt & LMT_10Gb)
1597 ae->un.SupportSpeed = HBA_PORTSPEED_10GBIT; 1599 ae->un.SupportSpeed |= HBA_PORTSPEED_10GBIT;
1598 if (phba->lmt & LMT_8Gb) 1600 if (phba->lmt & LMT_8Gb)
1599 ae->un.SupportSpeed |= HBA_PORTSPEED_8GBIT; 1601 ae->un.SupportSpeed |= HBA_PORTSPEED_8GBIT;
1600 if (phba->lmt & LMT_4Gb) 1602 if (phba->lmt & LMT_4Gb)
@@ -1612,24 +1614,26 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
1612 ae->ad.bits.AttrType = be16_to_cpu(PORT_SPEED); 1614 ae->ad.bits.AttrType = be16_to_cpu(PORT_SPEED);
1613 ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4); 1615 ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4);
1614 switch(phba->fc_linkspeed) { 1616 switch(phba->fc_linkspeed) {
1615 case LA_1GHZ_LINK: 1617 case LPFC_LINK_SPEED_1GHZ:
1616 ae->un.PortSpeed = HBA_PORTSPEED_1GBIT; 1618 ae->un.PortSpeed = HBA_PORTSPEED_1GBIT;
1617 break; 1619 break;
1618 case LA_2GHZ_LINK: 1620 case LPFC_LINK_SPEED_2GHZ:
1619 ae->un.PortSpeed = HBA_PORTSPEED_2GBIT; 1621 ae->un.PortSpeed = HBA_PORTSPEED_2GBIT;
1620 break; 1622 break;
1621 case LA_4GHZ_LINK: 1623 case LPFC_LINK_SPEED_4GHZ:
1622 ae->un.PortSpeed = HBA_PORTSPEED_4GBIT; 1624 ae->un.PortSpeed = HBA_PORTSPEED_4GBIT;
1623 break; 1625 break;
1624 case LA_8GHZ_LINK: 1626 case LPFC_LINK_SPEED_8GHZ:
1625 ae->un.PortSpeed = HBA_PORTSPEED_8GBIT; 1627 ae->un.PortSpeed = HBA_PORTSPEED_8GBIT;
1626 break; 1628 break;
1627 case LA_10GHZ_LINK: 1629 case LPFC_LINK_SPEED_10GHZ:
1628 ae->un.PortSpeed = HBA_PORTSPEED_10GBIT; 1630 ae->un.PortSpeed = HBA_PORTSPEED_10GBIT;
1629 break; 1631 break;
1630 default: 1632 case LPFC_LINK_SPEED_16GHZ:
1631 ae->un.PortSpeed = 1633 ae->un.PortSpeed = HBA_PORTSPEED_16GBIT;
1632 HBA_PORTSPEED_UNKNOWN; 1634 break;
1635 default:
1636 ae->un.PortSpeed = HBA_PORTSPEED_UNKNOWN;
1633 break; 1637 break;
1634 } 1638 }
1635 pab->ab.EntryCnt++; 1639 pab->ab.EntryCnt++;
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 7cae69de36f7..1d84b63fccad 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -68,6 +68,12 @@ struct lpfc_fast_path_event {
68 } un; 68 } un;
69}; 69};
70 70
71#define LPFC_SLI4_MAX_XRI 1024 /* Used to make the ndlp's xri_bitmap */
72#define XRI_BITMAP_ULONGS (LPFC_SLI4_MAX_XRI / BITS_PER_LONG)
73struct lpfc_node_rrqs {
74 unsigned long xri_bitmap[XRI_BITMAP_ULONGS];
75};
76
71struct lpfc_nodelist { 77struct lpfc_nodelist {
72 struct list_head nlp_listp; 78 struct list_head nlp_listp;
73 struct lpfc_name nlp_portname; 79 struct lpfc_name nlp_portname;
@@ -110,8 +116,19 @@ struct lpfc_nodelist {
110 atomic_t cmd_pending; 116 atomic_t cmd_pending;
111 uint32_t cmd_qdepth; 117 uint32_t cmd_qdepth;
112 unsigned long last_change_time; 118 unsigned long last_change_time;
119 struct lpfc_node_rrqs active_rrqs;
113 struct lpfc_scsicmd_bkt *lat_data; /* Latency data */ 120 struct lpfc_scsicmd_bkt *lat_data; /* Latency data */
114}; 121};
122struct lpfc_node_rrq {
123 struct list_head list;
124 uint16_t xritag;
125 uint16_t send_rrq;
126 uint16_t rxid;
127 uint32_t nlp_DID; /* FC D_ID of entry */
128 struct lpfc_vport *vport;
129 struct lpfc_nodelist *ndlp;
130 unsigned long rrq_stop_time;
131};
115 132
116/* Defines for nlp_flag (uint32) */ 133/* Defines for nlp_flag (uint32) */
117#define NLP_IGNR_REG_CMPL 0x00000001 /* Rcvd rscn before we cmpl reg login */ 134#define NLP_IGNR_REG_CMPL 0x00000001 /* Rcvd rscn before we cmpl reg login */
@@ -136,7 +153,7 @@ struct lpfc_nodelist {
136#define NLP_NODEV_REMOVE 0x08000000 /* Defer removal till discovery ends */ 153#define NLP_NODEV_REMOVE 0x08000000 /* Defer removal till discovery ends */
137#define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */ 154#define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */
138#define NLP_SC_REQ 0x20000000 /* Target requires authentication */ 155#define NLP_SC_REQ 0x20000000 /* Target requires authentication */
139#define NLP_RPI_VALID 0x80000000 /* nlp_rpi is valid */ 156#define NLP_RPI_REGISTERED 0x80000000 /* nlp_rpi is valid */
140 157
141/* ndlp usage management macros */ 158/* ndlp usage management macros */
142#define NLP_CHK_NODE_ACT(ndlp) (((ndlp)->nlp_usg_map \ 159#define NLP_CHK_NODE_ACT(ndlp) (((ndlp)->nlp_usg_map \
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 884f4d321799..c62d567cc845 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -375,7 +375,8 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
375 err = 4; 375 err = 4;
376 goto fail; 376 goto fail;
377 } 377 }
378 rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, 0); 378 rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
379 ndlp->nlp_rpi);
379 if (rc) { 380 if (rc) {
380 err = 5; 381 err = 5;
381 goto fail_free_mbox; 382 goto fail_free_mbox;
@@ -523,7 +524,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
523 phba->fc_edtovResol = sp->cmn.edtovResolution; 524 phba->fc_edtovResol = sp->cmn.edtovResolution;
524 phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000; 525 phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;
525 526
526 if (phba->fc_topology == TOPOLOGY_LOOP) { 527 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
527 spin_lock_irq(shost->host_lock); 528 spin_lock_irq(shost->host_lock);
528 vport->fc_flag |= FC_PUBLIC_LOOP; 529 vport->fc_flag |= FC_PUBLIC_LOOP;
529 spin_unlock_irq(shost->host_lock); 530 spin_unlock_irq(shost->host_lock);
@@ -832,6 +833,12 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
832 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) 833 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
833 goto out; 834 goto out;
834 835
836 /* FLOGI failure */
837 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
838 "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n",
839 irsp->ulpStatus, irsp->un.ulpWord[4],
840 irsp->ulpTimeout);
841
835 /* FLOGI failed, so there is no fabric */ 842 /* FLOGI failed, so there is no fabric */
836 spin_lock_irq(shost->host_lock); 843 spin_lock_irq(shost->host_lock);
837 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 844 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
@@ -843,13 +850,16 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
843 */ 850 */
844 if (phba->alpa_map[0] == 0) { 851 if (phba->alpa_map[0] == 0) {
845 vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS; 852 vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
853 if ((phba->sli_rev == LPFC_SLI_REV4) &&
854 (!(vport->fc_flag & FC_VFI_REGISTERED) ||
855 (vport->fc_prevDID != vport->fc_myDID))) {
856 if (vport->fc_flag & FC_VFI_REGISTERED)
857 lpfc_sli4_unreg_all_rpis(vport);
858 lpfc_issue_reg_vfi(vport);
859 lpfc_nlp_put(ndlp);
860 goto out;
861 }
846 } 862 }
847
848 /* FLOGI failure */
849 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
850 "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n",
851 irsp->ulpStatus, irsp->un.ulpWord[4],
852 irsp->ulpTimeout);
853 goto flogifail; 863 goto flogifail;
854 } 864 }
855 spin_lock_irq(shost->host_lock); 865 spin_lock_irq(shost->host_lock);
@@ -879,7 +889,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
879 */ 889 */
880 if (sp->cmn.fPort) 890 if (sp->cmn.fPort)
881 rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp); 891 rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
882 else if (!(phba->hba_flag & HBA_FCOE_SUPPORT)) 892 else if (!(phba->hba_flag & HBA_FCOE_MODE))
883 rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp); 893 rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
884 else { 894 else {
885 lpfc_printf_vlog(vport, KERN_ERR, 895 lpfc_printf_vlog(vport, KERN_ERR,
@@ -1014,7 +1024,9 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1014 if (sp->cmn.fcphHigh < FC_PH3) 1024 if (sp->cmn.fcphHigh < FC_PH3)
1015 sp->cmn.fcphHigh = FC_PH3; 1025 sp->cmn.fcphHigh = FC_PH3;
1016 1026
1017 if (phba->sli_rev == LPFC_SLI_REV4) { 1027 if ((phba->sli_rev == LPFC_SLI_REV4) &&
1028 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
1029 LPFC_SLI_INTF_IF_TYPE_0)) {
1018 elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1); 1030 elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
1019 elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1); 1031 elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
1020 /* FLOGI needs to be 3 for WQE FCFI */ 1032 /* FLOGI needs to be 3 for WQE FCFI */
@@ -1027,7 +1039,7 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1027 icmd->ulpCt_l = 0; 1039 icmd->ulpCt_l = 0;
1028 } 1040 }
1029 1041
1030 if (phba->fc_topology != TOPOLOGY_LOOP) { 1042 if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
1031 icmd->un.elsreq64.myID = 0; 1043 icmd->un.elsreq64.myID = 0;
1032 icmd->un.elsreq64.fl = 1; 1044 icmd->un.elsreq64.fl = 1;
1033 } 1045 }
@@ -1281,6 +1293,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
1281 uint32_t rc, keepDID = 0; 1293 uint32_t rc, keepDID = 0;
1282 int put_node; 1294 int put_node;
1283 int put_rport; 1295 int put_rport;
1296 struct lpfc_node_rrqs rrq;
1284 1297
1285 /* Fabric nodes can have the same WWPN so we don't bother searching 1298 /* Fabric nodes can have the same WWPN so we don't bother searching
1286 * by WWPN. Just return the ndlp that was given to us. 1299 * by WWPN. Just return the ndlp that was given to us.
@@ -1298,6 +1311,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
1298 1311
1299 if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp)) 1312 if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp))
1300 return ndlp; 1313 return ndlp;
1314 memset(&rrq.xri_bitmap, 0, sizeof(new_ndlp->active_rrqs.xri_bitmap));
1301 1315
1302 if (!new_ndlp) { 1316 if (!new_ndlp) {
1303 rc = memcmp(&ndlp->nlp_portname, name, 1317 rc = memcmp(&ndlp->nlp_portname, name,
@@ -1318,12 +1332,25 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
1318 if (!new_ndlp) 1332 if (!new_ndlp)
1319 return ndlp; 1333 return ndlp;
1320 keepDID = new_ndlp->nlp_DID; 1334 keepDID = new_ndlp->nlp_DID;
1321 } else 1335 if (phba->sli_rev == LPFC_SLI_REV4)
1336 memcpy(&rrq.xri_bitmap,
1337 &new_ndlp->active_rrqs.xri_bitmap,
1338 sizeof(new_ndlp->active_rrqs.xri_bitmap));
1339 } else {
1322 keepDID = new_ndlp->nlp_DID; 1340 keepDID = new_ndlp->nlp_DID;
1341 if (phba->sli_rev == LPFC_SLI_REV4)
1342 memcpy(&rrq.xri_bitmap,
1343 &new_ndlp->active_rrqs.xri_bitmap,
1344 sizeof(new_ndlp->active_rrqs.xri_bitmap));
1345 }
1323 1346
1324 lpfc_unreg_rpi(vport, new_ndlp); 1347 lpfc_unreg_rpi(vport, new_ndlp);
1325 new_ndlp->nlp_DID = ndlp->nlp_DID; 1348 new_ndlp->nlp_DID = ndlp->nlp_DID;
1326 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state; 1349 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
1350 if (phba->sli_rev == LPFC_SLI_REV4)
1351 memcpy(new_ndlp->active_rrqs.xri_bitmap,
1352 &ndlp->active_rrqs.xri_bitmap,
1353 sizeof(ndlp->active_rrqs.xri_bitmap));
1327 1354
1328 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) 1355 if (ndlp->nlp_flag & NLP_NPR_2B_DISC)
1329 new_ndlp->nlp_flag |= NLP_NPR_2B_DISC; 1356 new_ndlp->nlp_flag |= NLP_NPR_2B_DISC;
@@ -1362,12 +1389,20 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
1362 1389
1363 /* Two ndlps cannot have the same did on the nodelist */ 1390 /* Two ndlps cannot have the same did on the nodelist */
1364 ndlp->nlp_DID = keepDID; 1391 ndlp->nlp_DID = keepDID;
1392 if (phba->sli_rev == LPFC_SLI_REV4)
1393 memcpy(&ndlp->active_rrqs.xri_bitmap,
1394 &rrq.xri_bitmap,
1395 sizeof(ndlp->active_rrqs.xri_bitmap));
1365 lpfc_drop_node(vport, ndlp); 1396 lpfc_drop_node(vport, ndlp);
1366 } 1397 }
1367 else { 1398 else {
1368 lpfc_unreg_rpi(vport, ndlp); 1399 lpfc_unreg_rpi(vport, ndlp);
1369 /* Two ndlps cannot have the same did */ 1400 /* Two ndlps cannot have the same did */
1370 ndlp->nlp_DID = keepDID; 1401 ndlp->nlp_DID = keepDID;
1402 if (phba->sli_rev == LPFC_SLI_REV4)
1403 memcpy(&ndlp->active_rrqs.xri_bitmap,
1404 &rrq.xri_bitmap,
1405 sizeof(ndlp->active_rrqs.xri_bitmap));
1371 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 1406 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1372 /* Since we are swapping the ndlp passed in with the new one 1407 /* Since we are swapping the ndlp passed in with the new one
1373 * and the did has already been swapped, copy over the 1408 * and the did has already been swapped, copy over the
@@ -1428,6 +1463,73 @@ lpfc_end_rscn(struct lpfc_vport *vport)
1428} 1463}
1429 1464
1430/** 1465/**
1466 * lpfc_cmpl_els_rrq - Completion handled for els RRQs.
1467 * @phba: pointer to lpfc hba data structure.
1468 * @cmdiocb: pointer to lpfc command iocb data structure.
1469 * @rspiocb: pointer to lpfc response iocb data structure.
1470 *
1471 * This routine will call the clear rrq function to free the rrq and
1472 * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not
1473 * exist then the clear_rrq is still called because the rrq needs to
1474 * be freed.
1475 **/
1476
1477static void
1478lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1479 struct lpfc_iocbq *rspiocb)
1480{
1481 struct lpfc_vport *vport = cmdiocb->vport;
1482 IOCB_t *irsp;
1483 struct lpfc_nodelist *ndlp;
1484 struct lpfc_node_rrq *rrq;
1485
1486 /* we pass cmdiocb to state machine which needs rspiocb as well */
1487 rrq = cmdiocb->context_un.rrq;
1488 cmdiocb->context_un.rsp_iocb = rspiocb;
1489
1490 irsp = &rspiocb->iocb;
1491 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1492 "RRQ cmpl: status:x%x/x%x did:x%x",
1493 irsp->ulpStatus, irsp->un.ulpWord[4],
1494 irsp->un.elsreq64.remoteID);
1495
1496 ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
1497 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || ndlp != rrq->ndlp) {
1498 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1499 "2882 RRQ completes to NPort x%x "
1500 "with no ndlp. Data: x%x x%x x%x\n",
1501 irsp->un.elsreq64.remoteID,
1502 irsp->ulpStatus, irsp->un.ulpWord[4],
1503 irsp->ulpIoTag);
1504 goto out;
1505 }
1506
1507 /* rrq completes to NPort <nlp_DID> */
1508 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1509 "2880 RRQ completes to NPort x%x "
1510 "Data: x%x x%x x%x x%x x%x\n",
1511 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
1512 irsp->ulpTimeout, rrq->xritag, rrq->rxid);
1513
1514 if (irsp->ulpStatus) {
1515 /* Check for retry */
1516 /* RRQ failed Don't print the vport to vport rjts */
1517 if (irsp->ulpStatus != IOSTAT_LS_RJT ||
1518 (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
1519 ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
1520 (phba)->pport->cfg_log_verbose & LOG_ELS)
1521 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1522 "2881 RRQ failure DID:%06X Status:x%x/x%x\n",
1523 ndlp->nlp_DID, irsp->ulpStatus,
1524 irsp->un.ulpWord[4]);
1525 }
1526out:
1527 if (rrq)
1528 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
1529 lpfc_els_free_iocb(phba, cmdiocb);
1530 return;
1531}
1532/**
1431 * lpfc_cmpl_els_plogi - Completion callback function for plogi 1533 * lpfc_cmpl_els_plogi - Completion callback function for plogi
1432 * @phba: pointer to lpfc hba data structure. 1534 * @phba: pointer to lpfc hba data structure.
1433 * @cmdiocb: pointer to lpfc command iocb data structure. 1535 * @cmdiocb: pointer to lpfc command iocb data structure.
@@ -2722,7 +2824,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2722 if (cmd == ELS_CMD_FLOGI) { 2824 if (cmd == ELS_CMD_FLOGI) {
2723 if (PCI_DEVICE_ID_HORNET == 2825 if (PCI_DEVICE_ID_HORNET ==
2724 phba->pcidev->device) { 2826 phba->pcidev->device) {
2725 phba->fc_topology = TOPOLOGY_LOOP; 2827 phba->fc_topology = LPFC_TOPOLOGY_LOOP;
2726 phba->pport->fc_myDID = 0; 2828 phba->pport->fc_myDID = 0;
2727 phba->alpa_map[0] = 0; 2829 phba->alpa_map[0] = 0;
2728 phba->alpa_map[1] = 0; 2830 phba->alpa_map[1] = 0;
@@ -2877,7 +2979,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2877 retry = 1; 2979 retry = 1;
2878 2980
2879 if (((cmd == ELS_CMD_FLOGI) || (cmd == ELS_CMD_FDISC)) && 2981 if (((cmd == ELS_CMD_FLOGI) || (cmd == ELS_CMD_FDISC)) &&
2880 (phba->fc_topology != TOPOLOGY_LOOP) && 2982 (phba->fc_topology != LPFC_TOPOLOGY_LOOP) &&
2881 !lpfc_error_lost_link(irsp)) { 2983 !lpfc_error_lost_link(irsp)) {
2882 /* FLOGI retry policy */ 2984 /* FLOGI retry policy */
2883 retry = 1; 2985 retry = 1;
@@ -3219,14 +3321,6 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3219 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 3321 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
3220 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 3322 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
3221 3323
3222 /*
3223 * This routine is used to register and unregister in previous SLI
3224 * modes.
3225 */
3226 if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
3227 (phba->sli_rev == LPFC_SLI_REV4))
3228 lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
3229
3230 pmb->context1 = NULL; 3324 pmb->context1 = NULL;
3231 pmb->context2 = NULL; 3325 pmb->context2 = NULL;
3232 3326
@@ -3904,6 +3998,47 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
3904} 3998}
3905 3999
3906/** 4000/**
4001 * lpfc_els_clear_rrq - Clear the rq that this rrq describes.
4002 * @vport: pointer to a virtual N_Port data structure.
4003 * @iocb: pointer to the lpfc command iocb data structure.
4004 * @ndlp: pointer to a node-list data structure.
4005 *
4006 * Return
4007 **/
4008static void
4009lpfc_els_clear_rrq(struct lpfc_vport *vport,
4010 struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp)
4011{
4012 struct lpfc_hba *phba = vport->phba;
4013 uint8_t *pcmd;
4014 struct RRQ *rrq;
4015 uint16_t rxid;
4016 struct lpfc_node_rrq *prrq;
4017
4018
4019 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt);
4020 pcmd += sizeof(uint32_t);
4021 rrq = (struct RRQ *)pcmd;
4022 rxid = bf_get(rrq_oxid, rrq);
4023
4024 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4025 "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x"
4026 " x%x x%x\n",
4027 bf_get(rrq_did, rrq),
4028 bf_get(rrq_oxid, rrq),
4029 rxid,
4030 iocb->iotag, iocb->iocb.ulpContext);
4031
4032 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4033 "Clear RRQ: did:x%x flg:x%x exchg:x%.08x",
4034 ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg);
4035 prrq = lpfc_get_active_rrq(vport, rxid, ndlp->nlp_DID);
4036 if (prrq)
4037 lpfc_clr_rrq_active(phba, rxid, prrq);
4038 return;
4039}
4040
4041/**
3907 * lpfc_els_rsp_echo_acc - Issue echo acc response 4042 * lpfc_els_rsp_echo_acc - Issue echo acc response
3908 * @vport: pointer to a virtual N_Port data structure. 4043 * @vport: pointer to a virtual N_Port data structure.
3909 * @data: pointer to echo data to return in the accept. 4044 * @data: pointer to echo data to return in the accept.
@@ -4597,7 +4732,7 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4597 4732
4598 lpfc_set_disctmo(vport); 4733 lpfc_set_disctmo(vport);
4599 4734
4600 if (phba->fc_topology == TOPOLOGY_LOOP) { 4735 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
4601 /* We should never receive a FLOGI in loop mode, ignore it */ 4736 /* We should never receive a FLOGI in loop mode, ignore it */
4602 did = icmd->un.elsreq64.remoteID; 4737 did = icmd->un.elsreq64.remoteID;
4603 4738
@@ -4792,6 +4927,8 @@ lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4792 struct lpfc_nodelist *ndlp) 4927 struct lpfc_nodelist *ndlp)
4793{ 4928{
4794 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 4929 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
4930 if (vport->phba->sli_rev == LPFC_SLI_REV4)
4931 lpfc_els_clear_rrq(vport, cmdiocb, ndlp);
4795} 4932}
4796 4933
4797/** 4934/**
@@ -4940,7 +5077,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4940 pcmd += sizeof(uint32_t); /* Skip past command */ 5077 pcmd += sizeof(uint32_t); /* Skip past command */
4941 rps_rsp = (RPS_RSP *)pcmd; 5078 rps_rsp = (RPS_RSP *)pcmd;
4942 5079
4943 if (phba->fc_topology != TOPOLOGY_LOOP) 5080 if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
4944 status = 0x10; 5081 status = 0x10;
4945 else 5082 else
4946 status = 0x8; 5083 status = 0x8;
@@ -5194,6 +5331,97 @@ reject_out:
5194 return 0; 5331 return 0;
5195} 5332}
5196 5333
5334/* lpfc_issue_els_rrq - Process an unsolicited rps iocb
5335 * @vport: pointer to a host virtual N_Port data structure.
5336 * @ndlp: pointer to a node-list data structure.
5337 * @did: DID of the target.
5338 * @rrq: Pointer to the rrq struct.
5339 *
5340 * Build a ELS RRQ command and send it to the target. If the issue_iocb is
5341 * Successful the the completion handler will clear the RRQ.
5342 *
5343 * Return codes
5344 * 0 - Successfully sent rrq els iocb.
5345 * 1 - Failed to send rrq els iocb.
5346 **/
5347static int
5348lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
5349 uint32_t did, struct lpfc_node_rrq *rrq)
5350{
5351 struct lpfc_hba *phba = vport->phba;
5352 struct RRQ *els_rrq;
5353 IOCB_t *icmd;
5354 struct lpfc_iocbq *elsiocb;
5355 uint8_t *pcmd;
5356 uint16_t cmdsize;
5357 int ret;
5358
5359
5360 if (ndlp != rrq->ndlp)
5361 ndlp = rrq->ndlp;
5362 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
5363 return 1;
5364
5365 /* If ndlp is not NULL, we will bump the reference count on it */
5366 cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ));
5367 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did,
5368 ELS_CMD_RRQ);
5369 if (!elsiocb)
5370 return 1;
5371
5372 icmd = &elsiocb->iocb;
5373 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5374
5375 /* For RRQ request, remainder of payload is Exchange IDs */
5376 *((uint32_t *) (pcmd)) = ELS_CMD_RRQ;
5377 pcmd += sizeof(uint32_t);
5378 els_rrq = (struct RRQ *) pcmd;
5379
5380 bf_set(rrq_oxid, els_rrq, rrq->xritag);
5381 bf_set(rrq_rxid, els_rrq, rrq->rxid);
5382 bf_set(rrq_did, els_rrq, vport->fc_myDID);
5383 els_rrq->rrq = cpu_to_be32(els_rrq->rrq);
5384 els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg);
5385
5386
5387 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
5388 "Issue RRQ: did:x%x",
5389 did, rrq->xritag, rrq->rxid);
5390 elsiocb->context_un.rrq = rrq;
5391 elsiocb->iocb_cmpl = lpfc_cmpl_els_rrq;
5392 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5393
5394 if (ret == IOCB_ERROR) {
5395 lpfc_els_free_iocb(phba, elsiocb);
5396 return 1;
5397 }
5398 return 0;
5399}
5400
5401/**
5402 * lpfc_send_rrq - Sends ELS RRQ if needed.
5403 * @phba: pointer to lpfc hba data structure.
5404 * @rrq: pointer to the active rrq.
5405 *
5406 * This routine will call the lpfc_issue_els_rrq if the rrq is
5407 * still active for the xri. If this function returns a failure then
5408 * the caller needs to clean up the RRQ by calling lpfc_clr_active_rrq.
5409 *
5410 * Returns 0 Success.
5411 * 1 Failure.
5412 **/
5413int
5414lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq)
5415{
5416 struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport,
5417 rrq->nlp_DID);
5418 if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag))
5419 return lpfc_issue_els_rrq(rrq->vport, ndlp,
5420 rrq->nlp_DID, rrq);
5421 else
5422 return 1;
5423}
5424
5197/** 5425/**
5198 * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command 5426 * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command
5199 * @vport: pointer to a host virtual N_Port data structure. 5427 * @vport: pointer to a host virtual N_Port data structure.
@@ -5482,7 +5710,7 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5482 (memcmp(&phba->fc_fabparam.portName, &fp->FportName, 5710 (memcmp(&phba->fc_fabparam.portName, &fp->FportName,
5483 sizeof(struct lpfc_name)))) { 5711 sizeof(struct lpfc_name)))) {
5484 /* This port has switched fabrics. FLOGI is required */ 5712 /* This port has switched fabrics. FLOGI is required */
5485 lpfc_initial_flogi(vport); 5713 lpfc_issue_init_vfi(vport);
5486 } else { 5714 } else {
5487 /* FAN verified - skip FLOGI */ 5715 /* FAN verified - skip FLOGI */
5488 vport->fc_myDID = vport->fc_prevDID; 5716 vport->fc_myDID = vport->fc_prevDID;
@@ -6201,7 +6429,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
6201 cmd, did, vport->port_state); 6429 cmd, did, vport->port_state);
6202 6430
6203 /* Unsupported ELS command, reject */ 6431 /* Unsupported ELS command, reject */
6204 rjt_err = LSRJT_INVALID_CMD; 6432 rjt_err = LSRJT_CMD_UNSUPPORTED;
6205 6433
6206 /* Unknown ELS command <elsCmd> received from NPORT <did> */ 6434 /* Unknown ELS command <elsCmd> received from NPORT <did> */
6207 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 6435 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
@@ -6373,7 +6601,7 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
6373 if (!ndlp) { 6601 if (!ndlp) {
6374 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 6602 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
6375 if (!ndlp) { 6603 if (!ndlp) {
6376 if (phba->fc_topology == TOPOLOGY_LOOP) { 6604 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
6377 lpfc_disc_start(vport); 6605 lpfc_disc_start(vport);
6378 return; 6606 return;
6379 } 6607 }
@@ -6386,7 +6614,7 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
6386 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 6614 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
6387 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 6615 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
6388 if (!ndlp) { 6616 if (!ndlp) {
6389 if (phba->fc_topology == TOPOLOGY_LOOP) { 6617 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
6390 lpfc_disc_start(vport); 6618 lpfc_disc_start(vport);
6391 return; 6619 return;
6392 } 6620 }
@@ -6408,18 +6636,31 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
6408 } 6636 }
6409 6637
6410 if (vport->cfg_fdmi_on) { 6638 if (vport->cfg_fdmi_on) {
6411 ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool, 6639 /* If this is the first time, allocate an ndlp and initialize
6412 GFP_KERNEL); 6640 * it. Otherwise, make sure the node is enabled and then do the
6641 * login.
6642 */
6643 ndlp_fdmi = lpfc_findnode_did(vport, FDMI_DID);
6644 if (!ndlp_fdmi) {
6645 ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool,
6646 GFP_KERNEL);
6647 if (ndlp_fdmi) {
6648 lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID);
6649 ndlp_fdmi->nlp_type |= NLP_FABRIC;
6650 } else
6651 return;
6652 }
6653 if (!NLP_CHK_NODE_ACT(ndlp_fdmi))
6654 ndlp_fdmi = lpfc_enable_node(vport,
6655 ndlp_fdmi,
6656 NLP_STE_NPR_NODE);
6657
6413 if (ndlp_fdmi) { 6658 if (ndlp_fdmi) {
6414 lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID);
6415 ndlp_fdmi->nlp_type |= NLP_FABRIC;
6416 lpfc_nlp_set_state(vport, ndlp_fdmi, 6659 lpfc_nlp_set_state(vport, ndlp_fdmi,
6417 NLP_STE_PLOGI_ISSUE); 6660 NLP_STE_PLOGI_ISSUE);
6418 lpfc_issue_els_plogi(vport, ndlp_fdmi->nlp_DID, 6661 lpfc_issue_els_plogi(vport, ndlp_fdmi->nlp_DID, 0);
6419 0);
6420 } 6662 }
6421 } 6663 }
6422 return;
6423} 6664}
6424 6665
6425/** 6666/**
@@ -6497,7 +6738,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6497 spin_unlock_irq(shost->host_lock); 6738 spin_unlock_irq(shost->host_lock);
6498 if (vport->port_type == LPFC_PHYSICAL_PORT 6739 if (vport->port_type == LPFC_PHYSICAL_PORT
6499 && !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) 6740 && !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
6500 lpfc_initial_flogi(vport); 6741 lpfc_issue_init_vfi(vport);
6501 else 6742 else
6502 lpfc_initial_fdisc(vport); 6743 lpfc_initial_fdisc(vport);
6503 break; 6744 break;
@@ -6734,7 +6975,7 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
6734 vport->fc_flag &= ~FC_VPORT_CVL_RCVD; 6975 vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
6735 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD; 6976 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
6736 vport->fc_flag |= FC_FABRIC; 6977 vport->fc_flag |= FC_FABRIC;
6737 if (vport->phba->fc_topology == TOPOLOGY_LOOP) 6978 if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP)
6738 vport->fc_flag |= FC_PUBLIC_LOOP; 6979 vport->fc_flag |= FC_PUBLIC_LOOP;
6739 spin_unlock_irq(shost->host_lock); 6980 spin_unlock_irq(shost->host_lock);
6740 6981
@@ -6844,7 +7085,9 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
6844 icmd->un.elsreq64.myID = 0; 7085 icmd->un.elsreq64.myID = 0;
6845 icmd->un.elsreq64.fl = 1; 7086 icmd->un.elsreq64.fl = 1;
6846 7087
6847 if (phba->sli_rev == LPFC_SLI_REV4) { 7088 if ((phba->sli_rev == LPFC_SLI_REV4) &&
7089 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7090 LPFC_SLI_INTF_IF_TYPE_0)) {
6848 /* FDISC needs to be 1 for WQE VPI */ 7091 /* FDISC needs to be 1 for WQE VPI */
6849 elsiocb->iocb.ulpCt_h = (SLI4_CT_VPI >> 1) & 1; 7092 elsiocb->iocb.ulpCt_h = (SLI4_CT_VPI >> 1) & 1;
6850 elsiocb->iocb.ulpCt_l = SLI4_CT_VPI & 1 ; 7093 elsiocb->iocb.ulpCt_l = SLI4_CT_VPI & 1 ;
@@ -7351,8 +7594,11 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
7351 struct sli4_wcqe_xri_aborted *axri) 7594 struct sli4_wcqe_xri_aborted *axri)
7352{ 7595{
7353 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); 7596 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
7597 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
7598
7354 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 7599 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
7355 unsigned long iflag = 0; 7600 unsigned long iflag = 0;
7601 struct lpfc_nodelist *ndlp;
7356 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 7602 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
7357 7603
7358 spin_lock_irqsave(&phba->hbalock, iflag); 7604 spin_lock_irqsave(&phba->hbalock, iflag);
@@ -7361,11 +7607,14 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
7361 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { 7607 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
7362 if (sglq_entry->sli4_xritag == xri) { 7608 if (sglq_entry->sli4_xritag == xri) {
7363 list_del(&sglq_entry->list); 7609 list_del(&sglq_entry->list);
7610 ndlp = sglq_entry->ndlp;
7611 sglq_entry->ndlp = NULL;
7364 list_add_tail(&sglq_entry->list, 7612 list_add_tail(&sglq_entry->list,
7365 &phba->sli4_hba.lpfc_sgl_list); 7613 &phba->sli4_hba.lpfc_sgl_list);
7366 sglq_entry->state = SGL_FREED; 7614 sglq_entry->state = SGL_FREED;
7367 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); 7615 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
7368 spin_unlock_irqrestore(&phba->hbalock, iflag); 7616 spin_unlock_irqrestore(&phba->hbalock, iflag);
7617 lpfc_set_rrq_active(phba, ndlp, xri, rxid, 1);
7369 7618
7370 /* Check if TXQ queue needs to be serviced */ 7619 /* Check if TXQ queue needs to be serviced */
7371 if (pring->txq_cnt) 7620 if (pring->txq_cnt)
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index a5d1695dac3d..f9f160ab2ee9 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -607,6 +607,8 @@ lpfc_work_done(struct lpfc_hba *phba)
607 607
608 /* Process SLI4 events */ 608 /* Process SLI4 events */
609 if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) { 609 if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
610 if (phba->hba_flag & HBA_RRQ_ACTIVE)
611 lpfc_handle_rrq_active(phba);
610 if (phba->hba_flag & FCP_XRI_ABORT_EVENT) 612 if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
611 lpfc_sli4_fcp_xri_abort_event_proc(phba); 613 lpfc_sli4_fcp_xri_abort_event_proc(phba);
612 if (phba->hba_flag & ELS_XRI_ABORT_EVENT) 614 if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
@@ -966,6 +968,7 @@ lpfc_linkup(struct lpfc_hba *phba)
966 struct lpfc_vport **vports; 968 struct lpfc_vport **vports;
967 int i; 969 int i;
968 970
971 lpfc_cleanup_wt_rrqs(phba);
969 phba->link_state = LPFC_LINK_UP; 972 phba->link_state = LPFC_LINK_UP;
970 973
971 /* Unblock fabric iocbs if they are blocked */ 974 /* Unblock fabric iocbs if they are blocked */
@@ -1064,7 +1067,7 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1064 1067
1065 mempool_free(pmb, phba->mbox_mem_pool); 1068 mempool_free(pmb, phba->mbox_mem_pool);
1066 1069
1067 if (phba->fc_topology == TOPOLOGY_LOOP && 1070 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
1068 vport->fc_flag & FC_PUBLIC_LOOP && 1071 vport->fc_flag & FC_PUBLIC_LOOP &&
1069 !(vport->fc_flag & FC_LBIT)) { 1072 !(vport->fc_flag & FC_LBIT)) {
1070 /* Need to wait for FAN - use discovery timer 1073 /* Need to wait for FAN - use discovery timer
@@ -1078,9 +1081,8 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1078 /* Start discovery by sending a FLOGI. port_state is identically 1081 /* Start discovery by sending a FLOGI. port_state is identically
1079 * LPFC_FLOGI while waiting for FLOGI cmpl 1082 * LPFC_FLOGI while waiting for FLOGI cmpl
1080 */ 1083 */
1081 if (vport->port_state != LPFC_FLOGI) { 1084 if (vport->port_state != LPFC_FLOGI)
1082 lpfc_initial_flogi(vport); 1085 lpfc_initial_flogi(vport);
1083 }
1084 return; 1086 return;
1085 1087
1086out: 1088out:
@@ -1131,7 +1133,7 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1131 if (vport->port_state != LPFC_FLOGI) { 1133 if (vport->port_state != LPFC_FLOGI) {
1132 phba->hba_flag |= FCF_RR_INPROG; 1134 phba->hba_flag |= FCF_RR_INPROG;
1133 spin_unlock_irq(&phba->hbalock); 1135 spin_unlock_irq(&phba->hbalock);
1134 lpfc_initial_flogi(vport); 1136 lpfc_issue_init_vfi(vport);
1135 goto out; 1137 goto out;
1136 } 1138 }
1137 spin_unlock_irq(&phba->hbalock); 1139 spin_unlock_irq(&phba->hbalock);
@@ -1353,7 +1355,7 @@ lpfc_register_fcf(struct lpfc_hba *phba)
1353 if (phba->pport->port_state != LPFC_FLOGI) { 1355 if (phba->pport->port_state != LPFC_FLOGI) {
1354 phba->hba_flag |= FCF_RR_INPROG; 1356 phba->hba_flag |= FCF_RR_INPROG;
1355 spin_unlock_irq(&phba->hbalock); 1357 spin_unlock_irq(&phba->hbalock);
1356 lpfc_initial_flogi(phba->pport); 1358 lpfc_issue_init_vfi(phba->pport);
1357 return; 1359 return;
1358 } 1360 }
1359 spin_unlock_irq(&phba->hbalock); 1361 spin_unlock_irq(&phba->hbalock);
@@ -2331,7 +2333,7 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2331 phba->fcf.current_rec.fcf_indx, fcf_index); 2333 phba->fcf.current_rec.fcf_indx, fcf_index);
2332 /* Wait 500 ms before retrying FLOGI to current FCF */ 2334 /* Wait 500 ms before retrying FLOGI to current FCF */
2333 msleep(500); 2335 msleep(500);
2334 lpfc_initial_flogi(phba->pport); 2336 lpfc_issue_init_vfi(phba->pport);
2335 goto out; 2337 goto out;
2336 } 2338 }
2337 2339
@@ -2422,6 +2424,63 @@ out:
2422} 2424}
2423 2425
2424/** 2426/**
2427 * lpfc_init_vfi_cmpl - Completion handler for init_vfi mbox command.
2428 * @phba: pointer to lpfc hba data structure.
2429 * @mboxq: pointer to mailbox data structure.
2430 *
2431 * This function handles completion of init vfi mailbox command.
2432 */
2433void
2434lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2435{
2436 struct lpfc_vport *vport = mboxq->vport;
2437
2438 if (mboxq->u.mb.mbxStatus && (mboxq->u.mb.mbxStatus != 0x4002)) {
2439 lpfc_printf_vlog(vport, KERN_ERR,
2440 LOG_MBOX,
2441 "2891 Init VFI mailbox failed 0x%x\n",
2442 mboxq->u.mb.mbxStatus);
2443 mempool_free(mboxq, phba->mbox_mem_pool);
2444 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
2445 return;
2446 }
2447 lpfc_initial_flogi(vport);
2448 mempool_free(mboxq, phba->mbox_mem_pool);
2449 return;
2450}
2451
2452/**
2453 * lpfc_issue_init_vfi - Issue init_vfi mailbox command.
2454 * @vport: pointer to lpfc_vport data structure.
2455 *
2456 * This function issue a init_vfi mailbox command to initialize the VFI and
2457 * VPI for the physical port.
2458 */
2459void
2460lpfc_issue_init_vfi(struct lpfc_vport *vport)
2461{
2462 LPFC_MBOXQ_t *mboxq;
2463 int rc;
2464 struct lpfc_hba *phba = vport->phba;
2465
2466 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2467 if (!mboxq) {
2468 lpfc_printf_vlog(vport, KERN_ERR,
2469 LOG_MBOX, "2892 Failed to allocate "
2470 "init_vfi mailbox\n");
2471 return;
2472 }
2473 lpfc_init_vfi(mboxq, vport);
2474 mboxq->mbox_cmpl = lpfc_init_vfi_cmpl;
2475 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
2476 if (rc == MBX_NOT_FINISHED) {
2477 lpfc_printf_vlog(vport, KERN_ERR,
2478 LOG_MBOX, "2893 Failed to issue init_vfi mailbox\n");
2479 mempool_free(mboxq, vport->phba->mbox_mem_pool);
2480 }
2481}
2482
2483/**
2425 * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command. 2484 * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command.
2426 * @phba: pointer to lpfc hba data structure. 2485 * @phba: pointer to lpfc hba data structure.
2427 * @mboxq: pointer to mailbox data structure. 2486 * @mboxq: pointer to mailbox data structure.
@@ -2528,7 +2587,7 @@ lpfc_start_fdiscs(struct lpfc_hba *phba)
2528 FC_VPORT_FAILED); 2587 FC_VPORT_FAILED);
2529 continue; 2588 continue;
2530 } 2589 }
2531 if (phba->fc_topology == TOPOLOGY_LOOP) { 2590 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
2532 lpfc_vport_set_state(vports[i], 2591 lpfc_vport_set_state(vports[i],
2533 FC_VPORT_LINKDOWN); 2592 FC_VPORT_LINKDOWN);
2534 continue; 2593 continue;
@@ -2564,7 +2623,7 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2564 "2018 REG_VFI mbxStatus error x%x " 2623 "2018 REG_VFI mbxStatus error x%x "
2565 "HBA state x%x\n", 2624 "HBA state x%x\n",
2566 mboxq->u.mb.mbxStatus, vport->port_state); 2625 mboxq->u.mb.mbxStatus, vport->port_state);
2567 if (phba->fc_topology == TOPOLOGY_LOOP) { 2626 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
2568 /* FLOGI failed, use loop map to make discovery list */ 2627 /* FLOGI failed, use loop map to make discovery list */
2569 lpfc_disc_list_loopmap(vport); 2628 lpfc_disc_list_loopmap(vport);
2570 /* Start discovery */ 2629 /* Start discovery */
@@ -2582,8 +2641,18 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2582 spin_unlock_irq(shost->host_lock); 2641 spin_unlock_irq(shost->host_lock);
2583 2642
2584 if (vport->port_state == LPFC_FABRIC_CFG_LINK) { 2643 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
2585 lpfc_start_fdiscs(phba); 2644 /* For private loop just start discovery and we are done. */
2586 lpfc_do_scr_ns_plogi(phba, vport); 2645 if ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
2646 (phba->alpa_map[0] == 0) &&
2647 !(vport->fc_flag & FC_PUBLIC_LOOP)) {
2648 /* Use loop map to make discovery list */
2649 lpfc_disc_list_loopmap(vport);
2650 /* Start discovery */
2651 lpfc_disc_start(vport);
2652 } else {
2653 lpfc_start_fdiscs(phba);
2654 lpfc_do_scr_ns_plogi(phba, vport);
2655 }
2587 } 2656 }
2588 2657
2589fail_free_mem: 2658fail_free_mem:
@@ -2644,7 +2713,7 @@ out:
2644} 2713}
2645 2714
2646static void 2715static void
2647lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) 2716lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
2648{ 2717{
2649 struct lpfc_vport *vport = phba->pport; 2718 struct lpfc_vport *vport = phba->pport;
2650 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL; 2719 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
@@ -2654,31 +2723,24 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
2654 struct fcf_record *fcf_record; 2723 struct fcf_record *fcf_record;
2655 2724
2656 spin_lock_irq(&phba->hbalock); 2725 spin_lock_irq(&phba->hbalock);
2657 switch (la->UlnkSpeed) { 2726 switch (bf_get(lpfc_mbx_read_top_link_spd, la)) {
2658 case LA_1GHZ_LINK: 2727 case LPFC_LINK_SPEED_1GHZ:
2659 phba->fc_linkspeed = LA_1GHZ_LINK; 2728 case LPFC_LINK_SPEED_2GHZ:
2660 break; 2729 case LPFC_LINK_SPEED_4GHZ:
2661 case LA_2GHZ_LINK: 2730 case LPFC_LINK_SPEED_8GHZ:
2662 phba->fc_linkspeed = LA_2GHZ_LINK; 2731 case LPFC_LINK_SPEED_10GHZ:
2663 break; 2732 case LPFC_LINK_SPEED_16GHZ:
2664 case LA_4GHZ_LINK: 2733 phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la);
2665 phba->fc_linkspeed = LA_4GHZ_LINK;
2666 break;
2667 case LA_8GHZ_LINK:
2668 phba->fc_linkspeed = LA_8GHZ_LINK;
2669 break;
2670 case LA_10GHZ_LINK:
2671 phba->fc_linkspeed = LA_10GHZ_LINK;
2672 break; 2734 break;
2673 default: 2735 default:
2674 phba->fc_linkspeed = LA_UNKNW_LINK; 2736 phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN;
2675 break; 2737 break;
2676 } 2738 }
2677 2739
2678 phba->fc_topology = la->topology; 2740 phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
2679 phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED; 2741 phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
2680 2742
2681 if (phba->fc_topology == TOPOLOGY_LOOP) { 2743 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
2682 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; 2744 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
2683 2745
2684 /* if npiv is enabled and this adapter supports npiv log 2746 /* if npiv is enabled and this adapter supports npiv log
@@ -2689,11 +2751,11 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
2689 "1309 Link Up Event npiv not supported in loop " 2751 "1309 Link Up Event npiv not supported in loop "
2690 "topology\n"); 2752 "topology\n");
2691 /* Get Loop Map information */ 2753 /* Get Loop Map information */
2692 if (la->il) 2754 if (bf_get(lpfc_mbx_read_top_il, la))
2693 vport->fc_flag |= FC_LBIT; 2755 vport->fc_flag |= FC_LBIT;
2694 2756
2695 vport->fc_myDID = la->granted_AL_PA; 2757 vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
2696 i = la->un.lilpBde64.tus.f.bdeSize; 2758 i = la->lilpBde64.tus.f.bdeSize;
2697 2759
2698 if (i == 0) { 2760 if (i == 0) {
2699 phba->alpa_map[0] = 0; 2761 phba->alpa_map[0] = 0;
@@ -2764,7 +2826,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
2764 goto out; 2826 goto out;
2765 } 2827 }
2766 2828
2767 if (!(phba->hba_flag & HBA_FCOE_SUPPORT)) { 2829 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
2768 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2830 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2769 if (!cfglink_mbox) 2831 if (!cfglink_mbox)
2770 goto out; 2832 goto out;
@@ -2874,17 +2936,17 @@ lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
2874 2936
2875 2937
2876/* 2938/*
2877 * This routine handles processing a READ_LA mailbox 2939 * This routine handles processing a READ_TOPOLOGY mailbox
2878 * command upon completion. It is setup in the LPFC_MBOXQ 2940 * command upon completion. It is setup in the LPFC_MBOXQ
2879 * as the completion routine when the command is 2941 * as the completion routine when the command is
2880 * handed off to the SLI layer. 2942 * handed off to the SLI layer.
2881 */ 2943 */
2882void 2944void
2883lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2945lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2884{ 2946{
2885 struct lpfc_vport *vport = pmb->vport; 2947 struct lpfc_vport *vport = pmb->vport;
2886 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2948 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2887 READ_LA_VAR *la; 2949 struct lpfc_mbx_read_top *la;
2888 MAILBOX_t *mb = &pmb->u.mb; 2950 MAILBOX_t *mb = &pmb->u.mb;
2889 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 2951 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
2890 2952
@@ -2897,15 +2959,15 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2897 mb->mbxStatus, vport->port_state); 2959 mb->mbxStatus, vport->port_state);
2898 lpfc_mbx_issue_link_down(phba); 2960 lpfc_mbx_issue_link_down(phba);
2899 phba->link_state = LPFC_HBA_ERROR; 2961 phba->link_state = LPFC_HBA_ERROR;
2900 goto lpfc_mbx_cmpl_read_la_free_mbuf; 2962 goto lpfc_mbx_cmpl_read_topology_free_mbuf;
2901 } 2963 }
2902 2964
2903 la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA; 2965 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
2904 2966
2905 memcpy(&phba->alpa_map[0], mp->virt, 128); 2967 memcpy(&phba->alpa_map[0], mp->virt, 128);
2906 2968
2907 spin_lock_irq(shost->host_lock); 2969 spin_lock_irq(shost->host_lock);
2908 if (la->pb) 2970 if (bf_get(lpfc_mbx_read_top_pb, la))
2909 vport->fc_flag |= FC_BYPASSED_MODE; 2971 vport->fc_flag |= FC_BYPASSED_MODE;
2910 else 2972 else
2911 vport->fc_flag &= ~FC_BYPASSED_MODE; 2973 vport->fc_flag &= ~FC_BYPASSED_MODE;
@@ -2914,41 +2976,48 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2914 if ((phba->fc_eventTag < la->eventTag) || 2976 if ((phba->fc_eventTag < la->eventTag) ||
2915 (phba->fc_eventTag == la->eventTag)) { 2977 (phba->fc_eventTag == la->eventTag)) {
2916 phba->fc_stat.LinkMultiEvent++; 2978 phba->fc_stat.LinkMultiEvent++;
2917 if (la->attType == AT_LINK_UP) 2979 if (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP)
2918 if (phba->fc_eventTag != 0) 2980 if (phba->fc_eventTag != 0)
2919 lpfc_linkdown(phba); 2981 lpfc_linkdown(phba);
2920 } 2982 }
2921 2983
2922 phba->fc_eventTag = la->eventTag; 2984 phba->fc_eventTag = la->eventTag;
2923 spin_lock_irq(&phba->hbalock); 2985 spin_lock_irq(&phba->hbalock);
2924 if (la->mm) 2986 if (bf_get(lpfc_mbx_read_top_mm, la))
2925 phba->sli.sli_flag |= LPFC_MENLO_MAINT; 2987 phba->sli.sli_flag |= LPFC_MENLO_MAINT;
2926 else 2988 else
2927 phba->sli.sli_flag &= ~LPFC_MENLO_MAINT; 2989 phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
2928 spin_unlock_irq(&phba->hbalock); 2990 spin_unlock_irq(&phba->hbalock);
2929 2991
2930 phba->link_events++; 2992 phba->link_events++;
2931 if (la->attType == AT_LINK_UP && (!la->mm)) { 2993 if ((bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP) &&
2994 (!bf_get(lpfc_mbx_read_top_mm, la))) {
2932 phba->fc_stat.LinkUp++; 2995 phba->fc_stat.LinkUp++;
2933 if (phba->link_flag & LS_LOOPBACK_MODE) { 2996 if (phba->link_flag & LS_LOOPBACK_MODE) {
2934 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 2997 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
2935 "1306 Link Up Event in loop back mode " 2998 "1306 Link Up Event in loop back mode "
2936 "x%x received Data: x%x x%x x%x x%x\n", 2999 "x%x received Data: x%x x%x x%x x%x\n",
2937 la->eventTag, phba->fc_eventTag, 3000 la->eventTag, phba->fc_eventTag,
2938 la->granted_AL_PA, la->UlnkSpeed, 3001 bf_get(lpfc_mbx_read_top_alpa_granted,
3002 la),
3003 bf_get(lpfc_mbx_read_top_link_spd, la),
2939 phba->alpa_map[0]); 3004 phba->alpa_map[0]);
2940 } else { 3005 } else {
2941 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 3006 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
2942 "1303 Link Up Event x%x received " 3007 "1303 Link Up Event x%x received "
2943 "Data: x%x x%x x%x x%x x%x x%x %d\n", 3008 "Data: x%x x%x x%x x%x x%x x%x %d\n",
2944 la->eventTag, phba->fc_eventTag, 3009 la->eventTag, phba->fc_eventTag,
2945 la->granted_AL_PA, la->UlnkSpeed, 3010 bf_get(lpfc_mbx_read_top_alpa_granted,
3011 la),
3012 bf_get(lpfc_mbx_read_top_link_spd, la),
2946 phba->alpa_map[0], 3013 phba->alpa_map[0],
2947 la->mm, la->fa, 3014 bf_get(lpfc_mbx_read_top_mm, la),
3015 bf_get(lpfc_mbx_read_top_fa, la),
2948 phba->wait_4_mlo_maint_flg); 3016 phba->wait_4_mlo_maint_flg);
2949 } 3017 }
2950 lpfc_mbx_process_link_up(phba, la); 3018 lpfc_mbx_process_link_up(phba, la);
2951 } else if (la->attType == AT_LINK_DOWN) { 3019 } else if (bf_get(lpfc_mbx_read_top_att_type, la) ==
3020 LPFC_ATT_LINK_DOWN) {
2952 phba->fc_stat.LinkDown++; 3021 phba->fc_stat.LinkDown++;
2953 if (phba->link_flag & LS_LOOPBACK_MODE) { 3022 if (phba->link_flag & LS_LOOPBACK_MODE) {
2954 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 3023 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
@@ -2964,11 +3033,13 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2964 "Data: x%x x%x x%x x%x x%x\n", 3033 "Data: x%x x%x x%x x%x x%x\n",
2965 la->eventTag, phba->fc_eventTag, 3034 la->eventTag, phba->fc_eventTag,
2966 phba->pport->port_state, vport->fc_flag, 3035 phba->pport->port_state, vport->fc_flag,
2967 la->mm, la->fa); 3036 bf_get(lpfc_mbx_read_top_mm, la),
3037 bf_get(lpfc_mbx_read_top_fa, la));
2968 } 3038 }
2969 lpfc_mbx_issue_link_down(phba); 3039 lpfc_mbx_issue_link_down(phba);
2970 } 3040 }
2971 if (la->mm && la->attType == AT_LINK_UP) { 3041 if ((bf_get(lpfc_mbx_read_top_mm, la)) &&
3042 (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP)) {
2972 if (phba->link_state != LPFC_LINK_DOWN) { 3043 if (phba->link_state != LPFC_LINK_DOWN) {
2973 phba->fc_stat.LinkDown++; 3044 phba->fc_stat.LinkDown++;
2974 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 3045 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
@@ -2996,14 +3067,15 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2996 } 3067 }
2997 } 3068 }
2998 3069
2999 if (la->fa) { 3070 if (bf_get(lpfc_mbx_read_top_fa, la)) {
3000 if (la->mm) 3071 if (bf_get(lpfc_mbx_read_top_mm, la))
3001 lpfc_issue_clear_la(phba, vport); 3072 lpfc_issue_clear_la(phba, vport);
3002 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 3073 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
3003 "1311 fa %d\n", la->fa); 3074 "1311 fa %d\n",
3075 bf_get(lpfc_mbx_read_top_fa, la));
3004 } 3076 }
3005 3077
3006lpfc_mbx_cmpl_read_la_free_mbuf: 3078lpfc_mbx_cmpl_read_topology_free_mbuf:
3007 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3079 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3008 kfree(mp); 3080 kfree(mp);
3009 mempool_free(pmb, phba->mbox_mem_pool); 3081 mempool_free(pmb, phba->mbox_mem_pool);
@@ -3030,8 +3102,8 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3030 if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) 3102 if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
3031 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 3103 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
3032 3104
3033 if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL || 3105 if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL ||
3034 ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) { 3106 ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
3035 /* We rcvd a rscn after issuing this 3107 /* We rcvd a rscn after issuing this
3036 * mbox reg login, we may have cycled 3108 * mbox reg login, we may have cycled
3037 * back through the state and be 3109 * back through the state and be
@@ -3043,10 +3115,6 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3043 spin_lock_irq(shost->host_lock); 3115 spin_lock_irq(shost->host_lock);
3044 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 3116 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
3045 spin_unlock_irq(shost->host_lock); 3117 spin_unlock_irq(shost->host_lock);
3046 if (phba->sli_rev == LPFC_SLI_REV4)
3047 lpfc_sli4_free_rpi(phba,
3048 pmb->u.mb.un.varRegLogin.rpi);
3049
3050 } else 3118 } else
3051 /* Good status, call state machine */ 3119 /* Good status, call state machine */
3052 lpfc_disc_state_machine(vport, ndlp, pmb, 3120 lpfc_disc_state_machine(vport, ndlp, pmb,
@@ -3092,6 +3160,7 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3092 spin_unlock_irq(shost->host_lock); 3160 spin_unlock_irq(shost->host_lock);
3093 vport->unreg_vpi_cmpl = VPORT_OK; 3161 vport->unreg_vpi_cmpl = VPORT_OK;
3094 mempool_free(pmb, phba->mbox_mem_pool); 3162 mempool_free(pmb, phba->mbox_mem_pool);
3163 lpfc_cleanup_vports_rrqs(vport);
3095 /* 3164 /*
3096 * This shost reference might have been taken at the beginning of 3165 * This shost reference might have been taken at the beginning of
3097 * lpfc_vport_delete() 3166 * lpfc_vport_delete()
@@ -3333,7 +3402,7 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3333 kfree(mp); 3402 kfree(mp);
3334 mempool_free(pmb, phba->mbox_mem_pool); 3403 mempool_free(pmb, phba->mbox_mem_pool);
3335 3404
3336 if (phba->fc_topology == TOPOLOGY_LOOP) { 3405 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
3337 /* FLOGI failed, use loop map to make discovery list */ 3406 /* FLOGI failed, use loop map to make discovery list */
3338 lpfc_disc_list_loopmap(vport); 3407 lpfc_disc_list_loopmap(vport);
3339 3408
@@ -3355,7 +3424,7 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3355 } 3424 }
3356 3425
3357 ndlp->nlp_rpi = mb->un.varWords[0]; 3426 ndlp->nlp_rpi = mb->un.varWords[0];
3358 ndlp->nlp_flag |= NLP_RPI_VALID; 3427 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
3359 ndlp->nlp_type |= NLP_FABRIC; 3428 ndlp->nlp_type |= NLP_FABRIC;
3360 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 3429 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
3361 3430
@@ -3413,7 +3482,7 @@ out:
3413 /* If no other thread is using the ndlp, free it */ 3482 /* If no other thread is using the ndlp, free it */
3414 lpfc_nlp_not_used(ndlp); 3483 lpfc_nlp_not_used(ndlp);
3415 3484
3416 if (phba->fc_topology == TOPOLOGY_LOOP) { 3485 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
3417 /* 3486 /*
3418 * RegLogin failed, use loop map to make discovery 3487 * RegLogin failed, use loop map to make discovery
3419 * list 3488 * list
@@ -3429,7 +3498,7 @@ out:
3429 } 3498 }
3430 3499
3431 ndlp->nlp_rpi = mb->un.varWords[0]; 3500 ndlp->nlp_rpi = mb->un.varWords[0];
3432 ndlp->nlp_flag |= NLP_RPI_VALID; 3501 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
3433 ndlp->nlp_type |= NLP_FABRIC; 3502 ndlp->nlp_type |= NLP_FABRIC;
3434 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 3503 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
3435 3504
@@ -3762,6 +3831,8 @@ lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
3762 NLP_INT_NODE_ACT(ndlp); 3831 NLP_INT_NODE_ACT(ndlp);
3763 atomic_set(&ndlp->cmd_pending, 0); 3832 atomic_set(&ndlp->cmd_pending, 0);
3764 ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth; 3833 ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
3834 if (vport->phba->sli_rev == LPFC_SLI_REV4)
3835 ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
3765} 3836}
3766 3837
3767struct lpfc_nodelist * 3838struct lpfc_nodelist *
@@ -3975,7 +4046,7 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
3975 * by firmware with a no rpi error. 4046 * by firmware with a no rpi error.
3976 */ 4047 */
3977 psli = &phba->sli; 4048 psli = &phba->sli;
3978 if (ndlp->nlp_flag & NLP_RPI_VALID) { 4049 if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
3979 /* Now process each ring */ 4050 /* Now process each ring */
3980 for (i = 0; i < psli->num_rings; i++) { 4051 for (i = 0; i < psli->num_rings; i++) {
3981 pring = &psli->ring[i]; 4052 pring = &psli->ring[i];
@@ -4023,7 +4094,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4023 LPFC_MBOXQ_t *mbox; 4094 LPFC_MBOXQ_t *mbox;
4024 int rc; 4095 int rc;
4025 4096
4026 if (ndlp->nlp_flag & NLP_RPI_VALID) { 4097 if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
4027 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4098 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4028 if (mbox) { 4099 if (mbox) {
4029 lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox); 4100 lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
@@ -4035,8 +4106,9 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4035 } 4106 }
4036 lpfc_no_rpi(phba, ndlp); 4107 lpfc_no_rpi(phba, ndlp);
4037 4108
4038 ndlp->nlp_rpi = 0; 4109 if (phba->sli_rev != LPFC_SLI_REV4)
4039 ndlp->nlp_flag &= ~NLP_RPI_VALID; 4110 ndlp->nlp_rpi = 0;
4111 ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
4040 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 4112 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
4041 return 1; 4113 return 1;
4042 } 4114 }
@@ -4059,11 +4131,16 @@ lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
4059 int i; 4131 int i;
4060 4132
4061 vports = lpfc_create_vport_work_array(phba); 4133 vports = lpfc_create_vport_work_array(phba);
4134 if (!vports) {
4135 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
4136 "2884 Vport array allocation failed \n");
4137 return;
4138 }
4062 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 4139 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4063 shost = lpfc_shost_from_vport(vports[i]); 4140 shost = lpfc_shost_from_vport(vports[i]);
4064 spin_lock_irq(shost->host_lock); 4141 spin_lock_irq(shost->host_lock);
4065 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) { 4142 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
4066 if (ndlp->nlp_flag & NLP_RPI_VALID) { 4143 if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
4067 /* The mempool_alloc might sleep */ 4144 /* The mempool_alloc might sleep */
4068 spin_unlock_irq(shost->host_lock); 4145 spin_unlock_irq(shost->host_lock);
4069 lpfc_unreg_rpi(vports[i], ndlp); 4146 lpfc_unreg_rpi(vports[i], ndlp);
@@ -4192,9 +4269,6 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4192 kfree(mp); 4269 kfree(mp);
4193 } 4270 }
4194 list_del(&mb->list); 4271 list_del(&mb->list);
4195 if (phba->sli_rev == LPFC_SLI_REV4)
4196 lpfc_sli4_free_rpi(phba,
4197 mb->u.mb.un.varRegLogin.rpi);
4198 mempool_free(mb, phba->mbox_mem_pool); 4272 mempool_free(mb, phba->mbox_mem_pool);
4199 /* We shall not invoke the lpfc_nlp_put to decrement 4273 /* We shall not invoke the lpfc_nlp_put to decrement
4200 * the ndlp reference count as we are in the process 4274 * the ndlp reference count as we are in the process
@@ -4236,15 +4310,15 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4236 4310
4237 lpfc_cancel_retry_delay_tmo(vport, ndlp); 4311 lpfc_cancel_retry_delay_tmo(vport, ndlp);
4238 if ((ndlp->nlp_flag & NLP_DEFER_RM) && 4312 if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
4239 !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) && 4313 !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
4240 !(ndlp->nlp_flag & NLP_RPI_VALID)) { 4314 !(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
4241 /* For this case we need to cleanup the default rpi 4315 /* For this case we need to cleanup the default rpi
4242 * allocated by the firmware. 4316 * allocated by the firmware.
4243 */ 4317 */
4244 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) 4318 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
4245 != NULL) { 4319 != NULL) {
4246 rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID, 4320 rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
4247 (uint8_t *) &vport->fc_sparam, mbox, 0); 4321 (uint8_t *) &vport->fc_sparam, mbox, ndlp->nlp_rpi);
4248 if (rc) { 4322 if (rc) {
4249 mempool_free(mbox, phba->mbox_mem_pool); 4323 mempool_free(mbox, phba->mbox_mem_pool);
4250 } 4324 }
@@ -4436,7 +4510,7 @@ lpfc_disc_list_loopmap(struct lpfc_vport *vport)
4436 if (!lpfc_is_link_up(phba)) 4510 if (!lpfc_is_link_up(phba))
4437 return; 4511 return;
4438 4512
4439 if (phba->fc_topology != TOPOLOGY_LOOP) 4513 if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
4440 return; 4514 return;
4441 4515
4442 /* Check for loop map present or not */ 4516 /* Check for loop map present or not */
@@ -4788,7 +4862,10 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
4788 } 4862 }
4789 } 4863 }
4790 if (vport->port_state != LPFC_FLOGI) { 4864 if (vport->port_state != LPFC_FLOGI) {
4791 lpfc_initial_flogi(vport); 4865 if (phba->sli_rev <= LPFC_SLI_REV3)
4866 lpfc_initial_flogi(vport);
4867 else
4868 lpfc_issue_init_vfi(vport);
4792 return; 4869 return;
4793 } 4870 }
4794 break; 4871 break;
@@ -4979,7 +5056,7 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4979 pmb->context2 = NULL; 5056 pmb->context2 = NULL;
4980 5057
4981 ndlp->nlp_rpi = mb->un.varWords[0]; 5058 ndlp->nlp_rpi = mb->un.varWords[0];
4982 ndlp->nlp_flag |= NLP_RPI_VALID; 5059 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
4983 ndlp->nlp_type |= NLP_FABRIC; 5060 ndlp->nlp_type |= NLP_FABRIC;
4984 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 5061 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
4985 5062
@@ -5103,6 +5180,8 @@ lpfc_nlp_release(struct kref *kref)
5103 spin_lock_irqsave(&phba->ndlp_lock, flags); 5180 spin_lock_irqsave(&phba->ndlp_lock, flags);
5104 NLP_CLR_NODE_ACT(ndlp); 5181 NLP_CLR_NODE_ACT(ndlp);
5105 spin_unlock_irqrestore(&phba->ndlp_lock, flags); 5182 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
5183 if (phba->sli_rev == LPFC_SLI_REV4)
5184 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
5106 5185
5107 /* free ndlp memory for final ndlp release */ 5186 /* free ndlp memory for final ndlp release */
5108 if (NLP_CHK_FREE_REQ(ndlp)) { 5187 if (NLP_CHK_FREE_REQ(ndlp)) {
@@ -5254,6 +5333,10 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
5254 5333
5255 vports = lpfc_create_vport_work_array(phba); 5334 vports = lpfc_create_vport_work_array(phba);
5256 5335
5336 /* If driver cannot allocate memory, indicate fcf is in use */
5337 if (!vports)
5338 return 1;
5339
5257 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 5340 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
5258 shost = lpfc_shost_from_vport(vports[i]); 5341 shost = lpfc_shost_from_vport(vports[i]);
5259 spin_lock_irq(shost->host_lock); 5342 spin_lock_irq(shost->host_lock);
@@ -5269,7 +5352,7 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
5269 "logged in\n", 5352 "logged in\n",
5270 ndlp->nlp_rpi, ndlp->nlp_DID, 5353 ndlp->nlp_rpi, ndlp->nlp_DID,
5271 ndlp->nlp_flag); 5354 ndlp->nlp_flag);
5272 if (ndlp->nlp_flag & NLP_RPI_VALID) 5355 if (ndlp->nlp_flag & NLP_RPI_REGISTERED)
5273 ret = 1; 5356 ret = 1;
5274 } 5357 }
5275 } 5358 }
@@ -5550,7 +5633,7 @@ lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
5550 * registered, do nothing. 5633 * registered, do nothing.
5551 */ 5634 */
5552 spin_lock_irq(&phba->hbalock); 5635 spin_lock_irq(&phba->hbalock);
5553 if (!(phba->hba_flag & HBA_FCOE_SUPPORT) || 5636 if (!(phba->hba_flag & HBA_FCOE_MODE) ||
5554 !(phba->fcf.fcf_flag & FCF_REGISTERED) || 5637 !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
5555 !(phba->hba_flag & HBA_FIP_SUPPORT) || 5638 !(phba->hba_flag & HBA_FIP_SUPPORT) ||
5556 (phba->fcf.fcf_flag & FCF_DISCOVERY) || 5639 (phba->fcf.fcf_flag & FCF_DISCOVERY) ||
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 9b8333456465..96ed3ba6ba95 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -880,6 +880,24 @@ struct RLS_RSP { /* Structure is in Big Endian format */
880 uint32_t crcCnt; 880 uint32_t crcCnt;
881}; 881};
882 882
883struct RRQ { /* Structure is in Big Endian format */
884 uint32_t rrq;
885#define rrq_rsvd_SHIFT 24
886#define rrq_rsvd_MASK 0x000000ff
887#define rrq_rsvd_WORD rrq
888#define rrq_did_SHIFT 0
889#define rrq_did_MASK 0x00ffffff
890#define rrq_did_WORD rrq
891 uint32_t rrq_exchg;
892#define rrq_oxid_SHIFT 16
893#define rrq_oxid_MASK 0xffff
894#define rrq_oxid_WORD rrq_exchg
895#define rrq_rxid_SHIFT 0
896#define rrq_rxid_MASK 0xffff
897#define rrq_rxid_WORD rrq_exchg
898};
899
900
883struct RTV_RSP { /* Structure is in Big Endian format */ 901struct RTV_RSP { /* Structure is in Big Endian format */
884 uint32_t ratov; 902 uint32_t ratov;
885 uint32_t edtov; 903 uint32_t edtov;
@@ -1172,7 +1190,10 @@ typedef struct {
1172#define PCI_VENDOR_ID_EMULEX 0x10df 1190#define PCI_VENDOR_ID_EMULEX 0x10df
1173#define PCI_DEVICE_ID_FIREFLY 0x1ae5 1191#define PCI_DEVICE_ID_FIREFLY 0x1ae5
1174#define PCI_DEVICE_ID_PROTEUS_VF 0xe100 1192#define PCI_DEVICE_ID_PROTEUS_VF 0xe100
1193#define PCI_DEVICE_ID_BALIUS 0xe131
1175#define PCI_DEVICE_ID_PROTEUS_PF 0xe180 1194#define PCI_DEVICE_ID_PROTEUS_PF 0xe180
1195#define PCI_DEVICE_ID_LANCER_FC 0xe200
1196#define PCI_DEVICE_ID_LANCER_FCOE 0xe260
1176#define PCI_DEVICE_ID_SAT_SMB 0xf011 1197#define PCI_DEVICE_ID_SAT_SMB 0xf011
1177#define PCI_DEVICE_ID_SAT_MID 0xf015 1198#define PCI_DEVICE_ID_SAT_MID 0xf015
1178#define PCI_DEVICE_ID_RFLY 0xf095 1199#define PCI_DEVICE_ID_RFLY 0xf095
@@ -1189,6 +1210,7 @@ typedef struct {
1189#define PCI_DEVICE_ID_SAT 0xf100 1210#define PCI_DEVICE_ID_SAT 0xf100
1190#define PCI_DEVICE_ID_SAT_SCSP 0xf111 1211#define PCI_DEVICE_ID_SAT_SCSP 0xf111
1191#define PCI_DEVICE_ID_SAT_DCSP 0xf112 1212#define PCI_DEVICE_ID_SAT_DCSP 0xf112
1213#define PCI_DEVICE_ID_FALCON 0xf180
1192#define PCI_DEVICE_ID_SUPERFLY 0xf700 1214#define PCI_DEVICE_ID_SUPERFLY 0xf700
1193#define PCI_DEVICE_ID_DRAGONFLY 0xf800 1215#define PCI_DEVICE_ID_DRAGONFLY 0xf800
1194#define PCI_DEVICE_ID_CENTAUR 0xf900 1216#define PCI_DEVICE_ID_CENTAUR 0xf900
@@ -1210,8 +1232,6 @@ typedef struct {
1210#define PCI_VENDOR_ID_SERVERENGINE 0x19a2 1232#define PCI_VENDOR_ID_SERVERENGINE 0x19a2
1211#define PCI_DEVICE_ID_TIGERSHARK 0x0704 1233#define PCI_DEVICE_ID_TIGERSHARK 0x0704
1212#define PCI_DEVICE_ID_TOMCAT 0x0714 1234#define PCI_DEVICE_ID_TOMCAT 0x0714
1213#define PCI_DEVICE_ID_FALCON 0xf180
1214#define PCI_DEVICE_ID_BALIUS 0xe131
1215 1235
1216#define JEDEC_ID_ADDRESS 0x0080001c 1236#define JEDEC_ID_ADDRESS 0x0080001c
1217#define FIREFLY_JEDEC_ID 0x1ACC 1237#define FIREFLY_JEDEC_ID 0x1ACC
@@ -1368,7 +1388,6 @@ typedef struct { /* FireFly BIU registers */
1368#define MBX_READ_LNK_STAT 0x12 1388#define MBX_READ_LNK_STAT 0x12
1369#define MBX_REG_LOGIN 0x13 1389#define MBX_REG_LOGIN 0x13
1370#define MBX_UNREG_LOGIN 0x14 1390#define MBX_UNREG_LOGIN 0x14
1371#define MBX_READ_LA 0x15
1372#define MBX_CLEAR_LA 0x16 1391#define MBX_CLEAR_LA 0x16
1373#define MBX_DUMP_MEMORY 0x17 1392#define MBX_DUMP_MEMORY 0x17
1374#define MBX_DUMP_CONTEXT 0x18 1393#define MBX_DUMP_CONTEXT 0x18
@@ -1402,7 +1421,7 @@ typedef struct { /* FireFly BIU registers */
1402#define MBX_READ_SPARM64 0x8D 1421#define MBX_READ_SPARM64 0x8D
1403#define MBX_READ_RPI64 0x8F 1422#define MBX_READ_RPI64 0x8F
1404#define MBX_REG_LOGIN64 0x93 1423#define MBX_REG_LOGIN64 0x93
1405#define MBX_READ_LA64 0x95 1424#define MBX_READ_TOPOLOGY 0x95
1406#define MBX_REG_VPI 0x96 1425#define MBX_REG_VPI 0x96
1407#define MBX_UNREG_VPI 0x97 1426#define MBX_UNREG_VPI 0x97
1408 1427
@@ -1823,12 +1842,13 @@ typedef struct {
1823#define FLAGS_IMED_ABORT 0x04000 /* Bit 14 */ 1842#define FLAGS_IMED_ABORT 0x04000 /* Bit 14 */
1824 1843
1825 uint32_t link_speed; 1844 uint32_t link_speed;
1826#define LINK_SPEED_AUTO 0 /* Auto selection */ 1845#define LINK_SPEED_AUTO 0x0 /* Auto selection */
1827#define LINK_SPEED_1G 1 /* 1 Gigabaud */ 1846#define LINK_SPEED_1G 0x1 /* 1 Gigabaud */
1828#define LINK_SPEED_2G 2 /* 2 Gigabaud */ 1847#define LINK_SPEED_2G 0x2 /* 2 Gigabaud */
1829#define LINK_SPEED_4G 4 /* 4 Gigabaud */ 1848#define LINK_SPEED_4G 0x4 /* 4 Gigabaud */
1830#define LINK_SPEED_8G 8 /* 8 Gigabaud */ 1849#define LINK_SPEED_8G 0x8 /* 8 Gigabaud */
1831#define LINK_SPEED_10G 16 /* 10 Gigabaud */ 1850#define LINK_SPEED_10G 0x10 /* 10 Gigabaud */
1851#define LINK_SPEED_16G 0x11 /* 16 Gigabaud */
1832 1852
1833} INIT_LINK_VAR; 1853} INIT_LINK_VAR;
1834 1854
@@ -1999,6 +2019,7 @@ typedef struct {
1999#define LMT_4Gb 0x040 2019#define LMT_4Gb 0x040
2000#define LMT_8Gb 0x080 2020#define LMT_8Gb 0x080
2001#define LMT_10Gb 0x100 2021#define LMT_10Gb 0x100
2022#define LMT_16Gb 0x200
2002 uint32_t rsvd2; 2023 uint32_t rsvd2;
2003 uint32_t rsvd3; 2024 uint32_t rsvd3;
2004 uint32_t max_xri; 2025 uint32_t max_xri;
@@ -2394,100 +2415,93 @@ typedef struct {
2394#endif 2415#endif
2395} UNREG_D_ID_VAR; 2416} UNREG_D_ID_VAR;
2396 2417
2397/* Structure for MB Command READ_LA (21) */ 2418/* Structure for MB Command READ_TOPOLOGY (0x95) */
2398/* Structure for MB Command READ_LA64 (0x95) */ 2419struct lpfc_mbx_read_top {
2399
2400typedef struct {
2401 uint32_t eventTag; /* Event tag */ 2420 uint32_t eventTag; /* Event tag */
2402#ifdef __BIG_ENDIAN_BITFIELD 2421 uint32_t word2;
2403 uint32_t rsvd1:19; 2422#define lpfc_mbx_read_top_fa_SHIFT 12
2404 uint32_t fa:1; 2423#define lpfc_mbx_read_top_fa_MASK 0x00000001
2405 uint32_t mm:1; /* Menlo Maintenance mode enabled */ 2424#define lpfc_mbx_read_top_fa_WORD word2
2406 uint32_t rx:1; 2425#define lpfc_mbx_read_top_mm_SHIFT 11
2407 uint32_t pb:1; 2426#define lpfc_mbx_read_top_mm_MASK 0x00000001
2408 uint32_t il:1; 2427#define lpfc_mbx_read_top_mm_WORD word2
2409 uint32_t attType:8; 2428#define lpfc_mbx_read_top_pb_SHIFT 9
2410#else /* __LITTLE_ENDIAN_BITFIELD */ 2429#define lpfc_mbx_read_top_pb_MASK 0X00000001
2411 uint32_t attType:8; 2430#define lpfc_mbx_read_top_pb_WORD word2
2412 uint32_t il:1; 2431#define lpfc_mbx_read_top_il_SHIFT 8
2413 uint32_t pb:1; 2432#define lpfc_mbx_read_top_il_MASK 0x00000001
2414 uint32_t rx:1; 2433#define lpfc_mbx_read_top_il_WORD word2
2415 uint32_t mm:1; 2434#define lpfc_mbx_read_top_att_type_SHIFT 0
2416 uint32_t fa:1; 2435#define lpfc_mbx_read_top_att_type_MASK 0x000000FF
2417 uint32_t rsvd1:19; 2436#define lpfc_mbx_read_top_att_type_WORD word2
2418#endif 2437#define LPFC_ATT_RESERVED 0x00 /* Reserved - attType */
2419 2438#define LPFC_ATT_LINK_UP 0x01 /* Link is up */
2420#define AT_RESERVED 0x00 /* Reserved - attType */ 2439#define LPFC_ATT_LINK_DOWN 0x02 /* Link is down */
2421#define AT_LINK_UP 0x01 /* Link is up */ 2440 uint32_t word3;
2422#define AT_LINK_DOWN 0x02 /* Link is down */ 2441#define lpfc_mbx_read_top_alpa_granted_SHIFT 24
2423 2442#define lpfc_mbx_read_top_alpa_granted_MASK 0x000000FF
2424#ifdef __BIG_ENDIAN_BITFIELD 2443#define lpfc_mbx_read_top_alpa_granted_WORD word3
2425 uint8_t granted_AL_PA; 2444#define lpfc_mbx_read_top_lip_alps_SHIFT 16
2426 uint8_t lipAlPs; 2445#define lpfc_mbx_read_top_lip_alps_MASK 0x000000FF
2427 uint8_t lipType; 2446#define lpfc_mbx_read_top_lip_alps_WORD word3
2428 uint8_t topology; 2447#define lpfc_mbx_read_top_lip_type_SHIFT 8
2429#else /* __LITTLE_ENDIAN_BITFIELD */ 2448#define lpfc_mbx_read_top_lip_type_MASK 0x000000FF
2430 uint8_t topology; 2449#define lpfc_mbx_read_top_lip_type_WORD word3
2431 uint8_t lipType; 2450#define lpfc_mbx_read_top_topology_SHIFT 0
2432 uint8_t lipAlPs; 2451#define lpfc_mbx_read_top_topology_MASK 0x000000FF
2433 uint8_t granted_AL_PA; 2452#define lpfc_mbx_read_top_topology_WORD word3
2434#endif 2453#define LPFC_TOPOLOGY_PT_PT 0x01 /* Topology is pt-pt / pt-fabric */
2435 2454#define LPFC_TOPOLOGY_LOOP 0x02 /* Topology is FC-AL */
2436#define TOPOLOGY_PT_PT 0x01 /* Topology is pt-pt / pt-fabric */ 2455#define LPFC_TOPOLOGY_MM 0x05 /* maint mode zephtr to menlo */
2437#define TOPOLOGY_LOOP 0x02 /* Topology is FC-AL */ 2456 /* store the LILP AL_PA position map into */
2438#define TOPOLOGY_LNK_MENLO_MAINTENANCE 0x05 /* maint mode zephtr to menlo */ 2457 struct ulp_bde64 lilpBde64;
2439 2458#define LPFC_ALPA_MAP_SIZE 128
2440 union { 2459 uint32_t word7;
2441 struct ulp_bde lilpBde; /* This BDE points to a 128 byte buffer 2460#define lpfc_mbx_read_top_ld_lu_SHIFT 31
2442 to */ 2461#define lpfc_mbx_read_top_ld_lu_MASK 0x00000001
2443 /* store the LILP AL_PA position map into */ 2462#define lpfc_mbx_read_top_ld_lu_WORD word7
2444 struct ulp_bde64 lilpBde64; 2463#define lpfc_mbx_read_top_ld_tf_SHIFT 30
2445 } un; 2464#define lpfc_mbx_read_top_ld_tf_MASK 0x00000001
2446 2465#define lpfc_mbx_read_top_ld_tf_WORD word7
2447#ifdef __BIG_ENDIAN_BITFIELD 2466#define lpfc_mbx_read_top_ld_link_spd_SHIFT 8
2448 uint32_t Dlu:1; 2467#define lpfc_mbx_read_top_ld_link_spd_MASK 0x000000FF
2449 uint32_t Dtf:1; 2468#define lpfc_mbx_read_top_ld_link_spd_WORD word7
2450 uint32_t Drsvd2:14; 2469#define lpfc_mbx_read_top_ld_nl_port_SHIFT 4
2451 uint32_t DlnkSpeed:8; 2470#define lpfc_mbx_read_top_ld_nl_port_MASK 0x0000000F
2452 uint32_t DnlPort:4; 2471#define lpfc_mbx_read_top_ld_nl_port_WORD word7
2453 uint32_t Dtx:2; 2472#define lpfc_mbx_read_top_ld_tx_SHIFT 2
2454 uint32_t Drx:2; 2473#define lpfc_mbx_read_top_ld_tx_MASK 0x00000003
2455#else /* __LITTLE_ENDIAN_BITFIELD */ 2474#define lpfc_mbx_read_top_ld_tx_WORD word7
2456 uint32_t Drx:2; 2475#define lpfc_mbx_read_top_ld_rx_SHIFT 0
2457 uint32_t Dtx:2; 2476#define lpfc_mbx_read_top_ld_rx_MASK 0x00000003
2458 uint32_t DnlPort:4; 2477#define lpfc_mbx_read_top_ld_rx_WORD word7
2459 uint32_t DlnkSpeed:8; 2478 uint32_t word8;
2460 uint32_t Drsvd2:14; 2479#define lpfc_mbx_read_top_lu_SHIFT 31
2461 uint32_t Dtf:1; 2480#define lpfc_mbx_read_top_lu_MASK 0x00000001
2462 uint32_t Dlu:1; 2481#define lpfc_mbx_read_top_lu_WORD word8
2463#endif 2482#define lpfc_mbx_read_top_tf_SHIFT 30
2464 2483#define lpfc_mbx_read_top_tf_MASK 0x00000001
2465#ifdef __BIG_ENDIAN_BITFIELD 2484#define lpfc_mbx_read_top_tf_WORD word8
2466 uint32_t Ulu:1; 2485#define lpfc_mbx_read_top_link_spd_SHIFT 8
2467 uint32_t Utf:1; 2486#define lpfc_mbx_read_top_link_spd_MASK 0x000000FF
2468 uint32_t Ursvd2:14; 2487#define lpfc_mbx_read_top_link_spd_WORD word8
2469 uint32_t UlnkSpeed:8; 2488#define lpfc_mbx_read_top_nl_port_SHIFT 4
2470 uint32_t UnlPort:4; 2489#define lpfc_mbx_read_top_nl_port_MASK 0x0000000F
2471 uint32_t Utx:2; 2490#define lpfc_mbx_read_top_nl_port_WORD word8
2472 uint32_t Urx:2; 2491#define lpfc_mbx_read_top_tx_SHIFT 2
2473#else /* __LITTLE_ENDIAN_BITFIELD */ 2492#define lpfc_mbx_read_top_tx_MASK 0x00000003
2474 uint32_t Urx:2; 2493#define lpfc_mbx_read_top_tx_WORD word8
2475 uint32_t Utx:2; 2494#define lpfc_mbx_read_top_rx_SHIFT 0
2476 uint32_t UnlPort:4; 2495#define lpfc_mbx_read_top_rx_MASK 0x00000003
2477 uint32_t UlnkSpeed:8; 2496#define lpfc_mbx_read_top_rx_WORD word8
2478 uint32_t Ursvd2:14; 2497#define LPFC_LINK_SPEED_UNKNOWN 0x0
2479 uint32_t Utf:1; 2498#define LPFC_LINK_SPEED_1GHZ 0x04
2480 uint32_t Ulu:1; 2499#define LPFC_LINK_SPEED_2GHZ 0x08
2481#endif 2500#define LPFC_LINK_SPEED_4GHZ 0x10
2482 2501#define LPFC_LINK_SPEED_8GHZ 0x20
2483#define LA_UNKNW_LINK 0x0 /* lnkSpeed */ 2502#define LPFC_LINK_SPEED_10GHZ 0x40
2484#define LA_1GHZ_LINK 0x04 /* lnkSpeed */ 2503#define LPFC_LINK_SPEED_16GHZ 0x80
2485#define LA_2GHZ_LINK 0x08 /* lnkSpeed */ 2504};
2486#define LA_4GHZ_LINK 0x10 /* lnkSpeed */
2487#define LA_8GHZ_LINK 0x20 /* lnkSpeed */
2488#define LA_10GHZ_LINK 0x40 /* lnkSpeed */
2489
2490} READ_LA_VAR;
2491 2505
2492/* Structure for MB Command CLEAR_LA (22) */ 2506/* Structure for MB Command CLEAR_LA (22) */
2493 2507
@@ -3016,7 +3030,6 @@ typedef union {
3016 READ_LNK_VAR varRdLnk; /* cmd = 18 (READ_LNK_STAT) */ 3030 READ_LNK_VAR varRdLnk; /* cmd = 18 (READ_LNK_STAT) */
3017 REG_LOGIN_VAR varRegLogin; /* cmd = 19 (REG_LOGIN(64)) */ 3031 REG_LOGIN_VAR varRegLogin; /* cmd = 19 (REG_LOGIN(64)) */
3018 UNREG_LOGIN_VAR varUnregLogin; /* cmd = 20 (UNREG_LOGIN) */ 3032 UNREG_LOGIN_VAR varUnregLogin; /* cmd = 20 (UNREG_LOGIN) */
3019 READ_LA_VAR varReadLA; /* cmd = 21 (READ_LA(64)) */
3020 CLEAR_LA_VAR varClearLA; /* cmd = 22 (CLEAR_LA) */ 3033 CLEAR_LA_VAR varClearLA; /* cmd = 22 (CLEAR_LA) */
3021 DUMP_VAR varDmp; /* Warm Start DUMP mbx cmd */ 3034 DUMP_VAR varDmp; /* Warm Start DUMP mbx cmd */
3022 UNREG_D_ID_VAR varUnregDID; /* cmd = 0x23 (UNREG_D_ID) */ 3035 UNREG_D_ID_VAR varUnregDID; /* cmd = 0x23 (UNREG_D_ID) */
@@ -3026,6 +3039,7 @@ typedef union {
3026 struct config_hbq_var varCfgHbq;/* cmd = 0x7c (CONFIG_HBQ) */ 3039 struct config_hbq_var varCfgHbq;/* cmd = 0x7c (CONFIG_HBQ) */
3027 struct update_cfg_var varUpdateCfg; /* cmd = 0x1B (UPDATE_CFG)*/ 3040 struct update_cfg_var varUpdateCfg; /* cmd = 0x1B (UPDATE_CFG)*/
3028 CONFIG_PORT_VAR varCfgPort; /* cmd = 0x88 (CONFIG_PORT) */ 3041 CONFIG_PORT_VAR varCfgPort; /* cmd = 0x88 (CONFIG_PORT) */
3042 struct lpfc_mbx_read_top varReadTop; /* cmd = 0x95 (READ_TOPOLOGY) */
3029 REG_VPI_VAR varRegVpi; /* cmd = 0x96 (REG_VPI) */ 3043 REG_VPI_VAR varRegVpi; /* cmd = 0x96 (REG_VPI) */
3030 UNREG_VPI_VAR varUnregVpi; /* cmd = 0x97 (UNREG_VPI) */ 3044 UNREG_VPI_VAR varUnregVpi; /* cmd = 0x97 (UNREG_VPI) */
3031 ASYNCEVT_ENABLE_VAR varCfgAsyncEvent; /*cmd = x33 (CONFIG_ASYNC) */ 3045 ASYNCEVT_ENABLE_VAR varCfgAsyncEvent; /*cmd = x33 (CONFIG_ASYNC) */
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 6e4bc34e1d0d..94c1aa1136de 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -64,29 +64,39 @@ struct lpfc_sli_intf {
64#define lpfc_sli_intf_valid_MASK 0x00000007 64#define lpfc_sli_intf_valid_MASK 0x00000007
65#define lpfc_sli_intf_valid_WORD word0 65#define lpfc_sli_intf_valid_WORD word0
66#define LPFC_SLI_INTF_VALID 6 66#define LPFC_SLI_INTF_VALID 6
67#define lpfc_sli_intf_featurelevel2_SHIFT 24 67#define lpfc_sli_intf_sli_hint2_SHIFT 24
68#define lpfc_sli_intf_featurelevel2_MASK 0x0000001F 68#define lpfc_sli_intf_sli_hint2_MASK 0x0000001F
69#define lpfc_sli_intf_featurelevel2_WORD word0 69#define lpfc_sli_intf_sli_hint2_WORD word0
70#define lpfc_sli_intf_featurelevel1_SHIFT 16 70#define LPFC_SLI_INTF_SLI_HINT2_NONE 0
71#define lpfc_sli_intf_featurelevel1_MASK 0x000000FF 71#define lpfc_sli_intf_sli_hint1_SHIFT 16
72#define lpfc_sli_intf_featurelevel1_WORD word0 72#define lpfc_sli_intf_sli_hint1_MASK 0x000000FF
73#define LPFC_SLI_INTF_FEATURELEVEL1_1 1 73#define lpfc_sli_intf_sli_hint1_WORD word0
74#define LPFC_SLI_INTF_FEATURELEVEL1_2 2 74#define LPFC_SLI_INTF_SLI_HINT1_NONE 0
75#define LPFC_SLI_INTF_SLI_HINT1_1 1
76#define LPFC_SLI_INTF_SLI_HINT1_2 2
77#define lpfc_sli_intf_if_type_SHIFT 12
78#define lpfc_sli_intf_if_type_MASK 0x0000000F
79#define lpfc_sli_intf_if_type_WORD word0
80#define LPFC_SLI_INTF_IF_TYPE_0 0
81#define LPFC_SLI_INTF_IF_TYPE_1 1
82#define LPFC_SLI_INTF_IF_TYPE_2 2
75#define lpfc_sli_intf_sli_family_SHIFT 8 83#define lpfc_sli_intf_sli_family_SHIFT 8
76#define lpfc_sli_intf_sli_family_MASK 0x000000FF 84#define lpfc_sli_intf_sli_family_MASK 0x0000000F
77#define lpfc_sli_intf_sli_family_WORD word0 85#define lpfc_sli_intf_sli_family_WORD word0
78#define LPFC_SLI_INTF_FAMILY_BE2 0 86#define LPFC_SLI_INTF_FAMILY_BE2 0x0
79#define LPFC_SLI_INTF_FAMILY_BE3 1 87#define LPFC_SLI_INTF_FAMILY_BE3 0x1
88#define LPFC_SLI_INTF_FAMILY_LNCR_A0 0xa
89#define LPFC_SLI_INTF_FAMILY_LNCR_B0 0xb
80#define lpfc_sli_intf_slirev_SHIFT 4 90#define lpfc_sli_intf_slirev_SHIFT 4
81#define lpfc_sli_intf_slirev_MASK 0x0000000F 91#define lpfc_sli_intf_slirev_MASK 0x0000000F
82#define lpfc_sli_intf_slirev_WORD word0 92#define lpfc_sli_intf_slirev_WORD word0
83#define LPFC_SLI_INTF_REV_SLI3 3 93#define LPFC_SLI_INTF_REV_SLI3 3
84#define LPFC_SLI_INTF_REV_SLI4 4 94#define LPFC_SLI_INTF_REV_SLI4 4
85#define lpfc_sli_intf_if_type_SHIFT 0 95#define lpfc_sli_intf_func_type_SHIFT 0
86#define lpfc_sli_intf_if_type_MASK 0x00000007 96#define lpfc_sli_intf_func_type_MASK 0x00000001
87#define lpfc_sli_intf_if_type_WORD word0 97#define lpfc_sli_intf_func_type_WORD word0
88#define LPFC_SLI_INTF_IF_TYPE_0 0 98#define LPFC_SLI_INTF_IF_TYPE_PHYS 0
89#define LPFC_SLI_INTF_IF_TYPE_1 1 99#define LPFC_SLI_INTF_IF_TYPE_VIRT 1
90}; 100};
91 101
92#define LPFC_SLI4_MBX_EMBED true 102#define LPFC_SLI4_MBX_EMBED true
@@ -450,35 +460,40 @@ struct lpfc_register {
450 uint32_t word0; 460 uint32_t word0;
451}; 461};
452 462
463/* The following BAR0 Registers apply to SLI4 if_type 0 UCNAs. */
453#define LPFC_UERR_STATUS_HI 0x00A4 464#define LPFC_UERR_STATUS_HI 0x00A4
454#define LPFC_UERR_STATUS_LO 0x00A0 465#define LPFC_UERR_STATUS_LO 0x00A0
455#define LPFC_UE_MASK_HI 0x00AC 466#define LPFC_UE_MASK_HI 0x00AC
456#define LPFC_UE_MASK_LO 0x00A8 467#define LPFC_UE_MASK_LO 0x00A8
468
469/* The following BAR0 register sets are defined for if_type 0 and 2 UCNAs. */
457#define LPFC_SLI_INTF 0x0058 470#define LPFC_SLI_INTF 0x0058
458 471
459/* BAR0 Registers */ 472#define LPFC_SLIPORT_IF2_SMPHR 0x0400
460#define LPFC_HST_STATE 0x00AC 473#define lpfc_port_smphr_perr_SHIFT 31
461#define lpfc_hst_state_perr_SHIFT 31 474#define lpfc_port_smphr_perr_MASK 0x1
462#define lpfc_hst_state_perr_MASK 0x1 475#define lpfc_port_smphr_perr_WORD word0
463#define lpfc_hst_state_perr_WORD word0 476#define lpfc_port_smphr_sfi_SHIFT 30
464#define lpfc_hst_state_sfi_SHIFT 30 477#define lpfc_port_smphr_sfi_MASK 0x1
465#define lpfc_hst_state_sfi_MASK 0x1 478#define lpfc_port_smphr_sfi_WORD word0
466#define lpfc_hst_state_sfi_WORD word0 479#define lpfc_port_smphr_nip_SHIFT 29
467#define lpfc_hst_state_nip_SHIFT 29 480#define lpfc_port_smphr_nip_MASK 0x1
468#define lpfc_hst_state_nip_MASK 0x1 481#define lpfc_port_smphr_nip_WORD word0
469#define lpfc_hst_state_nip_WORD word0 482#define lpfc_port_smphr_ipc_SHIFT 28
470#define lpfc_hst_state_ipc_SHIFT 28 483#define lpfc_port_smphr_ipc_MASK 0x1
471#define lpfc_hst_state_ipc_MASK 0x1 484#define lpfc_port_smphr_ipc_WORD word0
472#define lpfc_hst_state_ipc_WORD word0 485#define lpfc_port_smphr_scr1_SHIFT 27
473#define lpfc_hst_state_xrom_SHIFT 27 486#define lpfc_port_smphr_scr1_MASK 0x1
474#define lpfc_hst_state_xrom_MASK 0x1 487#define lpfc_port_smphr_scr1_WORD word0
475#define lpfc_hst_state_xrom_WORD word0 488#define lpfc_port_smphr_scr2_SHIFT 26
476#define lpfc_hst_state_dl_SHIFT 26 489#define lpfc_port_smphr_scr2_MASK 0x1
477#define lpfc_hst_state_dl_MASK 0x1 490#define lpfc_port_smphr_scr2_WORD word0
478#define lpfc_hst_state_dl_WORD word0 491#define lpfc_port_smphr_host_scratch_SHIFT 16
479#define lpfc_hst_state_port_status_SHIFT 0 492#define lpfc_port_smphr_host_scratch_MASK 0xFF
480#define lpfc_hst_state_port_status_MASK 0xFFFF 493#define lpfc_port_smphr_host_scratch_WORD word0
481#define lpfc_hst_state_port_status_WORD word0 494#define lpfc_port_smphr_port_status_SHIFT 0
495#define lpfc_port_smphr_port_status_MASK 0xFFFF
496#define lpfc_port_smphr_port_status_WORD word0
482 497
483#define LPFC_POST_STAGE_POWER_ON_RESET 0x0000 498#define LPFC_POST_STAGE_POWER_ON_RESET 0x0000
484#define LPFC_POST_STAGE_AWAITING_HOST_RDY 0x0001 499#define LPFC_POST_STAGE_AWAITING_HOST_RDY 0x0001
@@ -511,10 +526,46 @@ struct lpfc_register {
511#define LPFC_POST_STAGE_RC_DONE 0x0B07 526#define LPFC_POST_STAGE_RC_DONE 0x0B07
512#define LPFC_POST_STAGE_REBOOT_SYSTEM 0x0B08 527#define LPFC_POST_STAGE_REBOOT_SYSTEM 0x0B08
513#define LPFC_POST_STAGE_MAC_ADDRESS 0x0C00 528#define LPFC_POST_STAGE_MAC_ADDRESS 0x0C00
514#define LPFC_POST_STAGE_ARMFW_READY 0xC000 529#define LPFC_POST_STAGE_PORT_READY 0xC000
515#define LPFC_POST_STAGE_ARMFW_UE 0xF000 530#define LPFC_POST_STAGE_PORT_UE 0xF000
531
532#define LPFC_SLIPORT_STATUS 0x0404
533#define lpfc_sliport_status_err_SHIFT 31
534#define lpfc_sliport_status_err_MASK 0x1
535#define lpfc_sliport_status_err_WORD word0
536#define lpfc_sliport_status_end_SHIFT 30
537#define lpfc_sliport_status_end_MASK 0x1
538#define lpfc_sliport_status_end_WORD word0
539#define lpfc_sliport_status_oti_SHIFT 29
540#define lpfc_sliport_status_oti_MASK 0x1
541#define lpfc_sliport_status_oti_WORD word0
542#define lpfc_sliport_status_rn_SHIFT 24
543#define lpfc_sliport_status_rn_MASK 0x1
544#define lpfc_sliport_status_rn_WORD word0
545#define lpfc_sliport_status_rdy_SHIFT 23
546#define lpfc_sliport_status_rdy_MASK 0x1
547#define lpfc_sliport_status_rdy_WORD word0
548#define MAX_IF_TYPE_2_RESETS 1000
549
550#define LPFC_SLIPORT_CNTRL 0x0408
551#define lpfc_sliport_ctrl_end_SHIFT 30
552#define lpfc_sliport_ctrl_end_MASK 0x1
553#define lpfc_sliport_ctrl_end_WORD word0
554#define LPFC_SLIPORT_LITTLE_ENDIAN 0
555#define LPFC_SLIPORT_BIG_ENDIAN 1
556#define lpfc_sliport_ctrl_ip_SHIFT 27
557#define lpfc_sliport_ctrl_ip_MASK 0x1
558#define lpfc_sliport_ctrl_ip_WORD word0
559#define LPFC_SLIPORT_INIT_PORT 1
560
561#define LPFC_SLIPORT_ERR_1 0x040C
562#define LPFC_SLIPORT_ERR_2 0x0410
563
564/* The following Registers apply to SLI4 if_type 0 UCNAs. They typically
565 * reside in BAR 2.
566 */
567#define LPFC_SLIPORT_IF0_SMPHR 0x00AC
516 568
517/* BAR1 Registers */
518#define LPFC_IMR_MASK_ALL 0xFFFFFFFF 569#define LPFC_IMR_MASK_ALL 0xFFFFFFFF
519#define LPFC_ISCR_CLEAR_ALL 0xFFFFFFFF 570#define LPFC_ISCR_CLEAR_ALL 0xFFFFFFFF
520 571
@@ -569,14 +620,21 @@ struct lpfc_register {
569#define LPFC_SLI4_INTR30 BIT30 620#define LPFC_SLI4_INTR30 BIT30
570#define LPFC_SLI4_INTR31 BIT31 621#define LPFC_SLI4_INTR31 BIT31
571 622
572/* BAR2 Registers */ 623/*
624 * The Doorbell registers defined here exist in different BAR
625 * register sets depending on the UCNA Port's reported if_type
626 * value. For UCNA ports running SLI4 and if_type 0, they reside in
627 * BAR4. For UCNA ports running SLI4 and if_type 2, they reside in
628 * BAR0. The offsets are the same so the driver must account for
629 * any base address difference.
630 */
573#define LPFC_RQ_DOORBELL 0x00A0 631#define LPFC_RQ_DOORBELL 0x00A0
574#define lpfc_rq_doorbell_num_posted_SHIFT 16 632#define lpfc_rq_doorbell_num_posted_SHIFT 16
575#define lpfc_rq_doorbell_num_posted_MASK 0x3FFF 633#define lpfc_rq_doorbell_num_posted_MASK 0x3FFF
576#define lpfc_rq_doorbell_num_posted_WORD word0 634#define lpfc_rq_doorbell_num_posted_WORD word0
577#define LPFC_RQ_POST_BATCH 8 /* RQEs to post at one time */ 635#define LPFC_RQ_POST_BATCH 8 /* RQEs to post at one time */
578#define lpfc_rq_doorbell_id_SHIFT 0 636#define lpfc_rq_doorbell_id_SHIFT 0
579#define lpfc_rq_doorbell_id_MASK 0x03FF 637#define lpfc_rq_doorbell_id_MASK 0xFFFF
580#define lpfc_rq_doorbell_id_WORD word0 638#define lpfc_rq_doorbell_id_WORD word0
581 639
582#define LPFC_WQ_DOORBELL 0x0040 640#define LPFC_WQ_DOORBELL 0x0040
@@ -591,6 +649,11 @@ struct lpfc_register {
591#define lpfc_wq_doorbell_id_WORD word0 649#define lpfc_wq_doorbell_id_WORD word0
592 650
593#define LPFC_EQCQ_DOORBELL 0x0120 651#define LPFC_EQCQ_DOORBELL 0x0120
652#define lpfc_eqcq_doorbell_se_SHIFT 31
653#define lpfc_eqcq_doorbell_se_MASK 0x0001
654#define lpfc_eqcq_doorbell_se_WORD word0
655#define LPFC_EQCQ_SOLICIT_ENABLE_OFF 0
656#define LPFC_EQCQ_SOLICIT_ENABLE_ON 1
594#define lpfc_eqcq_doorbell_arm_SHIFT 29 657#define lpfc_eqcq_doorbell_arm_SHIFT 29
595#define lpfc_eqcq_doorbell_arm_MASK 0x0001 658#define lpfc_eqcq_doorbell_arm_MASK 0x0001
596#define lpfc_eqcq_doorbell_arm_WORD word0 659#define lpfc_eqcq_doorbell_arm_WORD word0
@@ -628,7 +691,7 @@ struct lpfc_register {
628#define lpfc_mq_doorbell_num_posted_MASK 0x3FFF 691#define lpfc_mq_doorbell_num_posted_MASK 0x3FFF
629#define lpfc_mq_doorbell_num_posted_WORD word0 692#define lpfc_mq_doorbell_num_posted_WORD word0
630#define lpfc_mq_doorbell_id_SHIFT 0 693#define lpfc_mq_doorbell_id_SHIFT 0
631#define lpfc_mq_doorbell_id_MASK 0x03FF 694#define lpfc_mq_doorbell_id_MASK 0xFFFF
632#define lpfc_mq_doorbell_id_WORD word0 695#define lpfc_mq_doorbell_id_WORD word0
633 696
634struct lpfc_sli4_cfg_mhdr { 697struct lpfc_sli4_cfg_mhdr {
@@ -1048,12 +1111,18 @@ struct lpfc_mbx_mq_create_ext {
1048#define lpfc_mbx_mq_create_ext_async_evt_link_SHIFT LPFC_TRAILER_CODE_LINK 1111#define lpfc_mbx_mq_create_ext_async_evt_link_SHIFT LPFC_TRAILER_CODE_LINK
1049#define lpfc_mbx_mq_create_ext_async_evt_link_MASK 0x00000001 1112#define lpfc_mbx_mq_create_ext_async_evt_link_MASK 0x00000001
1050#define lpfc_mbx_mq_create_ext_async_evt_link_WORD async_evt_bmap 1113#define lpfc_mbx_mq_create_ext_async_evt_link_WORD async_evt_bmap
1051#define lpfc_mbx_mq_create_ext_async_evt_fcfste_SHIFT LPFC_TRAILER_CODE_FCOE 1114#define lpfc_mbx_mq_create_ext_async_evt_fip_SHIFT LPFC_TRAILER_CODE_FCOE
1052#define lpfc_mbx_mq_create_ext_async_evt_fcfste_MASK 0x00000001 1115#define lpfc_mbx_mq_create_ext_async_evt_fip_MASK 0x00000001
1053#define lpfc_mbx_mq_create_ext_async_evt_fcfste_WORD async_evt_bmap 1116#define lpfc_mbx_mq_create_ext_async_evt_fip_WORD async_evt_bmap
1054#define lpfc_mbx_mq_create_ext_async_evt_group5_SHIFT LPFC_TRAILER_CODE_GRP5 1117#define lpfc_mbx_mq_create_ext_async_evt_group5_SHIFT LPFC_TRAILER_CODE_GRP5
1055#define lpfc_mbx_mq_create_ext_async_evt_group5_MASK 0x00000001 1118#define lpfc_mbx_mq_create_ext_async_evt_group5_MASK 0x00000001
1056#define lpfc_mbx_mq_create_ext_async_evt_group5_WORD async_evt_bmap 1119#define lpfc_mbx_mq_create_ext_async_evt_group5_WORD async_evt_bmap
1120#define lpfc_mbx_mq_create_ext_async_evt_fc_SHIFT LPFC_TRAILER_CODE_FC
1121#define lpfc_mbx_mq_create_ext_async_evt_fc_MASK 0x00000001
1122#define lpfc_mbx_mq_create_ext_async_evt_fc_WORD async_evt_bmap
1123#define lpfc_mbx_mq_create_ext_async_evt_sli_SHIFT LPFC_TRAILER_CODE_SLI
1124#define lpfc_mbx_mq_create_ext_async_evt_sli_MASK 0x00000001
1125#define lpfc_mbx_mq_create_ext_async_evt_sli_WORD async_evt_bmap
1057 struct mq_context context; 1126 struct mq_context context;
1058 struct dma_address page[LPFC_MAX_MQ_PAGE]; 1127 struct dma_address page[LPFC_MAX_MQ_PAGE];
1059 } request; 1128 } request;
@@ -1307,7 +1376,7 @@ struct lpfc_mbx_query_fw_cfg {
1307#define lpfc_function_mode_dal_WORD function_mode 1376#define lpfc_function_mode_dal_WORD function_mode
1308#define lpfc_function_mode_lro_SHIFT 9 1377#define lpfc_function_mode_lro_SHIFT 9
1309#define lpfc_function_mode_lro_MASK 0x00000001 1378#define lpfc_function_mode_lro_MASK 0x00000001
1310#define lpfc_function_mode_lro_WORD function_mode9 1379#define lpfc_function_mode_lro_WORD function_mode
1311#define lpfc_function_mode_flex10_SHIFT 10 1380#define lpfc_function_mode_flex10_SHIFT 10
1312#define lpfc_function_mode_flex10_MASK 0x00000001 1381#define lpfc_function_mode_flex10_MASK 0x00000001
1313#define lpfc_function_mode_flex10_WORD function_mode 1382#define lpfc_function_mode_flex10_WORD function_mode
@@ -1358,10 +1427,16 @@ struct lpfc_mbx_init_vfi {
1358#define lpfc_init_vfi_vf_SHIFT 29 1427#define lpfc_init_vfi_vf_SHIFT 29
1359#define lpfc_init_vfi_vf_MASK 0x00000001 1428#define lpfc_init_vfi_vf_MASK 0x00000001
1360#define lpfc_init_vfi_vf_WORD word1 1429#define lpfc_init_vfi_vf_WORD word1
1430#define lpfc_init_vfi_vp_SHIFT 28
1431#define lpfc_init_vfi_vp_MASK 0x00000001
1432#define lpfc_init_vfi_vp_WORD word1
1361#define lpfc_init_vfi_vfi_SHIFT 0 1433#define lpfc_init_vfi_vfi_SHIFT 0
1362#define lpfc_init_vfi_vfi_MASK 0x0000FFFF 1434#define lpfc_init_vfi_vfi_MASK 0x0000FFFF
1363#define lpfc_init_vfi_vfi_WORD word1 1435#define lpfc_init_vfi_vfi_WORD word1
1364 uint32_t word2; 1436 uint32_t word2;
1437#define lpfc_init_vfi_vpi_SHIFT 16
1438#define lpfc_init_vfi_vpi_MASK 0x0000FFFF
1439#define lpfc_init_vfi_vpi_WORD word2
1365#define lpfc_init_vfi_fcfi_SHIFT 0 1440#define lpfc_init_vfi_fcfi_SHIFT 0
1366#define lpfc_init_vfi_fcfi_MASK 0x0000FFFF 1441#define lpfc_init_vfi_fcfi_MASK 0x0000FFFF
1367#define lpfc_init_vfi_fcfi_WORD word2 1442#define lpfc_init_vfi_fcfi_WORD word2
@@ -2069,6 +2144,8 @@ struct lpfc_mcqe {
2069#define LPFC_TRAILER_CODE_FCOE 0x2 2144#define LPFC_TRAILER_CODE_FCOE 0x2
2070#define LPFC_TRAILER_CODE_DCBX 0x3 2145#define LPFC_TRAILER_CODE_DCBX 0x3
2071#define LPFC_TRAILER_CODE_GRP5 0x5 2146#define LPFC_TRAILER_CODE_GRP5 0x5
2147#define LPFC_TRAILER_CODE_FC 0x10
2148#define LPFC_TRAILER_CODE_SLI 0x11
2072}; 2149};
2073 2150
2074struct lpfc_acqe_link { 2151struct lpfc_acqe_link {
@@ -2094,11 +2171,12 @@ struct lpfc_acqe_link {
2094#define LPFC_ASYNC_LINK_STATUS_UP 0x1 2171#define LPFC_ASYNC_LINK_STATUS_UP 0x1
2095#define LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN 0x2 2172#define LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN 0x2
2096#define LPFC_ASYNC_LINK_STATUS_LOGICAL_UP 0x3 2173#define LPFC_ASYNC_LINK_STATUS_LOGICAL_UP 0x3
2097#define lpfc_acqe_link_physical_SHIFT 0 2174#define lpfc_acqe_link_type_SHIFT 6
2098#define lpfc_acqe_link_physical_MASK 0x000000FF 2175#define lpfc_acqe_link_type_MASK 0x00000003
2099#define lpfc_acqe_link_physical_WORD word0 2176#define lpfc_acqe_link_type_WORD word0
2100#define LPFC_ASYNC_LINK_PORT_A 0x0 2177#define lpfc_acqe_link_number_SHIFT 0
2101#define LPFC_ASYNC_LINK_PORT_B 0x1 2178#define lpfc_acqe_link_number_MASK 0x0000003F
2179#define lpfc_acqe_link_number_WORD word0
2102 uint32_t word1; 2180 uint32_t word1;
2103#define lpfc_acqe_link_fault_SHIFT 0 2181#define lpfc_acqe_link_fault_SHIFT 0
2104#define lpfc_acqe_link_fault_MASK 0x000000FF 2182#define lpfc_acqe_link_fault_MASK 0x000000FF
@@ -2106,29 +2184,31 @@ struct lpfc_acqe_link {
2106#define LPFC_ASYNC_LINK_FAULT_NONE 0x0 2184#define LPFC_ASYNC_LINK_FAULT_NONE 0x0
2107#define LPFC_ASYNC_LINK_FAULT_LOCAL 0x1 2185#define LPFC_ASYNC_LINK_FAULT_LOCAL 0x1
2108#define LPFC_ASYNC_LINK_FAULT_REMOTE 0x2 2186#define LPFC_ASYNC_LINK_FAULT_REMOTE 0x2
2109#define lpfc_acqe_qos_link_speed_SHIFT 16 2187#define lpfc_acqe_logical_link_speed_SHIFT 16
2110#define lpfc_acqe_qos_link_speed_MASK 0x0000FFFF 2188#define lpfc_acqe_logical_link_speed_MASK 0x0000FFFF
2111#define lpfc_acqe_qos_link_speed_WORD word1 2189#define lpfc_acqe_logical_link_speed_WORD word1
2112 uint32_t event_tag; 2190 uint32_t event_tag;
2113 uint32_t trailer; 2191 uint32_t trailer;
2192#define LPFC_LINK_EVENT_TYPE_PHYSICAL 0x0
2193#define LPFC_LINK_EVENT_TYPE_VIRTUAL 0x1
2114}; 2194};
2115 2195
2116struct lpfc_acqe_fcoe { 2196struct lpfc_acqe_fip {
2117 uint32_t index; 2197 uint32_t index;
2118 uint32_t word1; 2198 uint32_t word1;
2119#define lpfc_acqe_fcoe_fcf_count_SHIFT 0 2199#define lpfc_acqe_fip_fcf_count_SHIFT 0
2120#define lpfc_acqe_fcoe_fcf_count_MASK 0x0000FFFF 2200#define lpfc_acqe_fip_fcf_count_MASK 0x0000FFFF
2121#define lpfc_acqe_fcoe_fcf_count_WORD word1 2201#define lpfc_acqe_fip_fcf_count_WORD word1
2122#define lpfc_acqe_fcoe_event_type_SHIFT 16 2202#define lpfc_acqe_fip_event_type_SHIFT 16
2123#define lpfc_acqe_fcoe_event_type_MASK 0x0000FFFF 2203#define lpfc_acqe_fip_event_type_MASK 0x0000FFFF
2124#define lpfc_acqe_fcoe_event_type_WORD word1 2204#define lpfc_acqe_fip_event_type_WORD word1
2125#define LPFC_FCOE_EVENT_TYPE_NEW_FCF 0x1
2126#define LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL 0x2
2127#define LPFC_FCOE_EVENT_TYPE_FCF_DEAD 0x3
2128#define LPFC_FCOE_EVENT_TYPE_CVL 0x4
2129#define LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD 0x5
2130 uint32_t event_tag; 2205 uint32_t event_tag;
2131 uint32_t trailer; 2206 uint32_t trailer;
2207#define LPFC_FIP_EVENT_TYPE_NEW_FCF 0x1
2208#define LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL 0x2
2209#define LPFC_FIP_EVENT_TYPE_FCF_DEAD 0x3
2210#define LPFC_FIP_EVENT_TYPE_CVL 0x4
2211#define LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD 0x5
2132}; 2212};
2133 2213
2134struct lpfc_acqe_dcbx { 2214struct lpfc_acqe_dcbx {
@@ -2140,9 +2220,12 @@ struct lpfc_acqe_dcbx {
2140 2220
2141struct lpfc_acqe_grp5 { 2221struct lpfc_acqe_grp5 {
2142 uint32_t word0; 2222 uint32_t word0;
2143#define lpfc_acqe_grp5_pport_SHIFT 0 2223#define lpfc_acqe_grp5_type_SHIFT 6
2144#define lpfc_acqe_grp5_pport_MASK 0x000000FF 2224#define lpfc_acqe_grp5_type_MASK 0x00000003
2145#define lpfc_acqe_grp5_pport_WORD word0 2225#define lpfc_acqe_grp5_type_WORD word0
2226#define lpfc_acqe_grp5_number_SHIFT 0
2227#define lpfc_acqe_grp5_number_MASK 0x0000003F
2228#define lpfc_acqe_grp5_number_WORD word0
2146 uint32_t word1; 2229 uint32_t word1;
2147#define lpfc_acqe_grp5_llink_spd_SHIFT 16 2230#define lpfc_acqe_grp5_llink_spd_SHIFT 16
2148#define lpfc_acqe_grp5_llink_spd_MASK 0x0000FFFF 2231#define lpfc_acqe_grp5_llink_spd_MASK 0x0000FFFF
@@ -2151,6 +2234,68 @@ struct lpfc_acqe_grp5 {
2151 uint32_t trailer; 2234 uint32_t trailer;
2152}; 2235};
2153 2236
2237struct lpfc_acqe_fc_la {
2238 uint32_t word0;
2239#define lpfc_acqe_fc_la_speed_SHIFT 24
2240#define lpfc_acqe_fc_la_speed_MASK 0x000000FF
2241#define lpfc_acqe_fc_la_speed_WORD word0
2242#define LPFC_FC_LA_SPEED_UNKOWN 0x0
2243#define LPFC_FC_LA_SPEED_1G 0x1
2244#define LPFC_FC_LA_SPEED_2G 0x2
2245#define LPFC_FC_LA_SPEED_4G 0x4
2246#define LPFC_FC_LA_SPEED_8G 0x8
2247#define LPFC_FC_LA_SPEED_10G 0xA
2248#define LPFC_FC_LA_SPEED_16G 0x10
2249#define lpfc_acqe_fc_la_topology_SHIFT 16
2250#define lpfc_acqe_fc_la_topology_MASK 0x000000FF
2251#define lpfc_acqe_fc_la_topology_WORD word0
2252#define LPFC_FC_LA_TOP_UNKOWN 0x0
2253#define LPFC_FC_LA_TOP_P2P 0x1
2254#define LPFC_FC_LA_TOP_FCAL 0x2
2255#define LPFC_FC_LA_TOP_INTERNAL_LOOP 0x3
2256#define LPFC_FC_LA_TOP_SERDES_LOOP 0x4
2257#define lpfc_acqe_fc_la_att_type_SHIFT 8
2258#define lpfc_acqe_fc_la_att_type_MASK 0x000000FF
2259#define lpfc_acqe_fc_la_att_type_WORD word0
2260#define LPFC_FC_LA_TYPE_LINK_UP 0x1
2261#define LPFC_FC_LA_TYPE_LINK_DOWN 0x2
2262#define LPFC_FC_LA_TYPE_NO_HARD_ALPA 0x3
2263#define lpfc_acqe_fc_la_port_type_SHIFT 6
2264#define lpfc_acqe_fc_la_port_type_MASK 0x00000003
2265#define lpfc_acqe_fc_la_port_type_WORD word0
2266#define LPFC_LINK_TYPE_ETHERNET 0x0
2267#define LPFC_LINK_TYPE_FC 0x1
2268#define lpfc_acqe_fc_la_port_number_SHIFT 0
2269#define lpfc_acqe_fc_la_port_number_MASK 0x0000003F
2270#define lpfc_acqe_fc_la_port_number_WORD word0
2271 uint32_t word1;
2272#define lpfc_acqe_fc_la_llink_spd_SHIFT 16
2273#define lpfc_acqe_fc_la_llink_spd_MASK 0x0000FFFF
2274#define lpfc_acqe_fc_la_llink_spd_WORD word1
2275#define lpfc_acqe_fc_la_fault_SHIFT 0
2276#define lpfc_acqe_fc_la_fault_MASK 0x000000FF
2277#define lpfc_acqe_fc_la_fault_WORD word1
2278#define LPFC_FC_LA_FAULT_NONE 0x0
2279#define LPFC_FC_LA_FAULT_LOCAL 0x1
2280#define LPFC_FC_LA_FAULT_REMOTE 0x2
2281 uint32_t event_tag;
2282 uint32_t trailer;
2283#define LPFC_FC_LA_EVENT_TYPE_FC_LINK 0x1
2284#define LPFC_FC_LA_EVENT_TYPE_SHARED_LINK 0x2
2285};
2286
2287struct lpfc_acqe_sli {
2288 uint32_t event_data1;
2289 uint32_t event_data2;
2290 uint32_t reserved;
2291 uint32_t trailer;
2292#define LPFC_SLI_EVENT_TYPE_PORT_ERROR 0x1
2293#define LPFC_SLI_EVENT_TYPE_OVER_TEMP 0x2
2294#define LPFC_SLI_EVENT_TYPE_NORM_TEMP 0x3
2295#define LPFC_SLI_EVENT_TYPE_NVLOG_POST 0x4
2296#define LPFC_SLI_EVENT_TYPE_DIAG_DUMP 0x5
2297};
2298
2154/* 2299/*
2155 * Define the bootstrap mailbox (bmbx) region used to communicate 2300 * Define the bootstrap mailbox (bmbx) region used to communicate
2156 * mailbox command between the host and port. The mailbox consists 2301 * mailbox command between the host and port. The mailbox consists
@@ -2210,7 +2355,7 @@ struct wqe_common {
2210#define wqe_rcvoxid_WORD word9 2355#define wqe_rcvoxid_WORD word9
2211 uint32_t word10; 2356 uint32_t word10;
2212#define wqe_ebde_cnt_SHIFT 0 2357#define wqe_ebde_cnt_SHIFT 0
2213#define wqe_ebde_cnt_MASK 0x00000007 2358#define wqe_ebde_cnt_MASK 0x0000000f
2214#define wqe_ebde_cnt_WORD word10 2359#define wqe_ebde_cnt_WORD word10
2215#define wqe_lenloc_SHIFT 7 2360#define wqe_lenloc_SHIFT 7
2216#define wqe_lenloc_MASK 0x00000003 2361#define wqe_lenloc_MASK 0x00000003
@@ -2402,7 +2547,6 @@ struct xmit_seq64_wqe {
2402 uint32_t relative_offset; 2547 uint32_t relative_offset;
2403 struct wqe_rctl_dfctl wge_ctl; 2548 struct wqe_rctl_dfctl wge_ctl;
2404 struct wqe_common wqe_com; /* words 6-11 */ 2549 struct wqe_common wqe_com; /* words 6-11 */
2405 /* Note: word10 different REVISIT */
2406 uint32_t xmit_len; 2550 uint32_t xmit_len;
2407 uint32_t rsvd_12_15[3]; 2551 uint32_t rsvd_12_15[3];
2408}; 2552};
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index b3065791f303..462242dcdd0a 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -446,23 +446,25 @@ lpfc_config_port_post(struct lpfc_hba *phba)
446 /* Get the default values for Model Name and Description */ 446 /* Get the default values for Model Name and Description */
447 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 447 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
448 448
449 if ((phba->cfg_link_speed > LINK_SPEED_10G) 449 if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_16G)
450 || ((phba->cfg_link_speed == LINK_SPEED_1G) 450 || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G)
451 && !(phba->lmt & LMT_1Gb)) 451 && !(phba->lmt & LMT_1Gb))
452 || ((phba->cfg_link_speed == LINK_SPEED_2G) 452 || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G)
453 && !(phba->lmt & LMT_2Gb)) 453 && !(phba->lmt & LMT_2Gb))
454 || ((phba->cfg_link_speed == LINK_SPEED_4G) 454 || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G)
455 && !(phba->lmt & LMT_4Gb)) 455 && !(phba->lmt & LMT_4Gb))
456 || ((phba->cfg_link_speed == LINK_SPEED_8G) 456 || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G)
457 && !(phba->lmt & LMT_8Gb)) 457 && !(phba->lmt & LMT_8Gb))
458 || ((phba->cfg_link_speed == LINK_SPEED_10G) 458 || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G)
459 && !(phba->lmt & LMT_10Gb))) { 459 && !(phba->lmt & LMT_10Gb))
460 || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G)
461 && !(phba->lmt & LMT_16Gb))) {
460 /* Reset link speed to auto */ 462 /* Reset link speed to auto */
461 lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT, 463 lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT,
462 "1302 Invalid speed for this board: " 464 "1302 Invalid speed for this board: "
463 "Reset link speed to auto: x%x\n", 465 "Reset link speed to auto: x%x\n",
464 phba->cfg_link_speed); 466 phba->cfg_link_speed);
465 phba->cfg_link_speed = LINK_SPEED_AUTO; 467 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
466 } 468 }
467 469
468 phba->link_state = LPFC_LINK_DOWN; 470 phba->link_state = LPFC_LINK_DOWN;
@@ -648,22 +650,23 @@ lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
648 mb = &pmb->u.mb; 650 mb = &pmb->u.mb;
649 pmb->vport = vport; 651 pmb->vport = vport;
650 652
651 lpfc_init_link(phba, pmb, phba->cfg_topology, 653 lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
652 phba->cfg_link_speed);
653 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 654 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
654 lpfc_set_loopback_flag(phba); 655 lpfc_set_loopback_flag(phba);
655 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 656 rc = lpfc_sli_issue_mbox(phba, pmb, flag);
656 if (rc != MBX_SUCCESS) { 657 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
657 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 658 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
658 "0498 Adapter failed to init, mbxCmd x%x " 659 "0498 Adapter failed to init, mbxCmd x%x "
659 "INIT_LINK, mbxStatus x%x\n", 660 "INIT_LINK, mbxStatus x%x\n",
660 mb->mbxCommand, mb->mbxStatus); 661 mb->mbxCommand, mb->mbxStatus);
661 /* Clear all interrupt enable conditions */ 662 if (phba->sli_rev <= LPFC_SLI_REV3) {
662 writel(0, phba->HCregaddr); 663 /* Clear all interrupt enable conditions */
663 readl(phba->HCregaddr); /* flush */ 664 writel(0, phba->HCregaddr);
664 /* Clear all pending interrupts */ 665 readl(phba->HCregaddr); /* flush */
665 writel(0xffffffff, phba->HAregaddr); 666 /* Clear all pending interrupts */
666 readl(phba->HAregaddr); /* flush */ 667 writel(0xffffffff, phba->HAregaddr);
668 readl(phba->HAregaddr); /* flush */
669 }
667 phba->link_state = LPFC_HBA_ERROR; 670 phba->link_state = LPFC_HBA_ERROR;
668 if (rc != MBX_BUSY || flag == MBX_POLL) 671 if (rc != MBX_BUSY || flag == MBX_POLL)
669 mempool_free(pmb, phba->mbox_mem_pool); 672 mempool_free(pmb, phba->mbox_mem_pool);
@@ -927,6 +930,35 @@ lpfc_hb_timeout(unsigned long ptr)
927} 930}
928 931
929/** 932/**
933 * lpfc_rrq_timeout - The RRQ-timer timeout handler
934 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
935 *
936 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
937 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
938 * work-port-events bitmap and the worker thread is notified. This timeout
939 * event will be used by the worker thread to invoke the actual timeout
940 * handler routine, lpfc_rrq_handler. Any periodical operations will
941 * be performed in the timeout handler and the RRQ timeout event bit shall
942 * be cleared by the worker thread after it has taken the event bitmap out.
943 **/
944static void
945lpfc_rrq_timeout(unsigned long ptr)
946{
947 struct lpfc_hba *phba;
948 uint32_t tmo_posted;
949 unsigned long iflag;
950
951 phba = (struct lpfc_hba *)ptr;
952 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
953 tmo_posted = phba->hba_flag & HBA_RRQ_ACTIVE;
954 if (!tmo_posted)
955 phba->hba_flag |= HBA_RRQ_ACTIVE;
956 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
957 if (!tmo_posted)
958 lpfc_worker_wake_up(phba);
959}
960
961/**
930 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function 962 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
931 * @phba: pointer to lpfc hba data structure. 963 * @phba: pointer to lpfc hba data structure.
932 * @pmboxq: pointer to the driver internal queue element for mailbox command. 964 * @pmboxq: pointer to the driver internal queue element for mailbox command.
@@ -1374,6 +1406,8 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1374 struct lpfc_vport *vport = phba->pport; 1406 struct lpfc_vport *vport = phba->pport;
1375 uint32_t event_data; 1407 uint32_t event_data;
1376 struct Scsi_Host *shost; 1408 struct Scsi_Host *shost;
1409 uint32_t if_type;
1410 struct lpfc_register portstat_reg;
1377 1411
1378 /* If the pci channel is offline, ignore possible errors, since 1412 /* If the pci channel is offline, ignore possible errors, since
1379 * we cannot communicate with the pci card anyway. 1413 * we cannot communicate with the pci card anyway.
@@ -1390,17 +1424,49 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1390 /* For now, the actual action for SLI4 device handling is not 1424 /* For now, the actual action for SLI4 device handling is not
1391 * specified yet, just treated it as adaptor hardware failure 1425 * specified yet, just treated it as adaptor hardware failure
1392 */ 1426 */
1393 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1394 "0143 SLI4 Adapter Hardware Error Data: x%x x%x\n",
1395 phba->work_status[0], phba->work_status[1]);
1396
1397 event_data = FC_REG_DUMP_EVENT; 1427 event_data = FC_REG_DUMP_EVENT;
1398 shost = lpfc_shost_from_vport(vport); 1428 shost = lpfc_shost_from_vport(vport);
1399 fc_host_post_vendor_event(shost, fc_get_event_number(), 1429 fc_host_post_vendor_event(shost, fc_get_event_number(),
1400 sizeof(event_data), (char *) &event_data, 1430 sizeof(event_data), (char *) &event_data,
1401 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1431 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1402 1432
1403 lpfc_sli4_offline_eratt(phba); 1433 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
1434 switch (if_type) {
1435 case LPFC_SLI_INTF_IF_TYPE_0:
1436 lpfc_sli4_offline_eratt(phba);
1437 break;
1438 case LPFC_SLI_INTF_IF_TYPE_2:
1439 portstat_reg.word0 =
1440 readl(phba->sli4_hba.u.if_type2.STATUSregaddr);
1441
1442 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
1443 /* TODO: Register for Overtemp async events. */
1444 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1445 "2889 Port Overtemperature event, "
1446 "taking port\n");
1447 spin_lock_irq(&phba->hbalock);
1448 phba->over_temp_state = HBA_OVER_TEMP;
1449 spin_unlock_irq(&phba->hbalock);
1450 lpfc_sli4_offline_eratt(phba);
1451 return;
1452 }
1453 if (bf_get(lpfc_sliport_status_rn, &portstat_reg)) {
1454 /*
1455 * TODO: Attempt port recovery via a port reset.
1456 * When fully implemented, the driver should
1457 * attempt to recover the port here and return.
1458 * For now, log an error and take the port offline.
1459 */
1460 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1461 "2887 Port Error: Attempting "
1462 "Port Recovery\n");
1463 }
1464 lpfc_sli4_offline_eratt(phba);
1465 break;
1466 case LPFC_SLI_INTF_IF_TYPE_1:
1467 default:
1468 break;
1469 }
1404} 1470}
1405 1471
1406/** 1472/**
@@ -1459,8 +1525,8 @@ lpfc_handle_latt(struct lpfc_hba *phba)
1459 lpfc_els_flush_all_cmd(phba); 1525 lpfc_els_flush_all_cmd(phba);
1460 1526
1461 psli->slistat.link_event++; 1527 psli->slistat.link_event++;
1462 lpfc_read_la(phba, pmb, mp); 1528 lpfc_read_topology(phba, pmb, mp);
1463 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la; 1529 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
1464 pmb->vport = vport; 1530 pmb->vport = vport;
1465 /* Block ELS IOCBs until we have processed this mbox command */ 1531 /* Block ELS IOCBs until we have processed this mbox command */
1466 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 1532 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
@@ -1853,6 +1919,14 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1853 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O", 1919 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
1854 "Fibre Channel Adapter"}; 1920 "Fibre Channel Adapter"};
1855 break; 1921 break;
1922 case PCI_DEVICE_ID_LANCER_FC:
1923 oneConnect = 1;
1924 m = (typeof(m)){"Undefined", "PCIe", "Fibre Channel Adapter"};
1925 break;
1926 case PCI_DEVICE_ID_LANCER_FCOE:
1927 oneConnect = 1;
1928 m = (typeof(m)){"Undefined", "PCIe", "FCoE"};
1929 break;
1856 default: 1930 default:
1857 m = (typeof(m)){"Unknown", "", ""}; 1931 m = (typeof(m)){"Unknown", "", ""};
1858 break; 1932 break;
@@ -2943,63 +3017,6 @@ lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
2943} 3017}
2944 3018
2945/** 3019/**
2946 * lpfc_sli4_fw_cfg_check - Read the firmware config and verify FCoE support
2947 * @phba: pointer to lpfc hba data structure.
2948 *
2949 * This function uses the QUERY_FW_CFG mailbox command to determine if the
2950 * firmware loaded supports FCoE. A return of zero indicates that the mailbox
2951 * was successful and the firmware supports FCoE. Any other return indicates
2952 * a error. It is assumed that this function will be called before interrupts
2953 * are enabled.
2954 **/
2955static int
2956lpfc_sli4_fw_cfg_check(struct lpfc_hba *phba)
2957{
2958 int rc = 0;
2959 LPFC_MBOXQ_t *mboxq;
2960 struct lpfc_mbx_query_fw_cfg *query_fw_cfg;
2961 uint32_t length;
2962 uint32_t shdr_status, shdr_add_status;
2963
2964 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2965 if (!mboxq) {
2966 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2967 "2621 Failed to allocate mbox for "
2968 "query firmware config cmd\n");
2969 return -ENOMEM;
2970 }
2971 query_fw_cfg = &mboxq->u.mqe.un.query_fw_cfg;
2972 length = (sizeof(struct lpfc_mbx_query_fw_cfg) -
2973 sizeof(struct lpfc_sli4_cfg_mhdr));
2974 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
2975 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
2976 length, LPFC_SLI4_MBX_EMBED);
2977 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
2978 /* The IOCTL status is embedded in the mailbox subheader. */
2979 shdr_status = bf_get(lpfc_mbox_hdr_status,
2980 &query_fw_cfg->header.cfg_shdr.response);
2981 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
2982 &query_fw_cfg->header.cfg_shdr.response);
2983 if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
2984 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2985 "2622 Query Firmware Config failed "
2986 "mbx status x%x, status x%x add_status x%x\n",
2987 rc, shdr_status, shdr_add_status);
2988 return -EINVAL;
2989 }
2990 if (!bf_get(lpfc_function_mode_fcoe_i, query_fw_cfg)) {
2991 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2992 "2623 FCoE Function not supported by firmware. "
2993 "Function mode = %08x\n",
2994 query_fw_cfg->function_mode);
2995 return -EINVAL;
2996 }
2997 if (rc != MBX_TIMEOUT)
2998 mempool_free(mboxq, phba->mbox_mem_pool);
2999 return 0;
3000}
3001
3002/**
3003 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code 3020 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
3004 * @phba: pointer to lpfc hba data structure. 3021 * @phba: pointer to lpfc hba data structure.
3005 * @acqe_link: pointer to the async link completion queue entry. 3022 * @acqe_link: pointer to the async link completion queue entry.
@@ -3051,20 +3068,20 @@ lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
3051 switch (bf_get(lpfc_acqe_link_status, acqe_link)) { 3068 switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
3052 case LPFC_ASYNC_LINK_STATUS_DOWN: 3069 case LPFC_ASYNC_LINK_STATUS_DOWN:
3053 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN: 3070 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
3054 att_type = AT_LINK_DOWN; 3071 att_type = LPFC_ATT_LINK_DOWN;
3055 break; 3072 break;
3056 case LPFC_ASYNC_LINK_STATUS_UP: 3073 case LPFC_ASYNC_LINK_STATUS_UP:
3057 /* Ignore physical link up events - wait for logical link up */ 3074 /* Ignore physical link up events - wait for logical link up */
3058 att_type = AT_RESERVED; 3075 att_type = LPFC_ATT_RESERVED;
3059 break; 3076 break;
3060 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP: 3077 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
3061 att_type = AT_LINK_UP; 3078 att_type = LPFC_ATT_LINK_UP;
3062 break; 3079 break;
3063 default: 3080 default:
3064 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3081 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3065 "0399 Invalid link attention type: x%x\n", 3082 "0399 Invalid link attention type: x%x\n",
3066 bf_get(lpfc_acqe_link_status, acqe_link)); 3083 bf_get(lpfc_acqe_link_status, acqe_link));
3067 att_type = AT_RESERVED; 3084 att_type = LPFC_ATT_RESERVED;
3068 break; 3085 break;
3069 } 3086 }
3070 return att_type; 3087 return att_type;
@@ -3088,36 +3105,32 @@ lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
3088 3105
3089 switch (bf_get(lpfc_acqe_link_speed, acqe_link)) { 3106 switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
3090 case LPFC_ASYNC_LINK_SPEED_ZERO: 3107 case LPFC_ASYNC_LINK_SPEED_ZERO:
3091 link_speed = LA_UNKNW_LINK;
3092 break;
3093 case LPFC_ASYNC_LINK_SPEED_10MBPS: 3108 case LPFC_ASYNC_LINK_SPEED_10MBPS:
3094 link_speed = LA_UNKNW_LINK;
3095 break;
3096 case LPFC_ASYNC_LINK_SPEED_100MBPS: 3109 case LPFC_ASYNC_LINK_SPEED_100MBPS:
3097 link_speed = LA_UNKNW_LINK; 3110 link_speed = LPFC_LINK_SPEED_UNKNOWN;
3098 break; 3111 break;
3099 case LPFC_ASYNC_LINK_SPEED_1GBPS: 3112 case LPFC_ASYNC_LINK_SPEED_1GBPS:
3100 link_speed = LA_1GHZ_LINK; 3113 link_speed = LPFC_LINK_SPEED_1GHZ;
3101 break; 3114 break;
3102 case LPFC_ASYNC_LINK_SPEED_10GBPS: 3115 case LPFC_ASYNC_LINK_SPEED_10GBPS:
3103 link_speed = LA_10GHZ_LINK; 3116 link_speed = LPFC_LINK_SPEED_10GHZ;
3104 break; 3117 break;
3105 default: 3118 default:
3106 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3119 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3107 "0483 Invalid link-attention link speed: x%x\n", 3120 "0483 Invalid link-attention link speed: x%x\n",
3108 bf_get(lpfc_acqe_link_speed, acqe_link)); 3121 bf_get(lpfc_acqe_link_speed, acqe_link));
3109 link_speed = LA_UNKNW_LINK; 3122 link_speed = LPFC_LINK_SPEED_UNKNOWN;
3110 break; 3123 break;
3111 } 3124 }
3112 return link_speed; 3125 return link_speed;
3113} 3126}
3114 3127
3115/** 3128/**
3116 * lpfc_sli4_async_link_evt - Process the asynchronous link event 3129 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
3117 * @phba: pointer to lpfc hba data structure. 3130 * @phba: pointer to lpfc hba data structure.
3118 * @acqe_link: pointer to the async link completion queue entry. 3131 * @acqe_link: pointer to the async link completion queue entry.
3119 * 3132 *
3120 * This routine is to handle the SLI4 asynchronous link event. 3133 * This routine is to handle the SLI4 asynchronous FCoE link event.
3121 **/ 3134 **/
3122static void 3135static void
3123lpfc_sli4_async_link_evt(struct lpfc_hba *phba, 3136lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
@@ -3126,11 +3139,12 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
3126 struct lpfc_dmabuf *mp; 3139 struct lpfc_dmabuf *mp;
3127 LPFC_MBOXQ_t *pmb; 3140 LPFC_MBOXQ_t *pmb;
3128 MAILBOX_t *mb; 3141 MAILBOX_t *mb;
3129 READ_LA_VAR *la; 3142 struct lpfc_mbx_read_top *la;
3130 uint8_t att_type; 3143 uint8_t att_type;
3144 int rc;
3131 3145
3132 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link); 3146 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
3133 if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP) 3147 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
3134 return; 3148 return;
3135 phba->fcoe_eventtag = acqe_link->event_tag; 3149 phba->fcoe_eventtag = acqe_link->event_tag;
3136 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3150 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -3161,45 +3175,168 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
3161 /* Update link event statistics */ 3175 /* Update link event statistics */
3162 phba->sli.slistat.link_event++; 3176 phba->sli.slistat.link_event++;
3163 3177
3164 /* Create pseudo lpfc_handle_latt mailbox command from link ACQE */ 3178 /* Create lpfc_handle_latt mailbox command from link ACQE */
3165 lpfc_read_la(phba, pmb, mp); 3179 lpfc_read_topology(phba, pmb, mp);
3180 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
3166 pmb->vport = phba->pport; 3181 pmb->vport = phba->pport;
3167 3182
3183 /* Keep the link status for extra SLI4 state machine reference */
3184 phba->sli4_hba.link_state.speed =
3185 bf_get(lpfc_acqe_link_speed, acqe_link);
3186 phba->sli4_hba.link_state.duplex =
3187 bf_get(lpfc_acqe_link_duplex, acqe_link);
3188 phba->sli4_hba.link_state.status =
3189 bf_get(lpfc_acqe_link_status, acqe_link);
3190 phba->sli4_hba.link_state.type =
3191 bf_get(lpfc_acqe_link_type, acqe_link);
3192 phba->sli4_hba.link_state.number =
3193 bf_get(lpfc_acqe_link_number, acqe_link);
3194 phba->sli4_hba.link_state.fault =
3195 bf_get(lpfc_acqe_link_fault, acqe_link);
3196 phba->sli4_hba.link_state.logical_speed =
3197 bf_get(lpfc_acqe_logical_link_speed, acqe_link);
3198 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3199 "2900 Async FCoE Link event - Speed:%dGBit duplex:x%x "
3200 "LA Type:x%x Port Type:%d Port Number:%d Logical "
3201 "speed:%dMbps Fault:%d\n",
3202 phba->sli4_hba.link_state.speed,
3203 phba->sli4_hba.link_state.topology,
3204 phba->sli4_hba.link_state.status,
3205 phba->sli4_hba.link_state.type,
3206 phba->sli4_hba.link_state.number,
3207 phba->sli4_hba.link_state.logical_speed * 10,
3208 phba->sli4_hba.link_state.fault);
3209 /*
3210 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
3211 * topology info. Note: Optional for non FC-AL ports.
3212 */
3213 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
3214 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3215 if (rc == MBX_NOT_FINISHED)
3216 goto out_free_dmabuf;
3217 return;
3218 }
3219 /*
3220 * For FCoE Mode: fill in all the topology information we need and call
3221 * the READ_TOPOLOGY completion routine to continue without actually
3222 * sending the READ_TOPOLOGY mailbox command to the port.
3223 */
3168 /* Parse and translate status field */ 3224 /* Parse and translate status field */
3169 mb = &pmb->u.mb; 3225 mb = &pmb->u.mb;
3170 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link); 3226 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
3171 3227
3172 /* Parse and translate link attention fields */ 3228 /* Parse and translate link attention fields */
3173 la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA; 3229 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
3174 la->eventTag = acqe_link->event_tag; 3230 la->eventTag = acqe_link->event_tag;
3175 la->attType = att_type; 3231 bf_set(lpfc_mbx_read_top_att_type, la, att_type);
3176 la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link); 3232 bf_set(lpfc_mbx_read_top_link_spd, la,
3233 lpfc_sli4_parse_latt_link_speed(phba, acqe_link));
3177 3234
3178 /* Fake the the following irrelvant fields */ 3235 /* Fake the the following irrelvant fields */
3179 la->topology = TOPOLOGY_PT_PT; 3236 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
3180 la->granted_AL_PA = 0; 3237 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
3181 la->il = 0; 3238 bf_set(lpfc_mbx_read_top_il, la, 0);
3182 la->pb = 0; 3239 bf_set(lpfc_mbx_read_top_pb, la, 0);
3183 la->fa = 0; 3240 bf_set(lpfc_mbx_read_top_fa, la, 0);
3184 la->mm = 0; 3241 bf_set(lpfc_mbx_read_top_mm, la, 0);
3242
3243 /* Invoke the lpfc_handle_latt mailbox command callback function */
3244 lpfc_mbx_cmpl_read_topology(phba, pmb);
3245
3246 return;
3185 3247
3248out_free_dmabuf:
3249 kfree(mp);
3250out_free_pmb:
3251 mempool_free(pmb, phba->mbox_mem_pool);
3252}
3253
3254/**
3255 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
3256 * @phba: pointer to lpfc hba data structure.
3257 * @acqe_fc: pointer to the async fc completion queue entry.
3258 *
3259 * This routine is to handle the SLI4 asynchronous FC event. It will simply log
3260 * that the event was received and then issue a read_topology mailbox command so
3261 * that the rest of the driver will treat it the same as SLI3.
3262 **/
3263static void
3264lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
3265{
3266 struct lpfc_dmabuf *mp;
3267 LPFC_MBOXQ_t *pmb;
3268 int rc;
3269
3270 if (bf_get(lpfc_trailer_type, acqe_fc) !=
3271 LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
3272 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3273 "2895 Non FC link Event detected.(%d)\n",
3274 bf_get(lpfc_trailer_type, acqe_fc));
3275 return;
3276 }
3186 /* Keep the link status for extra SLI4 state machine reference */ 3277 /* Keep the link status for extra SLI4 state machine reference */
3187 phba->sli4_hba.link_state.speed = 3278 phba->sli4_hba.link_state.speed =
3188 bf_get(lpfc_acqe_link_speed, acqe_link); 3279 bf_get(lpfc_acqe_fc_la_speed, acqe_fc);
3189 phba->sli4_hba.link_state.duplex = 3280 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
3190 bf_get(lpfc_acqe_link_duplex, acqe_link); 3281 phba->sli4_hba.link_state.topology =
3282 bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
3191 phba->sli4_hba.link_state.status = 3283 phba->sli4_hba.link_state.status =
3192 bf_get(lpfc_acqe_link_status, acqe_link); 3284 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
3193 phba->sli4_hba.link_state.physical = 3285 phba->sli4_hba.link_state.type =
3194 bf_get(lpfc_acqe_link_physical, acqe_link); 3286 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
3287 phba->sli4_hba.link_state.number =
3288 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
3195 phba->sli4_hba.link_state.fault = 3289 phba->sli4_hba.link_state.fault =
3196 bf_get(lpfc_acqe_link_fault, acqe_link); 3290 bf_get(lpfc_acqe_link_fault, acqe_fc);
3197 phba->sli4_hba.link_state.logical_speed = 3291 phba->sli4_hba.link_state.logical_speed =
3198 bf_get(lpfc_acqe_qos_link_speed, acqe_link); 3292 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc);
3293 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3294 "2896 Async FC event - Speed:%dGBaud Topology:x%x "
3295 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
3296 "%dMbps Fault:%d\n",
3297 phba->sli4_hba.link_state.speed,
3298 phba->sli4_hba.link_state.topology,
3299 phba->sli4_hba.link_state.status,
3300 phba->sli4_hba.link_state.type,
3301 phba->sli4_hba.link_state.number,
3302 phba->sli4_hba.link_state.logical_speed * 10,
3303 phba->sli4_hba.link_state.fault);
3304 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3305 if (!pmb) {
3306 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3307 "2897 The mboxq allocation failed\n");
3308 return;
3309 }
3310 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3311 if (!mp) {
3312 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3313 "2898 The lpfc_dmabuf allocation failed\n");
3314 goto out_free_pmb;
3315 }
3316 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
3317 if (!mp->virt) {
3318 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3319 "2899 The mbuf allocation failed\n");
3320 goto out_free_dmabuf;
3321 }
3199 3322
3200 /* Invoke the lpfc_handle_latt mailbox command callback function */ 3323 /* Cleanup any outstanding ELS commands */
3201 lpfc_mbx_cmpl_read_la(phba, pmb); 3324 lpfc_els_flush_all_cmd(phba);
3325
3326 /* Block ELS IOCBs until we have done process link event */
3327 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
3328
3329 /* Update link event statistics */
3330 phba->sli.slistat.link_event++;
3331
3332 /* Create lpfc_handle_latt mailbox command from link ACQE */
3333 lpfc_read_topology(phba, pmb, mp);
3334 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
3335 pmb->vport = phba->pport;
3202 3336
3337 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3338 if (rc == MBX_NOT_FINISHED)
3339 goto out_free_dmabuf;
3203 return; 3340 return;
3204 3341
3205out_free_dmabuf: 3342out_free_dmabuf:
@@ -3209,6 +3346,24 @@ out_free_pmb:
3209} 3346}
3210 3347
3211/** 3348/**
3349 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
3350 * @phba: pointer to lpfc hba data structure.
3351 * @acqe_fc: pointer to the async SLI completion queue entry.
3352 *
3353 * This routine is to handle the SLI4 asynchronous SLI events.
3354 **/
3355static void
3356lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
3357{
3358 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3359 "2901 Async SLI event - Event Data1:x%08x Event Data2:"
3360 "x%08x SLI Event Type:%d",
3361 acqe_sli->event_data1, acqe_sli->event_data2,
3362 bf_get(lpfc_trailer_type, acqe_sli));
3363 return;
3364}
3365
3366/**
3212 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport 3367 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
3213 * @vport: pointer to vport data structure. 3368 * @vport: pointer to vport data structure.
3214 * 3369 *
@@ -3247,10 +3402,12 @@ lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
3247 if (!ndlp) 3402 if (!ndlp)
3248 return 0; 3403 return 0;
3249 } 3404 }
3250 if (phba->pport->port_state < LPFC_FLOGI) 3405 if ((phba->pport->port_state < LPFC_FLOGI) &&
3406 (phba->pport->port_state != LPFC_VPORT_FAILED))
3251 return NULL; 3407 return NULL;
3252 /* If virtual link is not yet instantiated ignore CVL */ 3408 /* If virtual link is not yet instantiated ignore CVL */
3253 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)) 3409 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
3410 && (vport->port_state != LPFC_VPORT_FAILED))
3254 return NULL; 3411 return NULL;
3255 shost = lpfc_shost_from_vport(vport); 3412 shost = lpfc_shost_from_vport(vport);
3256 if (!shost) 3413 if (!shost)
@@ -3285,17 +3442,17 @@ lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
3285} 3442}
3286 3443
3287/** 3444/**
3288 * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event 3445 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
3289 * @phba: pointer to lpfc hba data structure. 3446 * @phba: pointer to lpfc hba data structure.
3290 * @acqe_link: pointer to the async fcoe completion queue entry. 3447 * @acqe_link: pointer to the async fcoe completion queue entry.
3291 * 3448 *
3292 * This routine is to handle the SLI4 asynchronous fcoe event. 3449 * This routine is to handle the SLI4 asynchronous fcoe event.
3293 **/ 3450 **/
3294static void 3451static void
3295lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, 3452lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
3296 struct lpfc_acqe_fcoe *acqe_fcoe) 3453 struct lpfc_acqe_fip *acqe_fip)
3297{ 3454{
3298 uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe); 3455 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
3299 int rc; 3456 int rc;
3300 struct lpfc_vport *vport; 3457 struct lpfc_vport *vport;
3301 struct lpfc_nodelist *ndlp; 3458 struct lpfc_nodelist *ndlp;
@@ -3304,25 +3461,25 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3304 struct lpfc_vport **vports; 3461 struct lpfc_vport **vports;
3305 int i; 3462 int i;
3306 3463
3307 phba->fc_eventTag = acqe_fcoe->event_tag; 3464 phba->fc_eventTag = acqe_fip->event_tag;
3308 phba->fcoe_eventtag = acqe_fcoe->event_tag; 3465 phba->fcoe_eventtag = acqe_fip->event_tag;
3309 switch (event_type) { 3466 switch (event_type) {
3310 case LPFC_FCOE_EVENT_TYPE_NEW_FCF: 3467 case LPFC_FIP_EVENT_TYPE_NEW_FCF:
3311 case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD: 3468 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
3312 if (event_type == LPFC_FCOE_EVENT_TYPE_NEW_FCF) 3469 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
3313 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 3470 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3314 LOG_DISCOVERY, 3471 LOG_DISCOVERY,
3315 "2546 New FCF event, evt_tag:x%x, " 3472 "2546 New FCF event, evt_tag:x%x, "
3316 "index:x%x\n", 3473 "index:x%x\n",
3317 acqe_fcoe->event_tag, 3474 acqe_fip->event_tag,
3318 acqe_fcoe->index); 3475 acqe_fip->index);
3319 else 3476 else
3320 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | 3477 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
3321 LOG_DISCOVERY, 3478 LOG_DISCOVERY,
3322 "2788 FCF param modified event, " 3479 "2788 FCF param modified event, "
3323 "evt_tag:x%x, index:x%x\n", 3480 "evt_tag:x%x, index:x%x\n",
3324 acqe_fcoe->event_tag, 3481 acqe_fip->event_tag,
3325 acqe_fcoe->index); 3482 acqe_fip->index);
3326 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 3483 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3327 /* 3484 /*
3328 * During period of FCF discovery, read the FCF 3485 * During period of FCF discovery, read the FCF
@@ -3333,8 +3490,8 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3333 LOG_DISCOVERY, 3490 LOG_DISCOVERY,
3334 "2779 Read FCF (x%x) for updating " 3491 "2779 Read FCF (x%x) for updating "
3335 "roundrobin FCF failover bmask\n", 3492 "roundrobin FCF failover bmask\n",
3336 acqe_fcoe->index); 3493 acqe_fip->index);
3337 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index); 3494 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
3338 } 3495 }
3339 3496
3340 /* If the FCF discovery is in progress, do nothing. */ 3497 /* If the FCF discovery is in progress, do nothing. */
@@ -3360,7 +3517,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3360 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 3517 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3361 "2770 Start FCF table scan per async FCF " 3518 "2770 Start FCF table scan per async FCF "
3362 "event, evt_tag:x%x, index:x%x\n", 3519 "event, evt_tag:x%x, index:x%x\n",
3363 acqe_fcoe->event_tag, acqe_fcoe->index); 3520 acqe_fip->event_tag, acqe_fip->index);
3364 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, 3521 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
3365 LPFC_FCOE_FCF_GET_FIRST); 3522 LPFC_FCOE_FCF_GET_FIRST);
3366 if (rc) 3523 if (rc)
@@ -3369,17 +3526,17 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3369 "command failed (x%x)\n", rc); 3526 "command failed (x%x)\n", rc);
3370 break; 3527 break;
3371 3528
3372 case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL: 3529 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
3373 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3530 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3374 "2548 FCF Table full count 0x%x tag 0x%x\n", 3531 "2548 FCF Table full count 0x%x tag 0x%x\n",
3375 bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe), 3532 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
3376 acqe_fcoe->event_tag); 3533 acqe_fip->event_tag);
3377 break; 3534 break;
3378 3535
3379 case LPFC_FCOE_EVENT_TYPE_FCF_DEAD: 3536 case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
3380 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3537 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3381 "2549 FCF (x%x) disconnected from network, " 3538 "2549 FCF (x%x) disconnected from network, "
3382 "tag:x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag); 3539 "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
3383 /* 3540 /*
3384 * If we are in the middle of FCF failover process, clear 3541 * If we are in the middle of FCF failover process, clear
3385 * the corresponding FCF bit in the roundrobin bitmap. 3542 * the corresponding FCF bit in the roundrobin bitmap.
@@ -3388,13 +3545,13 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3388 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 3545 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3389 spin_unlock_irq(&phba->hbalock); 3546 spin_unlock_irq(&phba->hbalock);
3390 /* Update FLOGI FCF failover eligible FCF bmask */ 3547 /* Update FLOGI FCF failover eligible FCF bmask */
3391 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fcoe->index); 3548 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
3392 break; 3549 break;
3393 } 3550 }
3394 spin_unlock_irq(&phba->hbalock); 3551 spin_unlock_irq(&phba->hbalock);
3395 3552
3396 /* If the event is not for currently used fcf do nothing */ 3553 /* If the event is not for currently used fcf do nothing */
3397 if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index) 3554 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
3398 break; 3555 break;
3399 3556
3400 /* 3557 /*
@@ -3411,7 +3568,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3411 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 3568 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3412 "2771 Start FCF fast failover process due to " 3569 "2771 Start FCF fast failover process due to "
3413 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x " 3570 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
3414 "\n", acqe_fcoe->event_tag, acqe_fcoe->index); 3571 "\n", acqe_fip->event_tag, acqe_fip->index);
3415 rc = lpfc_sli4_redisc_fcf_table(phba); 3572 rc = lpfc_sli4_redisc_fcf_table(phba);
3416 if (rc) { 3573 if (rc) {
3417 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 3574 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
@@ -3438,12 +3595,12 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3438 lpfc_sli4_perform_all_vport_cvl(phba); 3595 lpfc_sli4_perform_all_vport_cvl(phba);
3439 } 3596 }
3440 break; 3597 break;
3441 case LPFC_FCOE_EVENT_TYPE_CVL: 3598 case LPFC_FIP_EVENT_TYPE_CVL:
3442 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3599 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3443 "2718 Clear Virtual Link Received for VPI 0x%x" 3600 "2718 Clear Virtual Link Received for VPI 0x%x"
3444 " tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag); 3601 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
3445 vport = lpfc_find_vport_by_vpid(phba, 3602 vport = lpfc_find_vport_by_vpid(phba,
3446 acqe_fcoe->index - phba->vpi_base); 3603 acqe_fip->index - phba->vpi_base);
3447 ndlp = lpfc_sli4_perform_vport_cvl(vport); 3604 ndlp = lpfc_sli4_perform_vport_cvl(vport);
3448 if (!ndlp) 3605 if (!ndlp)
3449 break; 3606 break;
@@ -3494,7 +3651,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3494 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 3651 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3495 LOG_DISCOVERY, 3652 LOG_DISCOVERY,
3496 "2773 Start FCF failover per CVL, " 3653 "2773 Start FCF failover per CVL, "
3497 "evt_tag:x%x\n", acqe_fcoe->event_tag); 3654 "evt_tag:x%x\n", acqe_fip->event_tag);
3498 rc = lpfc_sli4_redisc_fcf_table(phba); 3655 rc = lpfc_sli4_redisc_fcf_table(phba);
3499 if (rc) { 3656 if (rc) {
3500 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 3657 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
@@ -3522,7 +3679,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3522 default: 3679 default:
3523 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3680 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3524 "0288 Unknown FCoE event type 0x%x event tag " 3681 "0288 Unknown FCoE event type 0x%x event tag "
3525 "0x%x\n", event_type, acqe_fcoe->event_tag); 3682 "0x%x\n", event_type, acqe_fip->event_tag);
3526 break; 3683 break;
3527 } 3684 }
3528} 3685}
@@ -3599,8 +3756,7 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
3599 &cq_event->cqe.acqe_link); 3756 &cq_event->cqe.acqe_link);
3600 break; 3757 break;
3601 case LPFC_TRAILER_CODE_FCOE: 3758 case LPFC_TRAILER_CODE_FCOE:
3602 lpfc_sli4_async_fcoe_evt(phba, 3759 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
3603 &cq_event->cqe.acqe_fcoe);
3604 break; 3760 break;
3605 case LPFC_TRAILER_CODE_DCBX: 3761 case LPFC_TRAILER_CODE_DCBX:
3606 lpfc_sli4_async_dcbx_evt(phba, 3762 lpfc_sli4_async_dcbx_evt(phba,
@@ -3610,6 +3766,12 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
3610 lpfc_sli4_async_grp5_evt(phba, 3766 lpfc_sli4_async_grp5_evt(phba,
3611 &cq_event->cqe.acqe_grp5); 3767 &cq_event->cqe.acqe_grp5);
3612 break; 3768 break;
3769 case LPFC_TRAILER_CODE_FC:
3770 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
3771 break;
3772 case LPFC_TRAILER_CODE_SLI:
3773 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
3774 break;
3613 default: 3775 default:
3614 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3776 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3615 "1804 Invalid asynchrous event code: " 3777 "1804 Invalid asynchrous event code: "
@@ -3948,7 +4110,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3948 int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size; 4110 int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
3949 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; 4111 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
3950 struct lpfc_mqe *mqe; 4112 struct lpfc_mqe *mqe;
3951 int longs; 4113 int longs, sli_family;
3952 4114
3953 /* Before proceed, wait for POST done and device ready */ 4115 /* Before proceed, wait for POST done and device ready */
3954 rc = lpfc_sli4_post_status_check(phba); 4116 rc = lpfc_sli4_post_status_check(phba);
@@ -3963,6 +4125,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3963 init_timer(&phba->hb_tmofunc); 4125 init_timer(&phba->hb_tmofunc);
3964 phba->hb_tmofunc.function = lpfc_hb_timeout; 4126 phba->hb_tmofunc.function = lpfc_hb_timeout;
3965 phba->hb_tmofunc.data = (unsigned long)phba; 4127 phba->hb_tmofunc.data = (unsigned long)phba;
4128 init_timer(&phba->rrq_tmr);
4129 phba->rrq_tmr.function = lpfc_rrq_timeout;
4130 phba->rrq_tmr.data = (unsigned long)phba;
3966 4131
3967 psli = &phba->sli; 4132 psli = &phba->sli;
3968 /* MBOX heartbeat timer */ 4133 /* MBOX heartbeat timer */
@@ -4010,12 +4175,22 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4010 */ 4175 */
4011 buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) + 4176 buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
4012 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge))); 4177 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)));
4013 /* Feature Level 1 hardware is limited to 2 pages */ 4178
4014 if ((bf_get(lpfc_sli_intf_featurelevel1, &phba->sli4_hba.sli_intf) == 4179 sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
4015 LPFC_SLI_INTF_FEATURELEVEL1_1)) 4180 max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
4016 max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE; 4181 switch (sli_family) {
4017 else 4182 case LPFC_SLI_INTF_FAMILY_BE2:
4018 max_buf_size = LPFC_SLI4_MAX_BUF_SIZE; 4183 case LPFC_SLI_INTF_FAMILY_BE3:
4184 /* There is a single hint for BE - 2 pages per BPL. */
4185 if (bf_get(lpfc_sli_intf_sli_hint1, &phba->sli4_hba.sli_intf) ==
4186 LPFC_SLI_INTF_SLI_HINT1_1)
4187 max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
4188 break;
4189 case LPFC_SLI_INTF_FAMILY_LNCR_A0:
4190 case LPFC_SLI_INTF_FAMILY_LNCR_B0:
4191 default:
4192 break;
4193 }
4019 for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE; 4194 for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
4020 dma_buf_size < max_buf_size && buf_size > dma_buf_size; 4195 dma_buf_size < max_buf_size && buf_size > dma_buf_size;
4021 dma_buf_size = dma_buf_size << 1) 4196 dma_buf_size = dma_buf_size << 1)
@@ -4070,6 +4245,14 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4070 if (rc) 4245 if (rc)
4071 return -ENOMEM; 4246 return -ENOMEM;
4072 4247
4248 /* IF Type 2 ports get initialized now. */
4249 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
4250 LPFC_SLI_INTF_IF_TYPE_2) {
4251 rc = lpfc_pci_function_reset(phba);
4252 if (unlikely(rc))
4253 return -ENODEV;
4254 }
4255
4073 /* Create the bootstrap mailbox command */ 4256 /* Create the bootstrap mailbox command */
4074 rc = lpfc_create_bootstrap_mbox(phba); 4257 rc = lpfc_create_bootstrap_mbox(phba);
4075 if (unlikely(rc)) 4258 if (unlikely(rc))
@@ -4080,19 +4263,18 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4080 if (unlikely(rc)) 4263 if (unlikely(rc))
4081 goto out_free_bsmbx; 4264 goto out_free_bsmbx;
4082 4265
4083 rc = lpfc_sli4_fw_cfg_check(phba);
4084 if (unlikely(rc))
4085 goto out_free_bsmbx;
4086
4087 /* Set up the hba's configuration parameters. */ 4266 /* Set up the hba's configuration parameters. */
4088 rc = lpfc_sli4_read_config(phba); 4267 rc = lpfc_sli4_read_config(phba);
4089 if (unlikely(rc)) 4268 if (unlikely(rc))
4090 goto out_free_bsmbx; 4269 goto out_free_bsmbx;
4091 4270
4092 /* Perform a function reset */ 4271 /* IF Type 0 ports get initialized now. */
4093 rc = lpfc_pci_function_reset(phba); 4272 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
4094 if (unlikely(rc)) 4273 LPFC_SLI_INTF_IF_TYPE_0) {
4095 goto out_free_bsmbx; 4274 rc = lpfc_pci_function_reset(phba);
4275 if (unlikely(rc))
4276 goto out_free_bsmbx;
4277 }
4096 4278
4097 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 4279 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4098 GFP_KERNEL); 4280 GFP_KERNEL);
@@ -5190,97 +5372,183 @@ lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
5190int 5372int
5191lpfc_sli4_post_status_check(struct lpfc_hba *phba) 5373lpfc_sli4_post_status_check(struct lpfc_hba *phba)
5192{ 5374{
5193 struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg; 5375 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
5194 int i, port_error = -ENODEV; 5376 struct lpfc_register reg_data;
5377 int i, port_error = 0;
5378 uint32_t if_type;
5195 5379
5196 if (!phba->sli4_hba.STAregaddr) 5380 if (!phba->sli4_hba.PSMPHRregaddr)
5197 return -ENODEV; 5381 return -ENODEV;
5198 5382
5199 /* Wait up to 30 seconds for the SLI Port POST done and ready */ 5383 /* Wait up to 30 seconds for the SLI Port POST done and ready */
5200 for (i = 0; i < 3000; i++) { 5384 for (i = 0; i < 3000; i++) {
5201 sta_reg.word0 = readl(phba->sli4_hba.STAregaddr); 5385 portsmphr_reg.word0 = readl(phba->sli4_hba.PSMPHRregaddr);
5202 /* Encounter fatal POST error, break out */ 5386 if (bf_get(lpfc_port_smphr_perr, &portsmphr_reg)) {
5203 if (bf_get(lpfc_hst_state_perr, &sta_reg)) { 5387 /* Port has a fatal POST error, break out */
5204 port_error = -ENODEV; 5388 port_error = -ENODEV;
5205 break; 5389 break;
5206 } 5390 }
5207 if (LPFC_POST_STAGE_ARMFW_READY == 5391 if (LPFC_POST_STAGE_PORT_READY ==
5208 bf_get(lpfc_hst_state_port_status, &sta_reg)) { 5392 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
5209 port_error = 0;
5210 break; 5393 break;
5211 }
5212 msleep(10); 5394 msleep(10);
5213 } 5395 }
5214 5396
5215 if (port_error) 5397 /*
5398 * If there was a port error during POST, then don't proceed with
5399 * other register reads as the data may not be valid. Just exit.
5400 */
5401 if (port_error) {
5216 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5402 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5217 "1408 Failure HBA POST Status: sta_reg=0x%x, " 5403 "1408 Port Failed POST - portsmphr=0x%x, "
5218 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, xrom=x%x, " 5404 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
5219 "dl=x%x, pstatus=x%x\n", sta_reg.word0, 5405 "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
5220 bf_get(lpfc_hst_state_perr, &sta_reg), 5406 portsmphr_reg.word0,
5221 bf_get(lpfc_hst_state_sfi, &sta_reg), 5407 bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
5222 bf_get(lpfc_hst_state_nip, &sta_reg), 5408 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
5223 bf_get(lpfc_hst_state_ipc, &sta_reg), 5409 bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
5224 bf_get(lpfc_hst_state_xrom, &sta_reg), 5410 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
5225 bf_get(lpfc_hst_state_dl, &sta_reg), 5411 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
5226 bf_get(lpfc_hst_state_port_status, &sta_reg)); 5412 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
5227 5413 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
5228 /* Log device information */ 5414 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
5229 phba->sli4_hba.sli_intf.word0 = readl(phba->sli4_hba.SLIINTFregaddr); 5415 } else {
5230 if (bf_get(lpfc_sli_intf_valid,
5231 &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_VALID) {
5232 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5416 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5233 "2534 Device Info: ChipType=0x%x, SliRev=0x%x, " 5417 "2534 Device Info: SLIFamily=0x%x, "
5234 "FeatureL1=0x%x, FeatureL2=0x%x\n", 5418 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
5419 "SLIHint_2=0x%x, FT=0x%x\n",
5235 bf_get(lpfc_sli_intf_sli_family, 5420 bf_get(lpfc_sli_intf_sli_family,
5236 &phba->sli4_hba.sli_intf), 5421 &phba->sli4_hba.sli_intf),
5237 bf_get(lpfc_sli_intf_slirev, 5422 bf_get(lpfc_sli_intf_slirev,
5238 &phba->sli4_hba.sli_intf), 5423 &phba->sli4_hba.sli_intf),
5239 bf_get(lpfc_sli_intf_featurelevel1, 5424 bf_get(lpfc_sli_intf_if_type,
5425 &phba->sli4_hba.sli_intf),
5426 bf_get(lpfc_sli_intf_sli_hint1,
5240 &phba->sli4_hba.sli_intf), 5427 &phba->sli4_hba.sli_intf),
5241 bf_get(lpfc_sli_intf_featurelevel2, 5428 bf_get(lpfc_sli_intf_sli_hint2,
5429 &phba->sli4_hba.sli_intf),
5430 bf_get(lpfc_sli_intf_func_type,
5242 &phba->sli4_hba.sli_intf)); 5431 &phba->sli4_hba.sli_intf));
5432 /*
5433 * Check for other Port errors during the initialization
5434 * process. Fail the load if the port did not come up
5435 * correctly.
5436 */
5437 if_type = bf_get(lpfc_sli_intf_if_type,
5438 &phba->sli4_hba.sli_intf);
5439 switch (if_type) {
5440 case LPFC_SLI_INTF_IF_TYPE_0:
5441 phba->sli4_hba.ue_mask_lo =
5442 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
5443 phba->sli4_hba.ue_mask_hi =
5444 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
5445 uerrlo_reg.word0 =
5446 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
5447 uerrhi_reg.word0 =
5448 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
5449 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
5450 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
5451 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5452 "1422 Unrecoverable Error "
5453 "Detected during POST "
5454 "uerr_lo_reg=0x%x, "
5455 "uerr_hi_reg=0x%x, "
5456 "ue_mask_lo_reg=0x%x, "
5457 "ue_mask_hi_reg=0x%x\n",
5458 uerrlo_reg.word0,
5459 uerrhi_reg.word0,
5460 phba->sli4_hba.ue_mask_lo,
5461 phba->sli4_hba.ue_mask_hi);
5462 port_error = -ENODEV;
5463 }
5464 break;
5465 case LPFC_SLI_INTF_IF_TYPE_2:
5466 /* Final checks. The port status should be clean. */
5467 reg_data.word0 =
5468 readl(phba->sli4_hba.u.if_type2.STATUSregaddr);
5469 if (bf_get(lpfc_sliport_status_err, &reg_data)) {
5470 phba->work_status[0] =
5471 readl(phba->sli4_hba.u.if_type2.
5472 ERR1regaddr);
5473 phba->work_status[1] =
5474 readl(phba->sli4_hba.u.if_type2.
5475 ERR2regaddr);
5476 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5477 "2888 Port Error Detected "
5478 "during POST: "
5479 "port status reg 0x%x, "
5480 "port_smphr reg 0x%x, "
5481 "error 1=0x%x, error 2=0x%x\n",
5482 reg_data.word0,
5483 portsmphr_reg.word0,
5484 phba->work_status[0],
5485 phba->work_status[1]);
5486 port_error = -ENODEV;
5487 }
5488 break;
5489 case LPFC_SLI_INTF_IF_TYPE_1:
5490 default:
5491 break;
5492 }
5243 } 5493 }
5244 phba->sli4_hba.ue_mask_lo = readl(phba->sli4_hba.UEMASKLOregaddr);
5245 phba->sli4_hba.ue_mask_hi = readl(phba->sli4_hba.UEMASKHIregaddr);
5246 /* With uncoverable error, log the error message and return error */
5247 uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
5248 uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
5249 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
5250 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
5251 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5252 "1422 HBA Unrecoverable error: "
5253 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
5254 "ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n",
5255 uerrlo_reg.word0, uerrhi_reg.word0,
5256 phba->sli4_hba.ue_mask_lo,
5257 phba->sli4_hba.ue_mask_hi);
5258 return -ENODEV;
5259 }
5260
5261 return port_error; 5494 return port_error;
5262} 5495}
5263 5496
5264/** 5497/**
5265 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map. 5498 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
5266 * @phba: pointer to lpfc hba data structure. 5499 * @phba: pointer to lpfc hba data structure.
5500 * @if_type: The SLI4 interface type getting configured.
5267 * 5501 *
5268 * This routine is invoked to set up SLI4 BAR0 PCI config space register 5502 * This routine is invoked to set up SLI4 BAR0 PCI config space register
5269 * memory map. 5503 * memory map.
5270 **/ 5504 **/
5271static void 5505static void
5272lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba) 5506lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
5273{ 5507{
5274 phba->sli4_hba.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p + 5508 switch (if_type) {
5275 LPFC_UERR_STATUS_LO; 5509 case LPFC_SLI_INTF_IF_TYPE_0:
5276 phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p + 5510 phba->sli4_hba.u.if_type0.UERRLOregaddr =
5277 LPFC_UERR_STATUS_HI; 5511 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
5278 phba->sli4_hba.UEMASKLOregaddr = phba->sli4_hba.conf_regs_memmap_p + 5512 phba->sli4_hba.u.if_type0.UERRHIregaddr =
5279 LPFC_UE_MASK_LO; 5513 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
5280 phba->sli4_hba.UEMASKHIregaddr = phba->sli4_hba.conf_regs_memmap_p + 5514 phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
5281 LPFC_UE_MASK_HI; 5515 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
5282 phba->sli4_hba.SLIINTFregaddr = phba->sli4_hba.conf_regs_memmap_p + 5516 phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
5283 LPFC_SLI_INTF; 5517 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
5518 phba->sli4_hba.SLIINTFregaddr =
5519 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
5520 break;
5521 case LPFC_SLI_INTF_IF_TYPE_2:
5522 phba->sli4_hba.u.if_type2.ERR1regaddr =
5523 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_ERR_1;
5524 phba->sli4_hba.u.if_type2.ERR2regaddr =
5525 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_ERR_2;
5526 phba->sli4_hba.u.if_type2.CTRLregaddr =
5527 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_CNTRL;
5528 phba->sli4_hba.u.if_type2.STATUSregaddr =
5529 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_STATUS;
5530 phba->sli4_hba.SLIINTFregaddr =
5531 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
5532 phba->sli4_hba.PSMPHRregaddr =
5533 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_IF2_SMPHR;
5534 phba->sli4_hba.RQDBregaddr =
5535 phba->sli4_hba.conf_regs_memmap_p + LPFC_RQ_DOORBELL;
5536 phba->sli4_hba.WQDBregaddr =
5537 phba->sli4_hba.conf_regs_memmap_p + LPFC_WQ_DOORBELL;
5538 phba->sli4_hba.EQCQDBregaddr =
5539 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
5540 phba->sli4_hba.MQDBregaddr =
5541 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
5542 phba->sli4_hba.BMBXregaddr =
5543 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
5544 break;
5545 case LPFC_SLI_INTF_IF_TYPE_1:
5546 default:
5547 dev_printk(KERN_ERR, &phba->pcidev->dev,
5548 "FATAL - unsupported SLI4 interface type - %d\n",
5549 if_type);
5550 break;
5551 }
5284} 5552}
5285 5553
5286/** 5554/**
@@ -5293,16 +5561,14 @@ lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)
5293static void 5561static void
5294lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba) 5562lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
5295{ 5563{
5296 5564 phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5297 phba->sli4_hba.STAregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 5565 LPFC_SLIPORT_IF0_SMPHR;
5298 LPFC_HST_STATE;
5299 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 5566 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5300 LPFC_HST_ISR0; 5567 LPFC_HST_ISR0;
5301 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 5568 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5302 LPFC_HST_IMR0; 5569 LPFC_HST_IMR0;
5303 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 5570 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5304 LPFC_HST_ISCR0; 5571 LPFC_HST_ISCR0;
5305 return;
5306} 5572}
5307 5573
5308/** 5574/**
@@ -5542,11 +5808,12 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
5542} 5808}
5543 5809
5544/** 5810/**
5545 * lpfc_dev_endian_order_setup - Notify the port of the host's endian order. 5811 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
5546 * @phba: pointer to lpfc hba data structure. 5812 * @phba: pointer to lpfc hba data structure.
5547 * 5813 *
5548 * This routine is invoked to setup the host-side endian order to the 5814 * This routine is invoked to setup the port-side endian order when
5549 * HBA consistent with the SLI-4 interface spec. 5815 * the port if_type is 0. This routine has no function for other
5816 * if_types.
5550 * 5817 *
5551 * Return codes 5818 * Return codes
5552 * 0 - successful 5819 * 0 - successful
@@ -5557,34 +5824,44 @@ static int
5557lpfc_setup_endian_order(struct lpfc_hba *phba) 5824lpfc_setup_endian_order(struct lpfc_hba *phba)
5558{ 5825{
5559 LPFC_MBOXQ_t *mboxq; 5826 LPFC_MBOXQ_t *mboxq;
5560 uint32_t rc = 0; 5827 uint32_t if_type, rc = 0;
5561 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0, 5828 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
5562 HOST_ENDIAN_HIGH_WORD1}; 5829 HOST_ENDIAN_HIGH_WORD1};
5563 5830
5564 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5831 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
5565 if (!mboxq) { 5832 switch (if_type) {
5566 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5833 case LPFC_SLI_INTF_IF_TYPE_0:
5567 "0492 Unable to allocate memory for issuing " 5834 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
5568 "SLI_CONFIG_SPECIAL mailbox command\n"); 5835 GFP_KERNEL);
5569 return -ENOMEM; 5836 if (!mboxq) {
5570 } 5837 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5838 "0492 Unable to allocate memory for "
5839 "issuing SLI_CONFIG_SPECIAL mailbox "
5840 "command\n");
5841 return -ENOMEM;
5842 }
5571 5843
5572 /* 5844 /*
5573 * The SLI4_CONFIG_SPECIAL mailbox command requires the first two 5845 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
5574 * words to contain special data values and no other data. 5846 * two words to contain special data values and no other data.
5575 */ 5847 */
5576 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); 5848 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
5577 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data)); 5849 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
5578 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5850 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5579 if (rc != MBX_SUCCESS) { 5851 if (rc != MBX_SUCCESS) {
5580 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5852 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5581 "0493 SLI_CONFIG_SPECIAL mailbox failed with " 5853 "0493 SLI_CONFIG_SPECIAL mailbox "
5582 "status x%x\n", 5854 "failed with status x%x\n",
5583 rc); 5855 rc);
5584 rc = -EIO; 5856 rc = -EIO;
5857 }
5858 mempool_free(mboxq, phba->mbox_mem_pool);
5859 break;
5860 case LPFC_SLI_INTF_IF_TYPE_2:
5861 case LPFC_SLI_INTF_IF_TYPE_1:
5862 default:
5863 break;
5585 } 5864 }
5586
5587 mempool_free(mboxq, phba->mbox_mem_pool);
5588 return rc; 5865 return rc;
5589} 5866}
5590 5867
@@ -6416,36 +6693,124 @@ int
6416lpfc_pci_function_reset(struct lpfc_hba *phba) 6693lpfc_pci_function_reset(struct lpfc_hba *phba)
6417{ 6694{
6418 LPFC_MBOXQ_t *mboxq; 6695 LPFC_MBOXQ_t *mboxq;
6419 uint32_t rc = 0; 6696 uint32_t rc = 0, if_type;
6420 uint32_t shdr_status, shdr_add_status; 6697 uint32_t shdr_status, shdr_add_status;
6698 uint32_t rdy_chk, num_resets = 0, reset_again = 0;
6421 union lpfc_sli4_cfg_shdr *shdr; 6699 union lpfc_sli4_cfg_shdr *shdr;
6700 struct lpfc_register reg_data;
6422 6701
6423 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6702 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
6424 if (!mboxq) { 6703 switch (if_type) {
6425 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6704 case LPFC_SLI_INTF_IF_TYPE_0:
6426 "0494 Unable to allocate memory for issuing " 6705 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
6427 "SLI_FUNCTION_RESET mailbox command\n"); 6706 GFP_KERNEL);
6428 return -ENOMEM; 6707 if (!mboxq) {
6429 } 6708 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6709 "0494 Unable to allocate memory for "
6710 "issuing SLI_FUNCTION_RESET mailbox "
6711 "command\n");
6712 return -ENOMEM;
6713 }
6430 6714
6431 /* Set up PCI function reset SLI4_CONFIG mailbox-ioctl command */ 6715 /* Setup PCI function reset mailbox-ioctl command */
6432 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 6716 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
6433 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0, 6717 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
6434 LPFC_SLI4_MBX_EMBED); 6718 LPFC_SLI4_MBX_EMBED);
6435 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6719 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6436 shdr = (union lpfc_sli4_cfg_shdr *) 6720 shdr = (union lpfc_sli4_cfg_shdr *)
6437 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 6721 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
6438 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 6722 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6439 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 6723 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
6440 if (rc != MBX_TIMEOUT) 6724 &shdr->response);
6441 mempool_free(mboxq, phba->mbox_mem_pool); 6725 if (rc != MBX_TIMEOUT)
6442 if (shdr_status || shdr_add_status || rc) { 6726 mempool_free(mboxq, phba->mbox_mem_pool);
6443 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6727 if (shdr_status || shdr_add_status || rc) {
6444 "0495 SLI_FUNCTION_RESET mailbox failed with " 6728 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6445 "status x%x add_status x%x, mbx status x%x\n", 6729 "0495 SLI_FUNCTION_RESET mailbox "
6446 shdr_status, shdr_add_status, rc); 6730 "failed with status x%x add_status x%x,"
6447 rc = -ENXIO; 6731 " mbx status x%x\n",
6732 shdr_status, shdr_add_status, rc);
6733 rc = -ENXIO;
6734 }
6735 break;
6736 case LPFC_SLI_INTF_IF_TYPE_2:
6737 for (num_resets = 0;
6738 num_resets < MAX_IF_TYPE_2_RESETS;
6739 num_resets++) {
6740 reg_data.word0 = 0;
6741 bf_set(lpfc_sliport_ctrl_end, &reg_data,
6742 LPFC_SLIPORT_LITTLE_ENDIAN);
6743 bf_set(lpfc_sliport_ctrl_ip, &reg_data,
6744 LPFC_SLIPORT_INIT_PORT);
6745 writel(reg_data.word0, phba->sli4_hba.u.if_type2.
6746 CTRLregaddr);
6747
6748 /*
6749 * Poll the Port Status Register and wait for RDY for
6750 * up to 10 seconds. If the port doesn't respond, treat
6751 * it as an error. If the port responds with RN, start
6752 * the loop again.
6753 */
6754 for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) {
6755 reg_data.word0 =
6756 readl(phba->sli4_hba.u.if_type2.
6757 STATUSregaddr);
6758 if (bf_get(lpfc_sliport_status_rdy, &reg_data))
6759 break;
6760 if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
6761 reset_again++;
6762 break;
6763 }
6764 msleep(10);
6765 }
6766
6767 /*
6768 * If the port responds to the init request with
6769 * reset needed, delay for a bit and restart the loop.
6770 */
6771 if (reset_again) {
6772 msleep(10);
6773 reset_again = 0;
6774 continue;
6775 }
6776
6777 /* Detect any port errors. */
6778 reg_data.word0 = readl(phba->sli4_hba.u.if_type2.
6779 STATUSregaddr);
6780 if ((bf_get(lpfc_sliport_status_err, &reg_data)) ||
6781 (rdy_chk >= 1000)) {
6782 phba->work_status[0] = readl(
6783 phba->sli4_hba.u.if_type2.ERR1regaddr);
6784 phba->work_status[1] = readl(
6785 phba->sli4_hba.u.if_type2.ERR2regaddr);
6786 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6787 "2890 Port Error Detected "
6788 "during Port Reset: "
6789 "port status reg 0x%x, "
6790 "error 1=0x%x, error 2=0x%x\n",
6791 reg_data.word0,
6792 phba->work_status[0],
6793 phba->work_status[1]);
6794 rc = -ENODEV;
6795 }
6796
6797 /*
6798 * Terminate the outer loop provided the Port indicated
6799 * ready within 10 seconds.
6800 */
6801 if (rdy_chk < 1000)
6802 break;
6803 }
6804 break;
6805 case LPFC_SLI_INTF_IF_TYPE_1:
6806 default:
6807 break;
6448 } 6808 }
6809
6810 /* Catch the not-ready port failure after a port reset. */
6811 if (num_resets >= MAX_IF_TYPE_2_RESETS)
6812 rc = -ENODEV;
6813
6449 return rc; 6814 return rc;
6450} 6815}
6451 6816
@@ -6536,6 +6901,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
6536 struct pci_dev *pdev; 6901 struct pci_dev *pdev;
6537 unsigned long bar0map_len, bar1map_len, bar2map_len; 6902 unsigned long bar0map_len, bar1map_len, bar2map_len;
6538 int error = -ENODEV; 6903 int error = -ENODEV;
6904 uint32_t if_type;
6539 6905
6540 /* Obtain PCI device reference */ 6906 /* Obtain PCI device reference */
6541 if (!phba->pcidev) 6907 if (!phba->pcidev)
@@ -6552,61 +6918,105 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
6552 } 6918 }
6553 } 6919 }
6554 6920
6555 /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the 6921 /*
6556 * number of bytes required by each mapping. They are actually 6922 * The BARs and register set definitions and offset locations are
6557 * mapping to the PCI BAR regions 0 or 1, 2, and 4 by the SLI4 device. 6923 * dependent on the if_type.
6924 */
6925 if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
6926 &phba->sli4_hba.sli_intf.word0)) {
6927 return error;
6928 }
6929
6930 /* There is no SLI3 failback for SLI4 devices. */
6931 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
6932 LPFC_SLI_INTF_VALID) {
6933 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6934 "2894 SLI_INTF reg contents invalid "
6935 "sli_intf reg 0x%x\n",
6936 phba->sli4_hba.sli_intf.word0);
6937 return error;
6938 }
6939
6940 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
6941 /*
6942 * Get the bus address of SLI4 device Bar regions and the
6943 * number of bytes required by each mapping. The mapping of the
6944 * particular PCI BARs regions is dependent on the type of
6945 * SLI4 device.
6558 */ 6946 */
6559 if (pci_resource_start(pdev, 0)) { 6947 if (pci_resource_start(pdev, 0)) {
6560 phba->pci_bar0_map = pci_resource_start(pdev, 0); 6948 phba->pci_bar0_map = pci_resource_start(pdev, 0);
6561 bar0map_len = pci_resource_len(pdev, 0); 6949 bar0map_len = pci_resource_len(pdev, 0);
6950
6951 /*
6952 * Map SLI4 PCI Config Space Register base to a kernel virtual
6953 * addr
6954 */
6955 phba->sli4_hba.conf_regs_memmap_p =
6956 ioremap(phba->pci_bar0_map, bar0map_len);
6957 if (!phba->sli4_hba.conf_regs_memmap_p) {
6958 dev_printk(KERN_ERR, &pdev->dev,
6959 "ioremap failed for SLI4 PCI config "
6960 "registers.\n");
6961 goto out;
6962 }
6963 /* Set up BAR0 PCI config space register memory map */
6964 lpfc_sli4_bar0_register_memmap(phba, if_type);
6562 } else { 6965 } else {
6563 phba->pci_bar0_map = pci_resource_start(pdev, 1); 6966 phba->pci_bar0_map = pci_resource_start(pdev, 1);
6564 bar0map_len = pci_resource_len(pdev, 1); 6967 bar0map_len = pci_resource_len(pdev, 1);
6565 } 6968 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
6566 phba->pci_bar1_map = pci_resource_start(pdev, 2); 6969 dev_printk(KERN_ERR, &pdev->dev,
6567 bar1map_len = pci_resource_len(pdev, 2); 6970 "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
6568 6971 goto out;
6569 phba->pci_bar2_map = pci_resource_start(pdev, 4); 6972 }
6570 bar2map_len = pci_resource_len(pdev, 4); 6973 phba->sli4_hba.conf_regs_memmap_p =
6571
6572 /* Map SLI4 PCI Config Space Register base to a kernel virtual addr */
6573 phba->sli4_hba.conf_regs_memmap_p =
6574 ioremap(phba->pci_bar0_map, bar0map_len); 6974 ioremap(phba->pci_bar0_map, bar0map_len);
6575 if (!phba->sli4_hba.conf_regs_memmap_p) { 6975 if (!phba->sli4_hba.conf_regs_memmap_p) {
6576 dev_printk(KERN_ERR, &pdev->dev, 6976 dev_printk(KERN_ERR, &pdev->dev,
6577 "ioremap failed for SLI4 PCI config registers.\n"); 6977 "ioremap failed for SLI4 PCI config "
6578 goto out; 6978 "registers.\n");
6979 goto out;
6980 }
6981 lpfc_sli4_bar0_register_memmap(phba, if_type);
6579 } 6982 }
6580 6983
6581 /* Map SLI4 HBA Control Register base to a kernel virtual address. */ 6984 if (pci_resource_start(pdev, 2)) {
6582 phba->sli4_hba.ctrl_regs_memmap_p = 6985 /*
6986 * Map SLI4 if type 0 HBA Control Register base to a kernel
6987 * virtual address and setup the registers.
6988 */
6989 phba->pci_bar1_map = pci_resource_start(pdev, 2);
6990 bar1map_len = pci_resource_len(pdev, 2);
6991 phba->sli4_hba.ctrl_regs_memmap_p =
6583 ioremap(phba->pci_bar1_map, bar1map_len); 6992 ioremap(phba->pci_bar1_map, bar1map_len);
6584 if (!phba->sli4_hba.ctrl_regs_memmap_p) { 6993 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
6585 dev_printk(KERN_ERR, &pdev->dev, 6994 dev_printk(KERN_ERR, &pdev->dev,
6586 "ioremap failed for SLI4 HBA control registers.\n"); 6995 "ioremap failed for SLI4 HBA control registers.\n");
6587 goto out_iounmap_conf; 6996 goto out_iounmap_conf;
6997 }
6998 lpfc_sli4_bar1_register_memmap(phba);
6588 } 6999 }
6589 7000
6590 /* Map SLI4 HBA Doorbell Register base to a kernel virtual address. */ 7001 if (pci_resource_start(pdev, 4)) {
6591 phba->sli4_hba.drbl_regs_memmap_p = 7002 /*
7003 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
7004 * virtual address and setup the registers.
7005 */
7006 phba->pci_bar2_map = pci_resource_start(pdev, 4);
7007 bar2map_len = pci_resource_len(pdev, 4);
7008 phba->sli4_hba.drbl_regs_memmap_p =
6592 ioremap(phba->pci_bar2_map, bar2map_len); 7009 ioremap(phba->pci_bar2_map, bar2map_len);
6593 if (!phba->sli4_hba.drbl_regs_memmap_p) { 7010 if (!phba->sli4_hba.drbl_regs_memmap_p) {
6594 dev_printk(KERN_ERR, &pdev->dev, 7011 dev_printk(KERN_ERR, &pdev->dev,
6595 "ioremap failed for SLI4 HBA doorbell registers.\n"); 7012 "ioremap failed for SLI4 HBA doorbell registers.\n");
6596 goto out_iounmap_ctrl; 7013 goto out_iounmap_ctrl;
7014 }
7015 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
7016 if (error)
7017 goto out_iounmap_all;
6597 } 7018 }
6598 7019
6599 /* Set up BAR0 PCI config space register memory map */
6600 lpfc_sli4_bar0_register_memmap(phba);
6601
6602 /* Set up BAR1 register memory map */
6603 lpfc_sli4_bar1_register_memmap(phba);
6604
6605 /* Set up BAR2 register memory map */
6606 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
6607 if (error)
6608 goto out_iounmap_all;
6609
6610 return 0; 7020 return 0;
6611 7021
6612out_iounmap_all: 7022out_iounmap_all:
@@ -8149,6 +8559,8 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
8149 goto out_unset_driver_resource_s4; 8559 goto out_unset_driver_resource_s4;
8150 } 8560 }
8151 8561
8562 INIT_LIST_HEAD(&phba->active_rrq_list);
8563
8152 /* Set up common device driver resources */ 8564 /* Set up common device driver resources */
8153 error = lpfc_setup_driver_resource_phase2(phba); 8565 error = lpfc_setup_driver_resource_phase2(phba);
8154 if (error) { 8566 if (error) {
@@ -8218,7 +8630,11 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
8218 "0451 Configure interrupt mode (%d) " 8630 "0451 Configure interrupt mode (%d) "
8219 "failed active interrupt test.\n", 8631 "failed active interrupt test.\n",
8220 intr_mode); 8632 intr_mode);
8221 /* Unset the preivous SLI-4 HBA setup */ 8633 /* Unset the previous SLI-4 HBA setup. */
8634 /*
8635 * TODO: Is this operation compatible with IF TYPE 2
8636 * devices? All port state is deleted and cleared.
8637 */
8222 lpfc_sli4_unset_hba(phba); 8638 lpfc_sli4_unset_hba(phba);
8223 /* Try next level of interrupt mode */ 8639 /* Try next level of interrupt mode */
8224 cfg_mode = --intr_mode; 8640 cfg_mode = --intr_mode;
@@ -8990,6 +9406,10 @@ static struct pci_device_id lpfc_id_table[] = {
8990 PCI_ANY_ID, PCI_ANY_ID, }, 9406 PCI_ANY_ID, PCI_ANY_ID, },
8991 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS, 9407 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS,
8992 PCI_ANY_ID, PCI_ANY_ID, }, 9408 PCI_ANY_ID, PCI_ANY_ID, },
9409 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC,
9410 PCI_ANY_ID, PCI_ANY_ID, },
9411 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE,
9412 PCI_ANY_ID, PCI_ANY_ID, },
8993 { 0 } 9413 { 0 }
8994}; 9414};
8995 9415
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index bb59e9273126..e3b790e59156 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -33,7 +33,7 @@
33#define LOG_FCP_ERROR 0x00001000 /* log errors, not underruns */ 33#define LOG_FCP_ERROR 0x00001000 /* log errors, not underruns */
34#define LOG_LIBDFC 0x00002000 /* Libdfc events */ 34#define LOG_LIBDFC 0x00002000 /* Libdfc events */
35#define LOG_VPORT 0x00004000 /* NPIV events */ 35#define LOG_VPORT 0x00004000 /* NPIV events */
36#define LOF_SECURITY 0x00008000 /* Security events */ 36#define LOG_SECURITY 0x00008000 /* Security events */
37#define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */ 37#define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */
38#define LOG_FIP 0x00020000 /* FIP events */ 38#define LOG_FIP 0x00020000 /* FIP events */
39#define LOG_ALL_MSG 0xffffffff /* LOG all messages */ 39#define LOG_ALL_MSG 0xffffffff /* LOG all messages */
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 62d0957e1d4c..23403c650207 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -263,18 +263,19 @@ lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
263} 263}
264 264
265/** 265/**
266 * lpfc_read_la - Prepare a mailbox command for reading HBA link attention 266 * lpfc_read_topology - Prepare a mailbox command for reading HBA topology
267 * @phba: pointer to lpfc hba data structure. 267 * @phba: pointer to lpfc hba data structure.
268 * @pmb: pointer to the driver internal queue element for mailbox command. 268 * @pmb: pointer to the driver internal queue element for mailbox command.
269 * @mp: DMA buffer memory for reading the link attention information into. 269 * @mp: DMA buffer memory for reading the link attention information into.
270 * 270 *
271 * The read link attention mailbox command is issued to read the Link Event 271 * The read topology mailbox command is issued to read the link topology
272 * Attention information indicated by the HBA port when the Link Event bit 272 * information indicated by the HBA port when the Link Event bit of the Host
273 * of the Host Attention (HSTATT) register is set to 1. A Link Event 273 * Attention (HSTATT) register is set to 1 (For SLI-3) or when an FC Link
274 * Attention ACQE is received from the port (For SLI-4). A Link Event
274 * Attention occurs based on an exception detected at the Fibre Channel link 275 * Attention occurs based on an exception detected at the Fibre Channel link
275 * interface. 276 * interface.
276 * 277 *
277 * This routine prepares the mailbox command for reading HBA link attention 278 * This routine prepares the mailbox command for reading HBA link topology
278 * information. A DMA memory has been set aside and address passed to the 279 * information. A DMA memory has been set aside and address passed to the
279 * HBA through @mp for the HBA to DMA link attention information into the 280 * HBA through @mp for the HBA to DMA link attention information into the
280 * memory as part of the execution of the mailbox command. 281 * memory as part of the execution of the mailbox command.
@@ -283,7 +284,8 @@ lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
283 * 0 - Success (currently always return 0) 284 * 0 - Success (currently always return 0)
284 **/ 285 **/
285int 286int
286lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, struct lpfc_dmabuf *mp) 287lpfc_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
288 struct lpfc_dmabuf *mp)
287{ 289{
288 MAILBOX_t *mb; 290 MAILBOX_t *mb;
289 struct lpfc_sli *psli; 291 struct lpfc_sli *psli;
@@ -293,15 +295,15 @@ lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, struct lpfc_dmabuf *mp)
293 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 295 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
294 296
295 INIT_LIST_HEAD(&mp->list); 297 INIT_LIST_HEAD(&mp->list);
296 mb->mbxCommand = MBX_READ_LA64; 298 mb->mbxCommand = MBX_READ_TOPOLOGY;
297 mb->un.varReadLA.un.lilpBde64.tus.f.bdeSize = 128; 299 mb->un.varReadTop.lilpBde64.tus.f.bdeSize = LPFC_ALPA_MAP_SIZE;
298 mb->un.varReadLA.un.lilpBde64.addrHigh = putPaddrHigh(mp->phys); 300 mb->un.varReadTop.lilpBde64.addrHigh = putPaddrHigh(mp->phys);
299 mb->un.varReadLA.un.lilpBde64.addrLow = putPaddrLow(mp->phys); 301 mb->un.varReadTop.lilpBde64.addrLow = putPaddrLow(mp->phys);
300 302
301 /* Save address for later completion and set the owner to host so that 303 /* Save address for later completion and set the owner to host so that
302 * the FW knows this mailbox is available for processing. 304 * the FW knows this mailbox is available for processing.
303 */ 305 */
304 pmb->context1 = (uint8_t *) mp; 306 pmb->context1 = (uint8_t *)mp;
305 mb->mbxOwner = OWN_HOST; 307 mb->mbxOwner = OWN_HOST;
306 return (0); 308 return (0);
307} 309}
@@ -516,18 +518,33 @@ lpfc_init_link(struct lpfc_hba * phba,
516 vpd = &phba->vpd; 518 vpd = &phba->vpd;
517 if (vpd->rev.feaLevelHigh >= 0x02){ 519 if (vpd->rev.feaLevelHigh >= 0x02){
518 switch(linkspeed){ 520 switch(linkspeed){
519 case LINK_SPEED_1G: 521 case LPFC_USER_LINK_SPEED_1G:
520 case LINK_SPEED_2G: 522 mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
521 case LINK_SPEED_4G: 523 mb->un.varInitLnk.link_speed = LINK_SPEED_1G;
522 case LINK_SPEED_8G: 524 break;
523 mb->un.varInitLnk.link_flags |= 525 case LPFC_USER_LINK_SPEED_2G:
524 FLAGS_LINK_SPEED; 526 mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
525 mb->un.varInitLnk.link_speed = linkspeed; 527 mb->un.varInitLnk.link_speed = LINK_SPEED_2G;
528 break;
529 case LPFC_USER_LINK_SPEED_4G:
530 mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
531 mb->un.varInitLnk.link_speed = LINK_SPEED_4G;
532 break;
533 case LPFC_USER_LINK_SPEED_8G:
534 mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
535 mb->un.varInitLnk.link_speed = LINK_SPEED_8G;
536 break;
537 case LPFC_USER_LINK_SPEED_10G:
538 mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
539 mb->un.varInitLnk.link_speed = LINK_SPEED_10G;
526 break; 540 break;
527 case LINK_SPEED_AUTO: 541 case LPFC_USER_LINK_SPEED_16G:
528 default: 542 mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
529 mb->un.varInitLnk.link_speed = 543 mb->un.varInitLnk.link_speed = LINK_SPEED_16G;
530 LINK_SPEED_AUTO; 544 break;
545 case LPFC_USER_LINK_SPEED_AUTO:
546 default:
547 mb->un.varInitLnk.link_speed = LINK_SPEED_AUTO;
531 break; 548 break;
532 } 549 }
533 550
@@ -693,7 +710,7 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
693 * @did: remote port identifier. 710 * @did: remote port identifier.
694 * @param: pointer to memory holding the server parameters. 711 * @param: pointer to memory holding the server parameters.
695 * @pmb: pointer to the driver internal queue element for mailbox command. 712 * @pmb: pointer to the driver internal queue element for mailbox command.
696 * @flag: action flag to be passed back for the complete function. 713 * @rpi: the rpi to use in the registration (usually only used for SLI4.
697 * 714 *
698 * The registration login mailbox command is used to register an N_Port or 715 * The registration login mailbox command is used to register an N_Port or
699 * F_Port login. This registration allows the HBA to cache the remote N_Port 716 * F_Port login. This registration allows the HBA to cache the remote N_Port
@@ -712,7 +729,7 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
712 **/ 729 **/
713int 730int
714lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did, 731lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
715 uint8_t *param, LPFC_MBOXQ_t *pmb, uint32_t flag) 732 uint8_t *param, LPFC_MBOXQ_t *pmb, uint16_t rpi)
716{ 733{
717 MAILBOX_t *mb = &pmb->u.mb; 734 MAILBOX_t *mb = &pmb->u.mb;
718 uint8_t *sparam; 735 uint8_t *sparam;
@@ -722,17 +739,13 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
722 739
723 mb->un.varRegLogin.rpi = 0; 740 mb->un.varRegLogin.rpi = 0;
724 if (phba->sli_rev == LPFC_SLI_REV4) { 741 if (phba->sli_rev == LPFC_SLI_REV4) {
725 mb->un.varRegLogin.rpi = lpfc_sli4_alloc_rpi(phba); 742 mb->un.varRegLogin.rpi = rpi;
726 if (mb->un.varRegLogin.rpi == LPFC_RPI_ALLOC_ERROR) 743 if (mb->un.varRegLogin.rpi == LPFC_RPI_ALLOC_ERROR)
727 return 1; 744 return 1;
728 } 745 }
729
730 mb->un.varRegLogin.vpi = vpi + phba->vpi_base; 746 mb->un.varRegLogin.vpi = vpi + phba->vpi_base;
731 mb->un.varRegLogin.did = did; 747 mb->un.varRegLogin.did = did;
732 mb->un.varWords[30] = flag; /* Set flag to issue action on cmpl */
733
734 mb->mbxOwner = OWN_HOST; 748 mb->mbxOwner = OWN_HOST;
735
736 /* Get a buffer to hold NPorts Service Parameters */ 749 /* Get a buffer to hold NPorts Service Parameters */
737 mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 750 mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
738 if (mp) 751 if (mp)
@@ -743,7 +756,7 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
743 /* REG_LOGIN: no buffers */ 756 /* REG_LOGIN: no buffers */
744 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 757 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
745 "0302 REG_LOGIN: no buffers, VPI:%d DID:x%x, " 758 "0302 REG_LOGIN: no buffers, VPI:%d DID:x%x, "
746 "flag x%x\n", vpi, did, flag); 759 "rpi x%x\n", vpi, did, rpi);
747 return (1); 760 return (1);
748 } 761 }
749 INIT_LIST_HEAD(&mp->list); 762 INIT_LIST_HEAD(&mp->list);
@@ -1918,11 +1931,14 @@ lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
1918 struct lpfc_mbx_init_vfi *init_vfi; 1931 struct lpfc_mbx_init_vfi *init_vfi;
1919 1932
1920 memset(mbox, 0, sizeof(*mbox)); 1933 memset(mbox, 0, sizeof(*mbox));
1934 mbox->vport = vport;
1921 init_vfi = &mbox->u.mqe.un.init_vfi; 1935 init_vfi = &mbox->u.mqe.un.init_vfi;
1922 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VFI); 1936 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VFI);
1923 bf_set(lpfc_init_vfi_vr, init_vfi, 1); 1937 bf_set(lpfc_init_vfi_vr, init_vfi, 1);
1924 bf_set(lpfc_init_vfi_vt, init_vfi, 1); 1938 bf_set(lpfc_init_vfi_vt, init_vfi, 1);
1939 bf_set(lpfc_init_vfi_vp, init_vfi, 1);
1925 bf_set(lpfc_init_vfi_vfi, init_vfi, vport->vfi + vport->phba->vfi_base); 1940 bf_set(lpfc_init_vfi_vfi, init_vfi, vport->vfi + vport->phba->vfi_base);
1941 bf_set(lpfc_init_vpi_vpi, init_vfi, vport->vpi + vport->phba->vpi_base);
1926 bf_set(lpfc_init_vfi_fcfi, init_vfi, vport->phba->fcf.fcfi); 1942 bf_set(lpfc_init_vfi_fcfi, init_vfi, vport->phba->fcf.fcfi);
1927} 1943}
1928 1944
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 8f879e477e9d..cbb48ee8b0bb 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -113,11 +113,16 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
113 goto fail_free_mbox_pool; 113 goto fail_free_mbox_pool;
114 114
115 if (phba->sli_rev == LPFC_SLI_REV4) { 115 if (phba->sli_rev == LPFC_SLI_REV4) {
116 phba->rrq_pool =
117 mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
118 sizeof(struct lpfc_node_rrq));
119 if (!phba->rrq_pool)
120 goto fail_free_nlp_mem_pool;
116 phba->lpfc_hrb_pool = pci_pool_create("lpfc_hrb_pool", 121 phba->lpfc_hrb_pool = pci_pool_create("lpfc_hrb_pool",
117 phba->pcidev, 122 phba->pcidev,
118 LPFC_HDR_BUF_SIZE, align, 0); 123 LPFC_HDR_BUF_SIZE, align, 0);
119 if (!phba->lpfc_hrb_pool) 124 if (!phba->lpfc_hrb_pool)
120 goto fail_free_nlp_mem_pool; 125 goto fail_free_rrq_mem_pool;
121 126
122 phba->lpfc_drb_pool = pci_pool_create("lpfc_drb_pool", 127 phba->lpfc_drb_pool = pci_pool_create("lpfc_drb_pool",
123 phba->pcidev, 128 phba->pcidev,
@@ -147,6 +152,9 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
147 fail_free_hrb_pool: 152 fail_free_hrb_pool:
148 pci_pool_destroy(phba->lpfc_hrb_pool); 153 pci_pool_destroy(phba->lpfc_hrb_pool);
149 phba->lpfc_hrb_pool = NULL; 154 phba->lpfc_hrb_pool = NULL;
155 fail_free_rrq_mem_pool:
156 mempool_destroy(phba->rrq_pool);
157 phba->rrq_pool = NULL;
150 fail_free_nlp_mem_pool: 158 fail_free_nlp_mem_pool:
151 mempool_destroy(phba->nlp_mem_pool); 159 mempool_destroy(phba->nlp_mem_pool);
152 phba->nlp_mem_pool = NULL; 160 phba->nlp_mem_pool = NULL;
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index bccc9c66fa37..d85a7423a694 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -386,7 +386,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
386 goto out; 386 goto out;
387 387
388 rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID, 388 rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
389 (uint8_t *) sp, mbox, 0); 389 (uint8_t *) sp, mbox, ndlp->nlp_rpi);
390 if (rc) { 390 if (rc) {
391 mempool_free(mbox, phba->mbox_mem_pool); 391 mempool_free(mbox, phba->mbox_mem_pool);
392 goto out; 392 goto out;
@@ -632,7 +632,7 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
632{ 632{
633 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 633 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
634 634
635 if (!(ndlp->nlp_flag & NLP_RPI_VALID)) { 635 if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
636 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 636 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
637 return 0; 637 return 0;
638 } 638 }
@@ -968,7 +968,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
968 lpfc_unreg_rpi(vport, ndlp); 968 lpfc_unreg_rpi(vport, ndlp);
969 969
970 if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID, 970 if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID,
971 (uint8_t *) sp, mbox, 0) == 0) { 971 (uint8_t *) sp, mbox, ndlp->nlp_rpi) == 0) {
972 switch (ndlp->nlp_DID) { 972 switch (ndlp->nlp_DID) {
973 case NameServer_DID: 973 case NameServer_DID:
974 mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login; 974 mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login;
@@ -1338,12 +1338,6 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
1338 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 1338 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1339 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && 1339 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
1340 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 1340 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1341 if (phba->sli_rev == LPFC_SLI_REV4) {
1342 spin_unlock_irq(&phba->hbalock);
1343 lpfc_sli4_free_rpi(phba,
1344 mb->u.mb.un.varRegLogin.rpi);
1345 spin_lock_irq(&phba->hbalock);
1346 }
1347 mp = (struct lpfc_dmabuf *) (mb->context1); 1341 mp = (struct lpfc_dmabuf *) (mb->context1);
1348 if (mp) { 1342 if (mp) {
1349 __lpfc_mbuf_free(phba, mp->virt, mp->phys); 1343 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -1426,7 +1420,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
1426 } 1420 }
1427 1421
1428 ndlp->nlp_rpi = mb->un.varWords[0]; 1422 ndlp->nlp_rpi = mb->un.varWords[0];
1429 ndlp->nlp_flag |= NLP_RPI_VALID; 1423 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
1430 1424
1431 /* Only if we are not a fabric nport do we issue PRLI */ 1425 /* Only if we are not a fabric nport do we issue PRLI */
1432 if (!(ndlp->nlp_type & NLP_FABRIC)) { 1426 if (!(ndlp->nlp_type & NLP_FABRIC)) {
@@ -2027,7 +2021,7 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
2027 2021
2028 if (!mb->mbxStatus) { 2022 if (!mb->mbxStatus) {
2029 ndlp->nlp_rpi = mb->un.varWords[0]; 2023 ndlp->nlp_rpi = mb->un.varWords[0];
2030 ndlp->nlp_flag |= NLP_RPI_VALID; 2024 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
2031 } else { 2025 } else {
2032 if (ndlp->nlp_flag & NLP_NODEV_REMOVE) { 2026 if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
2033 lpfc_drop_node(vport, ndlp); 2027 lpfc_drop_node(vport, ndlp);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 581837b3c71a..c97751c95d77 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -621,10 +621,13 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
621 struct sli4_wcqe_xri_aborted *axri) 621 struct sli4_wcqe_xri_aborted *axri)
622{ 622{
623 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); 623 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
624 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
624 struct lpfc_scsi_buf *psb, *next_psb; 625 struct lpfc_scsi_buf *psb, *next_psb;
625 unsigned long iflag = 0; 626 unsigned long iflag = 0;
626 struct lpfc_iocbq *iocbq; 627 struct lpfc_iocbq *iocbq;
627 int i; 628 int i;
629 struct lpfc_nodelist *ndlp;
630 int rrq_empty = 0;
628 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 631 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
629 632
630 spin_lock_irqsave(&phba->hbalock, iflag); 633 spin_lock_irqsave(&phba->hbalock, iflag);
@@ -637,8 +640,14 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
637 psb->status = IOSTAT_SUCCESS; 640 psb->status = IOSTAT_SUCCESS;
638 spin_unlock( 641 spin_unlock(
639 &phba->sli4_hba.abts_scsi_buf_list_lock); 642 &phba->sli4_hba.abts_scsi_buf_list_lock);
643 ndlp = psb->rdata->pnode;
644 rrq_empty = list_empty(&phba->active_rrq_list);
640 spin_unlock_irqrestore(&phba->hbalock, iflag); 645 spin_unlock_irqrestore(&phba->hbalock, iflag);
646 if (ndlp)
647 lpfc_set_rrq_active(phba, ndlp, xri, rxid, 1);
641 lpfc_release_scsi_buf_s4(phba, psb); 648 lpfc_release_scsi_buf_s4(phba, psb);
649 if (rrq_empty)
650 lpfc_worker_wake_up(phba);
642 return; 651 return;
643 } 652 }
644 } 653 }
@@ -914,7 +923,7 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
914} 923}
915 924
916/** 925/**
917 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA 926 * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
918 * @phba: The HBA for which this call is being executed. 927 * @phba: The HBA for which this call is being executed.
919 * 928 *
920 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list 929 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
@@ -925,7 +934,7 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
925 * Pointer to lpfc_scsi_buf - Success 934 * Pointer to lpfc_scsi_buf - Success
926 **/ 935 **/
927static struct lpfc_scsi_buf* 936static struct lpfc_scsi_buf*
928lpfc_get_scsi_buf(struct lpfc_hba * phba) 937lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
929{ 938{
930 struct lpfc_scsi_buf * lpfc_cmd = NULL; 939 struct lpfc_scsi_buf * lpfc_cmd = NULL;
931 struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list; 940 struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
@@ -941,6 +950,67 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba)
941 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); 950 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
942 return lpfc_cmd; 951 return lpfc_cmd;
943} 952}
953/**
954 * lpfc_get_scsi_buf_s4 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
955 * @phba: The HBA for which this call is being executed.
956 *
957 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
958 * and returns to caller.
959 *
960 * Return codes:
961 * NULL - Error
962 * Pointer to lpfc_scsi_buf - Success
963 **/
964static struct lpfc_scsi_buf*
965lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
966{
967 struct lpfc_scsi_buf *lpfc_cmd = NULL;
968 struct lpfc_scsi_buf *start_lpfc_cmd = NULL;
969 struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
970 unsigned long iflag = 0;
971 int found = 0;
972
973 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
974 list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
975 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
976 while (!found && lpfc_cmd) {
977 if (lpfc_test_rrq_active(phba, ndlp,
978 lpfc_cmd->cur_iocbq.sli4_xritag)) {
979 lpfc_release_scsi_buf_s4(phba, lpfc_cmd);
980 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
981 list_remove_head(scsi_buf_list, lpfc_cmd,
982 struct lpfc_scsi_buf, list);
983 spin_unlock_irqrestore(&phba->scsi_buf_list_lock,
984 iflag);
985 if (lpfc_cmd == start_lpfc_cmd) {
986 lpfc_cmd = NULL;
987 break;
988 } else
989 continue;
990 }
991 found = 1;
992 lpfc_cmd->seg_cnt = 0;
993 lpfc_cmd->nonsg_phys = 0;
994 lpfc_cmd->prot_seg_cnt = 0;
995 }
996 return lpfc_cmd;
997}
998/**
999 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
1000 * @phba: The HBA for which this call is being executed.
1001 *
1002 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
1003 * and returns to caller.
1004 *
1005 * Return codes:
1006 * NULL - Error
1007 * Pointer to lpfc_scsi_buf - Success
1008 **/
1009static struct lpfc_scsi_buf*
1010lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1011{
1012 return phba->lpfc_get_scsi_buf(phba, ndlp);
1013}
944 1014
945/** 1015/**
946 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list 1016 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
@@ -2744,18 +2814,19 @@ lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
2744 2814
2745 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf; 2815 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
2746 phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd; 2816 phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;
2747 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf;
2748 2817
2749 switch (dev_grp) { 2818 switch (dev_grp) {
2750 case LPFC_PCI_DEV_LP: 2819 case LPFC_PCI_DEV_LP:
2751 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3; 2820 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
2752 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3; 2821 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
2753 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3; 2822 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
2823 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
2754 break; 2824 break;
2755 case LPFC_PCI_DEV_OC: 2825 case LPFC_PCI_DEV_OC:
2756 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4; 2826 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
2757 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4; 2827 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
2758 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4; 2828 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
2829 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
2759 break; 2830 break;
2760 default: 2831 default:
2761 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2832 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -2764,7 +2835,6 @@ lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
2764 return -ENODEV; 2835 return -ENODEV;
2765 break; 2836 break;
2766 } 2837 }
2767 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf;
2768 phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth; 2838 phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
2769 phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl; 2839 phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
2770 return 0; 2840 return 0;
@@ -2940,7 +3010,7 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2940 if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) 3010 if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
2941 goto out_host_busy; 3011 goto out_host_busy;
2942 3012
2943 lpfc_cmd = lpfc_get_scsi_buf(phba); 3013 lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp);
2944 if (lpfc_cmd == NULL) { 3014 if (lpfc_cmd == NULL) {
2945 lpfc_rampdown_queue_depth(phba); 3015 lpfc_rampdown_queue_depth(phba);
2946 3016
@@ -3239,7 +3309,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
3239 if (!pnode || !NLP_CHK_NODE_ACT(pnode)) 3309 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
3240 return FAILED; 3310 return FAILED;
3241 3311
3242 lpfc_cmd = lpfc_get_scsi_buf(phba); 3312 lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode);
3243 if (lpfc_cmd == NULL) 3313 if (lpfc_cmd == NULL)
3244 return FAILED; 3314 return FAILED;
3245 lpfc_cmd->timeout = 60; 3315 lpfc_cmd->timeout = 60;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 554efa6623f4..634b2fea9c4d 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -513,8 +513,344 @@ __lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
513} 513}
514 514
515/** 515/**
516 * __lpfc_set_rrq_active - set RRQ active bit in the ndlp's xri_bitmap.
517 * @phba: Pointer to HBA context object.
518 * @ndlp: nodelist pointer for this target.
519 * @xritag: xri used in this exchange.
520 * @rxid: Remote Exchange ID.
521 * @send_rrq: Flag used to determine if we should send rrq els cmd.
522 *
523 * This function is called with hbalock held.
524 * The active bit is set in the ndlp's active rrq xri_bitmap. Allocates an
525 * rrq struct and adds it to the active_rrq_list.
526 *
527 * returns 0 for rrq slot for this xri
528 * < 0 Were not able to get rrq mem or invalid parameter.
529 **/
530static int
531__lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
532 uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
533{
534 uint16_t adj_xri;
535 struct lpfc_node_rrq *rrq;
536 int empty;
537
538 /*
539 * set the active bit even if there is no mem available.
540 */
541 adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
542 if (!ndlp)
543 return -EINVAL;
544 if (test_and_set_bit(adj_xri, ndlp->active_rrqs.xri_bitmap))
545 return -EINVAL;
546 rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
547 if (rrq) {
548 rrq->send_rrq = send_rrq;
549 rrq->xritag = xritag;
550 rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
551 rrq->ndlp = ndlp;
552 rrq->nlp_DID = ndlp->nlp_DID;
553 rrq->vport = ndlp->vport;
554 rrq->rxid = rxid;
555 empty = list_empty(&phba->active_rrq_list);
556 if (phba->cfg_enable_rrq && send_rrq)
557 /*
558 * We need the xri before we can add this to the
559 * phba active rrq list.
560 */
561 rrq->send_rrq = send_rrq;
562 else
563 rrq->send_rrq = 0;
564 list_add_tail(&rrq->list, &phba->active_rrq_list);
565 if (!(phba->hba_flag & HBA_RRQ_ACTIVE)) {
566 phba->hba_flag |= HBA_RRQ_ACTIVE;
567 if (empty)
568 lpfc_worker_wake_up(phba);
569 }
570 return 0;
571 }
572 return -ENOMEM;
573}
574
575/**
576 * __lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
577 * @phba: Pointer to HBA context object.
578 * @xritag: xri used in this exchange.
579 * @rrq: The RRQ to be cleared.
580 *
581 * This function is called with hbalock held. This function
582 **/
583static void
584__lpfc_clr_rrq_active(struct lpfc_hba *phba,
585 uint16_t xritag,
586 struct lpfc_node_rrq *rrq)
587{
588 uint16_t adj_xri;
589 struct lpfc_nodelist *ndlp;
590
591 ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
592
593 /* The target DID could have been swapped (cable swap)
594 * we should use the ndlp from the findnode if it is
595 * available.
596 */
597 if (!ndlp)
598 ndlp = rrq->ndlp;
599
600 adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
601 if (test_and_clear_bit(adj_xri, ndlp->active_rrqs.xri_bitmap)) {
602 rrq->send_rrq = 0;
603 rrq->xritag = 0;
604 rrq->rrq_stop_time = 0;
605 }
606 mempool_free(rrq, phba->rrq_pool);
607}
608
609/**
610 * lpfc_handle_rrq_active - Checks if RRQ has waithed RATOV.
611 * @phba: Pointer to HBA context object.
612 *
613 * This function is called with hbalock held. This function
614 * Checks if stop_time (ratov from setting rrq active) has
615 * been reached, if it has and the send_rrq flag is set then
616 * it will call lpfc_send_rrq. If the send_rrq flag is not set
617 * then it will just call the routine to clear the rrq and
618 * free the rrq resource.
619 * The timer is set to the next rrq that is going to expire before
620 * leaving the routine.
621 *
622 **/
623void
624lpfc_handle_rrq_active(struct lpfc_hba *phba)
625{
626 struct lpfc_node_rrq *rrq;
627 struct lpfc_node_rrq *nextrrq;
628 unsigned long next_time;
629 unsigned long iflags;
630
631 spin_lock_irqsave(&phba->hbalock, iflags);
632 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
633 next_time = jiffies + HZ * (phba->fc_ratov + 1);
634 list_for_each_entry_safe(rrq, nextrrq,
635 &phba->active_rrq_list, list) {
636 if (time_after(jiffies, rrq->rrq_stop_time)) {
637 list_del(&rrq->list);
638 if (!rrq->send_rrq)
639 /* this call will free the rrq */
640 __lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
641 else {
642 /* if we send the rrq then the completion handler
643 * will clear the bit in the xribitmap.
644 */
645 spin_unlock_irqrestore(&phba->hbalock, iflags);
646 if (lpfc_send_rrq(phba, rrq)) {
647 lpfc_clr_rrq_active(phba, rrq->xritag,
648 rrq);
649 }
650 spin_lock_irqsave(&phba->hbalock, iflags);
651 }
652 } else if (time_before(rrq->rrq_stop_time, next_time))
653 next_time = rrq->rrq_stop_time;
654 }
655 spin_unlock_irqrestore(&phba->hbalock, iflags);
656 if (!list_empty(&phba->active_rrq_list))
657 mod_timer(&phba->rrq_tmr, next_time);
658}
659
660/**
661 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
662 * @vport: Pointer to vport context object.
663 * @xri: The xri used in the exchange.
664 * @did: The targets DID for this exchange.
665 *
666 * returns NULL = rrq not found in the phba->active_rrq_list.
667 * rrq = rrq for this xri and target.
668 **/
669struct lpfc_node_rrq *
670lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
671{
672 struct lpfc_hba *phba = vport->phba;
673 struct lpfc_node_rrq *rrq;
674 struct lpfc_node_rrq *nextrrq;
675 unsigned long iflags;
676
677 if (phba->sli_rev != LPFC_SLI_REV4)
678 return NULL;
679 spin_lock_irqsave(&phba->hbalock, iflags);
680 list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
681 if (rrq->vport == vport && rrq->xritag == xri &&
682 rrq->nlp_DID == did){
683 list_del(&rrq->list);
684 spin_unlock_irqrestore(&phba->hbalock, iflags);
685 return rrq;
686 }
687 }
688 spin_unlock_irqrestore(&phba->hbalock, iflags);
689 return NULL;
690}
691
692/**
693 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
694 * @vport: Pointer to vport context object.
695 *
696 * Remove all active RRQs for this vport from the phba->active_rrq_list and
697 * clear the rrq.
698 **/
699void
700lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport)
701
702{
703 struct lpfc_hba *phba = vport->phba;
704 struct lpfc_node_rrq *rrq;
705 struct lpfc_node_rrq *nextrrq;
706 unsigned long iflags;
707
708 if (phba->sli_rev != LPFC_SLI_REV4)
709 return;
710 spin_lock_irqsave(&phba->hbalock, iflags);
711 list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
712 if (rrq->vport == vport) {
713 list_del(&rrq->list);
714 __lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
715 }
716 }
717 spin_unlock_irqrestore(&phba->hbalock, iflags);
718}
719
720/**
721 * lpfc_cleanup_wt_rrqs - Remove all rrq's from the active list.
722 * @phba: Pointer to HBA context object.
723 *
724 * Remove all rrqs from the phba->active_rrq_list and free them by
725 * calling __lpfc_clr_active_rrq
726 *
727 **/
728void
729lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
730{
731 struct lpfc_node_rrq *rrq;
732 struct lpfc_node_rrq *nextrrq;
733 unsigned long next_time;
734 unsigned long iflags;
735
736 if (phba->sli_rev != LPFC_SLI_REV4)
737 return;
738 spin_lock_irqsave(&phba->hbalock, iflags);
739 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
740 next_time = jiffies + HZ * (phba->fc_ratov * 2);
741 list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
742 list_del(&rrq->list);
743 __lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
744 }
745 spin_unlock_irqrestore(&phba->hbalock, iflags);
746 if (!list_empty(&phba->active_rrq_list))
747 mod_timer(&phba->rrq_tmr, next_time);
748}
749
750
751/**
752 * __lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
753 * @phba: Pointer to HBA context object.
754 * @ndlp: Targets nodelist pointer for this exchange.
755 * @xritag the xri in the bitmap to test.
756 *
757 * This function is called with hbalock held. This function
758 * returns 0 = rrq not active for this xri
759 * 1 = rrq is valid for this xri.
760 **/
761static int
762__lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
763 uint16_t xritag)
764{
765 uint16_t adj_xri;
766
767 adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
768 if (!ndlp)
769 return 0;
770 if (test_bit(adj_xri, ndlp->active_rrqs.xri_bitmap))
771 return 1;
772 else
773 return 0;
774}
775
776/**
777 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
778 * @phba: Pointer to HBA context object.
779 * @ndlp: nodelist pointer for this target.
780 * @xritag: xri used in this exchange.
781 * @rxid: Remote Exchange ID.
782 * @send_rrq: Flag used to determine if we should send rrq els cmd.
783 *
784 * This function takes the hbalock.
785 * The active bit is always set in the active rrq xri_bitmap even
786 * if there is no slot avaiable for the other rrq information.
787 *
788 * returns 0 rrq actived for this xri
789 * < 0 No memory or invalid ndlp.
790 **/
791int
792lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
793 uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
794{
795 int ret;
796 unsigned long iflags;
797
798 spin_lock_irqsave(&phba->hbalock, iflags);
799 ret = __lpfc_set_rrq_active(phba, ndlp, xritag, rxid, send_rrq);
800 spin_unlock_irqrestore(&phba->hbalock, iflags);
801 return ret;
802}
803
804/**
805 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
806 * @phba: Pointer to HBA context object.
807 * @xritag: xri used in this exchange.
808 * @rrq: The RRQ to be cleared.
809 *
810 * This function is takes the hbalock.
811 **/
812void
813lpfc_clr_rrq_active(struct lpfc_hba *phba,
814 uint16_t xritag,
815 struct lpfc_node_rrq *rrq)
816{
817 unsigned long iflags;
818
819 spin_lock_irqsave(&phba->hbalock, iflags);
820 __lpfc_clr_rrq_active(phba, xritag, rrq);
821 spin_unlock_irqrestore(&phba->hbalock, iflags);
822 return;
823}
824
825
826
827/**
828 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
829 * @phba: Pointer to HBA context object.
830 * @ndlp: Targets nodelist pointer for this exchange.
831 * @xritag the xri in the bitmap to test.
832 *
833 * This function takes the hbalock.
834 * returns 0 = rrq not active for this xri
835 * 1 = rrq is valid for this xri.
836 **/
837int
838lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
839 uint16_t xritag)
840{
841 int ret;
842 unsigned long iflags;
843
844 spin_lock_irqsave(&phba->hbalock, iflags);
845 ret = __lpfc_test_rrq_active(phba, ndlp, xritag);
846 spin_unlock_irqrestore(&phba->hbalock, iflags);
847 return ret;
848}
849
850/**
516 * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool 851 * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool
517 * @phba: Pointer to HBA context object. 852 * @phba: Pointer to HBA context object.
853 * @piocb: Pointer to the iocbq.
518 * 854 *
519 * This function is called with hbalock held. This function 855 * This function is called with hbalock held. This function
520 * Gets a new driver sglq object from the sglq list. If the 856 * Gets a new driver sglq object from the sglq list. If the
@@ -522,17 +858,51 @@ __lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
522 * allocated sglq object else it returns NULL. 858 * allocated sglq object else it returns NULL.
523 **/ 859 **/
524static struct lpfc_sglq * 860static struct lpfc_sglq *
525__lpfc_sli_get_sglq(struct lpfc_hba *phba) 861__lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
526{ 862{
527 struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list; 863 struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
528 struct lpfc_sglq *sglq = NULL; 864 struct lpfc_sglq *sglq = NULL;
865 struct lpfc_sglq *start_sglq = NULL;
529 uint16_t adj_xri; 866 uint16_t adj_xri;
867 struct lpfc_scsi_buf *lpfc_cmd;
868 struct lpfc_nodelist *ndlp;
869 int found = 0;
870
871 if (piocbq->iocb_flag & LPFC_IO_FCP) {
872 lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
873 ndlp = lpfc_cmd->rdata->pnode;
874 } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
875 !(piocbq->iocb_flag & LPFC_IO_LIBDFC))
876 ndlp = piocbq->context_un.ndlp;
877 else
878 ndlp = piocbq->context1;
879
530 list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list); 880 list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
531 if (!sglq) 881 start_sglq = sglq;
532 return NULL; 882 while (!found) {
533 adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base; 883 if (!sglq)
534 phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq; 884 return NULL;
535 sglq->state = SGL_ALLOCATED; 885 adj_xri = sglq->sli4_xritag -
886 phba->sli4_hba.max_cfg_param.xri_base;
887 if (__lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) {
888 /* This xri has an rrq outstanding for this DID.
889 * put it back in the list and get another xri.
890 */
891 list_add_tail(&sglq->list, lpfc_sgl_list);
892 sglq = NULL;
893 list_remove_head(lpfc_sgl_list, sglq,
894 struct lpfc_sglq, list);
895 if (sglq == start_sglq) {
896 sglq = NULL;
897 break;
898 } else
899 continue;
900 }
901 sglq->ndlp = ndlp;
902 found = 1;
903 phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
904 sglq->state = SGL_ALLOCATED;
905 }
536 return sglq; 906 return sglq;
537} 907}
538 908
@@ -598,6 +968,7 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
598 &phba->sli4_hba.abts_sgl_list_lock, iflag); 968 &phba->sli4_hba.abts_sgl_list_lock, iflag);
599 } else { 969 } else {
600 sglq->state = SGL_FREED; 970 sglq->state = SGL_FREED;
971 sglq->ndlp = NULL;
601 list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list); 972 list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
602 973
603 /* Check if TXQ queue needs to be serviced */ 974 /* Check if TXQ queue needs to be serviced */
@@ -1634,7 +2005,6 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
1634 case MBX_READ_LNK_STAT: 2005 case MBX_READ_LNK_STAT:
1635 case MBX_REG_LOGIN: 2006 case MBX_REG_LOGIN:
1636 case MBX_UNREG_LOGIN: 2007 case MBX_UNREG_LOGIN:
1637 case MBX_READ_LA:
1638 case MBX_CLEAR_LA: 2008 case MBX_CLEAR_LA:
1639 case MBX_DUMP_MEMORY: 2009 case MBX_DUMP_MEMORY:
1640 case MBX_DUMP_CONTEXT: 2010 case MBX_DUMP_CONTEXT:
@@ -1656,7 +2026,7 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
1656 case MBX_READ_SPARM64: 2026 case MBX_READ_SPARM64:
1657 case MBX_READ_RPI64: 2027 case MBX_READ_RPI64:
1658 case MBX_REG_LOGIN64: 2028 case MBX_REG_LOGIN64:
1659 case MBX_READ_LA64: 2029 case MBX_READ_TOPOLOGY:
1660 case MBX_WRITE_WWN: 2030 case MBX_WRITE_WWN:
1661 case MBX_SET_DEBUG: 2031 case MBX_SET_DEBUG:
1662 case MBX_LOAD_EXP_ROM: 2032 case MBX_LOAD_EXP_ROM:
@@ -1746,11 +2116,6 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1746 kfree(mp); 2116 kfree(mp);
1747 } 2117 }
1748 2118
1749 if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
1750 (phba->sli_rev == LPFC_SLI_REV4) &&
1751 (pmb->u.mb.un.varUnregLogin.rsvd1 == 0x0))
1752 lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
1753
1754 /* 2119 /*
1755 * If a REG_LOGIN succeeded after node is destroyed or node 2120 * If a REG_LOGIN succeeded after node is destroyed or node
1756 * is in re-discovery driver need to cleanup the RPI. 2121 * is in re-discovery driver need to cleanup the RPI.
@@ -3483,12 +3848,6 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
3483 phba->pport->fc_myDID = 0; 3848 phba->pport->fc_myDID = 0;
3484 phba->pport->fc_prevDID = 0; 3849 phba->pport->fc_prevDID = 0;
3485 3850
3486 /* Turn off parity checking and serr during the physical reset */
3487 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
3488 pci_write_config_word(phba->pcidev, PCI_COMMAND,
3489 (cfg_value &
3490 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
3491
3492 spin_lock_irq(&phba->hbalock); 3851 spin_lock_irq(&phba->hbalock);
3493 psli->sli_flag &= ~(LPFC_PROCESS_LA); 3852 psli->sli_flag &= ~(LPFC_PROCESS_LA);
3494 phba->fcf.fcf_flag = 0; 3853 phba->fcf.fcf_flag = 0;
@@ -3508,9 +3867,18 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
3508 /* Now physically reset the device */ 3867 /* Now physically reset the device */
3509 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3868 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3510 "0389 Performing PCI function reset!\n"); 3869 "0389 Performing PCI function reset!\n");
3870
3871 /* Turn off parity checking and serr during the physical reset */
3872 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
3873 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
3874 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
3875
3511 /* Perform FCoE PCI function reset */ 3876 /* Perform FCoE PCI function reset */
3512 lpfc_pci_function_reset(phba); 3877 lpfc_pci_function_reset(phba);
3513 3878
3879 /* Restore PCI cmd register */
3880 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
3881
3514 return 0; 3882 return 0;
3515} 3883}
3516 3884
@@ -4317,6 +4685,10 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4317 struct lpfc_vport *vport = phba->pport; 4685 struct lpfc_vport *vport = phba->pport;
4318 struct lpfc_dmabuf *mp; 4686 struct lpfc_dmabuf *mp;
4319 4687
4688 /*
4689 * TODO: Why does this routine execute these task in a different
4690 * order from probe?
4691 */
4320 /* Perform a PCI function reset to start from clean */ 4692 /* Perform a PCI function reset to start from clean */
4321 rc = lpfc_pci_function_reset(phba); 4693 rc = lpfc_pci_function_reset(phba);
4322 if (unlikely(rc)) 4694 if (unlikely(rc))
@@ -4357,13 +4729,16 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4357 } 4729 }
4358 4730
4359 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size); 4731 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
4360 if (unlikely(rc)) 4732 if (unlikely(rc)) {
4361 goto out_free_vpd; 4733 kfree(vpd);
4362 4734 goto out_free_mbox;
4735 }
4363 mqe = &mboxq->u.mqe; 4736 mqe = &mboxq->u.mqe;
4364 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev); 4737 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
4365 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) 4738 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
4366 phba->hba_flag |= HBA_FCOE_SUPPORT; 4739 phba->hba_flag |= HBA_FCOE_MODE;
4740 else
4741 phba->hba_flag &= ~HBA_FCOE_MODE;
4367 4742
4368 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) == 4743 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
4369 LPFC_DCBX_CEE_MODE) 4744 LPFC_DCBX_CEE_MODE)
@@ -4372,13 +4747,14 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4372 phba->hba_flag &= ~HBA_FIP_SUPPORT; 4747 phba->hba_flag &= ~HBA_FIP_SUPPORT;
4373 4748
4374 if (phba->sli_rev != LPFC_SLI_REV4 || 4749 if (phba->sli_rev != LPFC_SLI_REV4 ||
4375 !(phba->hba_flag & HBA_FCOE_SUPPORT)) { 4750 !(phba->hba_flag & HBA_FCOE_MODE)) {
4376 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4751 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4377 "0376 READ_REV Error. SLI Level %d " 4752 "0376 READ_REV Error. SLI Level %d "
4378 "FCoE enabled %d\n", 4753 "FCoE enabled %d\n",
4379 phba->sli_rev, phba->hba_flag & HBA_FCOE_SUPPORT); 4754 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
4380 rc = -EIO; 4755 rc = -EIO;
4381 goto out_free_vpd; 4756 kfree(vpd);
4757 goto out_free_mbox;
4382 } 4758 }
4383 /* 4759 /*
4384 * Evaluate the read rev and vpd data. Populate the driver 4760 * Evaluate the read rev and vpd data. Populate the driver
@@ -4392,6 +4768,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4392 "Using defaults.\n", rc); 4768 "Using defaults.\n", rc);
4393 rc = 0; 4769 rc = 0;
4394 } 4770 }
4771 kfree(vpd);
4395 4772
4396 /* Save information as VPD data */ 4773 /* Save information as VPD data */
4397 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev; 4774 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
@@ -4428,7 +4805,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4428 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4805 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4429 if (unlikely(rc)) { 4806 if (unlikely(rc)) {
4430 rc = -EIO; 4807 rc = -EIO;
4431 goto out_free_vpd; 4808 goto out_free_mbox;
4432 } 4809 }
4433 4810
4434 /* 4811 /*
@@ -4476,7 +4853,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4476 if (rc) { 4853 if (rc) {
4477 phba->link_state = LPFC_HBA_ERROR; 4854 phba->link_state = LPFC_HBA_ERROR;
4478 rc = -ENOMEM; 4855 rc = -ENOMEM;
4479 goto out_free_vpd; 4856 goto out_free_mbox;
4480 } 4857 }
4481 4858
4482 mboxq->vport = vport; 4859 mboxq->vport = vport;
@@ -4501,7 +4878,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4501 rc, bf_get(lpfc_mqe_status, mqe)); 4878 rc, bf_get(lpfc_mqe_status, mqe));
4502 phba->link_state = LPFC_HBA_ERROR; 4879 phba->link_state = LPFC_HBA_ERROR;
4503 rc = -EIO; 4880 rc = -EIO;
4504 goto out_free_vpd; 4881 goto out_free_mbox;
4505 } 4882 }
4506 4883
4507 if (phba->cfg_soft_wwnn) 4884 if (phba->cfg_soft_wwnn)
@@ -4526,7 +4903,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4526 "0582 Error %d during sgl post operation\n", 4903 "0582 Error %d during sgl post operation\n",
4527 rc); 4904 rc);
4528 rc = -ENODEV; 4905 rc = -ENODEV;
4529 goto out_free_vpd; 4906 goto out_free_mbox;
4530 } 4907 }
4531 4908
4532 /* Register SCSI SGL pool to the device */ 4909 /* Register SCSI SGL pool to the device */
@@ -4538,7 +4915,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4538 /* Some Scsi buffers were moved to the abort scsi list */ 4915 /* Some Scsi buffers were moved to the abort scsi list */
4539 /* A pci function reset will repost them */ 4916 /* A pci function reset will repost them */
4540 rc = -ENODEV; 4917 rc = -ENODEV;
4541 goto out_free_vpd; 4918 goto out_free_mbox;
4542 } 4919 }
4543 4920
4544 /* Post the rpi header region to the device. */ 4921 /* Post the rpi header region to the device. */
@@ -4548,7 +4925,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4548 "0393 Error %d during rpi post operation\n", 4925 "0393 Error %d during rpi post operation\n",
4549 rc); 4926 rc);
4550 rc = -ENODEV; 4927 rc = -ENODEV;
4551 goto out_free_vpd; 4928 goto out_free_mbox;
4552 } 4929 }
4553 4930
4554 /* Set up all the queues to the device */ 4931 /* Set up all the queues to the device */
@@ -4608,33 +4985,33 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4608 } 4985 }
4609 } 4986 }
4610 4987
4988 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
4989 /*
4990 * The FC Port needs to register FCFI (index 0)
4991 */
4992 lpfc_reg_fcfi(phba, mboxq);
4993 mboxq->vport = phba->pport;
4994 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4995 if (rc == MBX_SUCCESS)
4996 rc = 0;
4997 else
4998 goto out_unset_queue;
4999 }
4611 /* 5000 /*
4612 * The port is ready, set the host's link state to LINK_DOWN 5001 * The port is ready, set the host's link state to LINK_DOWN
4613 * in preparation for link interrupts. 5002 * in preparation for link interrupts.
4614 */ 5003 */
4615 lpfc_init_link(phba, mboxq, phba->cfg_topology, phba->cfg_link_speed);
4616 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4617 lpfc_set_loopback_flag(phba);
4618 /* Change driver state to LPFC_LINK_DOWN right before init link */
4619 spin_lock_irq(&phba->hbalock); 5004 spin_lock_irq(&phba->hbalock);
4620 phba->link_state = LPFC_LINK_DOWN; 5005 phba->link_state = LPFC_LINK_DOWN;
4621 spin_unlock_irq(&phba->hbalock); 5006 spin_unlock_irq(&phba->hbalock);
4622 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 5007 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
4623 if (unlikely(rc != MBX_NOT_FINISHED)) { 5008out_unset_queue:
4624 kfree(vpd);
4625 return 0;
4626 } else
4627 rc = -EIO;
4628
4629 /* Unset all the queues set up in this routine when error out */ 5009 /* Unset all the queues set up in this routine when error out */
4630 if (rc) 5010 if (rc)
4631 lpfc_sli4_queue_unset(phba); 5011 lpfc_sli4_queue_unset(phba);
4632
4633out_stop_timers: 5012out_stop_timers:
4634 if (rc) 5013 if (rc)
4635 lpfc_stop_hba_timers(phba); 5014 lpfc_stop_hba_timers(phba);
4636out_free_vpd:
4637 kfree(vpd);
4638out_free_mbox: 5015out_free_mbox:
4639 mempool_free(mboxq, phba->mbox_mem_pool); 5016 mempool_free(mboxq, phba->mbox_mem_pool);
4640 return rc; 5017 return rc;
@@ -5863,6 +6240,8 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
5863 IOCB_t *icmd; 6240 IOCB_t *icmd;
5864 int numBdes = 0; 6241 int numBdes = 0;
5865 int i = 0; 6242 int i = 0;
6243 uint32_t offset = 0; /* accumulated offset in the sg request list */
6244 int inbound = 0; /* number of sg reply entries inbound from firmware */
5866 6245
5867 if (!piocbq || !sglq) 6246 if (!piocbq || !sglq)
5868 return xritag; 6247 return xritag;
@@ -5897,6 +6276,20 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
5897 */ 6276 */
5898 bde.tus.w = le32_to_cpu(bpl->tus.w); 6277 bde.tus.w = le32_to_cpu(bpl->tus.w);
5899 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize); 6278 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
6279 /* The offsets in the sgl need to be accumulated
6280 * separately for the request and reply lists.
6281 * The request is always first, the reply follows.
6282 */
6283 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
6284 /* add up the reply sg entries */
6285 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
6286 inbound++;
6287 /* first inbound? reset the offset */
6288 if (inbound == 1)
6289 offset = 0;
6290 bf_set(lpfc_sli4_sge_offset, sgl, offset);
6291 offset += bde.tus.f.bdeSize;
6292 }
5900 bpl++; 6293 bpl++;
5901 sgl++; 6294 sgl++;
5902 } 6295 }
@@ -6028,11 +6421,6 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
6028 bf_set(els_req64_vf, &wqe->els_req, 0); 6421 bf_set(els_req64_vf, &wqe->els_req, 0);
6029 /* And a VFID for word 12 */ 6422 /* And a VFID for word 12 */
6030 bf_set(els_req64_vfid, &wqe->els_req, 0); 6423 bf_set(els_req64_vfid, &wqe->els_req, 0);
6031 /*
6032 * Set ct field to 3, indicates that the context_tag field
6033 * contains the FCFI and remote N_Port_ID is
6034 * in word 5.
6035 */
6036 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); 6424 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
6037 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 6425 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
6038 iocbq->iocb.ulpContext); 6426 iocbq->iocb.ulpContext);
@@ -6140,6 +6528,18 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
6140 bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0); 6528 bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
6141 break; 6529 break;
6142 case CMD_GEN_REQUEST64_CR: 6530 case CMD_GEN_REQUEST64_CR:
6531 /* For this command calculate the xmit length of the
6532 * request bde.
6533 */
6534 xmit_len = 0;
6535 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
6536 sizeof(struct ulp_bde64);
6537 for (i = 0; i < numBdes; i++) {
6538 if (bpl[i].tus.f.bdeFlags != BUFF_TYPE_BDE_64)
6539 break;
6540 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
6541 xmit_len += bde.tus.f.bdeSize;
6542 }
6143 /* word3 iocb=IO_TAG wqe=request_payload_len */ 6543 /* word3 iocb=IO_TAG wqe=request_payload_len */
6144 wqe->gen_req.request_payload_len = xmit_len; 6544 wqe->gen_req.request_payload_len = xmit_len;
6145 /* word4 iocb=parameter wqe=relative_offset memcpy */ 6545 /* word4 iocb=parameter wqe=relative_offset memcpy */
@@ -6320,7 +6720,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
6320 return IOCB_BUSY; 6720 return IOCB_BUSY;
6321 } 6721 }
6322 } else { 6722 } else {
6323 sglq = __lpfc_sli_get_sglq(phba); 6723 sglq = __lpfc_sli_get_sglq(phba, piocb);
6324 if (!sglq) { 6724 if (!sglq) {
6325 if (!(flag & SLI_IOCB_RET_IOCB)) { 6725 if (!(flag & SLI_IOCB_RET_IOCB)) {
6326 __lpfc_sli_ringtx_put(phba, 6726 __lpfc_sli_ringtx_put(phba,
@@ -8033,29 +8433,66 @@ static int
8033lpfc_sli4_eratt_read(struct lpfc_hba *phba) 8433lpfc_sli4_eratt_read(struct lpfc_hba *phba)
8034{ 8434{
8035 uint32_t uerr_sta_hi, uerr_sta_lo; 8435 uint32_t uerr_sta_hi, uerr_sta_lo;
8436 uint32_t if_type, portsmphr;
8437 struct lpfc_register portstat_reg;
8036 8438
8037 /* For now, use the SLI4 device internal unrecoverable error 8439 /*
8440 * For now, use the SLI4 device internal unrecoverable error
8038 * registers for error attention. This can be changed later. 8441 * registers for error attention. This can be changed later.
8039 */ 8442 */
8040 uerr_sta_lo = readl(phba->sli4_hba.UERRLOregaddr); 8443 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
8041 uerr_sta_hi = readl(phba->sli4_hba.UERRHIregaddr); 8444 switch (if_type) {
8042 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) || 8445 case LPFC_SLI_INTF_IF_TYPE_0:
8043 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) { 8446 uerr_sta_lo = readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
8447 uerr_sta_hi = readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
8448 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
8449 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
8450 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8451 "1423 HBA Unrecoverable error: "
8452 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
8453 "ue_mask_lo_reg=0x%x, "
8454 "ue_mask_hi_reg=0x%x\n",
8455 uerr_sta_lo, uerr_sta_hi,
8456 phba->sli4_hba.ue_mask_lo,
8457 phba->sli4_hba.ue_mask_hi);
8458 phba->work_status[0] = uerr_sta_lo;
8459 phba->work_status[1] = uerr_sta_hi;
8460 phba->work_ha |= HA_ERATT;
8461 phba->hba_flag |= HBA_ERATT_HANDLED;
8462 return 1;
8463 }
8464 break;
8465 case LPFC_SLI_INTF_IF_TYPE_2:
8466 portstat_reg.word0 =
8467 readl(phba->sli4_hba.u.if_type2.STATUSregaddr);
8468 portsmphr = readl(phba->sli4_hba.PSMPHRregaddr);
8469 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
8470 phba->work_status[0] =
8471 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
8472 phba->work_status[1] =
8473 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
8474 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8475 "2885 Port Error Detected: "
8476 "port status reg 0x%x, "
8477 "port smphr reg 0x%x, "
8478 "error 1=0x%x, error 2=0x%x\n",
8479 portstat_reg.word0,
8480 portsmphr,
8481 phba->work_status[0],
8482 phba->work_status[1]);
8483 phba->work_ha |= HA_ERATT;
8484 phba->hba_flag |= HBA_ERATT_HANDLED;
8485 return 1;
8486 }
8487 break;
8488 case LPFC_SLI_INTF_IF_TYPE_1:
8489 default:
8044 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8490 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8045 "1423 HBA Unrecoverable error: " 8491 "2886 HBA Error Attention on unsupported "
8046 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " 8492 "if type %d.", if_type);
8047 "ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n",
8048 uerr_sta_lo, uerr_sta_hi,
8049 phba->sli4_hba.ue_mask_lo,
8050 phba->sli4_hba.ue_mask_hi);
8051 phba->work_status[0] = uerr_sta_lo;
8052 phba->work_status[1] = uerr_sta_hi;
8053 /* Set the driver HA work bitmap */
8054 phba->work_ha |= HA_ERATT;
8055 /* Indicate polling handles this ERATT */
8056 phba->hba_flag |= HBA_ERATT_HANDLED;
8057 return 1; 8493 return 1;
8058 } 8494 }
8495
8059 return 0; 8496 return 0;
8060} 8497}
8061 8498
@@ -8110,7 +8547,7 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba)
8110 ha_copy = lpfc_sli_eratt_read(phba); 8547 ha_copy = lpfc_sli_eratt_read(phba);
8111 break; 8548 break;
8112 case LPFC_SLI_REV4: 8549 case LPFC_SLI_REV4:
8113 /* Read devcie Uncoverable Error (UERR) registers */ 8550 /* Read device Uncoverable Error (UERR) registers */
8114 ha_copy = lpfc_sli4_eratt_read(phba); 8551 ha_copy = lpfc_sli4_eratt_read(phba);
8115 break; 8552 break;
8116 default: 8553 default:
@@ -10155,16 +10592,20 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
10155 length, LPFC_SLI4_MBX_EMBED); 10592 length, LPFC_SLI4_MBX_EMBED);
10156 10593
10157 mq_create_ext = &mbox->u.mqe.un.mq_create_ext; 10594 mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
10158 bf_set(lpfc_mbx_mq_create_ext_num_pages, &mq_create_ext->u.request, 10595 bf_set(lpfc_mbx_mq_create_ext_num_pages,
10159 mq->page_count); 10596 &mq_create_ext->u.request, mq->page_count);
10160 bf_set(lpfc_mbx_mq_create_ext_async_evt_link, &mq_create_ext->u.request, 10597 bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
10161 1); 10598 &mq_create_ext->u.request, 1);
10162 bf_set(lpfc_mbx_mq_create_ext_async_evt_fcfste, 10599 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
10163 &mq_create_ext->u.request, 1); 10600 &mq_create_ext->u.request, 1);
10164 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5, 10601 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
10165 &mq_create_ext->u.request, 1); 10602 &mq_create_ext->u.request, 1);
10166 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context, 10603 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
10167 cq->queue_id); 10604 &mq_create_ext->u.request, 1);
10605 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
10606 &mq_create_ext->u.request, 1);
10607 bf_set(lpfc_mq_context_cq_id,
10608 &mq_create_ext->u.request.context, cq->queue_id);
10168 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1); 10609 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
10169 switch (mq->entry_count) { 10610 switch (mq->entry_count) {
10170 default: 10611 default:
@@ -11137,7 +11578,8 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
11137static int 11578static int
11138lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) 11579lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
11139{ 11580{
11140 char *rctl_names[] = FC_RCTL_NAMES_INIT; 11581 /* make rctl_names static to save stack space */
11582 static char *rctl_names[] = FC_RCTL_NAMES_INIT;
11141 char *type_names[] = FC_TYPE_NAMES_INIT; 11583 char *type_names[] = FC_TYPE_NAMES_INIT;
11142 struct fc_vft_header *fc_vft_hdr; 11584 struct fc_vft_header *fc_vft_hdr;
11143 11585
@@ -11538,6 +11980,10 @@ lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba,
11538 "SID:x%x\n", oxid, sid); 11980 "SID:x%x\n", oxid, sid);
11539 return; 11981 return;
11540 } 11982 }
11983 if (rxid >= phba->sli4_hba.max_cfg_param.xri_base
11984 && rxid <= (phba->sli4_hba.max_cfg_param.max_xri
11985 + phba->sli4_hba.max_cfg_param.xri_base))
11986 lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0);
11541 11987
11542 /* Allocate buffer for acc iocb */ 11988 /* Allocate buffer for acc iocb */
11543 ctiocb = lpfc_sli_get_iocbq(phba); 11989 ctiocb = lpfc_sli_get_iocbq(phba);
@@ -11560,6 +12006,7 @@ lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba,
11560 icmd->ulpLe = 1; 12006 icmd->ulpLe = 1;
11561 icmd->ulpClass = CLASS3; 12007 icmd->ulpClass = CLASS3;
11562 icmd->ulpContext = ndlp->nlp_rpi; 12008 icmd->ulpContext = ndlp->nlp_rpi;
12009 ctiocb->context1 = ndlp;
11563 12010
11564 ctiocb->iocb_cmpl = NULL; 12011 ctiocb->iocb_cmpl = NULL;
11565 ctiocb->vport = phba->pport; 12012 ctiocb->vport = phba->pport;
@@ -12129,42 +12576,37 @@ lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp)
12129 12576
12130/** 12577/**
12131 * lpfc_sli4_init_vpi - Initialize a vpi with the port 12578 * lpfc_sli4_init_vpi - Initialize a vpi with the port
12132 * @phba: pointer to lpfc hba data structure. 12579 * @vport: Pointer to the vport for which the vpi is being initialized
12133 * @vpi: vpi value to activate with the port.
12134 * 12580 *
12135 * This routine is invoked to activate a vpi with the 12581 * This routine is invoked to activate a vpi with the port.
12136 * port when the host intends to use vports with a
12137 * nonzero vpi.
12138 * 12582 *
12139 * Returns: 12583 * Returns:
12140 * 0 success 12584 * 0 success
12141 * -Evalue otherwise 12585 * -Evalue otherwise
12142 **/ 12586 **/
12143int 12587int
12144lpfc_sli4_init_vpi(struct lpfc_hba *phba, uint16_t vpi) 12588lpfc_sli4_init_vpi(struct lpfc_vport *vport)
12145{ 12589{
12146 LPFC_MBOXQ_t *mboxq; 12590 LPFC_MBOXQ_t *mboxq;
12147 int rc = 0; 12591 int rc = 0;
12148 int retval = MBX_SUCCESS; 12592 int retval = MBX_SUCCESS;
12149 uint32_t mbox_tmo; 12593 uint32_t mbox_tmo;
12150 12594 struct lpfc_hba *phba = vport->phba;
12151 if (vpi == 0)
12152 return -EINVAL;
12153 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12595 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12154 if (!mboxq) 12596 if (!mboxq)
12155 return -ENOMEM; 12597 return -ENOMEM;
12156 lpfc_init_vpi(phba, mboxq, vpi); 12598 lpfc_init_vpi(phba, mboxq, vport->vpi);
12157 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI); 12599 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI);
12158 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 12600 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
12159 if (rc != MBX_SUCCESS) { 12601 if (rc != MBX_SUCCESS) {
12160 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12602 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
12161 "2022 INIT VPI Mailbox failed " 12603 "2022 INIT VPI Mailbox failed "
12162 "status %d, mbxStatus x%x\n", rc, 12604 "status %d, mbxStatus x%x\n", rc,
12163 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 12605 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
12164 retval = -EIO; 12606 retval = -EIO;
12165 } 12607 }
12166 if (rc != MBX_TIMEOUT) 12608 if (rc != MBX_TIMEOUT)
12167 mempool_free(mboxq, phba->mbox_mem_pool); 12609 mempool_free(mboxq, vport->phba->mbox_mem_pool);
12168 12610
12169 return retval; 12611 return retval;
12170} 12612}
@@ -12854,6 +13296,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
12854 struct lpfc_nodelist *act_mbx_ndlp = NULL; 13296 struct lpfc_nodelist *act_mbx_ndlp = NULL;
12855 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 13297 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
12856 LIST_HEAD(mbox_cmd_list); 13298 LIST_HEAD(mbox_cmd_list);
13299 uint8_t restart_loop;
12857 13300
12858 /* Clean up internally queued mailbox commands with the vport */ 13301 /* Clean up internally queued mailbox commands with the vport */
12859 spin_lock_irq(&phba->hbalock); 13302 spin_lock_irq(&phba->hbalock);
@@ -12882,15 +13325,44 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
12882 mb->mbox_flag |= LPFC_MBX_IMED_UNREG; 13325 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
12883 } 13326 }
12884 } 13327 }
13328 /* Cleanup any mailbox completions which are not yet processed */
13329 do {
13330 restart_loop = 0;
13331 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
13332 /*
13333 * If this mailox is already processed or it is
13334 * for another vport ignore it.
13335 */
13336 if ((mb->vport != vport) ||
13337 (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
13338 continue;
13339
13340 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
13341 (mb->u.mb.mbxCommand != MBX_REG_VPI))
13342 continue;
13343
13344 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13345 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
13346 ndlp = (struct lpfc_nodelist *)mb->context2;
13347 /* Unregister the RPI when mailbox complete */
13348 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
13349 restart_loop = 1;
13350 spin_unlock_irq(&phba->hbalock);
13351 spin_lock(shost->host_lock);
13352 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
13353 spin_unlock(shost->host_lock);
13354 spin_lock_irq(&phba->hbalock);
13355 break;
13356 }
13357 }
13358 } while (restart_loop);
13359
12885 spin_unlock_irq(&phba->hbalock); 13360 spin_unlock_irq(&phba->hbalock);
12886 13361
12887 /* Release the cleaned-up mailbox commands */ 13362 /* Release the cleaned-up mailbox commands */
12888 while (!list_empty(&mbox_cmd_list)) { 13363 while (!list_empty(&mbox_cmd_list)) {
12889 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list); 13364 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
12890 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 13365 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
12891 if (phba->sli_rev == LPFC_SLI_REV4)
12892 __lpfc_sli4_free_rpi(phba,
12893 mb->u.mb.un.varRegLogin.rpi);
12894 mp = (struct lpfc_dmabuf *) (mb->context1); 13366 mp = (struct lpfc_dmabuf *) (mb->context1);
12895 if (mp) { 13367 if (mp) {
12896 __lpfc_mbuf_free(phba, mp->virt, mp->phys); 13368 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -12948,12 +13420,13 @@ lpfc_drain_txq(struct lpfc_hba *phba)
12948 while (pring->txq_cnt) { 13420 while (pring->txq_cnt) {
12949 spin_lock_irqsave(&phba->hbalock, iflags); 13421 spin_lock_irqsave(&phba->hbalock, iflags);
12950 13422
12951 sglq = __lpfc_sli_get_sglq(phba); 13423 piocbq = lpfc_sli_ringtx_get(phba, pring);
13424 sglq = __lpfc_sli_get_sglq(phba, piocbq);
12952 if (!sglq) { 13425 if (!sglq) {
13426 __lpfc_sli_ringtx_put(phba, pring, piocbq);
12953 spin_unlock_irqrestore(&phba->hbalock, iflags); 13427 spin_unlock_irqrestore(&phba->hbalock, iflags);
12954 break; 13428 break;
12955 } else { 13429 } else {
12956 piocbq = lpfc_sli_ringtx_get(phba, pring);
12957 if (!piocbq) { 13430 if (!piocbq) {
12958 /* The txq_cnt out of sync. This should 13431 /* The txq_cnt out of sync. This should
12959 * never happen 13432 * never happen
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index cd56d6cce6c3..453577c21c14 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -34,9 +34,11 @@ struct lpfc_cq_event {
34 union { 34 union {
35 struct lpfc_mcqe mcqe_cmpl; 35 struct lpfc_mcqe mcqe_cmpl;
36 struct lpfc_acqe_link acqe_link; 36 struct lpfc_acqe_link acqe_link;
37 struct lpfc_acqe_fcoe acqe_fcoe; 37 struct lpfc_acqe_fip acqe_fip;
38 struct lpfc_acqe_dcbx acqe_dcbx; 38 struct lpfc_acqe_dcbx acqe_dcbx;
39 struct lpfc_acqe_grp5 acqe_grp5; 39 struct lpfc_acqe_grp5 acqe_grp5;
40 struct lpfc_acqe_fc_la acqe_fc;
41 struct lpfc_acqe_sli acqe_sli;
40 struct lpfc_rcqe rcqe_cmpl; 42 struct lpfc_rcqe rcqe_cmpl;
41 struct sli4_wcqe_xri_aborted wcqe_axri; 43 struct sli4_wcqe_xri_aborted wcqe_axri;
42 struct lpfc_wcqe_complete wcqe_cmpl; 44 struct lpfc_wcqe_complete wcqe_cmpl;
@@ -82,6 +84,7 @@ struct lpfc_iocbq {
82 struct lpfc_iocbq *rsp_iocb; 84 struct lpfc_iocbq *rsp_iocb;
83 struct lpfcMboxq *mbox; 85 struct lpfcMboxq *mbox;
84 struct lpfc_nodelist *ndlp; 86 struct lpfc_nodelist *ndlp;
87 struct lpfc_node_rrq *rrq;
85 } context_un; 88 } context_un;
86 89
87 void (*fabric_iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, 90 void (*fabric_iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index c4483feb8b71..c7217d579e0f 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -137,9 +137,11 @@ struct lpfc_sli4_link {
137 uint8_t speed; 137 uint8_t speed;
138 uint8_t duplex; 138 uint8_t duplex;
139 uint8_t status; 139 uint8_t status;
140 uint8_t physical; 140 uint8_t type;
141 uint8_t number;
141 uint8_t fault; 142 uint8_t fault;
142 uint16_t logical_speed; 143 uint16_t logical_speed;
144 uint16_t topology;
143}; 145};
144 146
145struct lpfc_fcf_rec { 147struct lpfc_fcf_rec {
@@ -367,23 +369,39 @@ struct lpfc_sli4_hba {
367 PCI BAR1, control registers */ 369 PCI BAR1, control registers */
368 void __iomem *drbl_regs_memmap_p; /* Kernel memory mapped address for 370 void __iomem *drbl_regs_memmap_p; /* Kernel memory mapped address for
369 PCI BAR2, doorbell registers */ 371 PCI BAR2, doorbell registers */
370 /* BAR0 PCI config space register memory map */ 372 union {
371 void __iomem *UERRLOregaddr; /* Address to UERR_STATUS_LO register */ 373 struct {
372 void __iomem *UERRHIregaddr; /* Address to UERR_STATUS_HI register */ 374 /* IF Type 0, BAR 0 PCI cfg space reg mem map */
373 void __iomem *UEMASKLOregaddr; /* Address to UE_MASK_LO register */ 375 void __iomem *UERRLOregaddr;
374 void __iomem *UEMASKHIregaddr; /* Address to UE_MASK_HI register */ 376 void __iomem *UERRHIregaddr;
375 void __iomem *SLIINTFregaddr; /* Address to SLI_INTF register */ 377 void __iomem *UEMASKLOregaddr;
376 /* BAR1 FCoE function CSR register memory map */ 378 void __iomem *UEMASKHIregaddr;
377 void __iomem *STAregaddr; /* Address to HST_STATE register */ 379 } if_type0;
378 void __iomem *ISRregaddr; /* Address to HST_ISR register */ 380 struct {
379 void __iomem *IMRregaddr; /* Address to HST_IMR register */ 381 /* IF Type 2, BAR 0 PCI cfg space reg mem map. */
380 void __iomem *ISCRregaddr; /* Address to HST_ISCR register */ 382 void __iomem *STATUSregaddr;
381 /* BAR2 VF-0 doorbell register memory map */ 383 void __iomem *CTRLregaddr;
382 void __iomem *RQDBregaddr; /* Address to RQ_DOORBELL register */ 384 void __iomem *ERR1regaddr;
383 void __iomem *WQDBregaddr; /* Address to WQ_DOORBELL register */ 385 void __iomem *ERR2regaddr;
384 void __iomem *EQCQDBregaddr; /* Address to EQCQ_DOORBELL register */ 386 } if_type2;
385 void __iomem *MQDBregaddr; /* Address to MQ_DOORBELL register */ 387 } u;
386 void __iomem *BMBXregaddr; /* Address to BootStrap MBX register */ 388
389 /* IF type 0, BAR1 and if type 2, Bar 0 CSR register memory map */
390 void __iomem *PSMPHRregaddr;
391
392 /* Well-known SLI INTF register memory map. */
393 void __iomem *SLIINTFregaddr;
394
395 /* IF type 0, BAR 1 function CSR register memory map */
396 void __iomem *ISRregaddr; /* HST_ISR register */
397 void __iomem *IMRregaddr; /* HST_IMR register */
398 void __iomem *ISCRregaddr; /* HST_ISCR register */
399 /* IF type 0, BAR 0 and if type 2, BAR 0 doorbell register memory map */
400 void __iomem *RQDBregaddr; /* RQ_DOORBELL register */
401 void __iomem *WQDBregaddr; /* WQ_DOORBELL register */
402 void __iomem *EQCQDBregaddr; /* EQCQ_DOORBELL register */
403 void __iomem *MQDBregaddr; /* MQ_DOORBELL register */
404 void __iomem *BMBXregaddr; /* BootStrap MBX register */
387 405
388 uint32_t ue_mask_lo; 406 uint32_t ue_mask_lo;
389 uint32_t ue_mask_hi; 407 uint32_t ue_mask_hi;
@@ -466,6 +484,7 @@ struct lpfc_sglq {
466 struct list_head clist; 484 struct list_head clist;
467 enum lpfc_sge_type buff_type; /* is this a scsi sgl */ 485 enum lpfc_sge_type buff_type; /* is this a scsi sgl */
468 enum lpfc_sgl_state state; 486 enum lpfc_sgl_state state;
487 struct lpfc_nodelist *ndlp; /* ndlp associated with IO */
469 uint16_t iotag; /* pre-assigned IO tag */ 488 uint16_t iotag; /* pre-assigned IO tag */
470 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */ 489 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
471 struct sli4_sge *sgl; /* pre-assigned SGL */ 490 struct sli4_sge *sgl; /* pre-assigned SGL */
@@ -532,7 +551,6 @@ int lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *);
532struct lpfc_rpi_hdr *lpfc_sli4_create_rpi_hdr(struct lpfc_hba *); 551struct lpfc_rpi_hdr *lpfc_sli4_create_rpi_hdr(struct lpfc_hba *);
533void lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *); 552void lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *);
534int lpfc_sli4_alloc_rpi(struct lpfc_hba *); 553int lpfc_sli4_alloc_rpi(struct lpfc_hba *);
535void __lpfc_sli4_free_rpi(struct lpfc_hba *, int);
536void lpfc_sli4_free_rpi(struct lpfc_hba *, int); 554void lpfc_sli4_free_rpi(struct lpfc_hba *, int);
537void lpfc_sli4_remove_rpis(struct lpfc_hba *); 555void lpfc_sli4_remove_rpis(struct lpfc_hba *);
538void lpfc_sli4_async_event_proc(struct lpfc_hba *); 556void lpfc_sli4_async_event_proc(struct lpfc_hba *);
@@ -548,7 +566,7 @@ int lpfc_sli4_brdreset(struct lpfc_hba *);
548int lpfc_sli4_add_fcf_record(struct lpfc_hba *, struct fcf_record *); 566int lpfc_sli4_add_fcf_record(struct lpfc_hba *, struct fcf_record *);
549void lpfc_sli_remove_dflt_fcf(struct lpfc_hba *); 567void lpfc_sli_remove_dflt_fcf(struct lpfc_hba *);
550int lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *); 568int lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *);
551int lpfc_sli4_init_vpi(struct lpfc_hba *, uint16_t); 569int lpfc_sli4_init_vpi(struct lpfc_vport *);
552uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool); 570uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool);
553uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool); 571uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool);
554void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t); 572void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 7a1b5b112a0b..386cf92de492 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.18" 21#define LPFC_DRIVER_VERSION "8.3.20"
22#define LPFC_DRIVER_NAME "lpfc" 22#define LPFC_DRIVER_NAME "lpfc"
23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" 23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" 24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index a5281ce893d0..6b8d2952e32f 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -395,8 +395,8 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
395 * by the port. 395 * by the port.
396 */ 396 */
397 if ((phba->sli_rev == LPFC_SLI_REV4) && 397 if ((phba->sli_rev == LPFC_SLI_REV4) &&
398 (pport->fc_flag & FC_VFI_REGISTERED)) { 398 (pport->fc_flag & FC_VFI_REGISTERED)) {
399 rc = lpfc_sli4_init_vpi(phba, vpi); 399 rc = lpfc_sli4_init_vpi(vport);
400 if (rc) { 400 if (rc) {
401 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, 401 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
402 "1838 Failed to INIT_VPI on vpi %d " 402 "1838 Failed to INIT_VPI on vpi %d "
@@ -418,7 +418,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
418 418
419 if ((phba->link_state < LPFC_LINK_UP) || 419 if ((phba->link_state < LPFC_LINK_UP) ||
420 (pport->port_state < LPFC_FABRIC_CFG_LINK) || 420 (pport->port_state < LPFC_FABRIC_CFG_LINK) ||
421 (phba->fc_topology == TOPOLOGY_LOOP)) { 421 (phba->fc_topology == LPFC_TOPOLOGY_LOOP)) {
422 lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN); 422 lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
423 rc = VPORT_OK; 423 rc = VPORT_OK;
424 goto out; 424 goto out;
@@ -514,7 +514,7 @@ enable_vport(struct fc_vport *fc_vport)
514 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 514 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
515 515
516 if ((phba->link_state < LPFC_LINK_UP) || 516 if ((phba->link_state < LPFC_LINK_UP) ||
517 (phba->fc_topology == TOPOLOGY_LOOP)) { 517 (phba->fc_topology == LPFC_TOPOLOGY_LOOP)) {
518 lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN); 518 lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
519 return VPORT_OK; 519 return VPORT_OK;
520 } 520 }
@@ -665,7 +665,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
665 if (ndlp && NLP_CHK_NODE_ACT(ndlp) && 665 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
666 ndlp->nlp_state == NLP_STE_UNMAPPED_NODE && 666 ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
667 phba->link_state >= LPFC_LINK_UP && 667 phba->link_state >= LPFC_LINK_UP &&
668 phba->fc_topology != TOPOLOGY_LOOP) { 668 phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
669 if (vport->cfg_enable_da_id) { 669 if (vport->cfg_enable_da_id) {
670 timeout = msecs_to_jiffies(phba->fc_ratov * 2000); 670 timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
671 if (!lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0)) 671 if (!lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0))
diff --git a/drivers/scsi/megaraid/Makefile b/drivers/scsi/megaraid/Makefile
index f469915b97c3..5826ed509e3e 100644
--- a/drivers/scsi/megaraid/Makefile
+++ b/drivers/scsi/megaraid/Makefile
@@ -1,3 +1,5 @@
1obj-$(CONFIG_MEGARAID_MM) += megaraid_mm.o 1obj-$(CONFIG_MEGARAID_MM) += megaraid_mm.o
2obj-$(CONFIG_MEGARAID_MAILBOX) += megaraid_mbox.o 2obj-$(CONFIG_MEGARAID_MAILBOX) += megaraid_mbox.o
3obj-$(CONFIG_MEGARAID_SAS) += megaraid_sas.o 3obj-$(CONFIG_MEGARAID_SAS) += megaraid_sas.o
4megaraid_sas-objs := megaraid_sas_base.o megaraid_sas_fusion.o \
5 megaraid_sas_fp.o
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index ad16f5e60046..1b5e375732c0 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -1,15 +1,30 @@
1/* 1/*
2 * Linux MegaRAID driver for SAS based RAID controllers
2 * 3 *
3 * Linux MegaRAID driver for SAS based RAID controllers 4 * Copyright (c) 2009-2011 LSI Corporation.
4 * 5 *
5 * Copyright (c) 2003-2005 LSI Corporation. 6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 2
9 * of the License, or (at your option) any later version.
6 * 10 *
7 * This program is free software; you can redistribute it and/or 11 * This program is distributed in the hope that it will be useful,
8 * modify it under the terms of the GNU General Public License 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * as published by the Free Software Foundation; either version 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * 2 of the License, or (at your option) any later version. 14 * GNU General Public License for more details.
11 * 15 *
12 * FILE : megaraid_sas.h 16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 * FILE: megaraid_sas.h
21 *
22 * Authors: LSI Corporation
23 *
24 * Send feedback to: <megaraidlinux@lsi.com>
25 *
26 * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035
27 * ATTN: Linuxraid
13 */ 28 */
14 29
15#ifndef LSI_MEGARAID_SAS_H 30#ifndef LSI_MEGARAID_SAS_H
@@ -18,9 +33,9 @@
18/* 33/*
19 * MegaRAID SAS Driver meta data 34 * MegaRAID SAS Driver meta data
20 */ 35 */
21#define MEGASAS_VERSION "00.00.04.31-rc1" 36#define MEGASAS_VERSION "00.00.05.29-rc1"
22#define MEGASAS_RELDATE "May 3, 2010" 37#define MEGASAS_RELDATE "Dec. 7, 2010"
23#define MEGASAS_EXT_VERSION "Mon. May 3, 11:41:51 PST 2010" 38#define MEGASAS_EXT_VERSION "Tue. Dec. 7 17:00:00 PDT 2010"
24 39
25/* 40/*
26 * Device IDs 41 * Device IDs
@@ -32,6 +47,7 @@
32#define PCI_DEVICE_ID_LSI_SAS0079GEN2 0x0079 47#define PCI_DEVICE_ID_LSI_SAS0079GEN2 0x0079
33#define PCI_DEVICE_ID_LSI_SAS0073SKINNY 0x0073 48#define PCI_DEVICE_ID_LSI_SAS0073SKINNY 0x0073
34#define PCI_DEVICE_ID_LSI_SAS0071SKINNY 0x0071 49#define PCI_DEVICE_ID_LSI_SAS0071SKINNY 0x0071
50#define PCI_DEVICE_ID_LSI_FUSION 0x005b
35 51
36/* 52/*
37 * ===================================== 53 * =====================================
@@ -421,7 +437,6 @@ struct megasas_ctrl_prop {
421 * Add properties that can be controlled by 437 * Add properties that can be controlled by
422 * a bit in the following structure. 438 * a bit in the following structure.
423 */ 439 */
424
425 struct { 440 struct {
426 u32 copyBackDisabled : 1; 441 u32 copyBackDisabled : 1;
427 u32 SMARTerEnabled : 1; 442 u32 SMARTerEnabled : 1;
@@ -701,6 +716,7 @@ struct megasas_ctrl_info {
701#define MEGASAS_DEFAULT_INIT_ID -1 716#define MEGASAS_DEFAULT_INIT_ID -1
702#define MEGASAS_MAX_LUN 8 717#define MEGASAS_MAX_LUN 8
703#define MEGASAS_MAX_LD 64 718#define MEGASAS_MAX_LD 64
719#define MEGASAS_DEFAULT_CMD_PER_LUN 128
704#define MEGASAS_MAX_PD (MEGASAS_MAX_PD_CHANNELS * \ 720#define MEGASAS_MAX_PD (MEGASAS_MAX_PD_CHANNELS * \
705 MEGASAS_MAX_DEV_PER_CHANNEL) 721 MEGASAS_MAX_DEV_PER_CHANNEL)
706#define MEGASAS_MAX_LD_IDS (MEGASAS_MAX_LD_CHANNELS * \ 722#define MEGASAS_MAX_LD_IDS (MEGASAS_MAX_LD_CHANNELS * \
@@ -769,7 +785,10 @@ struct megasas_ctrl_info {
769*/ 785*/
770 786
771struct megasas_register_set { 787struct megasas_register_set {
772 u32 reserved_0[4]; /*0000h*/ 788 u32 doorbell; /*0000h*/
789 u32 fusion_seq_offset; /*0004h*/
790 u32 fusion_host_diag; /*0008h*/
791 u32 reserved_01; /*000Ch*/
773 792
774 u32 inbound_msg_0; /*0010h*/ 793 u32 inbound_msg_0; /*0010h*/
775 u32 inbound_msg_1; /*0014h*/ 794 u32 inbound_msg_1; /*0014h*/
@@ -789,15 +808,18 @@ struct megasas_register_set {
789 u32 inbound_queue_port; /*0040h*/ 808 u32 inbound_queue_port; /*0040h*/
790 u32 outbound_queue_port; /*0044h*/ 809 u32 outbound_queue_port; /*0044h*/
791 810
792 u32 reserved_2[22]; /*0048h*/ 811 u32 reserved_2[9]; /*0048h*/
812 u32 reply_post_host_index; /*006Ch*/
813 u32 reserved_2_2[12]; /*0070h*/
793 814
794 u32 outbound_doorbell_clear; /*00A0h*/ 815 u32 outbound_doorbell_clear; /*00A0h*/
795 816
796 u32 reserved_3[3]; /*00A4h*/ 817 u32 reserved_3[3]; /*00A4h*/
797 818
798 u32 outbound_scratch_pad ; /*00B0h*/ 819 u32 outbound_scratch_pad ; /*00B0h*/
820 u32 outbound_scratch_pad_2; /*00B4h*/
799 821
800 u32 reserved_4[3]; /*00B4h*/ 822 u32 reserved_4[2]; /*00B8h*/
801 823
802 u32 inbound_low_queue_port ; /*00C0h*/ 824 u32 inbound_low_queue_port ; /*00C0h*/
803 825
@@ -1272,6 +1294,9 @@ struct megasas_instance {
1272 1294
1273 u16 max_num_sge; 1295 u16 max_num_sge;
1274 u16 max_fw_cmds; 1296 u16 max_fw_cmds;
1297 /* For Fusion its num IOCTL cmds, for others MFI based its
1298 max_fw_cmds */
1299 u16 max_mfi_cmds;
1275 u32 max_sectors_per_req; 1300 u32 max_sectors_per_req;
1276 struct megasas_aen_event *ev; 1301 struct megasas_aen_event *ev;
1277 1302
@@ -1320,6 +1345,16 @@ struct megasas_instance {
1320 1345
1321 struct timer_list io_completion_timer; 1346 struct timer_list io_completion_timer;
1322 struct list_head internal_reset_pending_q; 1347 struct list_head internal_reset_pending_q;
1348
1349 /* Ptr to hba specfic information */
1350 void *ctrl_context;
1351 u8 msi_flag;
1352 struct msix_entry msixentry;
1353 u64 map_id;
1354 struct megasas_cmd *map_update_cmd;
1355 unsigned long bar;
1356 long reset_flags;
1357 struct mutex reset_mutex;
1323}; 1358};
1324 1359
1325enum { 1360enum {
@@ -1345,6 +1380,13 @@ struct megasas_instance_template {
1345 struct megasas_register_set __iomem *); 1380 struct megasas_register_set __iomem *);
1346 int (*check_reset)(struct megasas_instance *, \ 1381 int (*check_reset)(struct megasas_instance *, \
1347 struct megasas_register_set __iomem *); 1382 struct megasas_register_set __iomem *);
1383 irqreturn_t (*service_isr)(int irq, void *devp);
1384 void (*tasklet)(unsigned long);
1385 u32 (*init_adapter)(struct megasas_instance *);
1386 u32 (*build_and_issue_cmd) (struct megasas_instance *,
1387 struct scsi_cmnd *);
1388 void (*issue_dcmd) (struct megasas_instance *instance,
1389 struct megasas_cmd *cmd);
1348}; 1390};
1349 1391
1350#define MEGASAS_IS_LOGICAL(scp) \ 1392#define MEGASAS_IS_LOGICAL(scp) \
@@ -1371,7 +1413,13 @@ struct megasas_cmd {
1371 struct list_head list; 1413 struct list_head list;
1372 struct scsi_cmnd *scmd; 1414 struct scsi_cmnd *scmd;
1373 struct megasas_instance *instance; 1415 struct megasas_instance *instance;
1374 u32 frame_count; 1416 union {
1417 struct {
1418 u16 smid;
1419 u16 resvd;
1420 } context;
1421 u32 frame_count;
1422 };
1375}; 1423};
1376 1424
1377#define MAX_MGMT_ADAPTERS 1024 1425#define MAX_MGMT_ADAPTERS 1024
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 7451bc096a01..5d6d07bd1cd0 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1,27 +1,34 @@
1/* 1/*
2 * Linux MegaRAID driver for SAS based RAID controllers
2 * 3 *
3 * Linux MegaRAID driver for SAS based RAID controllers 4 * Copyright (c) 2009-2011 LSI Corporation.
4 * 5 *
5 * Copyright (c) 2003-2005 LSI Corporation. 6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 2
9 * of the License, or (at your option) any later version.
6 * 10 *
7 * This program is free software; you can redistribute it and/or 11 * This program is distributed in the hope that it will be useful,
8 * modify it under the terms of the GNU General Public License 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * as published by the Free Software Foundation; either version 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * 2 of the License, or (at your option) any later version. 14 * GNU General Public License for more details.
11 * 15 *
12 * FILE : megaraid_sas.c 16 * You should have received a copy of the GNU General Public License
13 * Version : v00.00.04.31-rc1 17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
14 * 19 *
15 * Authors: 20 * FILE: megaraid_sas_base.c
16 * (email-id : megaraidlinux@lsi.com) 21 * Version : v00.00.05.29-rc1
17 * Sreenivas Bagalkote
18 * Sumant Patro
19 * Bo Yang
20 * 22 *
21 * List of supported controllers 23 * Authors: LSI Corporation
24 * Sreenivas Bagalkote
25 * Sumant Patro
26 * Bo Yang
22 * 27 *
23 * OEM Product Name VID DID SSVID SSID 28 * Send feedback to: <megaraidlinux@lsi.com>
24 * --- ------------ --- --- ---- ---- 29 *
30 * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035
31 * ATTN: Linuxraid
25 */ 32 */
26 33
27#include <linux/kernel.h> 34#include <linux/kernel.h>
@@ -46,6 +53,7 @@
46#include <scsi/scsi_cmnd.h> 53#include <scsi/scsi_cmnd.h>
47#include <scsi/scsi_device.h> 54#include <scsi/scsi_device.h>
48#include <scsi/scsi_host.h> 55#include <scsi/scsi_host.h>
56#include "megaraid_sas_fusion.h"
49#include "megaraid_sas.h" 57#include "megaraid_sas.h"
50 58
51/* 59/*
@@ -65,12 +73,16 @@ module_param_named(max_sectors, max_sectors, int, 0);
65MODULE_PARM_DESC(max_sectors, 73MODULE_PARM_DESC(max_sectors,
66 "Maximum number of sectors per IO command"); 74 "Maximum number of sectors per IO command");
67 75
76static int msix_disable;
77module_param(msix_disable, int, S_IRUGO);
78MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");
79
68MODULE_LICENSE("GPL"); 80MODULE_LICENSE("GPL");
69MODULE_VERSION(MEGASAS_VERSION); 81MODULE_VERSION(MEGASAS_VERSION);
70MODULE_AUTHOR("megaraidlinux@lsi.com"); 82MODULE_AUTHOR("megaraidlinux@lsi.com");
71MODULE_DESCRIPTION("LSI MegaRAID SAS Driver"); 83MODULE_DESCRIPTION("LSI MegaRAID SAS Driver");
72 84
73static int megasas_transition_to_ready(struct megasas_instance *instance); 85int megasas_transition_to_ready(struct megasas_instance *instance);
74static int megasas_get_pd_list(struct megasas_instance *instance); 86static int megasas_get_pd_list(struct megasas_instance *instance);
75static int megasas_issue_init_mfi(struct megasas_instance *instance); 87static int megasas_issue_init_mfi(struct megasas_instance *instance);
76static int megasas_register_aen(struct megasas_instance *instance, 88static int megasas_register_aen(struct megasas_instance *instance,
@@ -98,6 +110,8 @@ static struct pci_device_id megasas_pci_table[] = {
98 /* xscale IOP, vega */ 110 /* xscale IOP, vega */
99 {PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)}, 111 {PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
100 /* xscale IOP */ 112 /* xscale IOP */
113 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)},
114 /* Fusion */
101 {} 115 {}
102}; 116};
103 117
@@ -111,23 +125,55 @@ static DEFINE_MUTEX(megasas_async_queue_mutex);
111static int megasas_poll_wait_aen; 125static int megasas_poll_wait_aen;
112static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait); 126static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
113static u32 support_poll_for_event; 127static u32 support_poll_for_event;
114static u32 megasas_dbg_lvl; 128u32 megasas_dbg_lvl;
115static u32 support_device_change; 129static u32 support_device_change;
116 130
117/* define lock for aen poll */ 131/* define lock for aen poll */
118spinlock_t poll_aen_lock; 132spinlock_t poll_aen_lock;
119 133
120static void 134void
121megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, 135megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
122 u8 alt_status); 136 u8 alt_status);
123 137
138static irqreturn_t megasas_isr(int irq, void *devp);
139static u32
140megasas_init_adapter_mfi(struct megasas_instance *instance);
141u32
142megasas_build_and_issue_cmd(struct megasas_instance *instance,
143 struct scsi_cmnd *scmd);
144static void megasas_complete_cmd_dpc(unsigned long instance_addr);
145void
146megasas_release_fusion(struct megasas_instance *instance);
147int
148megasas_ioc_init_fusion(struct megasas_instance *instance);
149void
150megasas_free_cmds_fusion(struct megasas_instance *instance);
151u8
152megasas_get_map_info(struct megasas_instance *instance);
153int
154megasas_sync_map_info(struct megasas_instance *instance);
155int
156wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd);
157void megasas_reset_reply_desc(struct megasas_instance *instance);
158u8 MR_ValidateMapInfo(struct MR_FW_RAID_MAP_ALL *map,
159 struct LD_LOAD_BALANCE_INFO *lbInfo);
160int megasas_reset_fusion(struct Scsi_Host *shost);
161void megasas_fusion_ocr_wq(struct work_struct *work);
162
163void
164megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
165{
166 instance->instancet->fire_cmd(instance,
167 cmd->frame_phys_addr, 0, instance->reg_set);
168}
169
124/** 170/**
125 * megasas_get_cmd - Get a command from the free pool 171 * megasas_get_cmd - Get a command from the free pool
126 * @instance: Adapter soft state 172 * @instance: Adapter soft state
127 * 173 *
128 * Returns a free command from the pool 174 * Returns a free command from the pool
129 */ 175 */
130static struct megasas_cmd *megasas_get_cmd(struct megasas_instance 176struct megasas_cmd *megasas_get_cmd(struct megasas_instance
131 *instance) 177 *instance)
132{ 178{
133 unsigned long flags; 179 unsigned long flags;
@@ -152,7 +198,7 @@ static struct megasas_cmd *megasas_get_cmd(struct megasas_instance
152 * @instance: Adapter soft state 198 * @instance: Adapter soft state
153 * @cmd: Command packet to be returned to free command pool 199 * @cmd: Command packet to be returned to free command pool
154 */ 200 */
155static inline void 201inline void
156megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) 202megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
157{ 203{
158 unsigned long flags; 204 unsigned long flags;
@@ -160,6 +206,7 @@ megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
160 spin_lock_irqsave(&instance->cmd_pool_lock, flags); 206 spin_lock_irqsave(&instance->cmd_pool_lock, flags);
161 207
162 cmd->scmd = NULL; 208 cmd->scmd = NULL;
209 cmd->frame_count = 0;
163 list_add_tail(&cmd->list, &instance->cmd_pool); 210 list_add_tail(&cmd->list, &instance->cmd_pool);
164 211
165 spin_unlock_irqrestore(&instance->cmd_pool_lock, flags); 212 spin_unlock_irqrestore(&instance->cmd_pool_lock, flags);
@@ -167,7 +214,7 @@ megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
167 214
168 215
169/** 216/**
170* The following functions are defined for xscale 217* The following functions are defined for xscale
171* (deviceid : 1064R, PERC5) controllers 218* (deviceid : 1064R, PERC5) controllers
172*/ 219*/
173 220
@@ -210,7 +257,7 @@ megasas_read_fw_status_reg_xscale(struct megasas_register_set __iomem * regs)
210 * megasas_clear_interrupt_xscale - Check & clear interrupt 257 * megasas_clear_interrupt_xscale - Check & clear interrupt
211 * @regs: MFI register set 258 * @regs: MFI register set
212 */ 259 */
213static int 260static int
214megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs) 261megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs)
215{ 262{
216 u32 status; 263 u32 status;
@@ -243,7 +290,7 @@ megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs)
243 * @frame_count : Number of frames for the command 290 * @frame_count : Number of frames for the command
244 * @regs : MFI register set 291 * @regs : MFI register set
245 */ 292 */
246static inline void 293static inline void
247megasas_fire_cmd_xscale(struct megasas_instance *instance, 294megasas_fire_cmd_xscale(struct megasas_instance *instance,
248 dma_addr_t frame_phys_addr, 295 dma_addr_t frame_phys_addr,
249 u32 frame_count, 296 u32 frame_count,
@@ -323,15 +370,20 @@ static struct megasas_instance_template megasas_instance_template_xscale = {
323 .read_fw_status_reg = megasas_read_fw_status_reg_xscale, 370 .read_fw_status_reg = megasas_read_fw_status_reg_xscale,
324 .adp_reset = megasas_adp_reset_xscale, 371 .adp_reset = megasas_adp_reset_xscale,
325 .check_reset = megasas_check_reset_xscale, 372 .check_reset = megasas_check_reset_xscale,
373 .service_isr = megasas_isr,
374 .tasklet = megasas_complete_cmd_dpc,
375 .init_adapter = megasas_init_adapter_mfi,
376 .build_and_issue_cmd = megasas_build_and_issue_cmd,
377 .issue_dcmd = megasas_issue_dcmd,
326}; 378};
327 379
328/** 380/**
329* This is the end of set of functions & definitions specific 381* This is the end of set of functions & definitions specific
330* to xscale (deviceid : 1064R, PERC5) controllers 382* to xscale (deviceid : 1064R, PERC5) controllers
331*/ 383*/
332 384
333/** 385/**
334* The following functions are defined for ppc (deviceid : 0x60) 386* The following functions are defined for ppc (deviceid : 0x60)
335* controllers 387* controllers
336*/ 388*/
337 389
@@ -343,7 +395,7 @@ static inline void
343megasas_enable_intr_ppc(struct megasas_register_set __iomem * regs) 395megasas_enable_intr_ppc(struct megasas_register_set __iomem * regs)
344{ 396{
345 writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear); 397 writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
346 398
347 writel(~0x80000000, &(regs)->outbound_intr_mask); 399 writel(~0x80000000, &(regs)->outbound_intr_mask);
348 400
349 /* Dummy readl to force pci flush */ 401 /* Dummy readl to force pci flush */
@@ -377,7 +429,7 @@ megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem * regs)
377 * megasas_clear_interrupt_ppc - Check & clear interrupt 429 * megasas_clear_interrupt_ppc - Check & clear interrupt
378 * @regs: MFI register set 430 * @regs: MFI register set
379 */ 431 */
380static int 432static int
381megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs) 433megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
382{ 434{
383 u32 status; 435 u32 status;
@@ -406,7 +458,7 @@ megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
406 * @frame_count : Number of frames for the command 458 * @frame_count : Number of frames for the command
407 * @regs : MFI register set 459 * @regs : MFI register set
408 */ 460 */
409static inline void 461static inline void
410megasas_fire_cmd_ppc(struct megasas_instance *instance, 462megasas_fire_cmd_ppc(struct megasas_instance *instance,
411 dma_addr_t frame_phys_addr, 463 dma_addr_t frame_phys_addr,
412 u32 frame_count, 464 u32 frame_count,
@@ -414,7 +466,7 @@ megasas_fire_cmd_ppc(struct megasas_instance *instance,
414{ 466{
415 unsigned long flags; 467 unsigned long flags;
416 spin_lock_irqsave(&instance->hba_lock, flags); 468 spin_lock_irqsave(&instance->hba_lock, flags);
417 writel((frame_phys_addr | (frame_count<<1))|1, 469 writel((frame_phys_addr | (frame_count<<1))|1,
418 &(regs)->inbound_queue_port); 470 &(regs)->inbound_queue_port);
419 spin_unlock_irqrestore(&instance->hba_lock, flags); 471 spin_unlock_irqrestore(&instance->hba_lock, flags);
420} 472}
@@ -441,7 +493,7 @@ megasas_check_reset_ppc(struct megasas_instance *instance,
441 return 0; 493 return 0;
442} 494}
443static struct megasas_instance_template megasas_instance_template_ppc = { 495static struct megasas_instance_template megasas_instance_template_ppc = {
444 496
445 .fire_cmd = megasas_fire_cmd_ppc, 497 .fire_cmd = megasas_fire_cmd_ppc,
446 .enable_intr = megasas_enable_intr_ppc, 498 .enable_intr = megasas_enable_intr_ppc,
447 .disable_intr = megasas_disable_intr_ppc, 499 .disable_intr = megasas_disable_intr_ppc,
@@ -449,6 +501,11 @@ static struct megasas_instance_template megasas_instance_template_ppc = {
449 .read_fw_status_reg = megasas_read_fw_status_reg_ppc, 501 .read_fw_status_reg = megasas_read_fw_status_reg_ppc,
450 .adp_reset = megasas_adp_reset_ppc, 502 .adp_reset = megasas_adp_reset_ppc,
451 .check_reset = megasas_check_reset_ppc, 503 .check_reset = megasas_check_reset_ppc,
504 .service_isr = megasas_isr,
505 .tasklet = megasas_complete_cmd_dpc,
506 .init_adapter = megasas_init_adapter_mfi,
507 .build_and_issue_cmd = megasas_build_and_issue_cmd,
508 .issue_dcmd = megasas_issue_dcmd,
452}; 509};
453 510
454/** 511/**
@@ -570,6 +627,11 @@ static struct megasas_instance_template megasas_instance_template_skinny = {
570 .read_fw_status_reg = megasas_read_fw_status_reg_skinny, 627 .read_fw_status_reg = megasas_read_fw_status_reg_skinny,
571 .adp_reset = megasas_adp_reset_skinny, 628 .adp_reset = megasas_adp_reset_skinny,
572 .check_reset = megasas_check_reset_skinny, 629 .check_reset = megasas_check_reset_skinny,
630 .service_isr = megasas_isr,
631 .tasklet = megasas_complete_cmd_dpc,
632 .init_adapter = megasas_init_adapter_mfi,
633 .build_and_issue_cmd = megasas_build_and_issue_cmd,
634 .issue_dcmd = megasas_issue_dcmd,
573}; 635};
574 636
575 637
@@ -744,6 +806,11 @@ static struct megasas_instance_template megasas_instance_template_gen2 = {
744 .read_fw_status_reg = megasas_read_fw_status_reg_gen2, 806 .read_fw_status_reg = megasas_read_fw_status_reg_gen2,
745 .adp_reset = megasas_adp_reset_gen2, 807 .adp_reset = megasas_adp_reset_gen2,
746 .check_reset = megasas_check_reset_gen2, 808 .check_reset = megasas_check_reset_gen2,
809 .service_isr = megasas_isr,
810 .tasklet = megasas_complete_cmd_dpc,
811 .init_adapter = megasas_init_adapter_mfi,
812 .build_and_issue_cmd = megasas_build_and_issue_cmd,
813 .issue_dcmd = megasas_issue_dcmd,
747}; 814};
748 815
749/** 816/**
@@ -751,18 +818,21 @@ static struct megasas_instance_template megasas_instance_template_gen2 = {
751* specific to gen2 (deviceid : 0x78, 0x79) controllers 818* specific to gen2 (deviceid : 0x78, 0x79) controllers
752*/ 819*/
753 820
821/*
822 * Template added for TB (Fusion)
823 */
824extern struct megasas_instance_template megasas_instance_template_fusion;
825
754/** 826/**
755 * megasas_issue_polled - Issues a polling command 827 * megasas_issue_polled - Issues a polling command
756 * @instance: Adapter soft state 828 * @instance: Adapter soft state
757 * @cmd: Command packet to be issued 829 * @cmd: Command packet to be issued
758 * 830 *
759 * For polling, MFI requires the cmd_status to be set to 0xFF before posting. 831 * For polling, MFI requires the cmd_status to be set to 0xFF before posting.
760 */ 832 */
761static int 833int
762megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd) 834megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
763{ 835{
764 int i;
765 u32 msecs = MFI_POLL_TIMEOUT_SECS * 1000;
766 836
767 struct megasas_header *frame_hdr = &cmd->frame->hdr; 837 struct megasas_header *frame_hdr = &cmd->frame->hdr;
768 838
@@ -772,21 +842,12 @@ megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
772 /* 842 /*
773 * Issue the frame using inbound queue port 843 * Issue the frame using inbound queue port
774 */ 844 */
775 instance->instancet->fire_cmd(instance, 845 instance->instancet->issue_dcmd(instance, cmd);
776 cmd->frame_phys_addr, 0, instance->reg_set);
777 846
778 /* 847 /*
779 * Wait for cmd_status to change 848 * Wait for cmd_status to change
780 */ 849 */
781 for (i = 0; (i < msecs) && (frame_hdr->cmd_status == 0xff); i++) { 850 return wait_and_poll(instance, cmd);
782 rmb();
783 msleep(1);
784 }
785
786 if (frame_hdr->cmd_status == 0xff)
787 return -ETIME;
788
789 return 0;
790} 851}
791 852
792/** 853/**
@@ -804,8 +865,7 @@ megasas_issue_blocked_cmd(struct megasas_instance *instance,
804{ 865{
805 cmd->cmd_status = ENODATA; 866 cmd->cmd_status = ENODATA;
806 867
807 instance->instancet->fire_cmd(instance, 868 instance->instancet->issue_dcmd(instance, cmd);
808 cmd->frame_phys_addr, 0, instance->reg_set);
809 869
810 wait_event(instance->int_cmd_wait_q, cmd->cmd_status != ENODATA); 870 wait_event(instance->int_cmd_wait_q, cmd->cmd_status != ENODATA);
811 871
@@ -849,8 +909,7 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
849 cmd->sync_cmd = 1; 909 cmd->sync_cmd = 1;
850 cmd->cmd_status = 0xFF; 910 cmd->cmd_status = 0xFF;
851 911
852 instance->instancet->fire_cmd(instance, 912 instance->instancet->issue_dcmd(instance, cmd);
853 cmd->frame_phys_addr, 0, instance->reg_set);
854 913
855 /* 914 /*
856 * Wait for this cmd to complete 915 * Wait for this cmd to complete
@@ -1242,11 +1301,11 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
1242/** 1301/**
1243 * megasas_is_ldio - Checks if the cmd is for logical drive 1302 * megasas_is_ldio - Checks if the cmd is for logical drive
1244 * @scmd: SCSI command 1303 * @scmd: SCSI command
1245 * 1304 *
1246 * Called by megasas_queue_command to find out if the command to be queued 1305 * Called by megasas_queue_command to find out if the command to be queued
1247 * is a logical drive command 1306 * is a logical drive command
1248 */ 1307 */
1249static inline int megasas_is_ldio(struct scsi_cmnd *cmd) 1308inline int megasas_is_ldio(struct scsi_cmnd *cmd)
1250{ 1309{
1251 if (!MEGASAS_IS_LOGICAL(cmd)) 1310 if (!MEGASAS_IS_LOGICAL(cmd))
1252 return 0; 1311 return 0;
@@ -1328,6 +1387,51 @@ megasas_dump_pending_frames(struct megasas_instance *instance)
1328 printk(KERN_ERR "megasas[%d]: Dumping Done.\n\n",instance->host->host_no); 1387 printk(KERN_ERR "megasas[%d]: Dumping Done.\n\n",instance->host->host_no);
1329} 1388}
1330 1389
1390u32
1391megasas_build_and_issue_cmd(struct megasas_instance *instance,
1392 struct scsi_cmnd *scmd)
1393{
1394 struct megasas_cmd *cmd;
1395 u32 frame_count;
1396
1397 cmd = megasas_get_cmd(instance);
1398 if (!cmd)
1399 return SCSI_MLQUEUE_HOST_BUSY;
1400
1401 /*
1402 * Logical drive command
1403 */
1404 if (megasas_is_ldio(scmd))
1405 frame_count = megasas_build_ldio(instance, scmd, cmd);
1406 else
1407 frame_count = megasas_build_dcdb(instance, scmd, cmd);
1408
1409 if (!frame_count)
1410 goto out_return_cmd;
1411
1412 cmd->scmd = scmd;
1413 scmd->SCp.ptr = (char *)cmd;
1414
1415 /*
1416 * Issue the command to the FW
1417 */
1418 atomic_inc(&instance->fw_outstanding);
1419
1420 instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
1421 cmd->frame_count-1, instance->reg_set);
1422 /*
1423 * Check if we have pend cmds to be completed
1424 */
1425 if (poll_mode_io && atomic_read(&instance->fw_outstanding))
1426 tasklet_schedule(&instance->isr_tasklet);
1427
1428 return 0;
1429out_return_cmd:
1430 megasas_return_cmd(instance, cmd);
1431 return 1;
1432}
1433
1434
1331/** 1435/**
1332 * megasas_queue_command - Queue entry point 1436 * megasas_queue_command - Queue entry point
1333 * @scmd: SCSI command to be queued 1437 * @scmd: SCSI command to be queued
@@ -1336,8 +1440,6 @@ megasas_dump_pending_frames(struct megasas_instance *instance)
1336static int 1440static int
1337megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *)) 1441megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *))
1338{ 1442{
1339 u32 frame_count;
1340 struct megasas_cmd *cmd;
1341 struct megasas_instance *instance; 1443 struct megasas_instance *instance;
1342 unsigned long flags; 1444 unsigned long flags;
1343 1445
@@ -1376,42 +1478,13 @@ megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd
1376 break; 1478 break;
1377 } 1479 }
1378 1480
1379 cmd = megasas_get_cmd(instance); 1481 if (instance->instancet->build_and_issue_cmd(instance, scmd)) {
1380 if (!cmd) 1482 printk(KERN_ERR "megasas: Err returned from build_and_issue_cmd\n");
1381 return SCSI_MLQUEUE_HOST_BUSY; 1483 return SCSI_MLQUEUE_HOST_BUSY;
1382 1484 }
1383 /*
1384 * Logical drive command
1385 */
1386 if (megasas_is_ldio(scmd))
1387 frame_count = megasas_build_ldio(instance, scmd, cmd);
1388 else
1389 frame_count = megasas_build_dcdb(instance, scmd, cmd);
1390
1391 if (!frame_count)
1392 goto out_return_cmd;
1393
1394 cmd->scmd = scmd;
1395 scmd->SCp.ptr = (char *)cmd;
1396
1397 /*
1398 * Issue the command to the FW
1399 */
1400 atomic_inc(&instance->fw_outstanding);
1401
1402 instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
1403 cmd->frame_count-1, instance->reg_set);
1404 /*
1405 * Check if we have pend cmds to be completed
1406 */
1407 if (poll_mode_io && atomic_read(&instance->fw_outstanding))
1408 tasklet_schedule(&instance->isr_tasklet);
1409
1410 1485
1411 return 0; 1486 return 0;
1412 1487
1413 out_return_cmd:
1414 megasas_return_cmd(instance, cmd);
1415 out_done: 1488 out_done:
1416 done(scmd); 1489 done(scmd);
1417 return 0; 1490 return 0;
@@ -1492,15 +1565,44 @@ static int megasas_slave_alloc(struct scsi_device *sdev)
1492 return 0; 1565 return 0;
1493} 1566}
1494 1567
1495static void megaraid_sas_kill_hba(struct megasas_instance *instance) 1568void megaraid_sas_kill_hba(struct megasas_instance *instance)
1496{ 1569{
1497 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 1570 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
1498 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { 1571 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
1499 writel(MFI_STOP_ADP, 1572 (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION)) {
1500 &instance->reg_set->reserved_0[0]); 1573 writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
1501 } else { 1574 } else {
1502 writel(MFI_STOP_ADP, 1575 writel(MFI_STOP_ADP, &instance->reg_set->inbound_doorbell);
1503 &instance->reg_set->inbound_doorbell); 1576 }
1577}
1578
1579 /**
1580 * megasas_check_and_restore_queue_depth - Check if queue depth needs to be
1581 * restored to max value
1582 * @instance: Adapter soft state
1583 *
1584 */
1585void
1586megasas_check_and_restore_queue_depth(struct megasas_instance *instance)
1587{
1588 unsigned long flags;
1589 if (instance->flag & MEGASAS_FW_BUSY
1590 && time_after(jiffies, instance->last_time + 5 * HZ)
1591 && atomic_read(&instance->fw_outstanding) < 17) {
1592
1593 spin_lock_irqsave(instance->host->host_lock, flags);
1594 instance->flag &= ~MEGASAS_FW_BUSY;
1595 if ((instance->pdev->device ==
1596 PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
1597 (instance->pdev->device ==
1598 PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
1599 instance->host->can_queue =
1600 instance->max_fw_cmds - MEGASAS_SKINNY_INT_CMDS;
1601 } else
1602 instance->host->can_queue =
1603 instance->max_fw_cmds - MEGASAS_INT_CMDS;
1604
1605 spin_unlock_irqrestore(instance->host->host_lock, flags);
1504 } 1606 }
1505} 1607}
1506 1608
@@ -1554,24 +1656,7 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr)
1554 /* 1656 /*
1555 * Check if we can restore can_queue 1657 * Check if we can restore can_queue
1556 */ 1658 */
1557 if (instance->flag & MEGASAS_FW_BUSY 1659 megasas_check_and_restore_queue_depth(instance);
1558 && time_after(jiffies, instance->last_time + 5 * HZ)
1559 && atomic_read(&instance->fw_outstanding) < 17) {
1560
1561 spin_lock_irqsave(instance->host->host_lock, flags);
1562 instance->flag &= ~MEGASAS_FW_BUSY;
1563 if ((instance->pdev->device ==
1564 PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
1565 (instance->pdev->device ==
1566 PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
1567 instance->host->can_queue =
1568 instance->max_fw_cmds - MEGASAS_SKINNY_INT_CMDS;
1569 } else
1570 instance->host->can_queue =
1571 instance->max_fw_cmds - MEGASAS_INT_CMDS;
1572
1573 spin_unlock_irqrestore(instance->host->host_lock, flags);
1574 }
1575} 1660}
1576 1661
1577static void 1662static void
@@ -1749,7 +1834,7 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
1749 (instance->pdev->device == 1834 (instance->pdev->device ==
1750 PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { 1835 PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
1751 writel(MFI_STOP_ADP, 1836 writel(MFI_STOP_ADP,
1752 &instance->reg_set->reserved_0[0]); 1837 &instance->reg_set->doorbell);
1753 } else { 1838 } else {
1754 writel(MFI_STOP_ADP, 1839 writel(MFI_STOP_ADP,
1755 &instance->reg_set->inbound_doorbell); 1840 &instance->reg_set->inbound_doorbell);
@@ -1853,11 +1938,16 @@ static int megasas_reset_device(struct scsi_cmnd *scmd)
1853static int megasas_reset_bus_host(struct scsi_cmnd *scmd) 1938static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
1854{ 1939{
1855 int ret; 1940 int ret;
1941 struct megasas_instance *instance;
1942 instance = (struct megasas_instance *)scmd->device->host->hostdata;
1856 1943
1857 /* 1944 /*
1858 * First wait for all commands to complete 1945 * First wait for all commands to complete
1859 */ 1946 */
1860 ret = megasas_generic_reset(scmd); 1947 if (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION)
1948 ret = megasas_reset_fusion(scmd->device->host);
1949 else
1950 ret = megasas_generic_reset(scmd);
1861 1951
1862 return ret; 1952 return ret;
1863} 1953}
@@ -2000,8 +2090,8 @@ megasas_complete_int_cmd(struct megasas_instance *instance,
2000 * @instance: Adapter soft state 2090 * @instance: Adapter soft state
2001 * @cmd: Cmd that was issued to abort another cmd 2091 * @cmd: Cmd that was issued to abort another cmd
2002 * 2092 *
2003 * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q 2093 * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q
2004 * after it issues an abort on a previously issued command. This function 2094 * after it issues an abort on a previously issued command. This function
2005 * wakes up all functions waiting on the same wait queue. 2095 * wakes up all functions waiting on the same wait queue.
2006 */ 2096 */
2007static void 2097static void
@@ -2021,19 +2111,20 @@ megasas_complete_abort(struct megasas_instance *instance,
2021 * megasas_complete_cmd - Completes a command 2111 * megasas_complete_cmd - Completes a command
2022 * @instance: Adapter soft state 2112 * @instance: Adapter soft state
2023 * @cmd: Command to be completed 2113 * @cmd: Command to be completed
2024 * @alt_status: If non-zero, use this value as status to 2114 * @alt_status: If non-zero, use this value as status to
2025 * SCSI mid-layer instead of the value returned 2115 * SCSI mid-layer instead of the value returned
2026 * by the FW. This should be used if caller wants 2116 * by the FW. This should be used if caller wants
2027 * an alternate status (as in the case of aborted 2117 * an alternate status (as in the case of aborted
2028 * commands) 2118 * commands)
2029 */ 2119 */
2030static void 2120void
2031megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, 2121megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
2032 u8 alt_status) 2122 u8 alt_status)
2033{ 2123{
2034 int exception = 0; 2124 int exception = 0;
2035 struct megasas_header *hdr = &cmd->frame->hdr; 2125 struct megasas_header *hdr = &cmd->frame->hdr;
2036 unsigned long flags; 2126 unsigned long flags;
2127 struct fusion_context *fusion = instance->ctrl_context;
2037 2128
2038 /* flag for the retry reset */ 2129 /* flag for the retry reset */
2039 cmd->retry_for_fw_reset = 0; 2130 cmd->retry_for_fw_reset = 0;
@@ -2126,6 +2217,37 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
2126 case MFI_CMD_SMP: 2217 case MFI_CMD_SMP:
2127 case MFI_CMD_STP: 2218 case MFI_CMD_STP:
2128 case MFI_CMD_DCMD: 2219 case MFI_CMD_DCMD:
2220 /* Check for LD map update */
2221 if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
2222 (cmd->frame->dcmd.mbox.b[1] == 1)) {
2223 spin_lock_irqsave(instance->host->host_lock, flags);
2224 if (cmd->frame->hdr.cmd_status != 0) {
2225 if (cmd->frame->hdr.cmd_status !=
2226 MFI_STAT_NOT_FOUND)
2227 printk(KERN_WARNING "megasas: map sync"
2228 "failed, status = 0x%x.\n",
2229 cmd->frame->hdr.cmd_status);
2230 else {
2231 megasas_return_cmd(instance, cmd);
2232 spin_unlock_irqrestore(
2233 instance->host->host_lock,
2234 flags);
2235 break;
2236 }
2237 } else
2238 instance->map_id++;
2239 megasas_return_cmd(instance, cmd);
2240 if (MR_ValidateMapInfo(
2241 fusion->ld_map[(instance->map_id & 1)],
2242 fusion->load_balance_info))
2243 fusion->fast_path_io = 1;
2244 else
2245 fusion->fast_path_io = 0;
2246 megasas_sync_map_info(instance);
2247 spin_unlock_irqrestore(instance->host->host_lock,
2248 flags);
2249 break;
2250 }
2129 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO || 2251 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
2130 cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) { 2252 cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
2131 spin_lock_irqsave(&poll_aen_lock, flags); 2253 spin_lock_irqsave(&poll_aen_lock, flags);
@@ -2464,7 +2586,7 @@ static irqreturn_t megasas_isr(int irq, void *devp)
2464 * states, driver must take steps to bring it to ready state. Otherwise, it 2586 * states, driver must take steps to bring it to ready state. Otherwise, it
2465 * has to wait for the ready state. 2587 * has to wait for the ready state.
2466 */ 2588 */
2467static int 2589int
2468megasas_transition_to_ready(struct megasas_instance* instance) 2590megasas_transition_to_ready(struct megasas_instance* instance)
2469{ 2591{
2470 int i; 2592 int i;
@@ -2476,8 +2598,8 @@ megasas_transition_to_ready(struct megasas_instance* instance)
2476 fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK; 2598 fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
2477 2599
2478 if (fw_state != MFI_STATE_READY) 2600 if (fw_state != MFI_STATE_READY)
2479 printk(KERN_INFO "megasas: Waiting for FW to come to ready" 2601 printk(KERN_INFO "megasas: Waiting for FW to come to ready"
2480 " state\n"); 2602 " state\n");
2481 2603
2482 while (fw_state != MFI_STATE_READY) { 2604 while (fw_state != MFI_STATE_READY) {
2483 2605
@@ -2498,11 +2620,12 @@ megasas_transition_to_ready(struct megasas_instance* instance)
2498 if ((instance->pdev->device == 2620 if ((instance->pdev->device ==
2499 PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 2621 PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
2500 (instance->pdev->device == 2622 (instance->pdev->device ==
2501 PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { 2623 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
2502 2624 (instance->pdev->device ==
2625 PCI_DEVICE_ID_LSI_FUSION)) {
2503 writel( 2626 writel(
2504 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, 2627 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
2505 &instance->reg_set->reserved_0[0]); 2628 &instance->reg_set->doorbell);
2506 } else { 2629 } else {
2507 writel( 2630 writel(
2508 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, 2631 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
@@ -2515,11 +2638,13 @@ megasas_transition_to_ready(struct megasas_instance* instance)
2515 2638
2516 case MFI_STATE_BOOT_MESSAGE_PENDING: 2639 case MFI_STATE_BOOT_MESSAGE_PENDING:
2517 if ((instance->pdev->device == 2640 if ((instance->pdev->device ==
2518 PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 2641 PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
2519 (instance->pdev->device == 2642 (instance->pdev->device ==
2520 PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { 2643 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
2644 (instance->pdev->device ==
2645 PCI_DEVICE_ID_LSI_FUSION)) {
2521 writel(MFI_INIT_HOTPLUG, 2646 writel(MFI_INIT_HOTPLUG,
2522 &instance->reg_set->reserved_0[0]); 2647 &instance->reg_set->doorbell);
2523 } else 2648 } else
2524 writel(MFI_INIT_HOTPLUG, 2649 writel(MFI_INIT_HOTPLUG,
2525 &instance->reg_set->inbound_doorbell); 2650 &instance->reg_set->inbound_doorbell);
@@ -2536,9 +2661,23 @@ megasas_transition_to_ready(struct megasas_instance* instance)
2536 if ((instance->pdev->device == 2661 if ((instance->pdev->device ==
2537 PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 2662 PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
2538 (instance->pdev->device == 2663 (instance->pdev->device ==
2539 PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { 2664 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
2665 (instance->pdev->device
2666 == PCI_DEVICE_ID_LSI_FUSION)) {
2540 writel(MFI_RESET_FLAGS, 2667 writel(MFI_RESET_FLAGS,
2541 &instance->reg_set->reserved_0[0]); 2668 &instance->reg_set->doorbell);
2669 if (instance->pdev->device ==
2670 PCI_DEVICE_ID_LSI_FUSION) {
2671 for (i = 0; i < (10 * 1000); i += 20) {
2672 if (readl(
2673 &instance->
2674 reg_set->
2675 doorbell) & 1)
2676 msleep(20);
2677 else
2678 break;
2679 }
2680 }
2542 } else 2681 } else
2543 writel(MFI_RESET_FLAGS, 2682 writel(MFI_RESET_FLAGS,
2544 &instance->reg_set->inbound_doorbell); 2683 &instance->reg_set->inbound_doorbell);
@@ -2590,7 +2729,7 @@ megasas_transition_to_ready(struct megasas_instance* instance)
2590 * The cur_state should not last for more than max_wait secs 2729 * The cur_state should not last for more than max_wait secs
2591 */ 2730 */
2592 for (i = 0; i < (max_wait * 1000); i++) { 2731 for (i = 0; i < (max_wait * 1000); i++) {
2593 fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & 2732 fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) &
2594 MFI_STATE_MASK ; 2733 MFI_STATE_MASK ;
2595 curr_abs_state = 2734 curr_abs_state =
2596 instance->instancet->read_fw_status_reg(instance->reg_set); 2735 instance->instancet->read_fw_status_reg(instance->reg_set);
@@ -2610,7 +2749,7 @@ megasas_transition_to_ready(struct megasas_instance* instance)
2610 return -ENODEV; 2749 return -ENODEV;
2611 } 2750 }
2612 } 2751 }
2613 printk(KERN_INFO "megasas: FW now in Ready state\n"); 2752 printk(KERN_INFO "megasas: FW now in Ready state\n");
2614 2753
2615 return 0; 2754 return 0;
2616} 2755}
@@ -2622,7 +2761,7 @@ megasas_transition_to_ready(struct megasas_instance* instance)
2622static void megasas_teardown_frame_pool(struct megasas_instance *instance) 2761static void megasas_teardown_frame_pool(struct megasas_instance *instance)
2623{ 2762{
2624 int i; 2763 int i;
2625 u32 max_cmd = instance->max_fw_cmds; 2764 u32 max_cmd = instance->max_mfi_cmds;
2626 struct megasas_cmd *cmd; 2765 struct megasas_cmd *cmd;
2627 2766
2628 if (!instance->frame_dma_pool) 2767 if (!instance->frame_dma_pool)
@@ -2673,7 +2812,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
2673 u32 frame_count; 2812 u32 frame_count;
2674 struct megasas_cmd *cmd; 2813 struct megasas_cmd *cmd;
2675 2814
2676 max_cmd = instance->max_fw_cmds; 2815 max_cmd = instance->max_mfi_cmds;
2677 2816
2678 /* 2817 /*
2679 * Size of our frame is 64 bytes for MFI frame, followed by max SG 2818 * Size of our frame is 64 bytes for MFI frame, followed by max SG
@@ -2760,14 +2899,15 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
2760 * megasas_free_cmds - Free all the cmds in the free cmd pool 2899 * megasas_free_cmds - Free all the cmds in the free cmd pool
2761 * @instance: Adapter soft state 2900 * @instance: Adapter soft state
2762 */ 2901 */
2763static void megasas_free_cmds(struct megasas_instance *instance) 2902void megasas_free_cmds(struct megasas_instance *instance)
2764{ 2903{
2765 int i; 2904 int i;
2766 /* First free the MFI frame pool */ 2905 /* First free the MFI frame pool */
2767 megasas_teardown_frame_pool(instance); 2906 megasas_teardown_frame_pool(instance);
2768 2907
2769 /* Free all the commands in the cmd_list */ 2908 /* Free all the commands in the cmd_list */
2770 for (i = 0; i < instance->max_fw_cmds; i++) 2909 for (i = 0; i < instance->max_mfi_cmds; i++)
2910
2771 kfree(instance->cmd_list[i]); 2911 kfree(instance->cmd_list[i]);
2772 2912
2773 /* Free the cmd_list buffer itself */ 2913 /* Free the cmd_list buffer itself */
@@ -2795,14 +2935,14 @@ static void megasas_free_cmds(struct megasas_instance *instance)
2795 * This array is used only to look up the megasas_cmd given the context. The 2935 * This array is used only to look up the megasas_cmd given the context. The
2796 * free commands themselves are maintained in a linked list called cmd_pool. 2936 * free commands themselves are maintained in a linked list called cmd_pool.
2797 */ 2937 */
2798static int megasas_alloc_cmds(struct megasas_instance *instance) 2938int megasas_alloc_cmds(struct megasas_instance *instance)
2799{ 2939{
2800 int i; 2940 int i;
2801 int j; 2941 int j;
2802 u32 max_cmd; 2942 u32 max_cmd;
2803 struct megasas_cmd *cmd; 2943 struct megasas_cmd *cmd;
2804 2944
2805 max_cmd = instance->max_fw_cmds; 2945 max_cmd = instance->max_mfi_cmds;
2806 2946
2807 /* 2947 /*
2808 * instance->cmd_list is an array of struct megasas_cmd pointers. 2948 * instance->cmd_list is an array of struct megasas_cmd pointers.
@@ -2816,6 +2956,7 @@ static int megasas_alloc_cmds(struct megasas_instance *instance)
2816 return -ENOMEM; 2956 return -ENOMEM;
2817 } 2957 }
2818 2958
2959 memset(instance->cmd_list, 0, sizeof(struct megasas_cmd *) *max_cmd);
2819 2960
2820 for (i = 0; i < max_cmd; i++) { 2961 for (i = 0; i < max_cmd; i++) {
2821 instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd), 2962 instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
@@ -3210,76 +3351,15 @@ megasas_io_completion_timer(unsigned long instance_addr)
3210 jiffies + MEGASAS_COMPLETION_TIMER_INTERVAL); 3351 jiffies + MEGASAS_COMPLETION_TIMER_INTERVAL);
3211} 3352}
3212 3353
3213/** 3354static u32
3214 * megasas_init_mfi - Initializes the FW 3355megasas_init_adapter_mfi(struct megasas_instance *instance)
3215 * @instance: Adapter soft state
3216 *
3217 * This is the main function for initializing MFI firmware.
3218 */
3219static int megasas_init_mfi(struct megasas_instance *instance)
3220{ 3356{
3357 struct megasas_register_set __iomem *reg_set;
3221 u32 context_sz; 3358 u32 context_sz;
3222 u32 reply_q_sz; 3359 u32 reply_q_sz;
3223 u32 max_sectors_1;
3224 u32 max_sectors_2;
3225 u32 tmp_sectors;
3226 struct megasas_register_set __iomem *reg_set;
3227 struct megasas_ctrl_info *ctrl_info;
3228 /*
3229 * Map the message registers
3230 */
3231 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
3232 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
3233 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3234 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0079GEN2)) {
3235 instance->base_addr = pci_resource_start(instance->pdev, 1);
3236 } else {
3237 instance->base_addr = pci_resource_start(instance->pdev, 0);
3238 }
3239
3240 if (pci_request_selected_regions(instance->pdev,
3241 pci_select_bars(instance->pdev, IORESOURCE_MEM),
3242 "megasas: LSI")) {
3243 printk(KERN_DEBUG "megasas: IO memory region busy!\n");
3244 return -EBUSY;
3245 }
3246
3247 instance->reg_set = ioremap_nocache(instance->base_addr, 8192);
3248
3249 if (!instance->reg_set) {
3250 printk(KERN_DEBUG "megasas: Failed to map IO mem\n");
3251 goto fail_ioremap;
3252 }
3253 3360
3254 reg_set = instance->reg_set; 3361 reg_set = instance->reg_set;
3255 3362
3256 switch(instance->pdev->device)
3257 {
3258 case PCI_DEVICE_ID_LSI_SAS1078R:
3259 case PCI_DEVICE_ID_LSI_SAS1078DE:
3260 instance->instancet = &megasas_instance_template_ppc;
3261 break;
3262 case PCI_DEVICE_ID_LSI_SAS1078GEN2:
3263 case PCI_DEVICE_ID_LSI_SAS0079GEN2:
3264 instance->instancet = &megasas_instance_template_gen2;
3265 break;
3266 case PCI_DEVICE_ID_LSI_SAS0073SKINNY:
3267 case PCI_DEVICE_ID_LSI_SAS0071SKINNY:
3268 instance->instancet = &megasas_instance_template_skinny;
3269 break;
3270 case PCI_DEVICE_ID_LSI_SAS1064R:
3271 case PCI_DEVICE_ID_DELL_PERC5:
3272 default:
3273 instance->instancet = &megasas_instance_template_xscale;
3274 break;
3275 }
3276
3277 /*
3278 * We expect the FW state to be READY
3279 */
3280 if (megasas_transition_to_ready(instance))
3281 goto fail_ready_state;
3282
3283 /* 3363 /*
3284 * Get various operational parameters from status register 3364 * Get various operational parameters from status register
3285 */ 3365 */
@@ -3290,7 +3370,8 @@ static int megasas_init_mfi(struct megasas_instance *instance)
3290 * does not exceed max cmds that the FW can support 3370 * does not exceed max cmds that the FW can support
3291 */ 3371 */
3292 instance->max_fw_cmds = instance->max_fw_cmds-1; 3372 instance->max_fw_cmds = instance->max_fw_cmds-1;
3293 instance->max_num_sge = (instance->instancet->read_fw_status_reg(reg_set) & 0xFF0000) >> 3373 instance->max_mfi_cmds = instance->max_fw_cmds;
3374 instance->max_num_sge = (instance->instancet->read_fw_status_reg(reg_set) & 0xFF0000) >>
3294 0x10; 3375 0x10;
3295 /* 3376 /*
3296 * Create a pool of commands 3377 * Create a pool of commands
@@ -3333,6 +3414,90 @@ static int megasas_init_mfi(struct megasas_instance *instance)
3333 if (instance->fw_support_ieee) 3414 if (instance->fw_support_ieee)
3334 instance->flag_ieee = 1; 3415 instance->flag_ieee = 1;
3335 3416
3417 return 0;
3418
3419fail_fw_init:
3420
3421 pci_free_consistent(instance->pdev, reply_q_sz,
3422 instance->reply_queue, instance->reply_queue_h);
3423fail_reply_queue:
3424 megasas_free_cmds(instance);
3425
3426fail_alloc_cmds:
3427 iounmap(instance->reg_set);
3428 return 1;
3429}
3430
3431/**
3432 * megasas_init_fw - Initializes the FW
3433 * @instance: Adapter soft state
3434 *
3435 * This is the main function for initializing firmware
3436 */
3437
3438static int megasas_init_fw(struct megasas_instance *instance)
3439{
3440 u32 max_sectors_1;
3441 u32 max_sectors_2;
3442 u32 tmp_sectors;
3443 struct megasas_register_set __iomem *reg_set;
3444 struct megasas_ctrl_info *ctrl_info;
3445 unsigned long bar_list;
3446
3447 /* Find first memory bar */
3448 bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
3449 instance->bar = find_first_bit(&bar_list, sizeof(unsigned long));
3450 instance->base_addr = pci_resource_start(instance->pdev, instance->bar);
3451 if (pci_request_selected_regions(instance->pdev, instance->bar,
3452 "megasas: LSI")) {
3453 printk(KERN_DEBUG "megasas: IO memory region busy!\n");
3454 return -EBUSY;
3455 }
3456
3457 instance->reg_set = ioremap_nocache(instance->base_addr, 8192);
3458
3459 if (!instance->reg_set) {
3460 printk(KERN_DEBUG "megasas: Failed to map IO mem\n");
3461 goto fail_ioremap;
3462 }
3463
3464 reg_set = instance->reg_set;
3465
3466 switch (instance->pdev->device) {
3467 case PCI_DEVICE_ID_LSI_FUSION:
3468 instance->instancet = &megasas_instance_template_fusion;
3469 break;
3470 case PCI_DEVICE_ID_LSI_SAS1078R:
3471 case PCI_DEVICE_ID_LSI_SAS1078DE:
3472 instance->instancet = &megasas_instance_template_ppc;
3473 break;
3474 case PCI_DEVICE_ID_LSI_SAS1078GEN2:
3475 case PCI_DEVICE_ID_LSI_SAS0079GEN2:
3476 instance->instancet = &megasas_instance_template_gen2;
3477 break;
3478 case PCI_DEVICE_ID_LSI_SAS0073SKINNY:
3479 case PCI_DEVICE_ID_LSI_SAS0071SKINNY:
3480 instance->instancet = &megasas_instance_template_skinny;
3481 break;
3482 case PCI_DEVICE_ID_LSI_SAS1064R:
3483 case PCI_DEVICE_ID_DELL_PERC5:
3484 default:
3485 instance->instancet = &megasas_instance_template_xscale;
3486 break;
3487 }
3488
3489 /*
3490 * We expect the FW state to be READY
3491 */
3492 if (megasas_transition_to_ready(instance))
3493 goto fail_ready_state;
3494
3495 /* Get operational params, sge flags, send init cmd to controller */
3496 if (instance->instancet->init_adapter(instance))
3497 return -ENODEV;
3498
3499 printk(KERN_ERR "megasas: INIT adapter done\n");
3500
3336 /** for passthrough 3501 /** for passthrough
3337 * the following function will get the PD LIST. 3502 * the following function will get the PD LIST.
3338 */ 3503 */
@@ -3388,20 +3553,11 @@ static int megasas_init_mfi(struct megasas_instance *instance)
3388 MEGASAS_COMPLETION_TIMER_INTERVAL); 3553 MEGASAS_COMPLETION_TIMER_INTERVAL);
3389 return 0; 3554 return 0;
3390 3555
3391 fail_fw_init: 3556fail_ready_state:
3392
3393 pci_free_consistent(instance->pdev, reply_q_sz,
3394 instance->reply_queue, instance->reply_queue_h);
3395 fail_reply_queue:
3396 megasas_free_cmds(instance);
3397
3398 fail_alloc_cmds:
3399 fail_ready_state:
3400 iounmap(instance->reg_set); 3557 iounmap(instance->reg_set);
3401 3558
3402 fail_ioremap: 3559 fail_ioremap:
3403 pci_release_selected_regions(instance->pdev, 3560 pci_release_selected_regions(instance->pdev, instance->bar);
3404 pci_select_bars(instance->pdev, IORESOURCE_MEM));
3405 3561
3406 return -EINVAL; 3562 return -EINVAL;
3407} 3563}
@@ -3412,17 +3568,17 @@ static int megasas_init_mfi(struct megasas_instance *instance)
3412 */ 3568 */
3413static void megasas_release_mfi(struct megasas_instance *instance) 3569static void megasas_release_mfi(struct megasas_instance *instance)
3414{ 3570{
3415 u32 reply_q_sz = sizeof(u32) * (instance->max_fw_cmds + 1); 3571 u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1);
3416 3572
3417 pci_free_consistent(instance->pdev, reply_q_sz, 3573 if (instance->reply_queue)
3574 pci_free_consistent(instance->pdev, reply_q_sz,
3418 instance->reply_queue, instance->reply_queue_h); 3575 instance->reply_queue, instance->reply_queue_h);
3419 3576
3420 megasas_free_cmds(instance); 3577 megasas_free_cmds(instance);
3421 3578
3422 iounmap(instance->reg_set); 3579 iounmap(instance->reg_set);
3423 3580
3424 pci_release_selected_regions(instance->pdev, 3581 pci_release_selected_regions(instance->pdev, instance->bar);
3425 pci_select_bars(instance->pdev, IORESOURCE_MEM));
3426} 3582}
3427 3583
3428/** 3584/**
@@ -3609,8 +3765,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
3609 /* 3765 /*
3610 * Issue the aen registration frame 3766 * Issue the aen registration frame
3611 */ 3767 */
3612 instance->instancet->fire_cmd(instance, 3768 instance->instancet->issue_dcmd(instance, cmd);
3613 cmd->frame_phys_addr, 0, instance->reg_set);
3614 3769
3615 return 0; 3770 return 0;
3616} 3771}
@@ -3687,12 +3842,18 @@ static int megasas_io_attach(struct megasas_instance *instance)
3687 } 3842 }
3688 3843
3689 host->max_sectors = instance->max_sectors_per_req; 3844 host->max_sectors = instance->max_sectors_per_req;
3690 host->cmd_per_lun = 128; 3845 host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN;
3691 host->max_channel = MEGASAS_MAX_CHANNELS - 1; 3846 host->max_channel = MEGASAS_MAX_CHANNELS - 1;
3692 host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL; 3847 host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL;
3693 host->max_lun = MEGASAS_MAX_LUN; 3848 host->max_lun = MEGASAS_MAX_LUN;
3694 host->max_cmd_len = 16; 3849 host->max_cmd_len = 16;
3695 3850
3851 /* Fusion only supports host reset */
3852 if (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) {
3853 host->hostt->eh_device_reset_handler = NULL;
3854 host->hostt->eh_bus_reset_handler = NULL;
3855 }
3856
3696 /* 3857 /*
3697 * Notify the mid-layer about the new controller 3858 * Notify the mid-layer about the new controller
3698 */ 3859 */
@@ -3733,7 +3894,7 @@ fail_set_dma_mask:
3733/** 3894/**
3734 * megasas_probe_one - PCI hotplug entry point 3895 * megasas_probe_one - PCI hotplug entry point
3735 * @pdev: PCI device structure 3896 * @pdev: PCI device structure
3736 * @id: PCI ids of supported hotplugged adapter 3897 * @id: PCI ids of supported hotplugged adapter
3737 */ 3898 */
3738static int __devinit 3899static int __devinit
3739megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) 3900megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -3777,20 +3938,45 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
3777 instance = (struct megasas_instance *)host->hostdata; 3938 instance = (struct megasas_instance *)host->hostdata;
3778 memset(instance, 0, sizeof(*instance)); 3939 memset(instance, 0, sizeof(*instance));
3779 atomic_set( &instance->fw_reset_no_pci_access, 0 ); 3940 atomic_set( &instance->fw_reset_no_pci_access, 0 );
3941 instance->pdev = pdev;
3780 3942
3781 instance->producer = pci_alloc_consistent(pdev, sizeof(u32), 3943 switch (instance->pdev->device) {
3782 &instance->producer_h); 3944 case PCI_DEVICE_ID_LSI_FUSION:
3783 instance->consumer = pci_alloc_consistent(pdev, sizeof(u32), 3945 {
3784 &instance->consumer_h); 3946 struct fusion_context *fusion;
3947
3948 instance->ctrl_context =
3949 kzalloc(sizeof(struct fusion_context), GFP_KERNEL);
3950 if (!instance->ctrl_context) {
3951 printk(KERN_DEBUG "megasas: Failed to allocate "
3952 "memory for Fusion context info\n");
3953 goto fail_alloc_dma_buf;
3954 }
3955 fusion = instance->ctrl_context;
3956 INIT_LIST_HEAD(&fusion->cmd_pool);
3957 spin_lock_init(&fusion->cmd_pool_lock);
3958 }
3959 break;
3960 default: /* For all other supported controllers */
3961
3962 instance->producer =
3963 pci_alloc_consistent(pdev, sizeof(u32),
3964 &instance->producer_h);
3965 instance->consumer =
3966 pci_alloc_consistent(pdev, sizeof(u32),
3967 &instance->consumer_h);
3968
3969 if (!instance->producer || !instance->consumer) {
3970 printk(KERN_DEBUG "megasas: Failed to allocate"
3971 "memory for producer, consumer\n");
3972 goto fail_alloc_dma_buf;
3973 }
3785 3974
3786 if (!instance->producer || !instance->consumer) { 3975 *instance->producer = 0;
3787 printk(KERN_DEBUG "megasas: Failed to allocate memory for " 3976 *instance->consumer = 0;
3788 "producer, consumer\n"); 3977 break;
3789 goto fail_alloc_dma_buf;
3790 } 3978 }
3791 3979
3792 *instance->producer = 0;
3793 *instance->consumer = 0;
3794 megasas_poll_wait_aen = 0; 3980 megasas_poll_wait_aen = 0;
3795 instance->flag_ieee = 0; 3981 instance->flag_ieee = 0;
3796 instance->ev = NULL; 3982 instance->ev = NULL;
@@ -3826,11 +4012,11 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
3826 spin_lock_init(&poll_aen_lock); 4012 spin_lock_init(&poll_aen_lock);
3827 4013
3828 mutex_init(&instance->aen_mutex); 4014 mutex_init(&instance->aen_mutex);
4015 mutex_init(&instance->reset_mutex);
3829 4016
3830 /* 4017 /*
3831 * Initialize PCI related and misc parameters 4018 * Initialize PCI related and misc parameters
3832 */ 4019 */
3833 instance->pdev = pdev;
3834 instance->host = host; 4020 instance->host = host;
3835 instance->unique_id = pdev->bus->number << 8 | pdev->devfn; 4021 instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
3836 instance->init_id = MEGASAS_DEFAULT_INIT_ID; 4022 instance->init_id = MEGASAS_DEFAULT_INIT_ID;
@@ -3848,18 +4034,31 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
3848 instance->last_time = 0; 4034 instance->last_time = 0;
3849 instance->disableOnlineCtrlReset = 1; 4035 instance->disableOnlineCtrlReset = 1;
3850 4036
3851 INIT_WORK(&instance->work_init, process_fw_state_change_wq); 4037 if (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION)
4038 INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
4039 else
4040 INIT_WORK(&instance->work_init, process_fw_state_change_wq);
3852 4041
3853 /* 4042 /*
3854 * Initialize MFI Firmware 4043 * Initialize MFI Firmware
3855 */ 4044 */
3856 if (megasas_init_mfi(instance)) 4045 if (megasas_init_fw(instance))
3857 goto fail_init_mfi; 4046 goto fail_init_mfi;
3858 4047
4048 /* Try to enable MSI-X */
4049 if ((instance->pdev->device != PCI_DEVICE_ID_LSI_SAS1078R) &&
4050 (instance->pdev->device != PCI_DEVICE_ID_LSI_SAS1078DE) &&
4051 (instance->pdev->device != PCI_DEVICE_ID_LSI_VERDE_ZCR) &&
4052 !msix_disable && !pci_enable_msix(instance->pdev,
4053 &instance->msixentry, 1))
4054 instance->msi_flag = 1;
4055
3859 /* 4056 /*
3860 * Register IRQ 4057 * Register IRQ
3861 */ 4058 */
3862 if (request_irq(pdev->irq, megasas_isr, IRQF_SHARED, "megasas", instance)) { 4059 if (request_irq(instance->msi_flag ? instance->msixentry.vector :
4060 pdev->irq, instance->instancet->service_isr,
4061 IRQF_SHARED, "megasas", instance)) {
3863 printk(KERN_DEBUG "megasas: Failed to register IRQ\n"); 4062 printk(KERN_DEBUG "megasas: Failed to register IRQ\n");
3864 goto fail_irq; 4063 goto fail_irq;
3865 } 4064 }
@@ -3904,9 +4103,10 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
3904 4103
3905 pci_set_drvdata(pdev, NULL); 4104 pci_set_drvdata(pdev, NULL);
3906 instance->instancet->disable_intr(instance->reg_set); 4105 instance->instancet->disable_intr(instance->reg_set);
3907 free_irq(instance->pdev->irq, instance); 4106 free_irq(instance->msi_flag ? instance->msixentry.vector :
3908 4107 instance->pdev->irq, instance);
3909 megasas_release_mfi(instance); 4108 if (instance->msi_flag)
4109 pci_disable_msix(instance->pdev);
3910 4110
3911 fail_irq: 4111 fail_irq:
3912 fail_init_mfi: 4112 fail_init_mfi:
@@ -3916,9 +4116,13 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
3916 instance->evt_detail, 4116 instance->evt_detail,
3917 instance->evt_detail_h); 4117 instance->evt_detail_h);
3918 4118
3919 if (instance->producer) 4119 if (instance->producer) {
3920 pci_free_consistent(pdev, sizeof(u32), instance->producer, 4120 pci_free_consistent(pdev, sizeof(u32), instance->producer,
3921 instance->producer_h); 4121 instance->producer_h);
4122 megasas_release_mfi(instance);
4123 } else {
4124 megasas_release_fusion(instance);
4125 }
3922 if (instance->consumer) 4126 if (instance->consumer)
3923 pci_free_consistent(pdev, sizeof(u32), instance->consumer, 4127 pci_free_consistent(pdev, sizeof(u32), instance->consumer,
3924 instance->consumer_h); 4128 instance->consumer_h);
@@ -3990,7 +4194,9 @@ static void megasas_shutdown_controller(struct megasas_instance *instance,
3990 4194
3991 if (instance->aen_cmd) 4195 if (instance->aen_cmd)
3992 megasas_issue_blocked_abort_cmd(instance, instance->aen_cmd); 4196 megasas_issue_blocked_abort_cmd(instance, instance->aen_cmd);
3993 4197 if (instance->map_update_cmd)
4198 megasas_issue_blocked_abort_cmd(instance,
4199 instance->map_update_cmd);
3994 dcmd = &cmd->frame->dcmd; 4200 dcmd = &cmd->frame->dcmd;
3995 4201
3996 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4202 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
@@ -4046,7 +4252,10 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
4046 4252
4047 pci_set_drvdata(instance->pdev, instance); 4253 pci_set_drvdata(instance->pdev, instance);
4048 instance->instancet->disable_intr(instance->reg_set); 4254 instance->instancet->disable_intr(instance->reg_set);
4049 free_irq(instance->pdev->irq, instance); 4255 free_irq(instance->msi_flag ? instance->msixentry.vector :
4256 instance->pdev->irq, instance);
4257 if (instance->msi_flag)
4258 pci_disable_msix(instance->pdev);
4050 4259
4051 pci_save_state(pdev); 4260 pci_save_state(pdev);
4052 pci_disable_device(pdev); 4261 pci_disable_device(pdev);
@@ -4092,9 +4301,6 @@ megasas_resume(struct pci_dev *pdev)
4092 * Initialize MFI Firmware 4301 * Initialize MFI Firmware
4093 */ 4302 */
4094 4303
4095 *instance->producer = 0;
4096 *instance->consumer = 0;
4097
4098 atomic_set(&instance->fw_outstanding, 0); 4304 atomic_set(&instance->fw_outstanding, 0);
4099 4305
4100 /* 4306 /*
@@ -4103,17 +4309,40 @@ megasas_resume(struct pci_dev *pdev)
4103 if (megasas_transition_to_ready(instance)) 4309 if (megasas_transition_to_ready(instance))
4104 goto fail_ready_state; 4310 goto fail_ready_state;
4105 4311
4106 if (megasas_issue_init_mfi(instance)) 4312 switch (instance->pdev->device) {
4107 goto fail_init_mfi; 4313 case PCI_DEVICE_ID_LSI_FUSION:
4314 {
4315 megasas_reset_reply_desc(instance);
4316 if (megasas_ioc_init_fusion(instance)) {
4317 megasas_free_cmds(instance);
4318 megasas_free_cmds_fusion(instance);
4319 goto fail_init_mfi;
4320 }
4321 if (!megasas_get_map_info(instance))
4322 megasas_sync_map_info(instance);
4323 }
4324 break;
4325 default:
4326 *instance->producer = 0;
4327 *instance->consumer = 0;
4328 if (megasas_issue_init_mfi(instance))
4329 goto fail_init_mfi;
4330 break;
4331 }
4108 4332
4109 tasklet_init(&instance->isr_tasklet, megasas_complete_cmd_dpc, 4333 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
4110 (unsigned long)instance); 4334 (unsigned long)instance);
4335
4336 /* Now re-enable MSI-X */
4337 if (instance->msi_flag)
4338 pci_enable_msix(instance->pdev, &instance->msixentry, 1);
4111 4339
4112 /* 4340 /*
4113 * Register IRQ 4341 * Register IRQ
4114 */ 4342 */
4115 if (request_irq(pdev->irq, megasas_isr, IRQF_SHARED, 4343 if (request_irq(instance->msi_flag ? instance->msixentry.vector :
4116 "megasas", instance)) { 4344 pdev->irq, instance->instancet->service_isr,
4345 IRQF_SHARED, "megasas", instance)) {
4117 printk(KERN_ERR "megasas: Failed to register IRQ\n"); 4346 printk(KERN_ERR "megasas: Failed to register IRQ\n");
4118 goto fail_irq; 4347 goto fail_irq;
4119 } 4348 }
@@ -4171,10 +4400,12 @@ static void __devexit megasas_detach_one(struct pci_dev *pdev)
4171 int i; 4400 int i;
4172 struct Scsi_Host *host; 4401 struct Scsi_Host *host;
4173 struct megasas_instance *instance; 4402 struct megasas_instance *instance;
4403 struct fusion_context *fusion;
4174 4404
4175 instance = pci_get_drvdata(pdev); 4405 instance = pci_get_drvdata(pdev);
4176 instance->unload = 1; 4406 instance->unload = 1;
4177 host = instance->host; 4407 host = instance->host;
4408 fusion = instance->ctrl_context;
4178 4409
4179 if (poll_mode_io) 4410 if (poll_mode_io)
4180 del_timer_sync(&instance->io_completion_timer); 4411 del_timer_sync(&instance->io_completion_timer);
@@ -4211,18 +4442,37 @@ static void __devexit megasas_detach_one(struct pci_dev *pdev)
4211 4442
4212 instance->instancet->disable_intr(instance->reg_set); 4443 instance->instancet->disable_intr(instance->reg_set);
4213 4444
4214 free_irq(instance->pdev->irq, instance); 4445 free_irq(instance->msi_flag ? instance->msixentry.vector :
4215 4446 instance->pdev->irq, instance);
4216 megasas_release_mfi(instance); 4447 if (instance->msi_flag)
4217 4448 pci_disable_msix(instance->pdev);
4218 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail), 4449
4219 instance->evt_detail, instance->evt_detail_h); 4450 switch (instance->pdev->device) {
4220 4451 case PCI_DEVICE_ID_LSI_FUSION:
4221 pci_free_consistent(pdev, sizeof(u32), instance->producer, 4452 megasas_release_fusion(instance);
4222 instance->producer_h); 4453 for (i = 0; i < 2 ; i++)
4223 4454 if (fusion->ld_map[i])
4224 pci_free_consistent(pdev, sizeof(u32), instance->consumer, 4455 dma_free_coherent(&instance->pdev->dev,
4225 instance->consumer_h); 4456 fusion->map_sz,
4457 fusion->ld_map[i],
4458 fusion->
4459 ld_map_phys[i]);
4460 kfree(instance->ctrl_context);
4461 break;
4462 default:
4463 megasas_release_mfi(instance);
4464 pci_free_consistent(pdev,
4465 sizeof(struct megasas_evt_detail),
4466 instance->evt_detail,
4467 instance->evt_detail_h);
4468 pci_free_consistent(pdev, sizeof(u32),
4469 instance->producer,
4470 instance->producer_h);
4471 pci_free_consistent(pdev, sizeof(u32),
4472 instance->consumer,
4473 instance->consumer_h);
4474 break;
4475 }
4226 4476
4227 scsi_host_put(host); 4477 scsi_host_put(host);
4228 4478
@@ -4986,6 +5236,7 @@ megasas_aen_polling(struct work_struct *work)
4986 break; 5236 break;
4987 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED: 5237 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
4988 case MR_EVT_FOREIGN_CFG_IMPORTED: 5238 case MR_EVT_FOREIGN_CFG_IMPORTED:
5239 case MR_EVT_LD_STATE_CHANGE:
4989 doscan = 1; 5240 doscan = 1;
4990 break; 5241 break;
4991 default: 5242 default:
@@ -5165,7 +5416,7 @@ err_dcf_attr_ver:
5165 pci_unregister_driver(&megasas_pci_driver); 5416 pci_unregister_driver(&megasas_pci_driver);
5166err_pcidrv: 5417err_pcidrv:
5167 unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl"); 5418 unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
5168 return rval; 5419 return rval;
5169} 5420}
5170 5421
5171/** 5422/**
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
new file mode 100644
index 000000000000..53fa96ae2b3e
--- /dev/null
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -0,0 +1,516 @@
1/*
2 * Linux MegaRAID driver for SAS based RAID controllers
3 *
4 * Copyright (c) 2009-2011 LSI Corporation.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 2
9 * of the License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 * FILE: megaraid_sas_fp.c
21 *
22 * Authors: LSI Corporation
23 * Sumant Patro
24 * Varad Talamacki
25 * Manoj Jose
26 *
27 * Send feedback to: <megaraidlinux@lsi.com>
28 *
29 * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035
30 * ATTN: Linuxraid
31 */
32
33#include <linux/kernel.h>
34#include <linux/types.h>
35#include <linux/pci.h>
36#include <linux/list.h>
37#include <linux/moduleparam.h>
38#include <linux/module.h>
39#include <linux/spinlock.h>
40#include <linux/interrupt.h>
41#include <linux/delay.h>
42#include <linux/smp_lock.h>
43#include <linux/uio.h>
44#include <linux/uaccess.h>
45#include <linux/fs.h>
46#include <linux/compat.h>
47#include <linux/blkdev.h>
48#include <linux/poll.h>
49
50#include <scsi/scsi.h>
51#include <scsi/scsi_cmnd.h>
52#include <scsi/scsi_device.h>
53#include <scsi/scsi_host.h>
54
55#include "megaraid_sas_fusion.h"
56#include <asm/div64.h>
57
58#define ABS_DIFF(a, b) (((a) > (b)) ? ((a) - (b)) : ((b) - (a)))
59#define MR_LD_STATE_OPTIMAL 3
60#define FALSE 0
61#define TRUE 1
62
63/* Prototypes */
64void
65mr_update_load_balance_params(struct MR_FW_RAID_MAP_ALL *map,
66 struct LD_LOAD_BALANCE_INFO *lbInfo);
67
68u32 mega_mod64(u64 dividend, u32 divisor)
69{
70 u64 d;
71 u32 remainder;
72
73 if (!divisor)
74 printk(KERN_ERR "megasas : DIVISOR is zero, in div fn\n");
75 d = dividend;
76 remainder = do_div(d, divisor);
77 return remainder;
78}
79
80/**
81 * @param dividend : Dividend
82 * @param divisor : Divisor
83 *
84 * @return quotient
85 **/
86u64 mega_div64_32(uint64_t dividend, uint32_t divisor)
87{
88 u32 remainder;
89 u64 d;
90
91 if (!divisor)
92 printk(KERN_ERR "megasas : DIVISOR is zero in mod fn\n");
93
94 d = dividend;
95 remainder = do_div(d, divisor);
96
97 return d;
98}
99
/* Return the RAID geometry descriptor for logical drive @ld. */
struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map)
{
	return &map->raidMap.ldSpanMap[ld].ldRaid;
}
104
/* Return the first span-block descriptor of logical drive @ld. */
static struct MR_SPAN_BLOCK_INFO *MR_LdSpanInfoGet(u32 ld,
						   struct MR_FW_RAID_MAP_ALL
						   *map)
{
	return &map->raidMap.ldSpanMap[ld].spanBlock[0];
}
111
/* Look up the data-arm map entry @armIdx of logical drive @ld. */
static u8 MR_LdDataArmGet(u32 ld, u32 armIdx, struct MR_FW_RAID_MAP_ALL *map)
{
	return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx];
}
116
/* Return the physical-drive index for arm @arm of array @ar. */
static u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map)
{
	return map->raidMap.arMapInfo[ar].pd[arm];
}
121
/* Return the array reference backing span @span of logical drive @ld. */
static u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map)
{
	return map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef;
}
126
/* Return the current device handle of physical drive @pd. */
static u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map)
{
	return map->raidMap.devHndlInfo[pd].curDevHdl;
}
131
/* Return the target ID of logical drive @ld. */
u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map)
{
	return map->raidMap.ldSpanMap[ld].ldRaid.targetId;
}
136
/* Translate target ID @ldTgtId to the firmware logical-drive index. */
u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map)
{
	return map->raidMap.ldTgtIdToLd[ldTgtId];
}
141
/* Return span @span of logical drive @ld. */
static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span,
					  struct MR_FW_RAID_MAP_ALL *map)
{
	return &map->raidMap.ldSpanMap[ld].spanBlock[span].span;
}
147
148/*
149 * This function will validate Map info data provided by FW
150 */
151u8 MR_ValidateMapInfo(struct MR_FW_RAID_MAP_ALL *map,
152 struct LD_LOAD_BALANCE_INFO *lbInfo)
153{
154 struct MR_FW_RAID_MAP *pFwRaidMap = &map->raidMap;
155
156 if (pFwRaidMap->totalSize !=
157 (sizeof(struct MR_FW_RAID_MAP) -sizeof(struct MR_LD_SPAN_MAP) +
158 (sizeof(struct MR_LD_SPAN_MAP) *pFwRaidMap->ldCount))) {
159 printk(KERN_ERR "megasas: map info structure size 0x%x is not matching with ld count\n",
160 (unsigned int)((sizeof(struct MR_FW_RAID_MAP) -
161 sizeof(struct MR_LD_SPAN_MAP)) +
162 (sizeof(struct MR_LD_SPAN_MAP) *
163 pFwRaidMap->ldCount)));
164 printk(KERN_ERR "megasas: span map %x, pFwRaidMap->totalSize "
165 ": %x\n", (unsigned int)sizeof(struct MR_LD_SPAN_MAP),
166 pFwRaidMap->totalSize);
167 return 0;
168 }
169
170 mr_update_load_balance_params(map, lbInfo);
171
172 return 1;
173}
174
/*
 * MR_GetSpanBlock - locate the span containing logical @row of LD @ld.
 *
 * Walks each span's quad elements looking for one whose [logStart,
 * logEnd] range covers @row with the row aligned to the quad's diff.
 * On a match, the span-relative start block is stored in *span_blk
 * (when non-NULL) and the span index is returned.  A quad with
 * diff == 0 sets *div_error = 1 (caller must check before trusting the
 * result).  If no quad matches, raid->spanDepth is returned.
 */
u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
		    struct MR_FW_RAID_MAP_ALL *map, int *div_error)
{
	struct MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
	struct MR_QUAD_ELEMENT *quad;
	struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	u32 span, j;

	for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) {

		for (j = 0; j < pSpanBlock->block_span_info.noElements; j++) {
			quad = &pSpanBlock->block_span_info.quad[j];

			/* Zero diff would divide by zero below; flag it */
			if (quad->diff == 0) {
				*div_error = 1;
				return span;
			}
			if (quad->logStart <= row && row <= quad->logEnd &&
			    (mega_mod64(row-quad->logStart, quad->diff)) == 0) {
				if (span_blk != NULL) {
					u64 blk, debugBlk;
					blk =
						mega_div64_32(
							(row-quad->logStart),
							quad->diff);
					debugBlk = blk;

					/* scale to blocks via stripe size */
					blk = (blk + quad->offsetInSpan) <<
						raid->stripeShift;
					*span_blk = blk;
				}
				return span;
			}
		}
	}
	return span;
}
212
/*
******************************************************************************
*
* This routine calculates the arm, span and block for the specified stripe and
* reference in stripe.
*
* Inputs :
*
* ld - Logical drive number
* stripRow - Stripe number
* stripRef - Reference in stripe
*
* Outputs :
*
* span - Span number
* block - Absolute Block number in the physical disk
*
* Returns TRUE on success, FALSE when the geometry is inconsistent
* (zero rowSize/modFactor, span divide error) or the mapped physical
* drive is invalid (*pDevHandle is then set to MR_PD_INVALID, possibly
* replaced by the RAID-1 mirror arm's handle).
*/
u8 MR_GetPhyParams(u32 ld, u64 stripRow, u16 stripRef, u64 *pdBlock,
		   u16 *pDevHandle, struct RAID_CONTEXT *pRAID_Context,
		   struct MR_FW_RAID_MAP_ALL *map)
{
	struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	u32 pd, arRef;
	u8 physArm, span;
	u64 row;
	u8 retval = TRUE;
	int error_code = 0;

	/* Row index: rowDataSize strips per row */
	row = mega_div64_32(stripRow, raid->rowDataSize);

	if (raid->level == 6) {
		/* logical arm within row */
		u32 logArm = mega_mod64(stripRow, raid->rowDataSize);
		u32 rowMod, armQ, arm;

		if (raid->rowSize == 0)
			return FALSE;
		/* get logical row mod */
		rowMod = mega_mod64(row, raid->rowSize);
		armQ = raid->rowSize-1-rowMod; /* index of Q drive */
		arm = armQ+1+logArm; /* data always logically follows Q */
		if (arm >= raid->rowSize) /* handle wrap condition */
			arm -= raid->rowSize;
		physArm = (u8)arm;
	} else {
		if (raid->modFactor == 0)
			return FALSE;
		physArm = MR_LdDataArmGet(ld, mega_mod64(stripRow,
							 raid->modFactor),
					  map);
	}

	if (raid->spanDepth == 1) {
		/* Single span: block offset is just the scaled row */
		span = 0;
		*pdBlock = row << raid->stripeShift;
	} else {
		span = (u8)MR_GetSpanBlock(ld, row, pdBlock, map, &error_code);
		if (error_code == 1)
			return FALSE;
	}

	/* Get the array on which this span is present */
	arRef = MR_LdSpanArrayGet(ld, span, map);
	pd = MR_ArPdGet(arRef, physArm, map); /* Get the pd */

	if (pd != MR_PD_INVALID)
		/* Get dev handle from Pd. */
		*pDevHandle = MR_PdDevHandleGet(pd, map);
	else {
		*pDevHandle = MR_PD_INVALID; /* set dev handle as invalid. */
		if (raid->level >= 5)
			pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
		else if (raid->level == 1) {
			/* Get alternate Pd. */
			pd = MR_ArPdGet(arRef, physArm + 1, map);
			if (pd != MR_PD_INVALID)
				/* Get dev handle from Pd */
				*pDevHandle = MR_PdDevHandleGet(pd, map);
		}
		retval = FALSE;
	}

	/* Absolute block = span start + in-span block + ref in stripe */
	*pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;
	pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
		physArm;
	return retval;
}
300
/*
******************************************************************************
*
* MR_BuildRaidContext function
*
* This function will initiate command processing. The start/end row and strip
* information is calculated then the lock is acquired.
* This function will return 0 if region lock was acquired OR return num strips.
*
* Fills in @pRAID_Context (region lock range/type, target id, timeout)
* and decides whether the IO is fast-path capable (io_info->fpOkForIo).
*/
u8
MR_BuildRaidContext(struct IO_REQUEST_INFO *io_info,
		    struct RAID_CONTEXT *pRAID_Context,
		    struct MR_FW_RAID_MAP_ALL *map)
{
	struct MR_LD_RAID *raid;
	u32 ld, stripSize, stripe_mask;
	u64 endLba, endStrip, endRow, start_row, start_strip;
	u64 regStart;
	u32 regSize;
	u8 num_strips, numRows;
	u16 ref_in_start_stripe, ref_in_end_stripe;
	u64 ldStartBlock;
	u32 numBlocks, ldTgtId;
	u8 isRead;
	u8 retval = 0;

	ldStartBlock = io_info->ldStartBlock;
	numBlocks = io_info->numBlocks;
	ldTgtId = io_info->ldTgtId;
	isRead = io_info->isRead;

	ld = MR_TargetIdToLdGet(ldTgtId, map);
	raid = MR_LdRaidGet(ld, map);

	stripSize = 1 << raid->stripeShift;
	stripe_mask = stripSize-1;
	/*
	 * calculate starting row and stripe, and number of strips and rows
	 */
	start_strip = ldStartBlock >> raid->stripeShift;
	ref_in_start_stripe = (u16)(ldStartBlock & stripe_mask);
	endLba = ldStartBlock + numBlocks - 1;
	ref_in_end_stripe = (u16)(endLba & stripe_mask);
	endStrip = endLba >> raid->stripeShift;
	num_strips = (u8)(endStrip - start_strip + 1); /* End strip */
	if (raid->rowDataSize == 0)
		return FALSE;
	start_row = mega_div64_32(start_strip, raid->rowDataSize);
	endRow = mega_div64_32(endStrip, raid->rowDataSize);
	numRows = (u8)(endRow - start_row + 1);

	/*
	 * calculate region info.
	 */

	/* assume region is at the start of the first row */
	regStart = start_row << raid->stripeShift;
	/* assume this IO needs the full row - we'll adjust if not true */
	regSize = stripSize;

	/* If IO spans more than 1 strip, fp is not possible
	   FP is not possible for writes on non-0 raid levels
	   FP is not possible if LD is not capable */
	if (num_strips > 1 || (!isRead && raid->level != 0) ||
	    !raid->capability.fpCapable) {
		io_info->fpOkForIo = FALSE;
	} else {
		io_info->fpOkForIo = TRUE;
	}

	if (numRows == 1) {
		/* single-strip IOs can always lock only the data needed */
		if (num_strips == 1) {
			regStart += ref_in_start_stripe;
			regSize = numBlocks;
		}
		/* multi-strip IOs always need to full stripe locked */
	} else {
		if (start_strip == (start_row + 1) * raid->rowDataSize - 1) {
			/* If the start strip is the last in the start row */
			regStart += ref_in_start_stripe;
			regSize = stripSize - ref_in_start_stripe;
			/* initialize count to sectors from startref to end
			   of strip */
		}

		if (numRows > 2)
			/* Add complete rows in the middle of the transfer */
			regSize += (numRows-2) << raid->stripeShift;

		/* if IO ends within first strip of last row */
		if (endStrip == endRow*raid->rowDataSize)
			regSize += ref_in_end_stripe+1;
		else
			regSize += stripSize;
	}

	/* Program the region lock and target info into the RAID context */
	pRAID_Context->timeoutValue = map->raidMap.fpPdIoTimeoutSec;
	pRAID_Context->regLockFlags = (isRead) ? REGION_TYPE_SHARED_READ :
		raid->regTypeReqOnWrite;
	pRAID_Context->VirtualDiskTgtId = raid->targetId;
	pRAID_Context->regLockRowLBA = regStart;
	pRAID_Context->regLockLength = regSize;
	pRAID_Context->configSeqNum = raid->seqNum;

	/*Get Phy Params only if FP capable, or else leave it to MR firmware
	  to do the calculation.*/
	if (io_info->fpOkForIo) {
		retval = MR_GetPhyParams(ld, start_strip, ref_in_start_stripe,
					 &io_info->pdBlock,
					 &io_info->devHandle, pRAID_Context,
					 map);
		/* If IO on an invalid Pd, then FP is not possible */
		if (io_info->devHandle == MR_PD_INVALID)
			io_info->fpOkForIo = FALSE;
		return retval;
	} else if (isRead) {
		uint stripIdx;
		for (stripIdx = 0; stripIdx < num_strips; stripIdx++) {
			if (!MR_GetPhyParams(ld, start_strip + stripIdx,
					     ref_in_start_stripe,
					     &io_info->pdBlock,
					     &io_info->devHandle,
					     pRAID_Context, map))
				return TRUE;
		}
	}
	return TRUE;
}
430
431void
432mr_update_load_balance_params(struct MR_FW_RAID_MAP_ALL *map,
433 struct LD_LOAD_BALANCE_INFO *lbInfo)
434{
435 int ldCount;
436 u16 ld;
437 struct MR_LD_RAID *raid;
438
439 for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
440 ld = MR_TargetIdToLdGet(ldCount, map);
441 if (ld >= MAX_LOGICAL_DRIVES) {
442 lbInfo[ldCount].loadBalanceFlag = 0;
443 continue;
444 }
445
446 raid = MR_LdRaidGet(ld, map);
447
448 /* Two drive Optimal RAID 1 */
449 if ((raid->level == 1) && (raid->rowSize == 2) &&
450 (raid->spanDepth == 1) && raid->ldState ==
451 MR_LD_STATE_OPTIMAL) {
452 u32 pd, arRef;
453
454 lbInfo[ldCount].loadBalanceFlag = 1;
455
456 /* Get the array on which this span is present */
457 arRef = MR_LdSpanArrayGet(ld, 0, map);
458
459 /* Get the Pd */
460 pd = MR_ArPdGet(arRef, 0, map);
461 /* Get dev handle from Pd */
462 lbInfo[ldCount].raid1DevHandle[0] =
463 MR_PdDevHandleGet(pd, map);
464 /* Get the Pd */
465 pd = MR_ArPdGet(arRef, 1, map);
466
467 /* Get the dev handle from Pd */
468 lbInfo[ldCount].raid1DevHandle[1] =
469 MR_PdDevHandleGet(pd, map);
470 } else
471 lbInfo[ldCount].loadBalanceFlag = 0;
472 }
473}
474
475u8 megasas_get_best_arm(struct LD_LOAD_BALANCE_INFO *lbInfo, u8 arm, u64 block,
476 u32 count)
477{
478 u16 pend0, pend1;
479 u64 diff0, diff1;
480 u8 bestArm;
481
482 /* get the pending cmds for the data and mirror arms */
483 pend0 = atomic_read(&lbInfo->scsi_pending_cmds[0]);
484 pend1 = atomic_read(&lbInfo->scsi_pending_cmds[1]);
485
486 /* Determine the disk whose head is nearer to the req. block */
487 diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[0]);
488 diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[1]);
489 bestArm = (diff0 <= diff1 ? 0 : 1);
490
491 if ((bestArm == arm && pend0 > pend1 + 16) ||
492 (bestArm != arm && pend1 > pend0 + 16))
493 bestArm ^= 1;
494
495 /* Update the last accessed block on the correct pd */
496 lbInfo->last_accessed_block[bestArm] = block + count - 1;
497
498 return bestArm;
499}
500
501u16 get_updated_dev_handle(struct LD_LOAD_BALANCE_INFO *lbInfo,
502 struct IO_REQUEST_INFO *io_info)
503{
504 u8 arm, old_arm;
505 u16 devHandle;
506
507 old_arm = lbInfo->raid1DevHandle[0] == io_info->devHandle ? 0 : 1;
508
509 /* get best new arm */
510 arm = megasas_get_best_arm(lbInfo, old_arm, io_info->ldStartBlock,
511 io_info->numBlocks);
512 devHandle = lbInfo->raid1DevHandle[arm];
513 atomic_inc(&lbInfo->scsi_pending_cmds[arm]);
514
515 return devHandle;
516}
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
new file mode 100644
index 000000000000..c1e09d5a6196
--- /dev/null
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -0,0 +1,2248 @@
1/*
2 * Linux MegaRAID driver for SAS based RAID controllers
3 *
4 * Copyright (c) 2009-2011 LSI Corporation.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 2
9 * of the License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 * FILE: megaraid_sas_fusion.c
21 *
22 * Authors: LSI Corporation
23 * Sumant Patro
24 * Adam Radford <linuxraid@lsi.com>
25 *
26 * Send feedback to: <megaraidlinux@lsi.com>
27 *
28 * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035
29 * ATTN: Linuxraid
30 */
31
32#include <linux/kernel.h>
33#include <linux/types.h>
34#include <linux/pci.h>
35#include <linux/list.h>
36#include <linux/moduleparam.h>
37#include <linux/module.h>
38#include <linux/spinlock.h>
39#include <linux/interrupt.h>
40#include <linux/delay.h>
41#include <linux/smp_lock.h>
42#include <linux/uio.h>
43#include <linux/uaccess.h>
44#include <linux/fs.h>
45#include <linux/compat.h>
46#include <linux/blkdev.h>
47#include <linux/mutex.h>
48#include <linux/poll.h>
49
50#include <scsi/scsi.h>
51#include <scsi/scsi_cmnd.h>
52#include <scsi/scsi_device.h>
53#include <scsi/scsi_host.h>
54
55#include "megaraid_sas_fusion.h"
56#include "megaraid_sas.h"
57
58extern void megasas_free_cmds(struct megasas_instance *instance);
59extern struct megasas_cmd *megasas_get_cmd(struct megasas_instance
60 *instance);
61extern void
62megasas_complete_cmd(struct megasas_instance *instance,
63 struct megasas_cmd *cmd, u8 alt_status);
64int megasas_is_ldio(struct scsi_cmnd *cmd);
65int
66wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd);
67
68void
69megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd);
70int megasas_alloc_cmds(struct megasas_instance *instance);
71int
72megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs);
73int
74megasas_issue_polled(struct megasas_instance *instance,
75 struct megasas_cmd *cmd);
76
77u8
78MR_BuildRaidContext(struct IO_REQUEST_INFO *io_info,
79 struct RAID_CONTEXT *pRAID_Context,
80 struct MR_FW_RAID_MAP_ALL *map);
81u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map);
82struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
83
84u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
85u8 MR_ValidateMapInfo(struct MR_FW_RAID_MAP_ALL *map,
86 struct LD_LOAD_BALANCE_INFO *lbInfo);
87u16 get_updated_dev_handle(struct LD_LOAD_BALANCE_INFO *lbInfo,
88 struct IO_REQUEST_INFO *in_info);
89int megasas_transition_to_ready(struct megasas_instance *instance);
90void megaraid_sas_kill_hba(struct megasas_instance *instance);
91
92extern u32 megasas_dbg_lvl;
93
/**
 * megasas_enable_intr_fusion - Enables interrupts
 * @regs: MFI register set
 *
 * Unmasks the fusion interrupt sources by writing the complement of
 * MFI_FUSION_ENABLE_INTERRUPT_MASK to the outbound interrupt mask.
 */
void
megasas_enable_intr_fusion(struct megasas_register_set __iomem *regs)
{
	writel(~MFI_FUSION_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}
106
/**
 * megasas_disable_intr_fusion - Disables interrupt
 * @regs: MFI register set
 *
 * Masks all interrupt sources by writing all-ones to the outbound
 * interrupt mask register.
 */
void
megasas_disable_intr_fusion(struct megasas_register_set __iomem *regs)
{
	u32 mask = 0xFFFFFFFF;
	u32 status;

	writel(mask, &regs->outbound_intr_mask);
	/* Dummy readl to force pci flush */
	status = readl(&regs->outbound_intr_mask);
}
121
/**
 * megasas_clear_intr_fusion - Check for and acknowledge our interrupt
 * @regs: MFI register set
 *
 * Returns 1 if the outbound interrupt status indicates an interrupt for
 * this adapter (acknowledging it when bit 0 is set), 0 otherwise.
 */
int
megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs)
{
	u32 status;
	/*
	 * Check if it is our interrupt
	 */
	status = readl(&regs->outbound_intr_status);

	if (status & 1) {
		/* Write-1-to-clear, then read back to flush the write */
		writel(status, &regs->outbound_intr_status);
		readl(&regs->outbound_intr_status);
		return 1;
	}
	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
		return 0;

	/*
	 * dummy read to flush PCI
	 */
	readl(&regs->outbound_intr_status);

	return 1;
}
146
/**
 * megasas_get_cmd_fusion - Get a command from the free pool
 * @instance:		Adapter soft state
 *
 * Returns a free command from the pool, or NULL when the pool is empty
 * (an error is logged in that case).  The pool is protected by the
 * IRQ-safe fusion->cmd_pool_lock.
 */
struct megasas_cmd_fusion *megasas_get_cmd_fusion(struct megasas_instance
						  *instance)
{
	unsigned long flags;
	struct fusion_context *fusion =
		(struct fusion_context *)instance->ctrl_context;
	struct megasas_cmd_fusion *cmd = NULL;

	spin_lock_irqsave(&fusion->cmd_pool_lock, flags);

	if (!list_empty(&fusion->cmd_pool)) {
		/* Detach the head of the free list */
		cmd = list_entry((&fusion->cmd_pool)->next,
				 struct megasas_cmd_fusion, list);
		list_del_init(&cmd->list);
	} else {
		printk(KERN_ERR "megasas: Command pool (fusion) empty!\n");
	}

	spin_unlock_irqrestore(&fusion->cmd_pool_lock, flags);
	return cmd;
}
174
/**
 * megasas_return_cmd_fusion - Return a cmd to free command pool
 * @instance:		Adapter soft state
 * @cmd:		Command packet to be returned to free command pool
 *
 * Clears the command's scmd/sync state before appending it back to the
 * free list, under the IRQ-safe cmd_pool_lock.
 */
static inline void
megasas_return_cmd_fusion(struct megasas_instance *instance,
			  struct megasas_cmd_fusion *cmd)
{
	unsigned long flags;
	struct fusion_context *fusion =
		(struct fusion_context *)instance->ctrl_context;

	spin_lock_irqsave(&fusion->cmd_pool_lock, flags);

	cmd->scmd = NULL;
	cmd->sync_cmd_idx = (u32)ULONG_MAX;	/* mark as not a sync cmd */
	list_add_tail(&cmd->list, &fusion->cmd_pool);

	spin_unlock_irqrestore(&fusion->cmd_pool_lock, flags);
}
196
/**
 * megasas_teardown_frame_pool_fusion - Destroy the cmd frame DMA pool
 * @instance:				Adapter soft state
 *
 * Frees each command's SG frame and sense buffer back to their DMA
 * pools, then destroys both pools.  Safe to call on partially
 * populated pools (per-command pointers are NULL-checked); bails out
 * if either pool was never created.
 */
static void megasas_teardown_frame_pool_fusion(
	struct megasas_instance *instance)
{
	int i;
	struct fusion_context *fusion = instance->ctrl_context;

	u16 max_cmd = instance->max_fw_cmds;

	struct megasas_cmd_fusion *cmd;

	if (!fusion->sg_dma_pool || !fusion->sense_dma_pool) {
		printk(KERN_ERR "megasas: dma pool is null. SG Pool %p, "
		       "sense pool : %p\n", fusion->sg_dma_pool,
		       fusion->sense_dma_pool);
		return;
	}

	/*
	 * Return all frames to pool
	 */
	for (i = 0; i < max_cmd; i++) {

		cmd = fusion->cmd_list[i];

		if (cmd->sg_frame)
			pci_pool_free(fusion->sg_dma_pool, cmd->sg_frame,
				      cmd->sg_frame_phys_addr);

		if (cmd->sense)
			pci_pool_free(fusion->sense_dma_pool, cmd->sense,
				      cmd->sense_phys_addr);
	}

	/*
	 * Now destroy the pool itself
	 */
	pci_pool_destroy(fusion->sg_dma_pool);
	pci_pool_destroy(fusion->sense_dma_pool);

	fusion->sg_dma_pool = NULL;
	fusion->sense_dma_pool = NULL;
}
243
244/**
245 * megasas_free_cmds_fusion - Free all the cmds in the free cmd pool
246 * @instance: Adapter soft state
247 */
248void
249megasas_free_cmds_fusion(struct megasas_instance *instance)
250{
251 int i;
252 struct fusion_context *fusion = instance->ctrl_context;
253
254 u32 max_cmds, req_sz, reply_sz, io_frames_sz;
255
256
257 req_sz = fusion->request_alloc_sz;
258 reply_sz = fusion->reply_alloc_sz;
259 io_frames_sz = fusion->io_frames_alloc_sz;
260
261 max_cmds = instance->max_fw_cmds;
262
263 /* Free descriptors and request Frames memory */
264 if (fusion->req_frames_desc)
265 dma_free_coherent(&instance->pdev->dev, req_sz,
266 fusion->req_frames_desc,
267 fusion->req_frames_desc_phys);
268
269 if (fusion->reply_frames_desc) {
270 pci_pool_free(fusion->reply_frames_desc_pool,
271 fusion->reply_frames_desc,
272 fusion->reply_frames_desc_phys);
273 pci_pool_destroy(fusion->reply_frames_desc_pool);
274 }
275
276 if (fusion->io_request_frames) {
277 pci_pool_free(fusion->io_request_frames_pool,
278 fusion->io_request_frames,
279 fusion->io_request_frames_phys);
280 pci_pool_destroy(fusion->io_request_frames_pool);
281 }
282
283 /* Free the Fusion frame pool */
284 megasas_teardown_frame_pool_fusion(instance);
285
286 /* Free all the commands in the cmd_list */
287 for (i = 0; i < max_cmds; i++)
288 kfree(fusion->cmd_list[i]);
289
290 /* Free the cmd_list buffer itself */
291 kfree(fusion->cmd_list);
292 fusion->cmd_list = NULL;
293
294 INIT_LIST_HEAD(&fusion->cmd_pool);
295}
296
/**
 * megasas_create_frame_pool_fusion - Creates DMA pool for cmd frames
 * @instance:			Adapter soft state
 *
 * Creates the SG chain-frame and sense-buffer DMA pools, then allocates
 * one frame of each per command in cmd_list.  Returns 0 on success,
 * -ENOMEM on failure (any partial allocations are torn down via
 * megasas_teardown_frame_pool_fusion()).
 */
static int megasas_create_frame_pool_fusion(struct megasas_instance *instance)
{
	int i;
	u32 max_cmd;
	struct fusion_context *fusion;
	struct megasas_cmd_fusion *cmd;
	u32 total_sz_chain_frame;

	fusion = instance->ctrl_context;
	max_cmd = instance->max_fw_cmds;

	total_sz_chain_frame = MEGASAS_MAX_SZ_CHAIN_FRAME;

	/*
	 * Use DMA pool facility provided by PCI layer
	 */

	fusion->sg_dma_pool = pci_pool_create("megasas sg pool fusion",
					      instance->pdev,
					      total_sz_chain_frame, 4,
					      0);
	if (!fusion->sg_dma_pool) {
		printk(KERN_DEBUG "megasas: failed to setup request pool "
		       "fusion\n");
		return -ENOMEM;
	}
	fusion->sense_dma_pool = pci_pool_create("megasas sense pool fusion",
						 instance->pdev,
						 SCSI_SENSE_BUFFERSIZE, 64, 0);

	if (!fusion->sense_dma_pool) {
		printk(KERN_DEBUG "megasas: failed to setup sense pool "
		       "fusion\n");
		pci_pool_destroy(fusion->sg_dma_pool);
		fusion->sg_dma_pool = NULL;
		return -ENOMEM;
	}

	/*
	 * Allocate and attach a frame to each of the commands in cmd_list
	 */
	for (i = 0; i < max_cmd; i++) {

		cmd = fusion->cmd_list[i];

		cmd->sg_frame = pci_pool_alloc(fusion->sg_dma_pool,
					       GFP_KERNEL,
					       &cmd->sg_frame_phys_addr);

		cmd->sense = pci_pool_alloc(fusion->sense_dma_pool,
					    GFP_KERNEL, &cmd->sense_phys_addr);
		/*
		 * megasas_teardown_frame_pool_fusion() takes care of freeing
		 * whatever has been allocated
		 */
		if (!cmd->sg_frame || !cmd->sense) {
			printk(KERN_DEBUG "megasas: pci_pool_alloc failed\n");
			megasas_teardown_frame_pool_fusion(instance);
			return -ENOMEM;
		}
	}
	return 0;
}
365
366/**
367 * megasas_alloc_cmds_fusion - Allocates the command packets
368 * @instance: Adapter soft state
369 *
370 *
371 * Each frame has a 32-bit field called context. This context is used to get
372 * back the megasas_cmd_fusion from the frame when a frame gets completed
373 * In this driver, the 32 bit values are the indices into an array cmd_list.
374 * This array is used only to look up the megasas_cmd_fusion given the context.
375 * The free commands themselves are maintained in a linked list called cmd_pool.
376 *
377 * cmds are formed in the io_request and sg_frame members of the
378 * megasas_cmd_fusion. The context field is used to get a request descriptor
379 * and is used as SMID of the cmd.
380 * SMID value range is from 1 to max_fw_cmds.
381 */
382int
383megasas_alloc_cmds_fusion(struct megasas_instance *instance)
384{
385 int i, j;
386 u32 max_cmd, io_frames_sz;
387 struct fusion_context *fusion;
388 struct megasas_cmd_fusion *cmd;
389 union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
390 u32 offset;
391 dma_addr_t io_req_base_phys;
392 u8 *io_req_base;
393
394 fusion = instance->ctrl_context;
395
396 max_cmd = instance->max_fw_cmds;
397
398 fusion->req_frames_desc =
399 dma_alloc_coherent(&instance->pdev->dev,
400 fusion->request_alloc_sz,
401 &fusion->req_frames_desc_phys, GFP_KERNEL);
402
403 if (!fusion->req_frames_desc) {
404 printk(KERN_ERR "megasas; Could not allocate memory for "
405 "request_frames\n");
406 goto fail_req_desc;
407 }
408
409 fusion->reply_frames_desc_pool =
410 pci_pool_create("reply_frames pool", instance->pdev,
411 fusion->reply_alloc_sz, 16, 0);
412
413 if (!fusion->reply_frames_desc_pool) {
414 printk(KERN_ERR "megasas; Could not allocate memory for "
415 "reply_frame pool\n");
416 goto fail_reply_desc;
417 }
418
419 fusion->reply_frames_desc =
420 pci_pool_alloc(fusion->reply_frames_desc_pool, GFP_KERNEL,
421 &fusion->reply_frames_desc_phys);
422 if (!fusion->reply_frames_desc) {
423 printk(KERN_ERR "megasas; Could not allocate memory for "
424 "reply_frame pool\n");
425 pci_pool_destroy(fusion->reply_frames_desc_pool);
426 goto fail_reply_desc;
427 }
428
429 reply_desc = fusion->reply_frames_desc;
430 for (i = 0; i < fusion->reply_q_depth; i++, reply_desc++)
431 reply_desc->Words = ULLONG_MAX;
432
433 io_frames_sz = fusion->io_frames_alloc_sz;
434
435 fusion->io_request_frames_pool =
436 pci_pool_create("io_request_frames pool", instance->pdev,
437 fusion->io_frames_alloc_sz, 16, 0);
438
439 if (!fusion->io_request_frames_pool) {
440 printk(KERN_ERR "megasas: Could not allocate memory for "
441 "io_request_frame pool\n");
442 goto fail_io_frames;
443 }
444
445 fusion->io_request_frames =
446 pci_pool_alloc(fusion->io_request_frames_pool, GFP_KERNEL,
447 &fusion->io_request_frames_phys);
448 if (!fusion->io_request_frames) {
449 printk(KERN_ERR "megasas: Could not allocate memory for "
450 "io_request_frames frames\n");
451 pci_pool_destroy(fusion->io_request_frames_pool);
452 goto fail_io_frames;
453 }
454
455 /*
456 * fusion->cmd_list is an array of struct megasas_cmd_fusion pointers.
457 * Allocate the dynamic array first and then allocate individual
458 * commands.
459 */
460 fusion->cmd_list = kmalloc(sizeof(struct megasas_cmd_fusion *)
461 *max_cmd, GFP_KERNEL);
462
463 if (!fusion->cmd_list) {
464 printk(KERN_DEBUG "megasas: out of memory. Could not alloc "
465 "memory for cmd_list_fusion\n");
466 goto fail_cmd_list;
467 }
468
469 memset(fusion->cmd_list, 0, sizeof(struct megasas_cmd_fusion *)
470 *max_cmd);
471
472 max_cmd = instance->max_fw_cmds;
473 for (i = 0; i < max_cmd; i++) {
474 fusion->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd_fusion),
475 GFP_KERNEL);
476 if (!fusion->cmd_list[i]) {
477 printk(KERN_ERR "Could not alloc cmd list fusion\n");
478
479 for (j = 0; j < i; j++)
480 kfree(fusion->cmd_list[j]);
481
482 kfree(fusion->cmd_list);
483 fusion->cmd_list = NULL;
484 goto fail_cmd_list;
485 }
486 }
487
488 /* The first 256 bytes (SMID 0) is not used. Don't add to cmd list */
489 io_req_base = fusion->io_request_frames +
490 MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
491 io_req_base_phys = fusion->io_request_frames_phys +
492 MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
493
494 /*
495 * Add all the commands to command pool (fusion->cmd_pool)
496 */
497
498 /* SMID 0 is reserved. Set SMID/index from 1 */
499 for (i = 0; i < max_cmd; i++) {
500 cmd = fusion->cmd_list[i];
501 offset = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
502 memset(cmd, 0, sizeof(struct megasas_cmd_fusion));
503 cmd->index = i + 1;
504 cmd->scmd = NULL;
505 cmd->sync_cmd_idx = (u32)ULONG_MAX; /* Set to Invalid */
506 cmd->instance = instance;
507 cmd->io_request =
508 (struct MPI2_RAID_SCSI_IO_REQUEST *)
509 (io_req_base + offset);
510 memset(cmd->io_request, 0,
511 sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
512 cmd->io_request_phys_addr = io_req_base_phys + offset;
513
514 list_add_tail(&cmd->list, &fusion->cmd_pool);
515 }
516
517 /*
518 * Create a frame pool and assign one frame to each cmd
519 */
520 if (megasas_create_frame_pool_fusion(instance)) {
521 printk(KERN_DEBUG "megasas: Error creating frame DMA pool\n");
522 megasas_free_cmds_fusion(instance);
523 goto fail_req_desc;
524 }
525
526 return 0;
527
528fail_cmd_list:
529 pci_pool_free(fusion->io_request_frames_pool, fusion->io_request_frames,
530 fusion->io_request_frames_phys);
531 pci_pool_destroy(fusion->io_request_frames_pool);
532fail_io_frames:
533 dma_free_coherent(&instance->pdev->dev, fusion->request_alloc_sz,
534 fusion->reply_frames_desc,
535 fusion->reply_frames_desc_phys);
536 pci_pool_free(fusion->reply_frames_desc_pool,
537 fusion->reply_frames_desc,
538 fusion->reply_frames_desc_phys);
539 pci_pool_destroy(fusion->reply_frames_desc_pool);
540
541fail_reply_desc:
542 dma_free_coherent(&instance->pdev->dev, fusion->request_alloc_sz,
543 fusion->req_frames_desc,
544 fusion->req_frames_desc_phys);
545fail_req_desc:
546 return -ENOMEM;
547}
548
549/**
550 * wait_and_poll - Issues a polling command
551 * @instance: Adapter soft state
552 * @cmd: Command packet to be issued
553 *
554 * For polling, MFI requires the cmd_status to be set to 0xFF before posting.
555 */
556int
557wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd)
558{
559 int i;
560 struct megasas_header *frame_hdr = &cmd->frame->hdr;
561
562 u32 msecs = MFI_POLL_TIMEOUT_SECS * 1000;
563
564 /*
565 * Wait for cmd_status to change
566 */
567 for (i = 0; (i < msecs) && (frame_hdr->cmd_status == 0xff); i += 20) {
568 rmb();
569 msleep(20);
570 }
571
572 if (frame_hdr->cmd_status == 0xff)
573 return -ETIME;
574
575 return 0;
576}
577
578/**
579 * megasas_ioc_init_fusion - Initializes the FW
580 * @instance: Adapter soft state
581 *
582 * Issues the IOC Init cmd
583 */
584int
585megasas_ioc_init_fusion(struct megasas_instance *instance)
586{
587 struct megasas_init_frame *init_frame;
588 struct MPI2_IOC_INIT_REQUEST *IOCInitMessage;
589 dma_addr_t ioc_init_handle;
590 u32 context;
591 struct megasas_cmd *cmd;
592 u8 ret;
593 struct fusion_context *fusion;
594 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
595 int i;
596 struct megasas_header *frame_hdr;
597
598 fusion = instance->ctrl_context;
599
600 cmd = megasas_get_cmd(instance);
601
602 if (!cmd) {
603 printk(KERN_ERR "Could not allocate cmd for INIT Frame\n");
604 ret = 1;
605 goto fail_get_cmd;
606 }
607
608 IOCInitMessage =
609 dma_alloc_coherent(&instance->pdev->dev,
610 sizeof(struct MPI2_IOC_INIT_REQUEST),
611 &ioc_init_handle, GFP_KERNEL);
612
613 if (!IOCInitMessage) {
614 printk(KERN_ERR "Could not allocate memory for "
615 "IOCInitMessage\n");
616 ret = 1;
617 goto fail_fw_init;
618 }
619
620 memset(IOCInitMessage, 0, sizeof(struct MPI2_IOC_INIT_REQUEST));
621
622 IOCInitMessage->Function = MPI2_FUNCTION_IOC_INIT;
623 IOCInitMessage->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
624 IOCInitMessage->MsgVersion = MPI2_VERSION;
625 IOCInitMessage->HeaderVersion = MPI2_HEADER_VERSION;
626 IOCInitMessage->SystemRequestFrameSize =
627 MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4;
628
629 IOCInitMessage->ReplyDescriptorPostQueueDepth = fusion->reply_q_depth;
630 IOCInitMessage->ReplyDescriptorPostQueueAddress =
631 fusion->reply_frames_desc_phys;
632 IOCInitMessage->SystemRequestFrameBaseAddress =
633 fusion->io_request_frames_phys;
634
635 init_frame = (struct megasas_init_frame *)cmd->frame;
636 memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
637
638 frame_hdr = &cmd->frame->hdr;
639 context = init_frame->context;
640 init_frame->context = context;
641
642 frame_hdr->cmd_status = 0xFF;
643 frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
644
645 init_frame->cmd = MFI_CMD_INIT;
646 init_frame->cmd_status = 0xFF;
647
648 init_frame->queue_info_new_phys_addr_lo = ioc_init_handle;
649 init_frame->data_xfer_len = sizeof(struct MPI2_IOC_INIT_REQUEST);
650
651 req_desc =
652 (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)fusion->req_frames_desc;
653
654 req_desc->Words = cmd->frame_phys_addr;
655 req_desc->MFAIo.RequestFlags =
656 (MEGASAS_REQ_DESCRIPT_FLAGS_MFA <<
657 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
658
659 /*
660 * disable the intr before firing the init frame
661 */
662 instance->instancet->disable_intr(instance->reg_set);
663
664 for (i = 0; i < (10 * 1000); i += 20) {
665 if (readl(&instance->reg_set->doorbell) & 1)
666 msleep(20);
667 else
668 break;
669 }
670
671 instance->instancet->fire_cmd(instance, req_desc->u.low,
672 req_desc->u.high, instance->reg_set);
673
674 wait_and_poll(instance, cmd);
675
676 frame_hdr = &cmd->frame->hdr;
677 if (frame_hdr->cmd_status != 0) {
678 ret = 1;
679 goto fail_fw_init;
680 }
681 printk(KERN_ERR "megasas:IOC Init cmd success\n");
682
683 ret = 0;
684
685fail_fw_init:
686 megasas_return_cmd(instance, cmd);
687 if (IOCInitMessage)
688 dma_free_coherent(&instance->pdev->dev,
689 sizeof(struct MPI2_IOC_INIT_REQUEST),
690 IOCInitMessage, ioc_init_handle);
691fail_get_cmd:
692 return ret;
693}
694
695/*
696 * megasas_return_cmd_for_smid - Returns a cmd_fusion for a SMID
697 * @instance: Adapter soft state
698 *
699 */
700void
701megasas_return_cmd_for_smid(struct megasas_instance *instance, u16 smid)
702{
703 struct fusion_context *fusion;
704 struct megasas_cmd_fusion *cmd;
705
706 fusion = instance->ctrl_context;
707 cmd = fusion->cmd_list[smid - 1];
708 megasas_return_cmd_fusion(instance, cmd);
709}
710
711/*
712 * megasas_get_ld_map_info - Returns FW's ld_map structure
713 * @instance: Adapter soft state
714 * @pend: Pend the command or not
715 * Issues an internal command (DCMD) to get the FW's controller PD
716 * list structure. This information is mainly used to find out SYSTEM
717 * supported by the FW.
718 */
719static int
720megasas_get_ld_map_info(struct megasas_instance *instance)
721{
722 int ret = 0;
723 struct megasas_cmd *cmd;
724 struct megasas_dcmd_frame *dcmd;
725 struct MR_FW_RAID_MAP_ALL *ci;
726 dma_addr_t ci_h = 0;
727 u32 size_map_info;
728 struct fusion_context *fusion;
729
730 cmd = megasas_get_cmd(instance);
731
732 if (!cmd) {
733 printk(KERN_DEBUG "megasas: Failed to get cmd for map info.\n");
734 return -ENOMEM;
735 }
736
737 fusion = instance->ctrl_context;
738
739 if (!fusion) {
740 megasas_return_cmd(instance, cmd);
741 return 1;
742 }
743
744 dcmd = &cmd->frame->dcmd;
745
746 size_map_info = sizeof(struct MR_FW_RAID_MAP) +
747 (sizeof(struct MR_LD_SPAN_MAP) *(MAX_LOGICAL_DRIVES - 1));
748
749 ci = fusion->ld_map[(instance->map_id & 1)];
750 ci_h = fusion->ld_map_phys[(instance->map_id & 1)];
751
752 if (!ci) {
753 printk(KERN_DEBUG "Failed to alloc mem for ld_map_info\n");
754 megasas_return_cmd(instance, cmd);
755 return -ENOMEM;
756 }
757
758 memset(ci, 0, sizeof(*ci));
759 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
760
761 dcmd->cmd = MFI_CMD_DCMD;
762 dcmd->cmd_status = 0xFF;
763 dcmd->sge_count = 1;
764 dcmd->flags = MFI_FRAME_DIR_READ;
765 dcmd->timeout = 0;
766 dcmd->pad_0 = 0;
767 dcmd->data_xfer_len = size_map_info;
768 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
769 dcmd->sgl.sge32[0].phys_addr = ci_h;
770 dcmd->sgl.sge32[0].length = size_map_info;
771
772 if (!megasas_issue_polled(instance, cmd))
773 ret = 0;
774 else {
775 printk(KERN_ERR "megasas: Get LD Map Info Failed\n");
776 ret = -1;
777 }
778
779 megasas_return_cmd(instance, cmd);
780
781 return ret;
782}
783
784u8
785megasas_get_map_info(struct megasas_instance *instance)
786{
787 struct fusion_context *fusion = instance->ctrl_context;
788
789 fusion->fast_path_io = 0;
790 if (!megasas_get_ld_map_info(instance)) {
791 if (MR_ValidateMapInfo(fusion->ld_map[(instance->map_id & 1)],
792 fusion->load_balance_info)) {
793 fusion->fast_path_io = 1;
794 return 0;
795 }
796 }
797 return 1;
798}
799
800/*
801 * megasas_sync_map_info - Returns FW's ld_map structure
802 * @instance: Adapter soft state
803 *
804 * Issues an internal command (DCMD) to get the FW's controller PD
805 * list structure. This information is mainly used to find out SYSTEM
806 * supported by the FW.
807 */
808int
809megasas_sync_map_info(struct megasas_instance *instance)
810{
811 int ret = 0, i;
812 struct megasas_cmd *cmd;
813 struct megasas_dcmd_frame *dcmd;
814 u32 size_sync_info, num_lds;
815 struct fusion_context *fusion;
816 struct MR_LD_TARGET_SYNC *ci = NULL;
817 struct MR_FW_RAID_MAP_ALL *map;
818 struct MR_LD_RAID *raid;
819 struct MR_LD_TARGET_SYNC *ld_sync;
820 dma_addr_t ci_h = 0;
821 u32 size_map_info;
822
823 cmd = megasas_get_cmd(instance);
824
825 if (!cmd) {
826 printk(KERN_DEBUG "megasas: Failed to get cmd for sync"
827 "info.\n");
828 return -ENOMEM;
829 }
830
831 fusion = instance->ctrl_context;
832
833 if (!fusion) {
834 megasas_return_cmd(instance, cmd);
835 return 1;
836 }
837
838 map = fusion->ld_map[instance->map_id & 1];
839
840 num_lds = map->raidMap.ldCount;
841
842 dcmd = &cmd->frame->dcmd;
843
844 size_sync_info = sizeof(struct MR_LD_TARGET_SYNC) *num_lds;
845
846 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
847
848 ci = (struct MR_LD_TARGET_SYNC *)
849 fusion->ld_map[(instance->map_id - 1) & 1];
850 memset(ci, 0, sizeof(struct MR_FW_RAID_MAP_ALL));
851
852 ci_h = fusion->ld_map_phys[(instance->map_id - 1) & 1];
853
854 ld_sync = (struct MR_LD_TARGET_SYNC *)ci;
855
856 for (i = 0; i < num_lds; i++, ld_sync++) {
857 raid = MR_LdRaidGet(i, map);
858 ld_sync->targetId = MR_GetLDTgtId(i, map);
859 ld_sync->seqNum = raid->seqNum;
860 }
861
862 size_map_info = sizeof(struct MR_FW_RAID_MAP) +
863 (sizeof(struct MR_LD_SPAN_MAP) *(MAX_LOGICAL_DRIVES - 1));
864
865 dcmd->cmd = MFI_CMD_DCMD;
866 dcmd->cmd_status = 0xFF;
867 dcmd->sge_count = 1;
868 dcmd->flags = MFI_FRAME_DIR_WRITE;
869 dcmd->timeout = 0;
870 dcmd->pad_0 = 0;
871 dcmd->data_xfer_len = size_map_info;
872 dcmd->mbox.b[0] = num_lds;
873 dcmd->mbox.b[1] = MEGASAS_DCMD_MBOX_PEND_FLAG;
874 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
875 dcmd->sgl.sge32[0].phys_addr = ci_h;
876 dcmd->sgl.sge32[0].length = size_map_info;
877
878 instance->map_update_cmd = cmd;
879
880 instance->instancet->issue_dcmd(instance, cmd);
881
882 return ret;
883}
884
885/**
886 * megasas_init_adapter_fusion - Initializes the FW
887 * @instance: Adapter soft state
888 *
889 * This is the main function for initializing firmware.
890 */
891u32
892megasas_init_adapter_fusion(struct megasas_instance *instance)
893{
894 struct megasas_register_set __iomem *reg_set;
895 struct fusion_context *fusion;
896 u32 max_cmd;
897 int i = 0;
898
899 fusion = instance->ctrl_context;
900
901 reg_set = instance->reg_set;
902
903 /*
904 * Get various operational parameters from status register
905 */
906 instance->max_fw_cmds =
907 instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
908 instance->max_fw_cmds = min(instance->max_fw_cmds, (u16)1008);
909
910 /*
911 * Reduce the max supported cmds by 1. This is to ensure that the
912 * reply_q_sz (1 more than the max cmd that driver may send)
913 * does not exceed max cmds that the FW can support
914 */
915 instance->max_fw_cmds = instance->max_fw_cmds-1;
916 /* Only internal cmds (DCMD) need to have MFI frames */
917 instance->max_mfi_cmds = MEGASAS_INT_CMDS;
918
919 max_cmd = instance->max_fw_cmds;
920
921 fusion->reply_q_depth = ((max_cmd + 1 + 15)/16)*16;
922
923 fusion->request_alloc_sz =
924 sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *max_cmd;
925 fusion->reply_alloc_sz = sizeof(union MPI2_REPLY_DESCRIPTORS_UNION)
926 *(fusion->reply_q_depth);
927 fusion->io_frames_alloc_sz = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
928 (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE *
929 (max_cmd + 1)); /* Extra 1 for SMID 0 */
930
931 fusion->max_sge_in_main_msg =
932 (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
933 offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL))/16;
934
935 fusion->max_sge_in_chain =
936 MEGASAS_MAX_SZ_CHAIN_FRAME / sizeof(union MPI2_SGE_IO_UNION);
937
938 instance->max_num_sge = fusion->max_sge_in_main_msg +
939 fusion->max_sge_in_chain - 2;
940
941 /* Used for pass thru MFI frame (DCMD) */
942 fusion->chain_offset_mfi_pthru =
943 offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL)/16;
944
945 fusion->chain_offset_io_request =
946 (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
947 sizeof(union MPI2_SGE_IO_UNION))/16;
948
949 fusion->last_reply_idx = 0;
950
951 /*
952 * Allocate memory for descriptors
953 * Create a pool of commands
954 */
955 if (megasas_alloc_cmds(instance))
956 goto fail_alloc_mfi_cmds;
957 if (megasas_alloc_cmds_fusion(instance))
958 goto fail_alloc_cmds;
959
960 if (megasas_ioc_init_fusion(instance))
961 goto fail_ioc_init;
962
963 instance->flag_ieee = 1;
964
965 fusion->map_sz = sizeof(struct MR_FW_RAID_MAP) +
966 (sizeof(struct MR_LD_SPAN_MAP) *(MAX_LOGICAL_DRIVES - 1));
967
968 fusion->fast_path_io = 0;
969
970 for (i = 0; i < 2; i++) {
971 fusion->ld_map[i] = dma_alloc_coherent(&instance->pdev->dev,
972 fusion->map_sz,
973 &fusion->ld_map_phys[i],
974 GFP_KERNEL);
975 if (!fusion->ld_map[i]) {
976 printk(KERN_ERR "megasas: Could not allocate memory "
977 "for map info\n");
978 goto fail_map_info;
979 }
980 }
981
982 if (!megasas_get_map_info(instance))
983 megasas_sync_map_info(instance);
984
985 return 0;
986
987fail_alloc_cmds:
988fail_alloc_mfi_cmds:
989fail_map_info:
990 if (i == 1)
991 dma_free_coherent(&instance->pdev->dev, fusion->map_sz,
992 fusion->ld_map[0], fusion->ld_map_phys[0]);
993fail_ioc_init:
994 return 1;
995}
996
997/**
998 * megasas_fire_cmd_fusion - Sends command to the FW
999 * @frame_phys_addr : Physical address of cmd
1000 * @frame_count : Number of frames for the command
1001 * @regs : MFI register set
1002 */
1003void
1004megasas_fire_cmd_fusion(struct megasas_instance *instance,
1005 dma_addr_t req_desc_lo,
1006 u32 req_desc_hi,
1007 struct megasas_register_set __iomem *regs)
1008{
1009 unsigned long flags;
1010
1011 spin_lock_irqsave(&instance->hba_lock, flags);
1012
1013 writel(req_desc_lo,
1014 &(regs)->inbound_low_queue_port);
1015 writel(req_desc_hi, &(regs)->inbound_high_queue_port);
1016 spin_unlock_irqrestore(&instance->hba_lock, flags);
1017}
1018
1019/**
1020 * map_cmd_status - Maps FW cmd status to OS cmd status
1021 * @cmd : Pointer to cmd
1022 * @status : status of cmd returned by FW
1023 * @ext_status : ext status of cmd returned by FW
1024 */
1025
1026void
1027map_cmd_status(struct megasas_cmd_fusion *cmd, u8 status, u8 ext_status)
1028{
1029
1030 switch (status) {
1031
1032 case MFI_STAT_OK:
1033 cmd->scmd->result = DID_OK << 16;
1034 break;
1035
1036 case MFI_STAT_SCSI_IO_FAILED:
1037 case MFI_STAT_LD_INIT_IN_PROGRESS:
1038 cmd->scmd->result = (DID_ERROR << 16) | ext_status;
1039 break;
1040
1041 case MFI_STAT_SCSI_DONE_WITH_ERROR:
1042
1043 cmd->scmd->result = (DID_OK << 16) | ext_status;
1044 if (ext_status == SAM_STAT_CHECK_CONDITION) {
1045 memset(cmd->scmd->sense_buffer, 0,
1046 SCSI_SENSE_BUFFERSIZE);
1047 memcpy(cmd->scmd->sense_buffer, cmd->sense,
1048 SCSI_SENSE_BUFFERSIZE);
1049 cmd->scmd->result |= DRIVER_SENSE << 24;
1050 }
1051 break;
1052
1053 case MFI_STAT_LD_OFFLINE:
1054 case MFI_STAT_DEVICE_NOT_FOUND:
1055 cmd->scmd->result = DID_BAD_TARGET << 16;
1056 break;
1057
1058 default:
1059 printk(KERN_DEBUG "megasas: FW status %#x\n", status);
1060 cmd->scmd->result = DID_ERROR << 16;
1061 break;
1062 }
1063}
1064
1065/**
1066 * megasas_make_sgl_fusion - Prepares 32-bit SGL
1067 * @instance: Adapter soft state
1068 * @scp: SCSI command from the mid-layer
1069 * @sgl_ptr: SGL to be filled in
1070 * @cmd: cmd we are working on
1071 *
1072 * If successful, this function returns the number of SG elements.
1073 */
1074static int
1075megasas_make_sgl_fusion(struct megasas_instance *instance,
1076 struct scsi_cmnd *scp,
1077 struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr,
1078 struct megasas_cmd_fusion *cmd)
1079{
1080 int i, sg_processed;
1081 int sge_count, sge_idx;
1082 struct scatterlist *os_sgl;
1083 struct fusion_context *fusion;
1084
1085 fusion = instance->ctrl_context;
1086
1087 cmd->io_request->ChainOffset = 0;
1088
1089 sge_count = scsi_dma_map(scp);
1090
1091 BUG_ON(sge_count < 0);
1092
1093 if (sge_count > instance->max_num_sge || !sge_count)
1094 return sge_count;
1095
1096 if (sge_count > fusion->max_sge_in_main_msg) {
1097 /* One element to store the chain info */
1098 sge_idx = fusion->max_sge_in_main_msg - 1;
1099 } else
1100 sge_idx = sge_count;
1101
1102 scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1103 sgl_ptr->Length = sg_dma_len(os_sgl);
1104 sgl_ptr->Address = sg_dma_address(os_sgl);
1105 sgl_ptr->Flags = 0;
1106 sgl_ptr++;
1107
1108 sg_processed = i + 1;
1109
1110 if ((sg_processed == (fusion->max_sge_in_main_msg - 1)) &&
1111 (sge_count > fusion->max_sge_in_main_msg)) {
1112
1113 struct MPI25_IEEE_SGE_CHAIN64 *sg_chain;
1114 cmd->io_request->ChainOffset =
1115 fusion->chain_offset_io_request;
1116 sg_chain = sgl_ptr;
1117 /* Prepare chain element */
1118 sg_chain->NextChainOffset = 0;
1119 sg_chain->Flags = (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1120 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
1121 sg_chain->Length = (sizeof(union MPI2_SGE_IO_UNION)
1122 *(sge_count - sg_processed));
1123 sg_chain->Address = cmd->sg_frame_phys_addr;
1124
1125 sgl_ptr =
1126 (struct MPI25_IEEE_SGE_CHAIN64 *)cmd->sg_frame;
1127 }
1128 }
1129
1130 return sge_count;
1131}
1132
1133/**
1134 * megasas_set_pd_lba - Sets PD LBA
1135 * @cdb: CDB
1136 * @cdb_len: cdb length
1137 * @start_blk: Start block of IO
1138 *
1139 * Used to set the PD LBA in CDB for FP IOs
1140 */
1141void
1142megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
1143 struct IO_REQUEST_INFO *io_info, struct scsi_cmnd *scp,
1144 struct MR_FW_RAID_MAP_ALL *local_map_ptr, u32 ref_tag)
1145{
1146 struct MR_LD_RAID *raid;
1147 u32 ld;
1148 u64 start_blk = io_info->pdBlock;
1149 u8 *cdb = io_request->CDB.CDB32;
1150 u32 num_blocks = io_info->numBlocks;
1151 u8 opcode, flagvals, groupnum, control;
1152
1153 /* Check if T10 PI (DIF) is enabled for this LD */
1154 ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr);
1155 raid = MR_LdRaidGet(ld, local_map_ptr);
1156 if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) {
1157 memset(cdb, 0, sizeof(io_request->CDB.CDB32));
1158 cdb[0] = MEGASAS_SCSI_VARIABLE_LENGTH_CMD;
1159 cdb[7] = MEGASAS_SCSI_ADDL_CDB_LEN;
1160
1161 if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
1162 cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_READ32;
1163 else
1164 cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_WRITE32;
1165 cdb[10] = MEGASAS_RD_WR_PROTECT_CHECK_ALL;
1166
1167 /* LBA */
1168 cdb[12] = (u8)((start_blk >> 56) & 0xff);
1169 cdb[13] = (u8)((start_blk >> 48) & 0xff);
1170 cdb[14] = (u8)((start_blk >> 40) & 0xff);
1171 cdb[15] = (u8)((start_blk >> 32) & 0xff);
1172 cdb[16] = (u8)((start_blk >> 24) & 0xff);
1173 cdb[17] = (u8)((start_blk >> 16) & 0xff);
1174 cdb[18] = (u8)((start_blk >> 8) & 0xff);
1175 cdb[19] = (u8)(start_blk & 0xff);
1176
1177 /* Logical block reference tag */
1178 io_request->CDB.EEDP32.PrimaryReferenceTag =
1179 cpu_to_be32(ref_tag);
1180 io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0xffff;
1181
1182 io_request->DataLength = num_blocks * 512;
1183 io_request->IoFlags = 32; /* Specify 32-byte cdb */
1184
1185 /* Transfer length */
1186 cdb[28] = (u8)((num_blocks >> 24) & 0xff);
1187 cdb[29] = (u8)((num_blocks >> 16) & 0xff);
1188 cdb[30] = (u8)((num_blocks >> 8) & 0xff);
1189 cdb[31] = (u8)(num_blocks & 0xff);
1190
1191 /* set SCSI IO EEDPFlags */
1192 if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) {
1193 io_request->EEDPFlags =
1194 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1195 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1196 MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
1197 MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
1198 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
1199 } else {
1200 io_request->EEDPFlags =
1201 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1202 MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
1203 }
1204 io_request->Control |= (0x4 << 26);
1205 io_request->EEDPBlockSize = MEGASAS_EEDPBLOCKSIZE;
1206 } else {
1207 /* Some drives don't support 16/12 byte CDB's, convert to 10 */
1208 if (((cdb_len == 12) || (cdb_len == 16)) &&
1209 (start_blk <= 0xffffffff)) {
1210 if (cdb_len == 16) {
1211 opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
1212 flagvals = cdb[1];
1213 groupnum = cdb[14];
1214 control = cdb[15];
1215 } else {
1216 opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
1217 flagvals = cdb[1];
1218 groupnum = cdb[10];
1219 control = cdb[11];
1220 }
1221
1222 memset(cdb, 0, sizeof(io_request->CDB.CDB32));
1223
1224 cdb[0] = opcode;
1225 cdb[1] = flagvals;
1226 cdb[6] = groupnum;
1227 cdb[9] = control;
1228
1229 /* Transfer length */
1230 cdb[8] = (u8)(num_blocks & 0xff);
1231 cdb[7] = (u8)((num_blocks >> 8) & 0xff);
1232
1233 cdb_len = 10;
1234 }
1235
1236 /* Normal case, just load LBA here */
1237 switch (cdb_len) {
1238 case 6:
1239 {
1240 u8 val = cdb[1] & 0xE0;
1241 cdb[3] = (u8)(start_blk & 0xff);
1242 cdb[2] = (u8)((start_blk >> 8) & 0xff);
1243 cdb[1] = val | ((u8)(start_blk >> 16) & 0x1f);
1244 break;
1245 }
1246 case 10:
1247 cdb[5] = (u8)(start_blk & 0xff);
1248 cdb[4] = (u8)((start_blk >> 8) & 0xff);
1249 cdb[3] = (u8)((start_blk >> 16) & 0xff);
1250 cdb[2] = (u8)((start_blk >> 24) & 0xff);
1251 break;
1252 case 12:
1253 cdb[5] = (u8)(start_blk & 0xff);
1254 cdb[4] = (u8)((start_blk >> 8) & 0xff);
1255 cdb[3] = (u8)((start_blk >> 16) & 0xff);
1256 cdb[2] = (u8)((start_blk >> 24) & 0xff);
1257 break;
1258 case 16:
1259 cdb[9] = (u8)(start_blk & 0xff);
1260 cdb[8] = (u8)((start_blk >> 8) & 0xff);
1261 cdb[7] = (u8)((start_blk >> 16) & 0xff);
1262 cdb[6] = (u8)((start_blk >> 24) & 0xff);
1263 cdb[5] = (u8)((start_blk >> 32) & 0xff);
1264 cdb[4] = (u8)((start_blk >> 40) & 0xff);
1265 cdb[3] = (u8)((start_blk >> 48) & 0xff);
1266 cdb[2] = (u8)((start_blk >> 56) & 0xff);
1267 break;
1268 }
1269 }
1270}
1271
1272/**
1273 * megasas_build_ldio_fusion - Prepares IOs to devices
1274 * @instance: Adapter soft state
1275 * @scp: SCSI command
1276 * @cmd: Command to be prepared
1277 *
1278 * Prepares the io_request and chain elements (sg_frame) for IO
1279 * The IO can be for PD (Fast Path) or LD
1280 */
1281void
1282megasas_build_ldio_fusion(struct megasas_instance *instance,
1283 struct scsi_cmnd *scp,
1284 struct megasas_cmd_fusion *cmd)
1285{
1286 u8 fp_possible;
1287 u32 start_lba_lo, start_lba_hi, device_id;
1288 struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
1289 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
1290 struct IO_REQUEST_INFO io_info;
1291 struct fusion_context *fusion;
1292 struct MR_FW_RAID_MAP_ALL *local_map_ptr;
1293
1294 device_id = MEGASAS_DEV_INDEX(instance, scp);
1295
1296 fusion = instance->ctrl_context;
1297
1298 io_request = cmd->io_request;
1299 io_request->RaidContext.VirtualDiskTgtId = device_id;
1300 io_request->RaidContext.status = 0;
1301 io_request->RaidContext.exStatus = 0;
1302
1303 req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc;
1304
1305 start_lba_lo = 0;
1306 start_lba_hi = 0;
1307 fp_possible = 0;
1308
1309 /*
1310 * 6-byte READ(0x08) or WRITE(0x0A) cdb
1311 */
1312 if (scp->cmd_len == 6) {
1313 io_request->DataLength = (u32) scp->cmnd[4];
1314 start_lba_lo = ((u32) scp->cmnd[1] << 16) |
1315 ((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3];
1316
1317 start_lba_lo &= 0x1FFFFF;
1318 }
1319
1320 /*
1321 * 10-byte READ(0x28) or WRITE(0x2A) cdb
1322 */
1323 else if (scp->cmd_len == 10) {
1324 io_request->DataLength = (u32) scp->cmnd[8] |
1325 ((u32) scp->cmnd[7] << 8);
1326 start_lba_lo = ((u32) scp->cmnd[2] << 24) |
1327 ((u32) scp->cmnd[3] << 16) |
1328 ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
1329 }
1330
1331 /*
1332 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
1333 */
1334 else if (scp->cmd_len == 12) {
1335 io_request->DataLength = ((u32) scp->cmnd[6] << 24) |
1336 ((u32) scp->cmnd[7] << 16) |
1337 ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
1338 start_lba_lo = ((u32) scp->cmnd[2] << 24) |
1339 ((u32) scp->cmnd[3] << 16) |
1340 ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
1341 }
1342
1343 /*
1344 * 16-byte READ(0x88) or WRITE(0x8A) cdb
1345 */
1346 else if (scp->cmd_len == 16) {
1347 io_request->DataLength = ((u32) scp->cmnd[10] << 24) |
1348 ((u32) scp->cmnd[11] << 16) |
1349 ((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13];
1350 start_lba_lo = ((u32) scp->cmnd[6] << 24) |
1351 ((u32) scp->cmnd[7] << 16) |
1352 ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
1353
1354 start_lba_hi = ((u32) scp->cmnd[2] << 24) |
1355 ((u32) scp->cmnd[3] << 16) |
1356 ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
1357 }
1358
1359 memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
1360 io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo;
1361 io_info.numBlocks = io_request->DataLength;
1362 io_info.ldTgtId = device_id;
1363
1364 if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
1365 io_info.isRead = 1;
1366
1367 local_map_ptr = fusion->ld_map[(instance->map_id & 1)];
1368
1369 if ((MR_TargetIdToLdGet(device_id, local_map_ptr) >=
1370 MAX_LOGICAL_DRIVES) || (!fusion->fast_path_io)) {
1371 io_request->RaidContext.regLockFlags = 0;
1372 fp_possible = 0;
1373 } else {
1374 if (MR_BuildRaidContext(&io_info, &io_request->RaidContext,
1375 local_map_ptr))
1376 fp_possible = io_info.fpOkForIo;
1377 }
1378
1379 if (fp_possible) {
1380 megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp,
1381 local_map_ptr, start_lba_lo);
1382 io_request->DataLength = scsi_bufflen(scp);
1383 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1384 cmd->request_desc->SCSIIO.RequestFlags =
1385 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY
1386 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1387 if ((fusion->load_balance_info[device_id].loadBalanceFlag) &&
1388 (io_info.isRead)) {
1389 io_info.devHandle =
1390 get_updated_dev_handle(
1391 &fusion->load_balance_info[device_id],
1392 &io_info);
1393 scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG;
1394 } else
1395 scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
1396 cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
1397 io_request->DevHandle = io_info.devHandle;
1398 } else {
1399 io_request->RaidContext.timeoutValue =
1400 local_map_ptr->raidMap.fpPdIoTimeoutSec;
1401 io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
1402 io_request->DevHandle = device_id;
1403 cmd->request_desc->SCSIIO.RequestFlags =
1404 (MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO
1405 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1406 } /* Not FP */
1407}
1408
1409/**
1410 * megasas_build_dcdb_fusion - Prepares IOs to devices
1411 * @instance: Adapter soft state
1412 * @scp: SCSI command
1413 * @cmd: Command to be prepared
1414 *
1415 * Prepares the io_request frame for non-io cmds
1416 */
1417static void
1418megasas_build_dcdb_fusion(struct megasas_instance *instance,
1419 struct scsi_cmnd *scmd,
1420 struct megasas_cmd_fusion *cmd)
1421{
1422 u32 device_id;
1423 struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
1424 u16 pd_index = 0;
1425 struct MR_FW_RAID_MAP_ALL *local_map_ptr;
1426 struct fusion_context *fusion = instance->ctrl_context;
1427
1428 io_request = cmd->io_request;
1429 device_id = MEGASAS_DEV_INDEX(instance, scmd);
1430 pd_index = (scmd->device->channel * MEGASAS_MAX_DEV_PER_CHANNEL)
1431 +scmd->device->id;
1432 local_map_ptr = fusion->ld_map[(instance->map_id & 1)];
1433
1434 /* Check if this is a system PD I/O */
1435 if ((instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) &&
1436 (instance->pd_list[pd_index].driveType == TYPE_DISK)) {
1437 io_request->Function = 0;
1438 io_request->DevHandle =
1439 local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
1440 io_request->RaidContext.timeoutValue =
1441 local_map_ptr->raidMap.fpPdIoTimeoutSec;
1442 io_request->RaidContext.regLockFlags = 0;
1443 io_request->RaidContext.regLockRowLBA = 0;
1444 io_request->RaidContext.regLockLength = 0;
1445 io_request->RaidContext.RAIDFlags =
1446 MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD <<
1447 MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
1448 cmd->request_desc->SCSIIO.RequestFlags =
1449 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1450 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1451 } else {
1452 io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
1453 io_request->DevHandle = device_id;
1454 cmd->request_desc->SCSIIO.RequestFlags =
1455 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
1456 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1457 }
1458 io_request->RaidContext.VirtualDiskTgtId = device_id;
1459 io_request->LUN[0] = scmd->device->lun;
1460 io_request->DataLength = scsi_bufflen(scmd);
1461}
1462
1463/**
1464 * megasas_build_io_fusion - Prepares IOs to devices
1465 * @instance: Adapter soft state
1466 * @scp: SCSI command
1467 * @cmd: Command to be prepared
1468 *
1469 * Invokes helper functions to prepare request frames
1470 * and sets flags appropriate for IO/Non-IO cmd
1471 */
1472int
1473megasas_build_io_fusion(struct megasas_instance *instance,
1474 struct scsi_cmnd *scp,
1475 struct megasas_cmd_fusion *cmd)
1476{
1477 u32 device_id, sge_count;
1478 struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request;
1479
1480 device_id = MEGASAS_DEV_INDEX(instance, scp);
1481
1482 /* Zero out some fields so they don't get reused */
1483 io_request->LUN[0] = 0;
1484 io_request->CDB.EEDP32.PrimaryReferenceTag = 0;
1485 io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0;
1486 io_request->EEDPFlags = 0;
1487 io_request->Control = 0;
1488 io_request->EEDPBlockSize = 0;
1489 io_request->IoFlags = 0;
1490 io_request->RaidContext.RAIDFlags = 0;
1491
1492 memcpy(io_request->CDB.CDB32, scp->cmnd, scp->cmd_len);
1493 /*
1494 * Just the CDB length,rest of the Flags are zero
1495 * This will be modified for FP in build_ldio_fusion
1496 */
1497 io_request->IoFlags = scp->cmd_len;
1498
1499 if (megasas_is_ldio(scp))
1500 megasas_build_ldio_fusion(instance, scp, cmd);
1501 else
1502 megasas_build_dcdb_fusion(instance, scp, cmd);
1503
1504 /*
1505 * Construct SGL
1506 */
1507
1508 sge_count =
1509 megasas_make_sgl_fusion(instance, scp,
1510 (struct MPI25_IEEE_SGE_CHAIN64 *)
1511 &io_request->SGL, cmd);
1512
1513 if (sge_count > instance->max_num_sge) {
1514 printk(KERN_ERR "megasas: Error. sge_count (0x%x) exceeds "
1515 "max (0x%x) allowed\n", sge_count,
1516 instance->max_num_sge);
1517 return 1;
1518 }
1519
1520 io_request->RaidContext.numSGE = sge_count;
1521
1522 io_request->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING;
1523
1524 if (scp->sc_data_direction == PCI_DMA_TODEVICE)
1525 io_request->Control |= MPI2_SCSIIO_CONTROL_WRITE;
1526 else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
1527 io_request->Control |= MPI2_SCSIIO_CONTROL_READ;
1528
1529 io_request->SGLOffset0 =
1530 offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4;
1531
1532 io_request->SenseBufferLowAddress = cmd->sense_phys_addr;
1533 io_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
1534
1535 cmd->scmd = scp;
1536 scp->SCp.ptr = (char *)cmd;
1537
1538 return 0;
1539}
1540
1541union MEGASAS_REQUEST_DESCRIPTOR_UNION *
1542megasas_get_request_descriptor(struct megasas_instance *instance, u16 index)
1543{
1544 u8 *p;
1545 struct fusion_context *fusion;
1546
1547 if (index >= instance->max_fw_cmds) {
1548 printk(KERN_ERR "megasas: Invalid SMID (0x%x)request for "
1549 "descriptor\n", index);
1550 return NULL;
1551 }
1552 fusion = instance->ctrl_context;
1553 p = fusion->req_frames_desc
1554 +sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *index;
1555
1556 return (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)p;
1557}
1558
1559/**
1560 * megasas_build_and_issue_cmd_fusion -Main routine for building and
1561 * issuing non IOCTL cmd
1562 * @instance: Adapter soft state
1563 * @scmd: pointer to scsi cmd from OS
1564 */
1565static u32
1566megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
1567 struct scsi_cmnd *scmd)
1568{
1569 struct megasas_cmd_fusion *cmd;
1570 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
1571 u32 index;
1572 struct fusion_context *fusion;
1573
1574 fusion = instance->ctrl_context;
1575
1576 cmd = megasas_get_cmd_fusion(instance);
1577 if (!cmd)
1578 return SCSI_MLQUEUE_HOST_BUSY;
1579
1580 index = cmd->index;
1581
1582 req_desc = megasas_get_request_descriptor(instance, index-1);
1583 if (!req_desc)
1584 return 1;
1585
1586 req_desc->Words = 0;
1587 cmd->request_desc = req_desc;
1588 cmd->request_desc->Words = 0;
1589
1590 if (megasas_build_io_fusion(instance, scmd, cmd)) {
1591 megasas_return_cmd_fusion(instance, cmd);
1592 printk(KERN_ERR "megasas: Error building command.\n");
1593 cmd->request_desc = NULL;
1594 return 1;
1595 }
1596
1597 req_desc = cmd->request_desc;
1598 req_desc->SCSIIO.SMID = index;
1599
1600 if (cmd->io_request->ChainOffset != 0 &&
1601 cmd->io_request->ChainOffset != 0xF)
1602 printk(KERN_ERR "megasas: The chain offset value is not "
1603 "correct : %x\n", cmd->io_request->ChainOffset);
1604
1605 /*
1606 * Issue the command to the FW
1607 */
1608 atomic_inc(&instance->fw_outstanding);
1609
1610 instance->instancet->fire_cmd(instance,
1611 req_desc->u.low, req_desc->u.high,
1612 instance->reg_set);
1613
1614 return 0;
1615}
1616
/**
 * complete_cmd_fusion - Completes command
 * @instance: Adapter soft state
 * Completes all commands that is in reply descriptor queue
 *
 * Walks the reply descriptor ring starting at fusion->last_reply_idx and
 * completes every posted descriptor until an UNUSED one is found.
 * Returns IRQ_HANDLED if at least one command was completed (or the
 * adapter is dead), IRQ_NONE if nothing was pending.
 */
int
complete_cmd_fusion(struct megasas_instance *instance)
{
	union MPI2_REPLY_DESCRIPTORS_UNION *desc;
	struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
	struct MPI2_RAID_SCSI_IO_REQUEST *scsi_io_req;
	struct fusion_context *fusion;
	struct megasas_cmd *cmd_mfi;
	struct megasas_cmd_fusion *cmd_fusion;
	u16 smid, num_completed;
	u8 reply_descript_type, arm;
	u32 status, extStatus, device_id;
	union desc_value d_val;
	struct LD_LOAD_BALANCE_INFO *lbinfo;

	fusion = instance->ctrl_context;

	/* Adapter already declared dead; nothing to complete */
	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
		return IRQ_HANDLED;

	/* Resume scanning where the previous invocation stopped */
	desc = fusion->reply_frames_desc;
	desc += fusion->last_reply_idx;

	reply_desc = (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;

	d_val.word = desc->Words;

	reply_descript_type = reply_desc->ReplyFlags &
		MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

	/* FW has not posted anything new since last time */
	if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
		return IRQ_NONE;

	d_val.word = desc->Words;

	num_completed = 0;

	/* An all-ones descriptor is a slot we already consumed */
	while ((d_val.u.low != UINT_MAX) && (d_val.u.high != UINT_MAX)) {
		smid = reply_desc->SMID;

		/* SMIDs are 1-based indices into the fusion cmd pool */
		cmd_fusion = fusion->cmd_list[smid - 1];

		scsi_io_req =
			(struct MPI2_RAID_SCSI_IO_REQUEST *)
			cmd_fusion->io_request;

		if (cmd_fusion->scmd)
			cmd_fusion->scmd->SCp.ptr = NULL;

		status = scsi_io_req->RaidContext.status;
		extStatus = scsi_io_req->RaidContext.exStatus;

		switch (scsi_io_req->Function) {
		case MPI2_FUNCTION_SCSI_IO_REQUEST:  /*Fast Path IO.*/
			/* Update load balancing info */
			device_id = MEGASAS_DEV_INDEX(instance,
						      cmd_fusion->scmd);
			lbinfo = &fusion->load_balance_info[device_id];
			if (cmd_fusion->scmd->SCp.Status &
			    MEGASAS_LOAD_BALANCE_FLAG) {
				/* arm 0 or 1: which RAID1 mirror served the IO */
				arm = lbinfo->raid1DevHandle[0] ==
					cmd_fusion->io_request->DevHandle ? 0 :
					1;
				atomic_dec(&lbinfo->scsi_pending_cmds[arm]);
				cmd_fusion->scmd->SCp.Status &=
					~MEGASAS_LOAD_BALANCE_FLAG;
			}
			if (reply_descript_type ==
			    MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
				if (megasas_dbg_lvl == 5)
					printk(KERN_ERR "\nmegasas: FAST Path "
					       "IO Success\n");
			}
			/* Fall thru and complete IO */
		case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */
			/* Map the FW Cmd Status */
			map_cmd_status(cmd_fusion, status, extStatus);
			scsi_dma_unmap(cmd_fusion->scmd);
			cmd_fusion->scmd->scsi_done(cmd_fusion->scmd);
			/* Clear status for reuse of this io_request frame */
			scsi_io_req->RaidContext.status = 0;
			scsi_io_req->RaidContext.exStatus = 0;
			megasas_return_cmd_fusion(instance, cmd_fusion);
			atomic_dec(&instance->fw_outstanding);

			break;
		case MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */
			/* Complete the originating MFI frame as well.
			 * NOTE(review): cmd_fusion->flags
			 * (MFI_FRAME_DONT_POST_IN_REPLY_QUEUE) is cleared but
			 * never checked before megasas_complete_cmd — confirm
			 * against build_mpt_mfi_pass_thru's stated intent. */
			cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
			megasas_complete_cmd(instance, cmd_mfi, DID_OK);
			cmd_fusion->flags = 0;
			megasas_return_cmd_fusion(instance, cmd_fusion);

			break;
		}

		/* Advance the ring index, wrapping at queue depth */
		fusion->last_reply_idx++;
		if (fusion->last_reply_idx >= fusion->reply_q_depth)
			fusion->last_reply_idx = 0;

		/* Mark this slot consumed for the next pass */
		desc->Words = ULLONG_MAX;
		num_completed++;

		/* Get the next reply descriptor */
		if (!fusion->last_reply_idx)
			desc = fusion->reply_frames_desc;
		else
			desc++;

		reply_desc =
			(struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;

		d_val.word = desc->Words;

		reply_descript_type = reply_desc->ReplyFlags &
			MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

		if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			break;
	}

	if (!num_completed)
		return IRQ_NONE;

	/* Order descriptor writes before telling FW the new host index */
	wmb();
	writel(fusion->last_reply_idx,
	       &instance->reg_set->reply_post_host_index);

	return IRQ_HANDLED;
}
1750
/**
 * megasas_complete_cmd_dpc_fusion - Completes command
 * @instance_addr: Adapter soft state (cast to unsigned long for tasklet use)
 *
 * Tasklet to complete cmds. Drains the reply queue unless the adapter
 * has already been declared dead.
 */
void
megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
{
	struct megasas_instance *instance =
		(struct megasas_instance *)instance_addr;
	unsigned long flags;

	/* If we have already declared adapter dead, donot complete cmds */
	spin_lock_irqsave(&instance->hba_lock, flags);
	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
		spin_unlock_irqrestore(&instance->hba_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&instance->hba_lock, flags);

	/* Serialize reply-queue draining against other completion paths */
	spin_lock_irqsave(&instance->completion_lock, flags);
	complete_cmd_fusion(instance);
	spin_unlock_irqrestore(&instance->completion_lock, flags);
}
1776
/**
 * megasas_isr_fusion - isr entry point
 * @irq: irq number (unused)
 * @devp: megasas_instance pointer registered with request_irq
 *
 * Completes pending replies; if none were pending, checks whether the
 * FW has faulted and, if so, schedules the OCR work item.
 */
irqreturn_t megasas_isr_fusion(int irq, void *devp)
{
	struct megasas_instance *instance = (struct megasas_instance *)devp;
	u32 mfiStatus, fw_state;

	/* For shared (non-MSI) interrupts, verify the interrupt is ours */
	if (!instance->msi_flag) {
		mfiStatus = instance->instancet->clear_intr(instance->reg_set);
		if (!mfiStatus)
			return IRQ_NONE;
	}

	/* If we are resetting, bail */
	if (test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags))
		return IRQ_HANDLED;

	if (!complete_cmd_fusion(instance)) {
		/* If we didn't complete any commands, check for FW fault */
		fw_state = instance->instancet->read_fw_status_reg(
			instance->reg_set) & MFI_STATE_MASK;
		if (fw_state == MFI_STATE_FAULT)
			/* work_init runs megasas_fusion_ocr_wq */
			schedule_work(&instance->work_init);
	}

	return IRQ_HANDLED;
}
1805
/**
 * build_mpt_mfi_pass_thru - builds a cmd fo MFI Pass thru
 * @instance: Adapter soft state
 * mfi_cmd: megasas_cmd pointer
 *
 * Wraps a legacy MFI frame in an MPT pass-thru IO request, linking the
 * MFI frame in through a single IEEE SGE chain element.
 * Returns 1 when no fusion cmd could be allocated, 0 on success.
 */
u8
build_mpt_mfi_pass_thru(struct megasas_instance *instance,
			struct megasas_cmd *mfi_cmd)
{
	struct MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
	struct MPI2_RAID_SCSI_IO_REQUEST *io_req;
	struct megasas_cmd_fusion *cmd;
	struct fusion_context *fusion;
	struct megasas_header *frame_hdr = &mfi_cmd->frame->hdr;

	cmd = megasas_get_cmd_fusion(instance);
	if (!cmd)
		return 1;

	/* Save the smid. To be used for returning the cmd */
	mfi_cmd->context.smid = cmd->index;

	/* Remember the MFI cmd so completion can find it again */
	cmd->sync_cmd_idx = mfi_cmd->index;

	/*
	 * For cmds where the flag is set, store the flag and check
	 * on completion. For cmds with this flag, don't call
	 * megasas_complete_cmd
	 */

	if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
		cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	fusion = instance->ctrl_context;
	io_req = cmd->io_request;
	mpi25_ieee_chain =
		(struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;

	io_req->Function    = MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
	/* SGLOffset0 is in units of 32-bit words */
	io_req->SGLOffset0  = offsetof(struct MPI2_RAID_SCSI_IO_REQUEST,
				       SGL) / 4;
	io_req->ChainOffset = fusion->chain_offset_mfi_pthru;

	/* Chain element points at the original MFI frame */
	mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr;

	mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
		MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;

	mpi25_ieee_chain->Length = MEGASAS_MAX_SZ_CHAIN_FRAME;

	return 0;
}
1859
1860/**
1861 * build_mpt_cmd - Calls helper function to build a cmd MFI Pass thru cmd
1862 * @instance: Adapter soft state
1863 * @cmd: mfi cmd to build
1864 *
1865 */
1866union MEGASAS_REQUEST_DESCRIPTOR_UNION *
1867build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
1868{
1869 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
1870 u16 index;
1871
1872 if (build_mpt_mfi_pass_thru(instance, cmd)) {
1873 printk(KERN_ERR "Couldn't build MFI pass thru cmd\n");
1874 return NULL;
1875 }
1876
1877 index = cmd->context.smid;
1878
1879 req_desc = megasas_get_request_descriptor(instance, index - 1);
1880
1881 if (!req_desc)
1882 return NULL;
1883
1884 req_desc->Words = 0;
1885 req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
1886 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1887
1888 req_desc->SCSIIO.SMID = index;
1889
1890 return req_desc;
1891}
1892
1893/**
1894 * megasas_issue_dcmd_fusion - Issues a MFI Pass thru cmd
1895 * @instance: Adapter soft state
1896 * @cmd: mfi cmd pointer
1897 *
1898 */
1899void
1900megasas_issue_dcmd_fusion(struct megasas_instance *instance,
1901 struct megasas_cmd *cmd)
1902{
1903 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
1904 union desc_value d_val;
1905
1906 req_desc = build_mpt_cmd(instance, cmd);
1907 if (!req_desc) {
1908 printk(KERN_ERR "Couldn't issue MFI pass thru cmd\n");
1909 return;
1910 }
1911 d_val.word = req_desc->Words;
1912
1913 instance->instancet->fire_cmd(instance, req_desc->u.low,
1914 req_desc->u.high, instance->reg_set);
1915}
1916
/**
 * megasas_release_fusion - Reverses the FW initialization
 * @intance: Adapter soft state
 *
 * Frees both the MFI and fusion command pools, then unmaps the register
 * window and releases the PCI BAR claimed at probe time.
 */
void
megasas_release_fusion(struct megasas_instance *instance)
{
	megasas_free_cmds(instance);
	megasas_free_cmds_fusion(instance);

	iounmap(instance->reg_set);

	pci_release_selected_regions(instance->pdev, instance->bar);
}
1931
1932/**
1933 * megasas_read_fw_status_reg_fusion - returns the current FW status value
1934 * @regs: MFI register set
1935 */
1936static u32
1937megasas_read_fw_status_reg_fusion(struct megasas_register_set __iomem *regs)
1938{
1939 return readl(&(regs)->outbound_scratch_pad);
1940}
1941
/**
 * megasas_adp_reset_fusion - For controller reset
 * @regs: MFI register set
 *
 * Intentional no-op: fusion controllers are reset through the diag
 * register sequence in megasas_reset_fusion(), not through this hook.
 * Always returns 0.
 */
static int
megasas_adp_reset_fusion(struct megasas_instance *instance,
			 struct megasas_register_set __iomem *regs)
{
	return 0;
}
1952
/**
 * megasas_check_reset_fusion - For controller reset check
 * @regs: MFI register set
 *
 * Intentional no-op for fusion controllers; always reports 0
 * (no reset in progress via this legacy mechanism).
 */
static int
megasas_check_reset_fusion(struct megasas_instance *instance,
			   struct megasas_register_set __iomem *regs)
{
	return 0;
}
1963
1964/* This function waits for outstanding commands on fusion to complete */
1965int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance)
1966{
1967 int i, outstanding, retval = 0;
1968 u32 fw_state, wait_time = MEGASAS_RESET_WAIT_TIME;
1969
1970 for (i = 0; i < wait_time; i++) {
1971 /* Check if firmware is in fault state */
1972 fw_state = instance->instancet->read_fw_status_reg(
1973 instance->reg_set) & MFI_STATE_MASK;
1974 if (fw_state == MFI_STATE_FAULT) {
1975 printk(KERN_WARNING "megasas: Found FW in FAULT state,"
1976 " will reset adapter.\n");
1977 retval = 1;
1978 goto out;
1979 }
1980
1981 outstanding = atomic_read(&instance->fw_outstanding);
1982 if (!outstanding)
1983 goto out;
1984
1985 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
1986 printk(KERN_NOTICE "megasas: [%2d]waiting for %d "
1987 "commands to complete\n", i, outstanding);
1988 megasas_complete_cmd_dpc_fusion(
1989 (unsigned long)instance);
1990 }
1991 msleep(1000);
1992 }
1993
1994 if (atomic_read(&instance->fw_outstanding)) {
1995 printk("megaraid_sas: pending commands remain after waiting, "
1996 "will reset adapter.\n");
1997 retval = 1;
1998 }
1999out:
2000 return retval;
2001}
2002
2003void megasas_reset_reply_desc(struct megasas_instance *instance)
2004{
2005 int i;
2006 struct fusion_context *fusion;
2007 union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
2008
2009 fusion = instance->ctrl_context;
2010 fusion->last_reply_idx = 0;
2011 reply_desc = fusion->reply_frames_desc;
2012 for (i = 0 ; i < fusion->reply_q_depth; i++, reply_desc++)
2013 reply_desc->Words = ULLONG_MAX;
2014}
2015
/*
 * Core fusion reset function (host reset handler).
 * Tries to drain outstanding IO first; if that fails, fails all IO back
 * to the OS and performs a diag-register chip reset, re-initializes the
 * IOC, and re-fires pending management commands.
 * Returns SUCCESS or FAILED.
 */
int megasas_reset_fusion(struct Scsi_Host *shost)
{
	int retval = SUCCESS, i, j, retry = 0;
	struct megasas_instance *instance;
	struct megasas_cmd_fusion *cmd_fusion;
	struct fusion_context *fusion;
	struct megasas_cmd *cmd_mfi;
	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
	u32 host_diag, abs_state;

	instance = (struct megasas_instance *)shost->hostdata;
	fusion = instance->ctrl_context;

	mutex_lock(&instance->reset_mutex);
	/* Tell the ISR to bail and mark the adapter as recovering */
	set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
	instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
	instance->instancet->disable_intr(instance->reg_set);
	msleep(1000);

	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
		printk(KERN_WARNING "megaraid_sas: Hardware critical error, "
		       "returning FAILED.\n");
		retval = FAILED;
		goto out;
	}

	/* First try waiting for commands to complete */
	if (megasas_wait_for_outstanding_fusion(instance)) {
		printk(KERN_WARNING "megaraid_sas: resetting fusion "
		       "adapter.\n");
		/* Now return commands back to the OS */
		for (i = 0 ; i < instance->max_fw_cmds; i++) {
			cmd_fusion = fusion->cmd_list[i];
			if (cmd_fusion->scmd) {
				/* Fail outstanding IO back with DID_RESET */
				scsi_dma_unmap(cmd_fusion->scmd);
				cmd_fusion->scmd->result = (DID_RESET << 16);
				cmd_fusion->scmd->scsi_done(cmd_fusion->scmd);
				megasas_return_cmd_fusion(instance, cmd_fusion);
				atomic_dec(&instance->fw_outstanding);
			}
		}

		if (instance->disableOnlineCtrlReset == 1) {
			/* Reset not supported, kill adapter */
			printk(KERN_WARNING "megaraid_sas: Reset not supported"
			       ", killing adapter.\n");
			megaraid_sas_kill_hba(instance);
			instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
			retval = FAILED;
			goto out;
		}

		/* Now try to reset the chip */
		for (i = 0; i < MEGASAS_FUSION_MAX_RESET_TRIES; i++) {
			/* Write the diag-unlock key sequence */
			writel(MPI2_WRSEQ_FLUSH_KEY_VALUE,
			       &instance->reg_set->fusion_seq_offset);
			writel(MPI2_WRSEQ_1ST_KEY_VALUE,
			       &instance->reg_set->fusion_seq_offset);
			writel(MPI2_WRSEQ_2ND_KEY_VALUE,
			       &instance->reg_set->fusion_seq_offset);
			writel(MPI2_WRSEQ_3RD_KEY_VALUE,
			       &instance->reg_set->fusion_seq_offset);
			writel(MPI2_WRSEQ_4TH_KEY_VALUE,
			       &instance->reg_set->fusion_seq_offset);
			writel(MPI2_WRSEQ_5TH_KEY_VALUE,
			       &instance->reg_set->fusion_seq_offset);
			writel(MPI2_WRSEQ_6TH_KEY_VALUE,
			       &instance->reg_set->fusion_seq_offset);

			/* Check that the diag write enable (DRWE) bit is on */
			host_diag = readl(&instance->reg_set->fusion_host_diag);
			while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
				msleep(100);
				host_diag =
				readl(&instance->reg_set->fusion_host_diag);
				if (retry++ == 100) {
					printk(KERN_WARNING "megaraid_sas: "
					       "Host diag unlock failed!\n");
					break;
				}
			}
			if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
				continue;

			/* Send chip reset command */
			writel(host_diag | HOST_DIAG_RESET_ADAPTER,
			       &instance->reg_set->fusion_host_diag);
			msleep(3000);

			/* Make sure reset adapter bit is cleared */
			host_diag = readl(&instance->reg_set->fusion_host_diag);
			retry = 0;
			while (host_diag & HOST_DIAG_RESET_ADAPTER) {
				msleep(100);
				host_diag =
				readl(&instance->reg_set->fusion_host_diag);
				if (retry++ == 1000) {
					printk(KERN_WARNING "megaraid_sas: "
					       "Diag reset adapter never "
					       "cleared!\n");
					break;
				}
			}
			if (host_diag & HOST_DIAG_RESET_ADAPTER)
				continue;

			/* Poll until FW leaves its early boot states */
			abs_state =
				instance->instancet->read_fw_status_reg(
					instance->reg_set);
			retry = 0;

			while ((abs_state <= MFI_STATE_FW_INIT) &&
			       (retry++ < 1000)) {
				msleep(100);
				abs_state =
				instance->instancet->read_fw_status_reg(
					instance->reg_set);
			}
			if (abs_state <= MFI_STATE_FW_INIT) {
				printk(KERN_WARNING "megaraid_sas: firmware "
				       "state < MFI_STATE_FW_INIT, state = "
				       "0x%x\n", abs_state);
				continue;
			}

			/* Wait for FW to become ready */
			if (megasas_transition_to_ready(instance)) {
				printk(KERN_WARNING "megaraid_sas: Failed to "
				       "transition controller to ready.\n");
				continue;
			}

			/* Re-arm the reply ring, then re-init the IOC */
			megasas_reset_reply_desc(instance);
			if (megasas_ioc_init_fusion(instance)) {
				printk(KERN_WARNING "megaraid_sas: "
				       "megasas_ioc_init_fusion() failed!\n");
				continue;
			}

			instance->instancet->enable_intr(instance->reg_set);
			instance->adprecovery = MEGASAS_HBA_OPERATIONAL;

			/* Re-fire management commands */
			for (j = 0 ; j < instance->max_fw_cmds; j++) {
				cmd_fusion = fusion->cmd_list[j];
				if (cmd_fusion->sync_cmd_idx !=
				    (u32)ULONG_MAX) {
					cmd_mfi =
					instance->
					cmd_list[cmd_fusion->sync_cmd_idx];
					if (cmd_mfi->frame->dcmd.opcode ==
					    MR_DCMD_LD_MAP_GET_INFO) {
						/* Map will be refreshed below;
						 * just release the cmds */
						megasas_return_cmd(instance,
								   cmd_mfi);
						megasas_return_cmd_fusion(
							instance, cmd_fusion);
					} else {
						req_desc =
						megasas_get_request_descriptor(
							instance,
							cmd_mfi->context.smid
							-1);
						if (!req_desc)
							printk(KERN_WARNING
							       "req_desc NULL"
							       "\n");
						else {
							instance->instancet->
							fire_cmd(instance,
								 req_desc->
								 u.low,
								 req_desc->
								 u.high,
								 instance->
								 reg_set);
						}
					}
				}
			}

			/* Reset load balance info */
			memset(fusion->load_balance_info, 0,
			       sizeof(struct LD_LOAD_BALANCE_INFO)
			       *MAX_LOGICAL_DRIVES);

			if (!megasas_get_map_info(instance))
				megasas_sync_map_info(instance);

			/* Adapter reset completed successfully */
			printk(KERN_WARNING "megaraid_sas: Reset "
			       "successful.\n");
			retval = SUCCESS;
			goto out;
		}
		/* Reset failed, kill the adapter */
		printk(KERN_WARNING "megaraid_sas: Reset failed, killing "
		       "adapter.\n");
		megaraid_sas_kill_hba(instance);
		retval = FAILED;
	} else {
		/* All IO drained on its own; no chip reset needed */
		instance->instancet->enable_intr(instance->reg_set);
		instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
	}
out:
	clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
	mutex_unlock(&instance->reset_mutex);
	return retval;
}
2225
/*
 * Fusion OCR work queue handler: scheduled (via work_init) when the ISR
 * finds the FW faulted; runs the full fusion reset.
 */
void megasas_fusion_ocr_wq(struct work_struct *work)
{
	struct megasas_instance *instance =
		container_of(work, struct megasas_instance, work_init);

	megasas_reset_fusion(instance->host);
}
2234
/*
 * Entry-point table for fusion controllers; the generic megaraid_sas
 * core dispatches through these function pointers.
 */
struct megasas_instance_template megasas_instance_template_fusion = {
	.fire_cmd = megasas_fire_cmd_fusion,
	.enable_intr = megasas_enable_intr_fusion,
	.disable_intr = megasas_disable_intr_fusion,
	.clear_intr = megasas_clear_intr_fusion,
	.read_fw_status_reg = megasas_read_fw_status_reg_fusion,
	.adp_reset = megasas_adp_reset_fusion,
	.check_reset = megasas_check_reset_fusion,
	.service_isr = megasas_isr_fusion,
	.tasklet = megasas_complete_cmd_dpc_fusion,
	.init_adapter = megasas_init_adapter_fusion,
	.build_and_issue_cmd = megasas_build_and_issue_cmd_fusion,
	.issue_dcmd = megasas_issue_dcmd_fusion,
};
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
new file mode 100644
index 000000000000..82b577a72c8b
--- /dev/null
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -0,0 +1,695 @@
1/*
2 * Linux MegaRAID driver for SAS based RAID controllers
3 *
4 * Copyright (c) 2009-2011 LSI Corporation.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 2
9 * of the License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 * FILE: megaraid_sas_fusion.h
21 *
22 * Authors: LSI Corporation
23 * Manoj Jose
24 * Sumant Patro
25 *
26 * Send feedback to: <megaraidlinux@lsi.com>
27 *
28 * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035
29 * ATTN: Linuxraid
30 */
31
#ifndef _MEGARAID_SAS_FUSION_H_
#define _MEGARAID_SAS_FUSION_H_

/* Fusion defines */
#define MEGASAS_MAX_SZ_CHAIN_FRAME 1024
#define MFI_FUSION_ENABLE_INTERRUPT_MASK (0x00000009)
#define MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE 256
/* Driver-private MPI function codes for wrapped MFI / LD IO requests */
#define MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST 0xF0
#define MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST 0xF1
#define MEGASAS_LOAD_BALANCE_FLAG 0x1
#define MEGASAS_DCMD_MBOX_PEND_FLAG 0x1
/* Bits in the fusion_host_diag register used by megasas_reset_fusion */
#define HOST_DIAG_WRITE_ENABLE 0x80
#define HOST_DIAG_RESET_ADAPTER 0x4
#define MEGASAS_FUSION_MAX_RESET_TRIES 3

/* T10 PI defines */
#define MR_PROT_INFO_TYPE_CONTROLLER 0x8
#define MEGASAS_SCSI_VARIABLE_LENGTH_CMD 0x7f
#define MEGASAS_SCSI_SERVICE_ACTION_READ32 0x9
#define MEGASAS_SCSI_SERVICE_ACTION_WRITE32 0xB
#define MEGASAS_SCSI_ADDL_CDB_LEN 0x18
#define MEGASAS_RD_WR_PROTECT_CHECK_ALL 0x20
#define MEGASAS_RD_WR_PROTECT_CHECK_NONE 0x60
#define MEGASAS_EEDPBLOCKSIZE 512

/*
 * Raid context flags
 */

#define MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT 0x4
#define MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_MASK 0x30
enum MR_RAID_FLAGS_IO_SUB_TYPE {
	MR_RAID_FLAGS_IO_SUB_TYPE_NONE = 0,
	MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD = 1,
};

/*
 * Request descriptor types
 */
#define MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO 0x7
#define MEGASAS_REQ_DESCRIPT_FLAGS_MFA 0x1

#define MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT 1

#define MEGASAS_FP_CMD_LEN 16
/* Bit index in instance->reset_flags while a reset is in progress */
#define MEGASAS_FUSION_IN_RESET 0
78
/*
 * Raid Context structure which describes MegaRAID specific IO Paramenters
 * This resides at offset 0x60 where the SGL normally starts in MPT IO Frames
 * (layout is FW ABI — do not reorder or resize fields)
 */

struct RAID_CONTEXT {
	u16     resvd0;
	u16     timeoutValue;
	u8      regLockFlags;
	u8      resvd1;
	u16     VirtualDiskTgtId;
	u64     regLockRowLBA;
	u32     regLockLength;
	u16     nextLMId;
	u8      exStatus;
	u8      status;
	u8      RAIDFlags;
	u8      numSGE;
	u16     configSeqNum;
	u8      spanArm;
	u8      resvd2[3];
};

/* spanArm packs arm (bits 4:0) and span (bits 7:5) */
#define RAID_CTX_SPANARM_ARM_SHIFT	(0)
#define RAID_CTX_SPANARM_ARM_MASK	(0x1f)

#define RAID_CTX_SPANARM_SPAN_SHIFT	(5)
#define RAID_CTX_SPANARM_SPAN_MASK	(0xE0)

/*
 * define region lock types
 */
enum REGION_TYPE {
	REGION_TYPE_UNUSED       = 0,
	REGION_TYPE_SHARED_READ  = 1,
	REGION_TYPE_SHARED_WRITE = 2,
	REGION_TYPE_EXCLUSIVE    = 3,
};
117
/* MPI2 defines — constants mirrored from the MPI 2.0 interface headers */
#define MPI2_FUNCTION_IOC_INIT              (0x02) /* IOC Init */
#define MPI2_WHOINIT_HOST_DRIVER            (0x04)
#define MPI2_VERSION_MAJOR                  (0x02)
#define MPI2_VERSION_MINOR                  (0x00)
#define MPI2_VERSION_MAJOR_MASK             (0xFF00)
#define MPI2_VERSION_MAJOR_SHIFT            (8)
#define MPI2_VERSION_MINOR_MASK             (0x00FF)
#define MPI2_VERSION_MINOR_SHIFT            (0)
#define MPI2_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \
		      MPI2_VERSION_MINOR)
#define MPI2_HEADER_VERSION_UNIT            (0x10)
#define MPI2_HEADER_VERSION_DEV             (0x00)
#define MPI2_HEADER_VERSION_UNIT_MASK       (0xFF00)
#define MPI2_HEADER_VERSION_UNIT_SHIFT      (8)
#define MPI2_HEADER_VERSION_DEV_MASK        (0x00FF)
#define MPI2_HEADER_VERSION_DEV_SHIFT       (0)
#define MPI2_HEADER_VERSION ((MPI2_HEADER_VERSION_UNIT << 8) | \
			     MPI2_HEADER_VERSION_DEV)
#define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR  (0x03)
/* EEDP (end-to-end data protection) flag bits for SCSIIO EEDPFlags */
#define MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG    (0x8000)
#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG      (0x0400)
#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP   (0x0003)
#define MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG      (0x0200)
#define MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD       (0x0100)
#define MPI2_SCSIIO_EEDPFLAGS_INSERT_OP         (0x0004)
#define MPI2_FUNCTION_SCSI_IO_REQUEST           (0x00) /* SCSI IO */
#define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY   (0x06)
#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO         (0x00)
#define MPI2_SGE_FLAGS_64_BIT_ADDRESSING        (0x02)
#define MPI2_SCSIIO_CONTROL_WRITE               (0x01000000)
#define MPI2_SCSIIO_CONTROL_READ                (0x02000000)
#define MPI2_REQ_DESCRIPT_FLAGS_TYPE_MASK       (0x0E)
#define MPI2_RPY_DESCRIPT_FLAGS_UNUSED          (0x0F)
#define MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS (0x00)
#define MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK       (0x0F)
/* Diag write-sequence unlock key values (see megasas_reset_fusion) */
#define MPI2_WRSEQ_FLUSH_KEY_VALUE              (0x0)
#define MPI2_WRITE_SEQUENCE_OFFSET              (0x00000004)
#define MPI2_WRSEQ_1ST_KEY_VALUE                (0xF)
#define MPI2_WRSEQ_2ND_KEY_VALUE                (0x4)
#define MPI2_WRSEQ_3RD_KEY_VALUE                (0xB)
#define MPI2_WRSEQ_4TH_KEY_VALUE                (0x2)
#define MPI2_WRSEQ_5TH_KEY_VALUE                (0x7)
#define MPI2_WRSEQ_6TH_KEY_VALUE                (0xD)
162
/* 64-bit IEEE SGE chain element (MPI 2.5 layout); used to chain in the
 * MFI frame for pass-thru requests */
struct MPI25_IEEE_SGE_CHAIN64 {
	u64     Address;
	u32     Length;
	u16     Reserved1;
	u8      NextChainOffset;
	u8      Flags;
};

/* Classic MPI simple SGE, 32- or 64-bit addressing */
struct MPI2_SGE_SIMPLE_UNION {
	u32 FlagsLength;
	union {
		u32 Address32;
		u64 Address64;
	} u;
};

/* CDB layout carrying 32-bit EEDP protection fields */
struct MPI2_SCSI_IO_CDB_EEDP32 {
	u8  CDB[20];                    /* 0x00 */
	u32 PrimaryReferenceTag;        /* 0x14 */
	u16 PrimaryApplicationTag;      /* 0x18 */
	u16 PrimaryApplicationTagMask;  /* 0x1A */
	u32 TransferLength;             /* 0x1C */
};

/* Classic MPI chain SGE */
struct MPI2_SGE_CHAIN_UNION {
	u16 Length;
	u8  NextChainOffset;
	u8  Flags;
	union {
		u32 Address32;
		u64 Address64;
	} u;
};

struct MPI2_IEEE_SGE_SIMPLE32 {
	u32 Address;
	u32 FlagsLength;
};

struct MPI2_IEEE_SGE_CHAIN32 {
	u32 Address;
	u32 FlagsLength;
};

struct MPI2_IEEE_SGE_SIMPLE64 {
	u64 Address;
	u32 Length;
	u16 Reserved1;
	u8  Reserved2;
	u8  Flags;
};

struct MPI2_IEEE_SGE_CHAIN64 {
	u64 Address;
	u32 Length;
	u16 Reserved1;
	u8  Reserved2;
	u8  Flags;
};

union MPI2_IEEE_SGE_SIMPLE_UNION {
	struct MPI2_IEEE_SGE_SIMPLE32 Simple32;
	struct MPI2_IEEE_SGE_SIMPLE64 Simple64;
};

union MPI2_IEEE_SGE_CHAIN_UNION {
	struct MPI2_IEEE_SGE_CHAIN32 Chain32;
	struct MPI2_IEEE_SGE_CHAIN64 Chain64;
};

/* Any SGE form that may start an IO frame's SGL */
union MPI2_SGE_IO_UNION {
	struct MPI2_SGE_SIMPLE_UNION      MpiSimple;
	struct MPI2_SGE_CHAIN_UNION       MpiChain;
	union  MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple;
	union  MPI2_IEEE_SGE_CHAIN_UNION  IeeeChain;
};

/* CDB area of a SCSI IO frame: plain 32-byte CDB, EEDP form, or an SGE
 * pointing at an out-of-frame CDB */
union MPI2_SCSI_IO_CDB_UNION {
	u8                            CDB32[32];
	struct MPI2_SCSI_IO_CDB_EEDP32 EEDP32;
	struct MPI2_SGE_SIMPLE_UNION  SGE;
};
245
/*
 * RAID SCSI IO Request Message
 * Total SGE count will be one less than _MPI2_SCSI_IO_REQUEST
 * (the RaidContext occupies the first part of the normal SGL area;
 * offsets in comments are byte offsets within the frame)
 */
struct MPI2_RAID_SCSI_IO_REQUEST {
	u16                     DevHandle;                      /* 0x00 */
	u8                      ChainOffset;                    /* 0x02 */
	u8                      Function;                       /* 0x03 */
	u16                     Reserved1;                      /* 0x04 */
	u8                      Reserved2;                      /* 0x06 */
	u8                      MsgFlags;                       /* 0x07 */
	u8                      VP_ID;                          /* 0x08 */
	u8                      VF_ID;                          /* 0x09 */
	u16                     Reserved3;                      /* 0x0A */
	u32                     SenseBufferLowAddress;          /* 0x0C */
	u16                     SGLFlags;                       /* 0x10 */
	u8                      SenseBufferLength;              /* 0x12 */
	u8                      Reserved4;                      /* 0x13 */
	u8                      SGLOffset0;                     /* 0x14 */
	u8                      SGLOffset1;                     /* 0x15 */
	u8                      SGLOffset2;                     /* 0x16 */
	u8                      SGLOffset3;                     /* 0x17 */
	u32                     SkipCount;                      /* 0x18 */
	u32                     DataLength;                     /* 0x1C */
	u32                     BidirectionalDataLength;        /* 0x20 */
	u16                     IoFlags;                        /* 0x24 */
	u16                     EEDPFlags;                      /* 0x26 */
	u32                     EEDPBlockSize;                  /* 0x28 */
	u32                     SecondaryReferenceTag;          /* 0x2C */
	u16                     SecondaryApplicationTag;        /* 0x30 */
	u16                     ApplicationTagTranslationMask;  /* 0x32 */
	u8                      LUN[8];                         /* 0x34 */
	u32                     Control;                        /* 0x3C */
	union MPI2_SCSI_IO_CDB_UNION CDB;                       /* 0x40 */
	struct RAID_CONTEXT     RaidContext;                    /* 0x60 */
	union MPI2_SGE_IO_UNION SGL;                            /* 0x80 */
};
283
/*
 * MPT RAID MFA IO Descriptor.
 * Carries the MFI frame address split across the descriptor words.
 */
struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR {
	u32     RequestFlags:8;
	u32     MessageAddress1:24; /* bits 31:8*/
	u32     MessageAddress2;    /* bits 61:32 */
};

/* Default Request Descriptor */
struct MPI2_DEFAULT_REQUEST_DESCRIPTOR {
	u8      RequestFlags;               /* 0x00 */
	u8      MSIxIndex;                  /* 0x01 */
	u16     SMID;                       /* 0x02 */
	u16     LMID;                       /* 0x04 */
	u16     DescriptorTypeDependent;    /* 0x06 */
};

/* High Priority Request Descriptor */
struct MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR {
	u8      RequestFlags;               /* 0x00 */
	u8      MSIxIndex;                  /* 0x01 */
	u16     SMID;                       /* 0x02 */
	u16     LMID;                       /* 0x04 */
	u16     Reserved1;                  /* 0x06 */
};

/* SCSI IO Request Descriptor */
struct MPI2_SCSI_IO_REQUEST_DESCRIPTOR {
	u8      RequestFlags;               /* 0x00 */
	u8      MSIxIndex;                  /* 0x01 */
	u16     SMID;                       /* 0x02 */
	u16     LMID;                       /* 0x04 */
	u16     DevHandle;                  /* 0x06 */
};

/* SCSI Target Request Descriptor */
struct MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR {
	u8      RequestFlags;               /* 0x00 */
	u8      MSIxIndex;                  /* 0x01 */
	u16     SMID;                       /* 0x02 */
	u16     LMID;                       /* 0x04 */
	u16     IoIndex;                    /* 0x06 */
};

/* RAID Accelerator Request Descriptor */
struct MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR {
	u8      RequestFlags;               /* 0x00 */
	u8      MSIxIndex;                  /* 0x01 */
	u16     SMID;                       /* 0x02 */
	u16     LMID;                       /* 0x04 */
	u16     Reserved;                   /* 0x06 */
};

/* union of Request Descriptors; the anonymous u.low/u.high pair is what
 * gets written to the request queue registers (see fire_cmd callers) */
union MEGASAS_REQUEST_DESCRIPTOR_UNION {
	struct MPI2_DEFAULT_REQUEST_DESCRIPTOR       Default;
	struct MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR HighPriority;
	struct MPI2_SCSI_IO_REQUEST_DESCRIPTOR       SCSIIO;
	struct MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR   SCSITarget;
	struct MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR    RAIDAccelerator;
	struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR MFAIo;
	union {
		struct {
			u32 low;
			u32 high;
		} u;
		u64 Words;
	};
};
354
/* Default Reply Descriptor */
struct MPI2_DEFAULT_REPLY_DESCRIPTOR {
	u8      ReplyFlags;                 /* 0x00 */
	u8      MSIxIndex;                  /* 0x01 */
	u16     DescriptorTypeDependent1;   /* 0x02 */
	u32     DescriptorTypeDependent2;   /* 0x04 */
};

/* Address Reply Descriptor */
struct MPI2_ADDRESS_REPLY_DESCRIPTOR {
	u8      ReplyFlags;                 /* 0x00 */
	u8      MSIxIndex;                  /* 0x01 */
	u16     SMID;                       /* 0x02 */
	u32     ReplyFrameAddress;          /* 0x04 */
};

/* SCSI IO Success Reply Descriptor; the form complete_cmd_fusion reads */
struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR {
	u8      ReplyFlags;                 /* 0x00 */
	u8      MSIxIndex;                  /* 0x01 */
	u16     SMID;                       /* 0x02 */
	u16     TaskTag;                    /* 0x04 */
	u16     Reserved1;                  /* 0x06 */
};

/* TargetAssist Success Reply Descriptor */
struct MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR {
	u8      ReplyFlags;                 /* 0x00 */
	u8      MSIxIndex;                  /* 0x01 */
	u16     SMID;                       /* 0x02 */
	u8      SequenceNumber;             /* 0x04 */
	u8      Reserved1;                  /* 0x05 */
	u16     IoIndex;                    /* 0x06 */
};

/* Target Command Buffer Reply Descriptor */
struct MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR {
	u8      ReplyFlags;                 /* 0x00 */
	u8      MSIxIndex;                  /* 0x01 */
	u8      VP_ID;                      /* 0x02 */
	u8      Flags;                      /* 0x03 */
	u16     InitiatorDevHandle;         /* 0x04 */
	u16     IoIndex;                    /* 0x06 */
};

/* RAID Accelerator Success Reply Descriptor */
struct MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR {
	u8      ReplyFlags;                 /* 0x00 */
	u8      MSIxIndex;                  /* 0x01 */
	u16     SMID;                       /* 0x02 */
	u32     Reserved;                   /* 0x04 */
};

/* union of Reply Descriptors; Words == ULLONG_MAX marks an unused slot */
union MPI2_REPLY_DESCRIPTORS_UNION {
	struct MPI2_DEFAULT_REPLY_DESCRIPTOR                Default;
	struct MPI2_ADDRESS_REPLY_DESCRIPTOR                AddressReply;
	struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR        SCSIIOSuccess;
	struct MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR   TargetAssistSuccess;
	struct MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR  TargetCommandBuffer;
	struct MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR
	RAIDAcceleratorSuccess;
	u64 Words;
};
419
/* IOCInit Request message — tells the IOC where the host's request frame
 * pool and reply queues live (see megasas_ioc_init_fusion) */
struct MPI2_IOC_INIT_REQUEST {
	u8      WhoInit;                        /* 0x00 */
	u8      Reserved1;                      /* 0x01 */
	u8      ChainOffset;                    /* 0x02 */
	u8      Function;                       /* 0x03 */
	u16     Reserved2;                      /* 0x04 */
	u8      Reserved3;                      /* 0x06 */
	u8      MsgFlags;                       /* 0x07 */
	u8      VP_ID;                          /* 0x08 */
	u8      VF_ID;                          /* 0x09 */
	u16     Reserved4;                      /* 0x0A */
	u16     MsgVersion;                     /* 0x0C */
	u16     HeaderVersion;                  /* 0x0E */
	u32     Reserved5;                      /* 0x10 */
	u16     Reserved6;                      /* 0x14 */
	u8      Reserved7;                      /* 0x16 */
	u8      HostMSIxVectors;                /* 0x17 */
	u16     Reserved8;                      /* 0x18 */
	u16     SystemRequestFrameSize;         /* 0x1A */
	u16     ReplyDescriptorPostQueueDepth;  /* 0x1C */
	u16     ReplyFreeQueueDepth;            /* 0x1E */
	u32     SenseBufferAddressHigh;         /* 0x20 */
	u32     SystemReplyAddressHigh;         /* 0x24 */
	u64     SystemRequestFrameBaseAddress;  /* 0x28 */
	u64     ReplyDescriptorPostQueueAddress;/* 0x30 */
	u64     ReplyFreeQueueAddress;          /* 0x38 */
	u64     TimeStamp;                      /* 0x40 */
};
449
450/* mrpriv defines */
451#define MR_PD_INVALID 0xFFFF
452#define MAX_SPAN_DEPTH 8
453#define MAX_RAIDMAP_SPAN_DEPTH (MAX_SPAN_DEPTH)
454#define MAX_ROW_SIZE 32
455#define MAX_RAIDMAP_ROW_SIZE (MAX_ROW_SIZE)
456#define MAX_LOGICAL_DRIVES 64
457#define MAX_RAIDMAP_LOGICAL_DRIVES (MAX_LOGICAL_DRIVES)
458#define MAX_RAIDMAP_VIEWS (MAX_LOGICAL_DRIVES)
459#define MAX_ARRAYS 128
460#define MAX_RAIDMAP_ARRAYS (MAX_ARRAYS)
461#define MAX_PHYSICAL_DEVICES 256
462#define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES)
463#define MR_DCMD_LD_MAP_GET_INFO 0x0300e101
464
465struct MR_DEV_HANDLE_INFO {
466 u16 curDevHdl;
467 u8 validHandles;
468 u8 reserved;
469 u16 devHandle[2];
470};
471
472struct MR_ARRAY_INFO {
473 u16 pd[MAX_RAIDMAP_ROW_SIZE];
474};
475
476struct MR_QUAD_ELEMENT {
477 u64 logStart;
478 u64 logEnd;
479 u64 offsetInSpan;
480 u32 diff;
481 u32 reserved1;
482};
483
484struct MR_SPAN_INFO {
485 u32 noElements;
486 u32 reserved1;
487 struct MR_QUAD_ELEMENT quad[MAX_RAIDMAP_SPAN_DEPTH];
488};
489
490struct MR_LD_SPAN {
491 u64 startBlk;
492 u64 numBlks;
493 u16 arrayRef;
494 u8 reserved[6];
495};
496
497struct MR_SPAN_BLOCK_INFO {
498 u64 num_rows;
499 struct MR_LD_SPAN span;
500 struct MR_SPAN_INFO block_span_info;
501};
502
503struct MR_LD_RAID {
504 struct {
505 u32 fpCapable:1;
506 u32 reserved5:3;
507 u32 ldPiMode:4;
508 u32 pdPiMode:4;
509 u32 encryptionType:8;
510 u32 fpWriteCapable:1;
511 u32 fpReadCapable:1;
512 u32 fpWriteAcrossStripe:1;
513 u32 fpReadAcrossStripe:1;
514 u32 reserved4:8;
515 } capability;
516 u32 reserved6;
517 u64 size;
518 u8 spanDepth;
519 u8 level;
520 u8 stripeShift;
521 u8 rowSize;
522 u8 rowDataSize;
523 u8 writeMode;
524 u8 PRL;
525 u8 SRL;
526 u16 targetId;
527 u8 ldState;
528 u8 regTypeReqOnWrite;
529 u8 modFactor;
530 u8 reserved2[1];
531 u16 seqNum;
532
533 struct {
534 u32 ldSyncRequired:1;
535 u32 reserved:31;
536 } flags;
537
538 u8 reserved3[0x5C];
539};
540
541struct MR_LD_SPAN_MAP {
542 struct MR_LD_RAID ldRaid;
543 u8 dataArmMap[MAX_RAIDMAP_ROW_SIZE];
544 struct MR_SPAN_BLOCK_INFO spanBlock[MAX_RAIDMAP_SPAN_DEPTH];
545};
546
547struct MR_FW_RAID_MAP {
548 u32 totalSize;
549 union {
550 struct {
551 u32 maxLd;
552 u32 maxSpanDepth;
553 u32 maxRowSize;
554 u32 maxPdCount;
555 u32 maxArrays;
556 } validationInfo;
557 u32 version[5];
558 u32 reserved1[5];
559 };
560
561 u32 ldCount;
562 u32 Reserved1;
563 u8 ldTgtIdToLd[MAX_RAIDMAP_LOGICAL_DRIVES+
564 MAX_RAIDMAP_VIEWS];
565 u8 fpPdIoTimeoutSec;
566 u8 reserved2[7];
567 struct MR_ARRAY_INFO arMapInfo[MAX_RAIDMAP_ARRAYS];
568 struct MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
569 struct MR_LD_SPAN_MAP ldSpanMap[1];
570};
571
572struct IO_REQUEST_INFO {
573 u64 ldStartBlock;
574 u32 numBlocks;
575 u16 ldTgtId;
576 u8 isRead;
577 u16 devHandle;
578 u64 pdBlock;
579 u8 fpOkForIo;
580};
581
582struct MR_LD_TARGET_SYNC {
583 u8 targetId;
584 u8 reserved;
585 u16 seqNum;
586};
587
588#define IEEE_SGE_FLAGS_ADDR_MASK (0x03)
589#define IEEE_SGE_FLAGS_SYSTEM_ADDR (0x00)
590#define IEEE_SGE_FLAGS_IOCDDR_ADDR (0x01)
591#define IEEE_SGE_FLAGS_IOCPLB_ADDR (0x02)
592#define IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03)
593#define IEEE_SGE_FLAGS_CHAIN_ELEMENT (0x80)
594#define IEEE_SGE_FLAGS_END_OF_LIST (0x40)
595
596struct megasas_register_set;
597struct megasas_instance;
598
599union desc_word {
600 u64 word;
601 struct {
602 u32 low;
603 u32 high;
604 } u;
605};
606
607struct megasas_cmd_fusion {
608 struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
609 dma_addr_t io_request_phys_addr;
610
611 union MPI2_SGE_IO_UNION *sg_frame;
612 dma_addr_t sg_frame_phys_addr;
613
614 u8 *sense;
615 dma_addr_t sense_phys_addr;
616
617 struct list_head list;
618 struct scsi_cmnd *scmd;
619 struct megasas_instance *instance;
620
621 u8 retry_for_fw_reset;
622 union MEGASAS_REQUEST_DESCRIPTOR_UNION *request_desc;
623
624 /*
625 * Context for a MFI frame.
626 * Used to get the mfi cmd from list when a MFI cmd is completed
627 */
628 u32 sync_cmd_idx;
629 u32 index;
630 u8 flags;
631};
632
633struct LD_LOAD_BALANCE_INFO {
634 u8 loadBalanceFlag;
635 u8 reserved1;
636 u16 raid1DevHandle[2];
637 atomic_t scsi_pending_cmds[2];
638 u64 last_accessed_block[2];
639};
640
641struct MR_FW_RAID_MAP_ALL {
642 struct MR_FW_RAID_MAP raidMap;
643 struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES - 1];
644} __attribute__ ((packed));
645
646struct fusion_context {
647 struct megasas_cmd_fusion **cmd_list;
648 struct list_head cmd_pool;
649
650 spinlock_t cmd_pool_lock;
651
652 dma_addr_t req_frames_desc_phys;
653 u8 *req_frames_desc;
654
655 struct dma_pool *io_request_frames_pool;
656 dma_addr_t io_request_frames_phys;
657 u8 *io_request_frames;
658
659 struct dma_pool *sg_dma_pool;
660 struct dma_pool *sense_dma_pool;
661
662 dma_addr_t reply_frames_desc_phys;
663 union MPI2_REPLY_DESCRIPTORS_UNION *reply_frames_desc;
664 struct dma_pool *reply_frames_desc_pool;
665
666 u16 last_reply_idx;
667
668 u32 reply_q_depth;
669 u32 request_alloc_sz;
670 u32 reply_alloc_sz;
671 u32 io_frames_alloc_sz;
672
673 u16 max_sge_in_main_msg;
674 u16 max_sge_in_chain;
675
676 u8 chain_offset_io_request;
677 u8 chain_offset_mfi_pthru;
678
679 struct MR_FW_RAID_MAP_ALL *ld_map[2];
680 dma_addr_t ld_map_phys[2];
681
682 u32 map_sz;
683 u8 fast_path_io;
684 struct LD_LOAD_BALANCE_INFO load_balance_info[MAX_LOGICAL_DRIVES];
685};
686
687union desc_value {
688 u64 word;
689 struct {
690 u32 low;
691 u32 high;
692 } u;
693};
694
695#endif /* _MEGARAID_SAS_FUSION_H_ */
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h
index 4b1c2f0350f9..8be75e65f763 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2.h
@@ -8,7 +8,7 @@
8 * scatter/gather formats. 8 * scatter/gather formats.
9 * Creation Date: June 21, 2006 9 * Creation Date: June 21, 2006
10 * 10 *
11 * mpi2.h Version: 02.00.15 11 * mpi2.h Version: 02.00.16
12 * 12 *
13 * Version History 13 * Version History
14 * --------------- 14 * ---------------
@@ -61,6 +61,8 @@
61 * Added define for MPI2_FUNCTION_PWR_MGMT_CONTROL. 61 * Added define for MPI2_FUNCTION_PWR_MGMT_CONTROL.
62 * Added defines for product-specific range of message 62 * Added defines for product-specific range of message
63 * function codes, 0xF0 to 0xFF. 63 * function codes, 0xF0 to 0xFF.
64 * 05-12-10 02.00.16 Bumped MPI2_HEADER_VERSION_UNIT.
65 * Added alternative defines for the SGE Direction bit.
64 * -------------------------------------------------------------------------- 66 * --------------------------------------------------------------------------
65 */ 67 */
66 68
@@ -86,7 +88,7 @@
86#define MPI2_VERSION_02_00 (0x0200) 88#define MPI2_VERSION_02_00 (0x0200)
87 89
88/* versioning for this MPI header set */ 90/* versioning for this MPI header set */
89#define MPI2_HEADER_VERSION_UNIT (0x0F) 91#define MPI2_HEADER_VERSION_UNIT (0x10)
90#define MPI2_HEADER_VERSION_DEV (0x00) 92#define MPI2_HEADER_VERSION_DEV (0x00)
91#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) 93#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
92#define MPI2_HEADER_VERSION_UNIT_SHIFT (8) 94#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
@@ -929,6 +931,9 @@ typedef struct _MPI2_MPI_SGE_UNION
929#define MPI2_SGE_FLAGS_IOC_TO_HOST (0x00) 931#define MPI2_SGE_FLAGS_IOC_TO_HOST (0x00)
930#define MPI2_SGE_FLAGS_HOST_TO_IOC (0x04) 932#define MPI2_SGE_FLAGS_HOST_TO_IOC (0x04)
931 933
934#define MPI2_SGE_FLAGS_DEST (MPI2_SGE_FLAGS_IOC_TO_HOST)
935#define MPI2_SGE_FLAGS_SOURCE (MPI2_SGE_FLAGS_HOST_TO_IOC)
936
932/* Address Size */ 937/* Address Size */
933 938
934#define MPI2_SGE_FLAGS_32_BIT_ADDRESSING (0x00) 939#define MPI2_SGE_FLAGS_32_BIT_ADDRESSING (0x00)
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
index e3728d736d85..d76a65847603 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
@@ -6,7 +6,7 @@
6 * Title: MPI Configuration messages and pages 6 * Title: MPI Configuration messages and pages
7 * Creation Date: November 10, 2006 7 * Creation Date: November 10, 2006
8 * 8 *
9 * mpi2_cnfg.h Version: 02.00.14 9 * mpi2_cnfg.h Version: 02.00.15
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -121,6 +121,10 @@
121 * Added MPI2_CONFIG_PAGE_SASIOUNIT_6 and related defines. 121 * Added MPI2_CONFIG_PAGE_SASIOUNIT_6 and related defines.
122 * Added MPI2_CONFIG_PAGE_SASIOUNIT_7 and related defines. 122 * Added MPI2_CONFIG_PAGE_SASIOUNIT_7 and related defines.
123 * Added MPI2_CONFIG_PAGE_SASIOUNIT_8 and related defines. 123 * Added MPI2_CONFIG_PAGE_SASIOUNIT_8 and related defines.
124 * 05-12-10 02.00.15 Added MPI2_RAIDVOL0_STATUS_FLAG_VOL_NOT_CONSISTENT
125 * define.
126 * Added MPI2_PHYSDISK0_INCOMPATIBLE_MEDIA_TYPE define.
127 * Added MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY define.
124 * -------------------------------------------------------------------------- 128 * --------------------------------------------------------------------------
125 */ 129 */
126 130
@@ -333,7 +337,7 @@ typedef struct _MPI2_CONFIG_REQUEST
333#define MPI2_CONFIG_ACTION_PAGE_READ_NVRAM (0x06) 337#define MPI2_CONFIG_ACTION_PAGE_READ_NVRAM (0x06)
334#define MPI2_CONFIG_ACTION_PAGE_GET_CHANGEABLE (0x07) 338#define MPI2_CONFIG_ACTION_PAGE_GET_CHANGEABLE (0x07)
335 339
336/* values for SGLFlags field are in the SGL section of mpi2.h */ 340/* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
337 341
338 342
339/* Config Reply Message */ 343/* Config Reply Message */
@@ -379,6 +383,8 @@ typedef struct _MPI2_CONFIG_REPLY
379#define MPI2_MFGPAGE_DEVID_SAS2116_1 (0x0064) 383#define MPI2_MFGPAGE_DEVID_SAS2116_1 (0x0064)
380#define MPI2_MFGPAGE_DEVID_SAS2116_2 (0x0065) 384#define MPI2_MFGPAGE_DEVID_SAS2116_2 (0x0065)
381 385
386#define MPI2_MFGPAGE_DEVID_SSS6200 (0x007E)
387
382#define MPI2_MFGPAGE_DEVID_SAS2208_1 (0x0080) 388#define MPI2_MFGPAGE_DEVID_SAS2208_1 (0x0080)
383#define MPI2_MFGPAGE_DEVID_SAS2208_2 (0x0081) 389#define MPI2_MFGPAGE_DEVID_SAS2208_2 (0x0081)
384#define MPI2_MFGPAGE_DEVID_SAS2208_3 (0x0082) 390#define MPI2_MFGPAGE_DEVID_SAS2208_3 (0x0082)
@@ -390,6 +396,8 @@ typedef struct _MPI2_CONFIG_REPLY
390#define MPI2_MFGPAGE_DEVID_SAS2308_3 (0x006E) 396#define MPI2_MFGPAGE_DEVID_SAS2308_3 (0x006E)
391 397
392 398
399
400
393/* Manufacturing Page 0 */ 401/* Manufacturing Page 0 */
394 402
395typedef struct _MPI2_CONFIG_PAGE_MAN_0 403typedef struct _MPI2_CONFIG_PAGE_MAN_0
@@ -729,6 +737,7 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1
729/* IO Unit Page 1 Flags defines */ 737/* IO Unit Page 1 Flags defines */
730#define MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY (0x00000800) 738#define MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY (0x00000800)
731#define MPI2_IOUNITPAGE1_MASK_SATA_WRITE_CACHE (0x00000600) 739#define MPI2_IOUNITPAGE1_MASK_SATA_WRITE_CACHE (0x00000600)
740#define MPI2_IOUNITPAGE1_SATA_WRITE_CACHE_SHIFT (9)
732#define MPI2_IOUNITPAGE1_ENABLE_SATA_WRITE_CACHE (0x00000000) 741#define MPI2_IOUNITPAGE1_ENABLE_SATA_WRITE_CACHE (0x00000000)
733#define MPI2_IOUNITPAGE1_DISABLE_SATA_WRITE_CACHE (0x00000200) 742#define MPI2_IOUNITPAGE1_DISABLE_SATA_WRITE_CACHE (0x00000200)
734#define MPI2_IOUNITPAGE1_UNCHANGED_SATA_WRITE_CACHE (0x00000400) 743#define MPI2_IOUNITPAGE1_UNCHANGED_SATA_WRITE_CACHE (0x00000400)
@@ -1347,6 +1356,7 @@ typedef struct _MPI2_CONFIG_PAGE_RAID_VOL_0
1347#define MPI2_RAIDVOL0_STATUS_FLAG_CAPACITY_EXPANSION (0x00040000) 1356#define MPI2_RAIDVOL0_STATUS_FLAG_CAPACITY_EXPANSION (0x00040000)
1348#define MPI2_RAIDVOL0_STATUS_FLAG_BACKGROUND_INIT (0x00020000) 1357#define MPI2_RAIDVOL0_STATUS_FLAG_BACKGROUND_INIT (0x00020000)
1349#define MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS (0x00010000) 1358#define MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS (0x00010000)
1359#define MPI2_RAIDVOL0_STATUS_FLAG_VOL_NOT_CONSISTENT (0x00000080)
1350#define MPI2_RAIDVOL0_STATUS_FLAG_OCE_ALLOWED (0x00000040) 1360#define MPI2_RAIDVOL0_STATUS_FLAG_OCE_ALLOWED (0x00000040)
1351#define MPI2_RAIDVOL0_STATUS_FLAG_BGI_COMPLETE (0x00000020) 1361#define MPI2_RAIDVOL0_STATUS_FLAG_BGI_COMPLETE (0x00000020)
1352#define MPI2_RAIDVOL0_STATUS_FLAG_1E_OFFSET_MIRROR (0x00000000) 1362#define MPI2_RAIDVOL0_STATUS_FLAG_1E_OFFSET_MIRROR (0x00000000)
@@ -1469,11 +1479,15 @@ typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_0
1469#define MPI2_PHYSDISK0_INCOMPATIBLE_MAX_LBA (0x03) 1479#define MPI2_PHYSDISK0_INCOMPATIBLE_MAX_LBA (0x03)
1470#define MPI2_PHYSDISK0_INCOMPATIBLE_SATA_EXTENDED_CMD (0x04) 1480#define MPI2_PHYSDISK0_INCOMPATIBLE_SATA_EXTENDED_CMD (0x04)
1471#define MPI2_PHYSDISK0_INCOMPATIBLE_REMOVEABLE_MEDIA (0x05) 1481#define MPI2_PHYSDISK0_INCOMPATIBLE_REMOVEABLE_MEDIA (0x05)
1482#define MPI2_PHYSDISK0_INCOMPATIBLE_MEDIA_TYPE (0x06)
1472#define MPI2_PHYSDISK0_INCOMPATIBLE_UNKNOWN (0xFF) 1483#define MPI2_PHYSDISK0_INCOMPATIBLE_UNKNOWN (0xFF)
1473 1484
1474/* PhysDiskAttributes defines */ 1485/* PhysDiskAttributes defines */
1486#define MPI2_PHYSDISK0_ATTRIB_MEDIA_MASK (0x0C)
1475#define MPI2_PHYSDISK0_ATTRIB_SOLID_STATE_DRIVE (0x08) 1487#define MPI2_PHYSDISK0_ATTRIB_SOLID_STATE_DRIVE (0x08)
1476#define MPI2_PHYSDISK0_ATTRIB_HARD_DISK_DRIVE (0x04) 1488#define MPI2_PHYSDISK0_ATTRIB_HARD_DISK_DRIVE (0x04)
1489
1490#define MPI2_PHYSDISK0_ATTRIB_PROTOCOL_MASK (0x03)
1477#define MPI2_PHYSDISK0_ATTRIB_SAS_PROTOCOL (0x02) 1491#define MPI2_PHYSDISK0_ATTRIB_SAS_PROTOCOL (0x02)
1478#define MPI2_PHYSDISK0_ATTRIB_SATA_PROTOCOL (0x01) 1492#define MPI2_PHYSDISK0_ATTRIB_SATA_PROTOCOL (0x01)
1479 1493
@@ -1545,6 +1559,7 @@ typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1
1545#define MPI2_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE (0x03) 1559#define MPI2_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE (0x03)
1546#define MPI2_SAS_NEG_LINK_RATE_PORT_SELECTOR (0x04) 1560#define MPI2_SAS_NEG_LINK_RATE_PORT_SELECTOR (0x04)
1547#define MPI2_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS (0x05) 1561#define MPI2_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS (0x05)
1562#define MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY (0x06)
1548#define MPI2_SAS_NEG_LINK_RATE_1_5 (0x08) 1563#define MPI2_SAS_NEG_LINK_RATE_1_5 (0x08)
1549#define MPI2_SAS_NEG_LINK_RATE_3_0 (0x09) 1564#define MPI2_SAS_NEG_LINK_RATE_3_0 (0x09)
1550#define MPI2_SAS_NEG_LINK_RATE_6_0 (0x0A) 1565#define MPI2_SAS_NEG_LINK_RATE_6_0 (0x0A)
@@ -1571,6 +1586,7 @@ typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1
1571#define MPI2_SAS_PHYINFO_PHY_VACANT (0x80000000) 1586#define MPI2_SAS_PHYINFO_PHY_VACANT (0x80000000)
1572 1587
1573#define MPI2_SAS_PHYINFO_PHY_POWER_CONDITION_MASK (0x18000000) 1588#define MPI2_SAS_PHYINFO_PHY_POWER_CONDITION_MASK (0x18000000)
1589#define MPI2_SAS_PHYINFO_SHIFT_PHY_POWER_CONDITION (27)
1574#define MPI2_SAS_PHYINFO_PHY_POWER_ACTIVE (0x00000000) 1590#define MPI2_SAS_PHYINFO_PHY_POWER_ACTIVE (0x00000000)
1575#define MPI2_SAS_PHYINFO_PHY_POWER_PARTIAL (0x08000000) 1591#define MPI2_SAS_PHYINFO_PHY_POWER_PARTIAL (0x08000000)
1576#define MPI2_SAS_PHYINFO_PHY_POWER_SLUMBER (0x10000000) 1592#define MPI2_SAS_PHYINFO_PHY_POWER_SLUMBER (0x10000000)
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_history.txt b/drivers/scsi/mpt2sas/mpi/mpi2_history.txt
index bd6c92b5fae5..b1e88f26b748 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_history.txt
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_history.txt
@@ -291,6 +291,7 @@ mpi2_raid.h
291 * can be sized by the build environment. 291 * can be sized by the build environment.
292 * 07-30-09 02.00.04 Added proper define for the Use Default Settings bit of 292 * 07-30-09 02.00.04 Added proper define for the Use Default Settings bit of
293 * VolumeCreationFlags and marked the old one as obsolete. 293 * VolumeCreationFlags and marked the old one as obsolete.
294 * 05-12-10 02.00.05 Added MPI2_RAID_VOL_FLAGS_OP_MDC define.
294 * -------------------------------------------------------------------------- 295 * --------------------------------------------------------------------------
295 296
296mpi2_sas.h 297mpi2_sas.h
@@ -301,6 +302,7 @@ mpi2_sas.h
301 * Request. 302 * Request.
302 * 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST 303 * 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST
303 * to MPI2_SGE_IO_UNION since it supports chained SGLs. 304 * to MPI2_SGE_IO_UNION since it supports chained SGLs.
305 * 05-12-10 02.00.04 Modified some comments.
304 * -------------------------------------------------------------------------- 306 * --------------------------------------------------------------------------
305 307
306mpi2_targ.h 308mpi2_targ.h
@@ -324,6 +326,7 @@ mpi2_tool.h
324 * and reply messages. 326 * and reply messages.
325 * Added MPI2_DIAG_BUF_TYPE_EXTENDED. 327 * Added MPI2_DIAG_BUF_TYPE_EXTENDED.
326 * Incremented MPI2_DIAG_BUF_TYPE_COUNT. 328 * Incremented MPI2_DIAG_BUF_TYPE_COUNT.
329 * 05-12-10 02.00.05 Added Diagnostic Data Upload tool.
327 * -------------------------------------------------------------------------- 330 * --------------------------------------------------------------------------
328 331
329mpi2_type.h 332mpi2_type.h
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_init.h b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
index c4c99dfcb820..20e6b8869341 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_init.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
@@ -6,7 +6,7 @@
6 * Title: MPI SCSI initiator mode messages and structures 6 * Title: MPI SCSI initiator mode messages and structures
7 * Creation Date: June 23, 2006 7 * Creation Date: June 23, 2006
8 * 8 *
9 * mpi2_init.h Version: 02.00.09 9 * mpi2_init.h Version: 02.00.10
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -32,6 +32,7 @@
32 * Added ResponseInfo field to MPI2_SCSI_TASK_MANAGE_REPLY. 32 * Added ResponseInfo field to MPI2_SCSI_TASK_MANAGE_REPLY.
33 * Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define. 33 * Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define.
34 * 02-10-10 02.00.09 Removed unused structure that had "#if 0" around it. 34 * 02-10-10 02.00.09 Removed unused structure that had "#if 0" around it.
35 * 05-12-10 02.00.10 Added optional vendor-unique region to SCSI IO Request.
35 * -------------------------------------------------------------------------- 36 * --------------------------------------------------------------------------
36 */ 37 */
37 38
@@ -98,7 +99,13 @@ typedef struct _MPI2_SCSI_IO_REQUEST
98 U8 LUN[8]; /* 0x34 */ 99 U8 LUN[8]; /* 0x34 */
99 U32 Control; /* 0x3C */ 100 U32 Control; /* 0x3C */
100 MPI2_SCSI_IO_CDB_UNION CDB; /* 0x40 */ 101 MPI2_SCSI_IO_CDB_UNION CDB; /* 0x40 */
102
103#ifdef MPI2_SCSI_IO_VENDOR_UNIQUE_REGION /* typically this is left undefined */
104 MPI2_SCSI_IO_VENDOR_UNIQUE VendorRegion;
105#endif
106
101 MPI2_SGE_IO_UNION SGL; /* 0x60 */ 107 MPI2_SGE_IO_UNION SGL; /* 0x60 */
108
102} MPI2_SCSI_IO_REQUEST, MPI2_POINTER PTR_MPI2_SCSI_IO_REQUEST, 109} MPI2_SCSI_IO_REQUEST, MPI2_POINTER PTR_MPI2_SCSI_IO_REQUEST,
103 Mpi2SCSIIORequest_t, MPI2_POINTER pMpi2SCSIIORequest_t; 110 Mpi2SCSIIORequest_t, MPI2_POINTER pMpi2SCSIIORequest_t;
104 111
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
index 495bedc4d1f7..761cbdb8a033 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
@@ -6,7 +6,7 @@
6 * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages 6 * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
7 * Creation Date: October 11, 2006 7 * Creation Date: October 11, 2006
8 * 8 *
9 * mpi2_ioc.h Version: 02.00.14 9 * mpi2_ioc.h Version: 02.00.15
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -101,6 +101,8 @@
101 * 02-10-10 02.00.14 Added SAS Quiesce Event structure and defines. 101 * 02-10-10 02.00.14 Added SAS Quiesce Event structure and defines.
102 * Added PowerManagementControl Request structures and 102 * Added PowerManagementControl Request structures and
103 * defines. 103 * defines.
104 * 05-12-10 02.00.15 Marked Task Set Full Event as obsolete.
105 * Added MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY define.
104 * -------------------------------------------------------------------------- 106 * --------------------------------------------------------------------------
105 */ 107 */
106 108
@@ -456,7 +458,7 @@ typedef struct _MPI2_EVENT_NOTIFICATION_REPLY
456#define MPI2_EVENT_STATE_CHANGE (0x0002) 458#define MPI2_EVENT_STATE_CHANGE (0x0002)
457#define MPI2_EVENT_HARD_RESET_RECEIVED (0x0005) 459#define MPI2_EVENT_HARD_RESET_RECEIVED (0x0005)
458#define MPI2_EVENT_EVENT_CHANGE (0x000A) 460#define MPI2_EVENT_EVENT_CHANGE (0x000A)
459#define MPI2_EVENT_TASK_SET_FULL (0x000E) 461#define MPI2_EVENT_TASK_SET_FULL (0x000E) /* obsolete */
460#define MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE (0x000F) 462#define MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE (0x000F)
461#define MPI2_EVENT_IR_OPERATION_STATUS (0x0014) 463#define MPI2_EVENT_IR_OPERATION_STATUS (0x0014)
462#define MPI2_EVENT_SAS_DISCOVERY (0x0016) 464#define MPI2_EVENT_SAS_DISCOVERY (0x0016)
@@ -517,6 +519,7 @@ typedef struct _MPI2_EVENT_DATA_HARD_RESET_RECEIVED
517 MPI2_POINTER pMpi2EventDataHardResetReceived_t; 519 MPI2_POINTER pMpi2EventDataHardResetReceived_t;
518 520
519/* Task Set Full Event data */ 521/* Task Set Full Event data */
522/* this event is obsolete */
520 523
521typedef struct _MPI2_EVENT_DATA_TASK_SET_FULL 524typedef struct _MPI2_EVENT_DATA_TASK_SET_FULL
522{ 525{
@@ -831,6 +834,7 @@ typedef struct _MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST
831#define MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE (0x03) 834#define MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE (0x03)
832#define MPI2_EVENT_SAS_TOPO_LR_PORT_SELECTOR (0x04) 835#define MPI2_EVENT_SAS_TOPO_LR_PORT_SELECTOR (0x04)
833#define MPI2_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS (0x05) 836#define MPI2_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS (0x05)
837#define MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY (0x06)
834#define MPI2_EVENT_SAS_TOPO_LR_RATE_1_5 (0x08) 838#define MPI2_EVENT_SAS_TOPO_LR_RATE_1_5 (0x08)
835#define MPI2_EVENT_SAS_TOPO_LR_RATE_3_0 (0x09) 839#define MPI2_EVENT_SAS_TOPO_LR_RATE_3_0 (0x09)
836#define MPI2_EVENT_SAS_TOPO_LR_RATE_6_0 (0x0A) 840#define MPI2_EVENT_SAS_TOPO_LR_RATE_6_0 (0x0A)
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_raid.h b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
index 5160c33d2a00..bd61a7b60a2b 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
@@ -1,12 +1,12 @@
1/* 1/*
2 * Copyright (c) 2000-2008 LSI Corporation. 2 * Copyright (c) 2000-2010 LSI Corporation.
3 * 3 *
4 * 4 *
5 * Name: mpi2_raid.h 5 * Name: mpi2_raid.h
6 * Title: MPI Integrated RAID messages and structures 6 * Title: MPI Integrated RAID messages and structures
7 * Creation Date: April 26, 2007 7 * Creation Date: April 26, 2007
8 * 8 *
9 * mpi2_raid.h Version: 02.00.04 9 * mpi2_raid.h Version: 02.00.05
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -22,6 +22,7 @@
22 * can be sized by the build environment. 22 * can be sized by the build environment.
23 * 07-30-09 02.00.04 Added proper define for the Use Default Settings bit of 23 * 07-30-09 02.00.04 Added proper define for the Use Default Settings bit of
24 * VolumeCreationFlags and marked the old one as obsolete. 24 * VolumeCreationFlags and marked the old one as obsolete.
25 * 05-12-10 02.00.05 Added MPI2_RAID_VOL_FLAGS_OP_MDC define.
25 * -------------------------------------------------------------------------- 26 * --------------------------------------------------------------------------
26 */ 27 */
27 28
@@ -260,6 +261,7 @@ typedef struct _MPI2_RAID_VOL_INDICATOR
260#define MPI2_RAID_VOL_FLAGS_OP_ONLINE_CAP_EXPANSION (0x00000001) 261#define MPI2_RAID_VOL_FLAGS_OP_ONLINE_CAP_EXPANSION (0x00000001)
261#define MPI2_RAID_VOL_FLAGS_OP_CONSISTENCY_CHECK (0x00000002) 262#define MPI2_RAID_VOL_FLAGS_OP_CONSISTENCY_CHECK (0x00000002)
262#define MPI2_RAID_VOL_FLAGS_OP_RESYNC (0x00000003) 263#define MPI2_RAID_VOL_FLAGS_OP_RESYNC (0x00000003)
264#define MPI2_RAID_VOL_FLAGS_OP_MDC (0x00000004)
263 265
264 266
265/* RAID Action Reply ActionData union */ 267/* RAID Action Reply ActionData union */
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
index 2d8aeed51392..608f6d6e6fca 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
@@ -1,12 +1,12 @@
1/* 1/*
2 * Copyright (c) 2000-2007 LSI Corporation. 2 * Copyright (c) 2000-2010 LSI Corporation.
3 * 3 *
4 * 4 *
5 * Name: mpi2_sas.h 5 * Name: mpi2_sas.h
6 * Title: MPI Serial Attached SCSI structures and definitions 6 * Title: MPI Serial Attached SCSI structures and definitions
7 * Creation Date: February 9, 2007 7 * Creation Date: February 9, 2007
8 * 8 *
9 * mpi2.h Version: 02.00.03 9 * mpi2_sas.h Version: 02.00.04
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -20,6 +20,7 @@
20 * Request. 20 * Request.
21 * 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST 21 * 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST
22 * to MPI2_SGE_IO_UNION since it supports chained SGLs. 22 * to MPI2_SGE_IO_UNION since it supports chained SGLs.
23 * 05-12-10 02.00.04 Modified some comments.
23 * -------------------------------------------------------------------------- 24 * --------------------------------------------------------------------------
24 */ 25 */
25 26
@@ -110,7 +111,7 @@ typedef struct _MPI2_SMP_PASSTHROUGH_REQUEST
110/* values for PassthroughFlags field */ 111/* values for PassthroughFlags field */
111#define MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE (0x80) 112#define MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE (0x80)
112 113
113/* values for SGLFlags field are in the SGL section of mpi2.h */ 114/* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
114 115
115 116
116/* SMP Passthrough Reply Message */ 117/* SMP Passthrough Reply Message */
@@ -174,7 +175,7 @@ typedef struct _MPI2_SATA_PASSTHROUGH_REQUEST
174#define MPI2_SATA_PT_REQ_PT_FLAGS_WRITE (0x0002) 175#define MPI2_SATA_PT_REQ_PT_FLAGS_WRITE (0x0002)
175#define MPI2_SATA_PT_REQ_PT_FLAGS_READ (0x0001) 176#define MPI2_SATA_PT_REQ_PT_FLAGS_READ (0x0001)
176 177
177/* values for SGLFlags field are in the SGL section of mpi2.h */ 178/* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
178 179
179 180
180/* SATA Passthrough Reply Message */ 181/* SATA Passthrough Reply Message */
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
index 686b09b81219..5c6e3a67bb94 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
@@ -6,7 +6,7 @@
6 * Title: MPI diagnostic tool structures and definitions 6 * Title: MPI diagnostic tool structures and definitions
7 * Creation Date: March 26, 2007 7 * Creation Date: March 26, 2007
8 * 8 *
9 * mpi2_tool.h Version: 02.00.04 9 * mpi2_tool.h Version: 02.00.05
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -22,6 +22,7 @@
22 * and reply messages. 22 * and reply messages.
23 * Added MPI2_DIAG_BUF_TYPE_EXTENDED. 23 * Added MPI2_DIAG_BUF_TYPE_EXTENDED.
24 * Incremented MPI2_DIAG_BUF_TYPE_COUNT. 24 * Incremented MPI2_DIAG_BUF_TYPE_COUNT.
25 * 05-12-10 02.00.05 Added Diagnostic Data Upload tool.
25 * -------------------------------------------------------------------------- 26 * --------------------------------------------------------------------------
26 */ 27 */
27 28
@@ -37,6 +38,7 @@
37/* defines for the Tools */ 38/* defines for the Tools */
38#define MPI2_TOOLBOX_CLEAN_TOOL (0x00) 39#define MPI2_TOOLBOX_CLEAN_TOOL (0x00)
39#define MPI2_TOOLBOX_MEMORY_MOVE_TOOL (0x01) 40#define MPI2_TOOLBOX_MEMORY_MOVE_TOOL (0x01)
41#define MPI2_TOOLBOX_DIAG_DATA_UPLOAD_TOOL (0x02)
40#define MPI2_TOOLBOX_ISTWI_READ_WRITE_TOOL (0x03) 42#define MPI2_TOOLBOX_ISTWI_READ_WRITE_TOOL (0x03)
41#define MPI2_TOOLBOX_BEACON_TOOL (0x05) 43#define MPI2_TOOLBOX_BEACON_TOOL (0x05)
42#define MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL (0x06) 44#define MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL (0x06)
@@ -102,8 +104,7 @@ typedef struct _MPI2_TOOLBOX_CLEAN_REQUEST
102* Toolbox Memory Move request 104* Toolbox Memory Move request
103****************************************************************************/ 105****************************************************************************/
104 106
105typedef struct _MPI2_TOOLBOX_MEM_MOVE_REQUEST 107typedef struct _MPI2_TOOLBOX_MEM_MOVE_REQUEST {
106{
107 U8 Tool; /* 0x00 */ 108 U8 Tool; /* 0x00 */
108 U8 Reserved1; /* 0x01 */ 109 U8 Reserved1; /* 0x01 */
109 U8 ChainOffset; /* 0x02 */ 110 U8 ChainOffset; /* 0x02 */
@@ -120,6 +121,44 @@ typedef struct _MPI2_TOOLBOX_MEM_MOVE_REQUEST
120 121
121 122
122/**************************************************************************** 123/****************************************************************************
124* Toolbox Diagnostic Data Upload request
125****************************************************************************/
126
127typedef struct _MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST {
128 U8 Tool; /* 0x00 */
129 U8 Reserved1; /* 0x01 */
130 U8 ChainOffset; /* 0x02 */
131 U8 Function; /* 0x03 */
132 U16 Reserved2; /* 0x04 */
133 U8 Reserved3; /* 0x06 */
134 U8 MsgFlags; /* 0x07 */
135 U8 VP_ID; /* 0x08 */
136 U8 VF_ID; /* 0x09 */
137 U16 Reserved4; /* 0x0A */
138 U8 SGLFlags; /* 0x0C */
139 U8 Reserved5; /* 0x0D */
140 U16 Reserved6; /* 0x0E */
141 U32 Flags; /* 0x10 */
142 U32 DataLength; /* 0x14 */
143 MPI2_SGE_SIMPLE_UNION SGL; /* 0x18 */
144} MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST,
145MPI2_POINTER PTR_MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST,
146Mpi2ToolboxDiagDataUploadRequest_t,
147MPI2_POINTER pMpi2ToolboxDiagDataUploadRequest_t;
148
149/* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
150
151
152typedef struct _MPI2_DIAG_DATA_UPLOAD_HEADER {
153 U32 DiagDataLength; /* 00h */
154 U8 FormatCode; /* 04h */
155 U8 Reserved1; /* 05h */
156 U16 Reserved2; /* 06h */
157} MPI2_DIAG_DATA_UPLOAD_HEADER, MPI2_POINTER PTR_MPI2_DIAG_DATA_UPLOAD_HEADER,
158Mpi2DiagDataUploadHeader_t, MPI2_POINTER pMpi2DiagDataUploadHeader_t;
159
160
161/****************************************************************************
123* Toolbox ISTWI Read Write Tool 162* Toolbox ISTWI Read Write Tool
124****************************************************************************/ 163****************************************************************************/
125 164
@@ -162,7 +201,7 @@ typedef struct _MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST {
162#define MPI2_TOOL_ISTWI_ACTION_RELEASE_BUS (0x11) 201#define MPI2_TOOL_ISTWI_ACTION_RELEASE_BUS (0x11)
163#define MPI2_TOOL_ISTWI_ACTION_RESET (0x12) 202#define MPI2_TOOL_ISTWI_ACTION_RESET (0x12)
164 203
165/* values for SGLFlags field are in the SGL section of mpi2.h */ 204/* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
166 205
167 206
168/* Toolbox ISTWI Read Write Tool reply message */ 207/* Toolbox ISTWI Read Write Tool reply message */
@@ -248,7 +287,7 @@ typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST {
248 Mpi2ToolboxDiagnosticCliRequest_t, 287 Mpi2ToolboxDiagnosticCliRequest_t,
249 MPI2_POINTER pMpi2ToolboxDiagnosticCliRequest_t; 288 MPI2_POINTER pMpi2ToolboxDiagnosticCliRequest_t;
250 289
251/* values for SGLFlags field are in the SGL section of mpi2.h */ 290/* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
252 291
253 292
254/* Toolbox Diagnostic CLI Tool reply message */ 293/* Toolbox Diagnostic CLI Tool reply message */
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 12faf64f91b0..b2a817055b8b 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -65,7 +65,6 @@
65static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS]; 65static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS];
66 66
67#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */ 67#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
68#define MPT2SAS_MAX_REQUEST_QUEUE 600 /* maximum controller queue depth */
69 68
70static int max_queue_depth = -1; 69static int max_queue_depth = -1;
71module_param(max_queue_depth, int, 0); 70module_param(max_queue_depth, int, 0);
@@ -79,6 +78,10 @@ static int msix_disable = -1;
79module_param(msix_disable, int, 0); 78module_param(msix_disable, int, 0);
80MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)"); 79MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
81 80
81static int missing_delay[2] = {-1, -1};
82module_param_array(missing_delay, int, NULL, 0);
83MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");
84
82/* diag_buffer_enable is bitwise 85/* diag_buffer_enable is bitwise
83 * bit 0 set = TRACE 86 * bit 0 set = TRACE
84 * bit 1 set = SNAPSHOT 87 * bit 1 set = SNAPSHOT
@@ -515,9 +518,6 @@ _base_display_event_data(struct MPT2SAS_ADAPTER *ioc,
515 case MPI2_EVENT_EVENT_CHANGE: 518 case MPI2_EVENT_EVENT_CHANGE:
516 desc = "Event Change"; 519 desc = "Event Change";
517 break; 520 break;
518 case MPI2_EVENT_TASK_SET_FULL:
519 desc = "Task Set Full";
520 break;
521 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE: 521 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
522 desc = "Device Status Change"; 522 desc = "Device Status Change";
523 break; 523 break;
@@ -758,7 +758,7 @@ _base_get_cb_idx(struct MPT2SAS_ADAPTER *ioc, u16 smid)
758 if (smid < ioc->internal_smid) { 758 if (smid < ioc->internal_smid) {
759 i = smid - ioc->hi_priority_smid; 759 i = smid - ioc->hi_priority_smid;
760 cb_idx = ioc->hpr_lookup[i].cb_idx; 760 cb_idx = ioc->hpr_lookup[i].cb_idx;
761 } else { 761 } else if (smid <= ioc->hba_queue_depth) {
762 i = smid - ioc->internal_smid; 762 i = smid - ioc->internal_smid;
763 cb_idx = ioc->internal_lookup[i].cb_idx; 763 cb_idx = ioc->internal_lookup[i].cb_idx;
764 } 764 }
@@ -848,6 +848,7 @@ _base_interrupt(int irq, void *bus_id)
848 return IRQ_NONE; 848 return IRQ_NONE;
849 849
850 completed_cmds = 0; 850 completed_cmds = 0;
851 cb_idx = 0xFF;
851 do { 852 do {
852 rd.word = rpf->Words; 853 rd.word = rpf->Words;
853 if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX) 854 if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
@@ -860,6 +861,9 @@ _base_interrupt(int irq, void *bus_id)
860 MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) { 861 MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
861 reply = le32_to_cpu 862 reply = le32_to_cpu
862 (rpf->AddressReply.ReplyFrameAddress); 863 (rpf->AddressReply.ReplyFrameAddress);
864 if (reply > ioc->reply_dma_max_address ||
865 reply < ioc->reply_dma_min_address)
866 reply = 0;
863 } else if (request_desript_type == 867 } else if (request_desript_type ==
864 MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER) 868 MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER)
865 goto next; 869 goto next;
@@ -1489,6 +1493,7 @@ mpt2sas_base_free_smid(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1489{ 1493{
1490 unsigned long flags; 1494 unsigned long flags;
1491 int i; 1495 int i;
1496 struct chain_tracker *chain_req, *next;
1492 1497
1493 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 1498 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1494 if (smid >= ioc->hi_priority_smid) { 1499 if (smid >= ioc->hi_priority_smid) {
@@ -1511,6 +1516,14 @@ mpt2sas_base_free_smid(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1511 1516
1512 /* scsiio queue */ 1517 /* scsiio queue */
1513 i = smid - 1; 1518 i = smid - 1;
1519 if (!list_empty(&ioc->scsi_lookup[i].chain_list)) {
1520 list_for_each_entry_safe(chain_req, next,
1521 &ioc->scsi_lookup[i].chain_list, tracker_list) {
1522 list_del_init(&chain_req->tracker_list);
1523 list_add_tail(&chain_req->tracker_list,
1524 &ioc->free_chain_list);
1525 }
1526 }
1514 ioc->scsi_lookup[i].cb_idx = 0xFF; 1527 ioc->scsi_lookup[i].cb_idx = 0xFF;
1515 ioc->scsi_lookup[i].scmd = NULL; 1528 ioc->scsi_lookup[i].scmd = NULL;
1516 list_add_tail(&ioc->scsi_lookup[i].tracker_list, 1529 list_add_tail(&ioc->scsi_lookup[i].tracker_list,
@@ -1819,6 +1832,97 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
1819} 1832}
1820 1833
1821/** 1834/**
1835 * _base_update_missing_delay - change the missing delay timers
1836 * @ioc: per adapter object
1837 * @device_missing_delay: amount of time till device is reported missing
1838 * @io_missing_delay: interval IO is returned when there is a missing device
1839 *
1840 * Return nothing.
1841 *
1842 * Passed on the command line, this function will modify the device missing
1843 * delay, as well as the io missing delay. This should be called at driver
1844 * load time.
1845 */
1846static void
1847_base_update_missing_delay(struct MPT2SAS_ADAPTER *ioc,
1848 u16 device_missing_delay, u8 io_missing_delay)
1849{
1850 u16 dmd, dmd_new, dmd_orignal;
1851 u8 io_missing_delay_original;
1852 u16 sz;
1853 Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
1854 Mpi2ConfigReply_t mpi_reply;
1855 u8 num_phys = 0;
1856 u16 ioc_status;
1857
1858 mpt2sas_config_get_number_hba_phys(ioc, &num_phys);
1859 if (!num_phys)
1860 return;
1861
1862 sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys *
1863 sizeof(Mpi2SasIOUnit1PhyData_t));
1864 sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
1865 if (!sas_iounit_pg1) {
1866 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
1867 ioc->name, __FILE__, __LINE__, __func__);
1868 goto out;
1869 }
1870 if ((mpt2sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
1871 sas_iounit_pg1, sz))) {
1872 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
1873 ioc->name, __FILE__, __LINE__, __func__);
1874 goto out;
1875 }
1876 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
1877 MPI2_IOCSTATUS_MASK;
1878 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
1879 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
1880 ioc->name, __FILE__, __LINE__, __func__);
1881 goto out;
1882 }
1883
1884 /* device missing delay */
1885 dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
1886 if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
1887 dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
1888 else
1889 dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
1890 dmd_orignal = dmd;
1891 if (device_missing_delay > 0x7F) {
1892 dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
1893 device_missing_delay;
1894 dmd = dmd / 16;
1895 dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
1896 } else
1897 dmd = device_missing_delay;
1898 sas_iounit_pg1->ReportDeviceMissingDelay = dmd;
1899
1900 /* io missing delay */
1901 io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
1902 sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;
1903
1904 if (!mpt2sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
1905 sz)) {
1906 if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
1907 dmd_new = (dmd &
1908 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
1909 else
1910 dmd_new =
1911 dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
1912 printk(MPT2SAS_INFO_FMT "device_missing_delay: old(%d), "
1913 "new(%d)\n", ioc->name, dmd_orignal, dmd_new);
1914 printk(MPT2SAS_INFO_FMT "ioc_missing_delay: old(%d), "
1915 "new(%d)\n", ioc->name, io_missing_delay_original,
1916 io_missing_delay);
1917 ioc->device_missing_delay = dmd_new;
1918 ioc->io_missing_delay = io_missing_delay;
1919 }
1920
1921out:
1922 kfree(sas_iounit_pg1);
1923}
1924
1925/**
1822 * _base_static_config_pages - static start of day config pages 1926 * _base_static_config_pages - static start of day config pages
1823 * @ioc: per adapter object 1927 * @ioc: per adapter object
1824 * 1928 *
@@ -1855,6 +1959,7 @@ _base_static_config_pages(struct MPT2SAS_ADAPTER *ioc)
1855 MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING; 1959 MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
1856 ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags); 1960 ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
1857 mpt2sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1); 1961 mpt2sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
1962
1858} 1963}
1859 1964
1860/** 1965/**
@@ -1868,6 +1973,8 @@ _base_static_config_pages(struct MPT2SAS_ADAPTER *ioc)
1868static void 1973static void
1869_base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc) 1974_base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc)
1870{ 1975{
1976 int i;
1977
1871 dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, 1978 dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1872 __func__)); 1979 __func__));
1873 1980
@@ -1932,6 +2039,20 @@ _base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc)
1932 } 2039 }
1933 kfree(ioc->hpr_lookup); 2040 kfree(ioc->hpr_lookup);
1934 kfree(ioc->internal_lookup); 2041 kfree(ioc->internal_lookup);
2042 if (ioc->chain_lookup) {
2043 for (i = 0; i < ioc->chain_depth; i++) {
2044 if (ioc->chain_lookup[i].chain_buffer)
2045 pci_pool_free(ioc->chain_dma_pool,
2046 ioc->chain_lookup[i].chain_buffer,
2047 ioc->chain_lookup[i].chain_buffer_dma);
2048 }
2049 if (ioc->chain_dma_pool)
2050 pci_pool_destroy(ioc->chain_dma_pool);
2051 }
2052 if (ioc->chain_lookup) {
2053 free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
2054 ioc->chain_lookup = NULL;
2055 }
1935} 2056}
1936 2057
1937 2058
@@ -1953,6 +2074,7 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
1953 u32 sz, total_sz; 2074 u32 sz, total_sz;
1954 u32 retry_sz; 2075 u32 retry_sz;
1955 u16 max_request_credit; 2076 u16 max_request_credit;
2077 int i;
1956 2078
1957 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, 2079 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1958 __func__)); 2080 __func__));
@@ -1970,14 +2092,11 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
1970 } 2092 }
1971 2093
1972 /* command line tunables for max controller queue depth */ 2094 /* command line tunables for max controller queue depth */
1973 if (max_queue_depth != -1) { 2095 if (max_queue_depth != -1)
1974 max_request_credit = (max_queue_depth < facts->RequestCredit) 2096 max_request_credit = (max_queue_depth < facts->RequestCredit)
1975 ? max_queue_depth : facts->RequestCredit; 2097 ? max_queue_depth : facts->RequestCredit;
1976 } else { 2098 else
1977 max_request_credit = (facts->RequestCredit > 2099 max_request_credit = facts->RequestCredit;
1978 MPT2SAS_MAX_REQUEST_QUEUE) ? MPT2SAS_MAX_REQUEST_QUEUE :
1979 facts->RequestCredit;
1980 }
1981 2100
1982 ioc->hba_queue_depth = max_request_credit; 2101 ioc->hba_queue_depth = max_request_credit;
1983 ioc->hi_priority_depth = facts->HighPriorityCredit; 2102 ioc->hi_priority_depth = facts->HighPriorityCredit;
@@ -2083,7 +2202,7 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
2083 * "frame for smid=0 2202 * "frame for smid=0
2084 */ 2203 */
2085 ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth; 2204 ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
2086 sz = ((ioc->scsiio_depth + 1 + ioc->chain_depth) * ioc->request_sz); 2205 sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);
2087 2206
2088 /* hi-priority queue */ 2207 /* hi-priority queue */
2089 sz += (ioc->hi_priority_depth * ioc->request_sz); 2208 sz += (ioc->hi_priority_depth * ioc->request_sz);
@@ -2124,19 +2243,11 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
2124 ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth * 2243 ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
2125 ioc->request_sz); 2244 ioc->request_sz);
2126 2245
2127 ioc->chain = ioc->internal + (ioc->internal_depth *
2128 ioc->request_sz);
2129 ioc->chain_dma = ioc->internal_dma + (ioc->internal_depth *
2130 ioc->request_sz);
2131 2246
2132 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "request pool(0x%p): " 2247 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "request pool(0x%p): "
2133 "depth(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name, 2248 "depth(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name,
2134 ioc->request, ioc->hba_queue_depth, ioc->request_sz, 2249 ioc->request, ioc->hba_queue_depth, ioc->request_sz,
2135 (ioc->hba_queue_depth * ioc->request_sz)/1024)); 2250 (ioc->hba_queue_depth * ioc->request_sz)/1024));
2136 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "chain pool(0x%p): depth"
2137 "(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name, ioc->chain,
2138 ioc->chain_depth, ioc->request_sz, ((ioc->chain_depth *
2139 ioc->request_sz))/1024));
2140 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "request pool: dma(0x%llx)\n", 2251 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "request pool: dma(0x%llx)\n",
2141 ioc->name, (unsigned long long) ioc->request_dma)); 2252 ioc->name, (unsigned long long) ioc->request_dma));
2142 total_sz += sz; 2253 total_sz += sz;
@@ -2155,6 +2266,38 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
2155 "depth(%d)\n", ioc->name, ioc->request, 2266 "depth(%d)\n", ioc->name, ioc->request,
2156 ioc->scsiio_depth)); 2267 ioc->scsiio_depth));
2157 2268
2269 /* loop till the allocation succeeds */
2270 do {
2271 sz = ioc->chain_depth * sizeof(struct chain_tracker);
2272 ioc->chain_pages = get_order(sz);
2273 ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
2274 GFP_KERNEL, ioc->chain_pages);
2275 if (ioc->chain_lookup == NULL)
2276 ioc->chain_depth -= 100;
2277 } while (ioc->chain_lookup == NULL);
2278 ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
2279 ioc->request_sz, 16, 0);
2280 if (!ioc->chain_dma_pool) {
2281 printk(MPT2SAS_ERR_FMT "chain_dma_pool: pci_pool_create "
2282 "failed\n", ioc->name);
2283 goto out;
2284 }
2285 for (i = 0; i < ioc->chain_depth; i++) {
2286 ioc->chain_lookup[i].chain_buffer = pci_pool_alloc(
2287 ioc->chain_dma_pool , GFP_KERNEL,
2288 &ioc->chain_lookup[i].chain_buffer_dma);
2289 if (!ioc->chain_lookup[i].chain_buffer) {
2290 ioc->chain_depth = i;
2291 goto chain_done;
2292 }
2293 total_sz += ioc->request_sz;
2294 }
2295chain_done:
2296 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "chain pool depth"
2297 "(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name,
2298 ioc->chain_depth, ioc->request_sz, ((ioc->chain_depth *
2299 ioc->request_sz))/1024));
2300
2158 /* initialize hi-priority queue smid's */ 2301 /* initialize hi-priority queue smid's */
2159 ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth, 2302 ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
2160 sizeof(struct request_tracker), GFP_KERNEL); 2303 sizeof(struct request_tracker), GFP_KERNEL);
@@ -2221,6 +2364,8 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
2221 ioc->name); 2364 ioc->name);
2222 goto out; 2365 goto out;
2223 } 2366 }
2367 ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
2368 ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
2224 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply pool(0x%p): depth" 2369 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply pool(0x%p): depth"
2225 "(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name, ioc->reply, 2370 "(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name, ioc->reply,
2226 ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024)); 2371 ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024));
@@ -2302,7 +2447,6 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
2302 return 0; 2447 return 0;
2303 2448
2304 out: 2449 out:
2305 _base_release_memory_pools(ioc);
2306 return -ENOMEM; 2450 return -ENOMEM;
2307} 2451}
2308 2452
@@ -3485,6 +3629,7 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3485 INIT_LIST_HEAD(&ioc->free_list); 3629 INIT_LIST_HEAD(&ioc->free_list);
3486 smid = 1; 3630 smid = 1;
3487 for (i = 0; i < ioc->scsiio_depth; i++, smid++) { 3631 for (i = 0; i < ioc->scsiio_depth; i++, smid++) {
3632 INIT_LIST_HEAD(&ioc->scsi_lookup[i].chain_list);
3488 ioc->scsi_lookup[i].cb_idx = 0xFF; 3633 ioc->scsi_lookup[i].cb_idx = 0xFF;
3489 ioc->scsi_lookup[i].smid = smid; 3634 ioc->scsi_lookup[i].smid = smid;
3490 ioc->scsi_lookup[i].scmd = NULL; 3635 ioc->scsi_lookup[i].scmd = NULL;
@@ -3511,6 +3656,13 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3511 list_add_tail(&ioc->internal_lookup[i].tracker_list, 3656 list_add_tail(&ioc->internal_lookup[i].tracker_list,
3512 &ioc->internal_free_list); 3657 &ioc->internal_free_list);
3513 } 3658 }
3659
3660 /* chain pool */
3661 INIT_LIST_HEAD(&ioc->free_chain_list);
3662 for (i = 0; i < ioc->chain_depth; i++)
3663 list_add_tail(&ioc->chain_lookup[i].tracker_list,
3664 &ioc->free_chain_list);
3665
3514 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 3666 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3515 3667
3516 /* initialize Reply Free Queue */ 3668 /* initialize Reply Free Queue */
@@ -3708,12 +3860,15 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
3708 _base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME); 3860 _base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
3709 _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK); 3861 _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
3710 _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS); 3862 _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
3711 _base_unmask_events(ioc, MPI2_EVENT_TASK_SET_FULL);
3712 _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED); 3863 _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
3713 r = _base_make_ioc_operational(ioc, CAN_SLEEP); 3864 r = _base_make_ioc_operational(ioc, CAN_SLEEP);
3714 if (r) 3865 if (r)
3715 goto out_free_resources; 3866 goto out_free_resources;
3716 3867
3868 if (missing_delay[0] != -1 && missing_delay[1] != -1)
3869 _base_update_missing_delay(ioc, missing_delay[0],
3870 missing_delay[1]);
3871
3717 mpt2sas_base_start_watchdog(ioc); 3872 mpt2sas_base_start_watchdog(ioc);
3718 return 0; 3873 return 0;
3719 3874
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index 0b15a8bdebfc..283568c6fb04 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -69,8 +69,8 @@
69#define MPT2SAS_DRIVER_NAME "mpt2sas" 69#define MPT2SAS_DRIVER_NAME "mpt2sas"
70#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>" 70#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
71#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" 71#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
72#define MPT2SAS_DRIVER_VERSION "06.100.00.00" 72#define MPT2SAS_DRIVER_VERSION "07.100.00.00"
73#define MPT2SAS_MAJOR_VERSION 06 73#define MPT2SAS_MAJOR_VERSION 07
74#define MPT2SAS_MINOR_VERSION 100 74#define MPT2SAS_MINOR_VERSION 100
75#define MPT2SAS_BUILD_VERSION 00 75#define MPT2SAS_BUILD_VERSION 00
76#define MPT2SAS_RELEASE_VERSION 00 76#define MPT2SAS_RELEASE_VERSION 00
@@ -419,6 +419,18 @@ enum reset_type {
419}; 419};
420 420
421/** 421/**
422 * struct chain_tracker - firmware chain tracker
423 * @chain_buffer: chain buffer
424 * @chain_buffer_dma: physical address
425 * @tracker_list: list of free request (ioc->free_chain_list)
426 */
427struct chain_tracker {
428 void *chain_buffer;
429 dma_addr_t chain_buffer_dma;
430 struct list_head tracker_list;
431};
432
433/**
422 * struct request_tracker - firmware request tracker 434 * struct request_tracker - firmware request tracker
423 * @smid: system message id 435 * @smid: system message id
424 * @scmd: scsi request pointer 436 * @scmd: scsi request pointer
@@ -430,6 +442,7 @@ struct request_tracker {
430 u16 smid; 442 u16 smid;
431 struct scsi_cmnd *scmd; 443 struct scsi_cmnd *scmd;
432 u8 cb_idx; 444 u8 cb_idx;
445 struct list_head chain_list;
433 struct list_head tracker_list; 446 struct list_head tracker_list;
434}; 447};
435 448
@@ -704,8 +717,10 @@ struct MPT2SAS_ADAPTER {
704 wait_queue_head_t reset_wq; 717 wait_queue_head_t reset_wq;
705 718
706 /* chain */ 719 /* chain */
707 u8 *chain; 720 struct chain_tracker *chain_lookup;
708 dma_addr_t chain_dma; 721 struct list_head free_chain_list;
722 struct dma_pool *chain_dma_pool;
723 ulong chain_pages;
709 u16 max_sges_in_main_message; 724 u16 max_sges_in_main_message;
710 u16 max_sges_in_chain_message; 725 u16 max_sges_in_chain_message;
711 u16 chains_needed_per_io; 726 u16 chains_needed_per_io;
@@ -737,6 +752,8 @@ struct MPT2SAS_ADAPTER {
737 u16 reply_sz; 752 u16 reply_sz;
738 u8 *reply; 753 u8 *reply;
739 dma_addr_t reply_dma; 754 dma_addr_t reply_dma;
755 u32 reply_dma_max_address;
756 u32 reply_dma_min_address;
740 struct dma_pool *reply_dma_pool; 757 struct dma_pool *reply_dma_pool;
741 758
742 /* reply free queue */ 759 /* reply free queue */
@@ -832,6 +849,8 @@ int mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle,
832 ulong timeout, struct scsi_cmnd *scmd); 849 ulong timeout, struct scsi_cmnd *scmd);
833void mpt2sas_scsih_set_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle); 850void mpt2sas_scsih_set_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle);
834void mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle); 851void mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle);
852void mpt2sas_expander_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address);
853void mpt2sas_device_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address);
835struct _sas_node *mpt2sas_scsih_expander_find_by_handle(struct MPT2SAS_ADAPTER *ioc, 854struct _sas_node *mpt2sas_scsih_expander_find_by_handle(struct MPT2SAS_ADAPTER *ioc,
836 u16 handle); 855 u16 handle);
837struct _sas_node *mpt2sas_scsih_expander_find_by_sas_address(struct MPT2SAS_ADAPTER 856struct _sas_node *mpt2sas_scsih_expander_find_by_sas_address(struct MPT2SAS_ADAPTER
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index 40cb8aeb21b1..e92b77af5484 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -81,6 +81,7 @@ enum block_state {
81 BLOCKING, 81 BLOCKING,
82}; 82};
83 83
84#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
84/** 85/**
85 * _ctl_sas_device_find_by_handle - sas device search 86 * _ctl_sas_device_find_by_handle - sas device search
86 * @ioc: per adapter object 87 * @ioc: per adapter object
@@ -107,7 +108,6 @@ _ctl_sas_device_find_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle)
107 return r; 108 return r;
108} 109}
109 110
110#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
111/** 111/**
112 * _ctl_display_some_debug - debug routine 112 * _ctl_display_some_debug - debug routine
113 * @ioc: per adapter object 113 * @ioc: per adapter object
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 1a96a00418a4..eda347c57979 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -931,31 +931,32 @@ _scsih_scsi_lookup_find_by_lun(struct MPT2SAS_ADAPTER *ioc, int id,
931} 931}
932 932
933/** 933/**
934 * _scsih_get_chain_buffer_dma - obtain block of chains (dma address) 934 * _scsih_get_chain_buffer_tracker - obtain chain tracker
935 * @ioc: per adapter object 935 * @ioc: per adapter object
936 * @smid: system request message index 936 * @smid: smid associated to an IO request
937 * 937 *
938 * Returns phys pointer to chain buffer. 938 * Returns chain tracker(from ioc->free_chain_list)
939 */ 939 */
940static dma_addr_t 940static struct chain_tracker *
941_scsih_get_chain_buffer_dma(struct MPT2SAS_ADAPTER *ioc, u16 smid) 941_scsih_get_chain_buffer_tracker(struct MPT2SAS_ADAPTER *ioc, u16 smid)
942{ 942{
943 return ioc->chain_dma + ((smid - 1) * (ioc->request_sz * 943 struct chain_tracker *chain_req;
944 ioc->chains_needed_per_io)); 944 unsigned long flags;
945}
946 945
947/** 946 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
948 * _scsih_get_chain_buffer - obtain block of chains assigned to a mf request 947 if (list_empty(&ioc->free_chain_list)) {
949 * @ioc: per adapter object 948 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
950 * @smid: system request message index 949 printk(MPT2SAS_WARN_FMT "chain buffers not available\n",
951 * 950 ioc->name);
952 * Returns virt pointer to chain buffer. 951 return NULL;
953 */ 952 }
954static void * 953 chain_req = list_entry(ioc->free_chain_list.next,
955_scsih_get_chain_buffer(struct MPT2SAS_ADAPTER *ioc, u16 smid) 954 struct chain_tracker, tracker_list);
956{ 955 list_del_init(&chain_req->tracker_list);
957 return (void *)(ioc->chain + ((smid - 1) * (ioc->request_sz * 956 list_add_tail(&chain_req->tracker_list,
958 ioc->chains_needed_per_io))); 957 &ioc->scsi_lookup[smid - 1].chain_list);
958 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
959 return chain_req;
959} 960}
960 961
961/** 962/**
@@ -986,6 +987,7 @@ _scsih_build_scatter_gather(struct MPT2SAS_ADAPTER *ioc,
986 u32 sgl_flags; 987 u32 sgl_flags;
987 u32 sgl_flags_last_element; 988 u32 sgl_flags_last_element;
988 u32 sgl_flags_end_buffer; 989 u32 sgl_flags_end_buffer;
990 struct chain_tracker *chain_req;
989 991
990 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid); 992 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
991 993
@@ -1033,8 +1035,11 @@ _scsih_build_scatter_gather(struct MPT2SAS_ADAPTER *ioc,
1033 1035
1034 /* initializing the chain flags and pointers */ 1036 /* initializing the chain flags and pointers */
1035 chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT; 1037 chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT;
1036 chain = _scsih_get_chain_buffer(ioc, smid); 1038 chain_req = _scsih_get_chain_buffer_tracker(ioc, smid);
1037 chain_dma = _scsih_get_chain_buffer_dma(ioc, smid); 1039 if (!chain_req)
1040 return -1;
1041 chain = chain_req->chain_buffer;
1042 chain_dma = chain_req->chain_buffer_dma;
1038 do { 1043 do {
1039 sges_in_segment = (sges_left <= 1044 sges_in_segment = (sges_left <=
1040 ioc->max_sges_in_chain_message) ? sges_left : 1045 ioc->max_sges_in_chain_message) ? sges_left :
@@ -1070,8 +1075,11 @@ _scsih_build_scatter_gather(struct MPT2SAS_ADAPTER *ioc,
1070 sges_in_segment--; 1075 sges_in_segment--;
1071 } 1076 }
1072 1077
1073 chain_dma += ioc->request_sz; 1078 chain_req = _scsih_get_chain_buffer_tracker(ioc, smid);
1074 chain += ioc->request_sz; 1079 if (!chain_req)
1080 return -1;
1081 chain = chain_req->chain_buffer;
1082 chain_dma = chain_req->chain_buffer_dma;
1075 } while (1); 1083 } while (1);
1076 1084
1077 1085
@@ -1094,28 +1102,24 @@ _scsih_build_scatter_gather(struct MPT2SAS_ADAPTER *ioc,
1094} 1102}
1095 1103
1096/** 1104/**
1097 * _scsih_change_queue_depth - setting device queue depth 1105 * _scsih_adjust_queue_depth - setting device queue depth
1098 * @sdev: scsi device struct 1106 * @sdev: scsi device struct
1099 * @qdepth: requested queue depth 1107 * @qdepth: requested queue depth
1100 * @reason: calling context
1101 * 1108 *
1102 * Returns queue depth. 1109 *
1110 * Returns nothing
1103 */ 1111 */
1104static int 1112static void
1105_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) 1113_scsih_adjust_queue_depth(struct scsi_device *sdev, int qdepth)
1106{ 1114{
1107 struct Scsi_Host *shost = sdev->host; 1115 struct Scsi_Host *shost = sdev->host;
1108 int max_depth; 1116 int max_depth;
1109 int tag_type;
1110 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); 1117 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
1111 struct MPT2SAS_DEVICE *sas_device_priv_data; 1118 struct MPT2SAS_DEVICE *sas_device_priv_data;
1112 struct MPT2SAS_TARGET *sas_target_priv_data; 1119 struct MPT2SAS_TARGET *sas_target_priv_data;
1113 struct _sas_device *sas_device; 1120 struct _sas_device *sas_device;
1114 unsigned long flags; 1121 unsigned long flags;
1115 1122
1116 if (reason != SCSI_QDEPTH_DEFAULT)
1117 return -EOPNOTSUPP;
1118
1119 max_depth = shost->can_queue; 1123 max_depth = shost->can_queue;
1120 1124
1121 /* limit max device queue for SATA to 32 */ 1125 /* limit max device queue for SATA to 32 */
@@ -1141,8 +1145,27 @@ _scsih_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
1141 max_depth = 1; 1145 max_depth = 1;
1142 if (qdepth > max_depth) 1146 if (qdepth > max_depth)
1143 qdepth = max_depth; 1147 qdepth = max_depth;
1144 tag_type = (qdepth == 1) ? 0 : MSG_SIMPLE_TAG; 1148 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
1145 scsi_adjust_queue_depth(sdev, tag_type, qdepth); 1149}
1150
1151/**
1152 * _scsih_change_queue_depth - setting device queue depth
1153 * @sdev: scsi device struct
1154 * @qdepth: requested queue depth
1155 * @reason: SCSI_QDEPTH_DEFAULT/SCSI_QDEPTH_QFULL/SCSI_QDEPTH_RAMP_UP
1156 * (see include/scsi/scsi_host.h for definition)
1157 *
1158 * Returns queue depth.
1159 */
1160static int
1161_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
1162{
1163 if (reason == SCSI_QDEPTH_DEFAULT || reason == SCSI_QDEPTH_RAMP_UP)
1164 _scsih_adjust_queue_depth(sdev, qdepth);
1165 else if (reason == SCSI_QDEPTH_QFULL)
1166 scsi_track_queue_full(sdev, qdepth);
1167 else
1168 return -EOPNOTSUPP;
1146 1169
1147 if (sdev->inquiry_len > 7) 1170 if (sdev->inquiry_len > 7)
1148 sdev_printk(KERN_INFO, sdev, "qdepth(%d), tagged(%d), " 1171 sdev_printk(KERN_INFO, sdev, "qdepth(%d), tagged(%d), "
@@ -2251,13 +2274,13 @@ _scsih_dev_reset(struct scsi_cmnd *scmd)
2251 2274
2252 struct scsi_target *starget = scmd->device->sdev_target; 2275 struct scsi_target *starget = scmd->device->sdev_target;
2253 2276
2254 starget_printk(KERN_INFO, starget, "attempting target reset! " 2277 starget_printk(KERN_INFO, starget, "attempting device reset! "
2255 "scmd(%p)\n", scmd); 2278 "scmd(%p)\n", scmd);
2256 _scsih_tm_display_info(ioc, scmd); 2279 _scsih_tm_display_info(ioc, scmd);
2257 2280
2258 sas_device_priv_data = scmd->device->hostdata; 2281 sas_device_priv_data = scmd->device->hostdata;
2259 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { 2282 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
2260 starget_printk(KERN_INFO, starget, "target been deleted! " 2283 starget_printk(KERN_INFO, starget, "device been deleted! "
2261 "scmd(%p)\n", scmd); 2284 "scmd(%p)\n", scmd);
2262 scmd->result = DID_NO_CONNECT << 16; 2285 scmd->result = DID_NO_CONNECT << 16;
2263 scmd->scsi_done(scmd); 2286 scmd->scsi_done(scmd);
@@ -2576,9 +2599,9 @@ _scsih_block_io_to_children_attached_to_ex(struct MPT2SAS_ADAPTER *ioc,
2576 &sas_expander->sas_port_list, port_list) { 2599 &sas_expander->sas_port_list, port_list) {
2577 2600
2578 if (mpt2sas_port->remote_identify.device_type == 2601 if (mpt2sas_port->remote_identify.device_type ==
2579 MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER || 2602 SAS_EDGE_EXPANDER_DEVICE ||
2580 mpt2sas_port->remote_identify.device_type == 2603 mpt2sas_port->remote_identify.device_type ==
2581 MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER) { 2604 SAS_FANOUT_EXPANDER_DEVICE) {
2582 2605
2583 spin_lock_irqsave(&ioc->sas_node_lock, flags); 2606 spin_lock_irqsave(&ioc->sas_node_lock, flags);
2584 expander_sibling = 2607 expander_sibling =
@@ -2715,9 +2738,10 @@ static u8
2715_scsih_sas_control_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid, 2738_scsih_sas_control_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid,
2716 u8 msix_index, u32 reply) 2739 u8 msix_index, u32 reply)
2717{ 2740{
2741#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
2718 Mpi2SasIoUnitControlReply_t *mpi_reply = 2742 Mpi2SasIoUnitControlReply_t *mpi_reply =
2719 mpt2sas_base_get_reply_virt_addr(ioc, reply); 2743 mpt2sas_base_get_reply_virt_addr(ioc, reply);
2720 2744#endif
2721 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT 2745 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
2722 "sc_complete:handle(0x%04x), (open) " 2746 "sc_complete:handle(0x%04x), (open) "
2723 "smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n", 2747 "smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
@@ -3963,6 +3987,7 @@ _scsih_sas_host_refresh(struct MPT2SAS_ADAPTER *ioc)
3963 Mpi2ConfigReply_t mpi_reply; 3987 Mpi2ConfigReply_t mpi_reply;
3964 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL; 3988 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
3965 u16 attached_handle; 3989 u16 attached_handle;
3990 u8 link_rate;
3966 3991
3967 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT 3992 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT
3968 "updating handles for sas_host(0x%016llx)\n", 3993 "updating handles for sas_host(0x%016llx)\n",
@@ -3984,15 +4009,17 @@ _scsih_sas_host_refresh(struct MPT2SAS_ADAPTER *ioc)
3984 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) 4009 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
3985 goto out; 4010 goto out;
3986 for (i = 0; i < ioc->sas_hba.num_phys ; i++) { 4011 for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
4012 link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4;
3987 if (i == 0) 4013 if (i == 0)
3988 ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0-> 4014 ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
3989 PhyData[0].ControllerDevHandle); 4015 PhyData[0].ControllerDevHandle);
3990 ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle; 4016 ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
3991 attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i]. 4017 attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
3992 AttachedDevHandle); 4018 AttachedDevHandle);
4019 if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
4020 link_rate = MPI2_SAS_NEG_LINK_RATE_1_5;
3993 mpt2sas_transport_update_links(ioc, ioc->sas_hba.sas_address, 4021 mpt2sas_transport_update_links(ioc, ioc->sas_hba.sas_address,
3994 attached_handle, i, sas_iounit_pg0->PhyData[i]. 4022 attached_handle, i, link_rate);
3995 NegotiatedLinkRate >> 4);
3996 } 4023 }
3997 out: 4024 out:
3998 kfree(sas_iounit_pg0); 4025 kfree(sas_iounit_pg0);
@@ -4336,14 +4363,14 @@ _scsih_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
4336} 4363}
4337 4364
4338/** 4365/**
4339 * _scsih_expander_remove - removing expander object 4366 * mpt2sas_expander_remove - removing expander object
4340 * @ioc: per adapter object 4367 * @ioc: per adapter object
4341 * @sas_address: expander sas_address 4368 * @sas_address: expander sas_address
4342 * 4369 *
4343 * Return nothing. 4370 * Return nothing.
4344 */ 4371 */
4345static void 4372void
4346_scsih_expander_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address) 4373mpt2sas_expander_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address)
4347{ 4374{
4348 struct _sas_node *sas_expander; 4375 struct _sas_node *sas_expander;
4349 unsigned long flags; 4376 unsigned long flags;
@@ -4354,6 +4381,11 @@ _scsih_expander_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address)
4354 spin_lock_irqsave(&ioc->sas_node_lock, flags); 4381 spin_lock_irqsave(&ioc->sas_node_lock, flags);
4355 sas_expander = mpt2sas_scsih_expander_find_by_sas_address(ioc, 4382 sas_expander = mpt2sas_scsih_expander_find_by_sas_address(ioc,
4356 sas_address); 4383 sas_address);
4384 if (!sas_expander) {
4385 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
4386 return;
4387 }
4388 list_del(&sas_expander->list);
4357 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 4389 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
4358 _scsih_expander_node_remove(ioc, sas_expander); 4390 _scsih_expander_node_remove(ioc, sas_expander);
4359} 4391}
@@ -4643,6 +4675,33 @@ _scsih_remove_device(struct MPT2SAS_ADAPTER *ioc,
4643 sas_device_backup.sas_address)); 4675 sas_device_backup.sas_address));
4644} 4676}
4645 4677
4678/**
4679 * mpt2sas_device_remove - removing device object
4680 * @ioc: per adapter object
4681 * @sas_address: expander sas_address
4682 *
4683 * Return nothing.
4684 */
4685void
4686mpt2sas_device_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address)
4687{
4688 struct _sas_device *sas_device;
4689 unsigned long flags;
4690
4691 if (ioc->shost_recovery)
4692 return;
4693
4694 spin_lock_irqsave(&ioc->sas_device_lock, flags);
4695 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
4696 sas_address);
4697 if (!sas_device) {
4698 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
4699 return;
4700 }
4701 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
4702 _scsih_remove_device(ioc, sas_device);
4703}
4704
4646#ifdef CONFIG_SCSI_MPT2SAS_LOGGING 4705#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
4647/** 4706/**
4648 * _scsih_sas_topology_change_event_debug - debug for topology event 4707 * _scsih_sas_topology_change_event_debug - debug for topology event
@@ -4737,7 +4796,7 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc,
4737 int i; 4796 int i;
4738 u16 parent_handle, handle; 4797 u16 parent_handle, handle;
4739 u16 reason_code; 4798 u16 reason_code;
4740 u8 phy_number; 4799 u8 phy_number, max_phys;
4741 struct _sas_node *sas_expander; 4800 struct _sas_node *sas_expander;
4742 struct _sas_device *sas_device; 4801 struct _sas_device *sas_device;
4743 u64 sas_address; 4802 u64 sas_address;
@@ -4775,11 +4834,13 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc,
4775 sas_expander = mpt2sas_scsih_expander_find_by_handle(ioc, 4834 sas_expander = mpt2sas_scsih_expander_find_by_handle(ioc,
4776 parent_handle); 4835 parent_handle);
4777 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 4836 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
4778 if (sas_expander) 4837 if (sas_expander) {
4779 sas_address = sas_expander->sas_address; 4838 sas_address = sas_expander->sas_address;
4780 else if (parent_handle < ioc->sas_hba.num_phys) 4839 max_phys = sas_expander->num_phys;
4840 } else if (parent_handle < ioc->sas_hba.num_phys) {
4781 sas_address = ioc->sas_hba.sas_address; 4841 sas_address = ioc->sas_hba.sas_address;
4782 else 4842 max_phys = ioc->sas_hba.num_phys;
4843 } else
4783 return; 4844 return;
4784 4845
4785 /* handle siblings events */ 4846 /* handle siblings events */
@@ -4793,6 +4854,8 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc,
4793 ioc->pci_error_recovery) 4854 ioc->pci_error_recovery)
4794 return; 4855 return;
4795 phy_number = event_data->StartPhyNum + i; 4856 phy_number = event_data->StartPhyNum + i;
4857 if (phy_number >= max_phys)
4858 continue;
4796 reason_code = event_data->PHY[i].PhyStatus & 4859 reason_code = event_data->PHY[i].PhyStatus &
4797 MPI2_EVENT_SAS_TOPO_RC_MASK; 4860 MPI2_EVENT_SAS_TOPO_RC_MASK;
4798 if ((event_data->PHY[i].PhyStatus & 4861 if ((event_data->PHY[i].PhyStatus &
@@ -4844,7 +4907,7 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc,
4844 /* handle expander removal */ 4907 /* handle expander removal */
4845 if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING && 4908 if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
4846 sas_expander) 4909 sas_expander)
4847 _scsih_expander_remove(ioc, sas_address); 4910 mpt2sas_expander_remove(ioc, sas_address);
4848 4911
4849} 4912}
4850 4913
@@ -5773,90 +5836,6 @@ _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc,
5773} 5836}
5774 5837
5775/** 5838/**
5776 * _scsih_task_set_full - handle task set full
5777 * @ioc: per adapter object
5778 * @fw_event: The fw_event_work object
5779 * Context: user.
5780 *
5781 * Throttle back qdepth.
5782 */
5783static void
5784_scsih_task_set_full(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work
5785 *fw_event)
5786{
5787 unsigned long flags;
5788 struct _sas_device *sas_device;
5789 static struct _raid_device *raid_device;
5790 struct scsi_device *sdev;
5791 int depth;
5792 u16 current_depth;
5793 u16 handle;
5794 int id, channel;
5795 u64 sas_address;
5796 Mpi2EventDataTaskSetFull_t *event_data = fw_event->event_data;
5797
5798 current_depth = le16_to_cpu(event_data->CurrentDepth);
5799 handle = le16_to_cpu(event_data->DevHandle);
5800 spin_lock_irqsave(&ioc->sas_device_lock, flags);
5801 sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
5802 if (!sas_device) {
5803 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5804 return;
5805 }
5806 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5807 id = sas_device->id;
5808 channel = sas_device->channel;
5809 sas_address = sas_device->sas_address;
5810
5811 /* if hidden raid component, then change to volume characteristics */
5812 if (test_bit(handle, ioc->pd_handles) && sas_device->volume_handle) {
5813 spin_lock_irqsave(&ioc->raid_device_lock, flags);
5814 raid_device = _scsih_raid_device_find_by_handle(
5815 ioc, sas_device->volume_handle);
5816 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
5817 if (raid_device) {
5818 id = raid_device->id;
5819 channel = raid_device->channel;
5820 handle = raid_device->handle;
5821 sas_address = raid_device->wwid;
5822 }
5823 }
5824
5825 if (ioc->logging_level & MPT_DEBUG_TASK_SET_FULL)
5826 starget_printk(KERN_INFO, sas_device->starget, "task set "
5827 "full: handle(0x%04x), sas_addr(0x%016llx), depth(%d)\n",
5828 handle, (unsigned long long)sas_address, current_depth);
5829
5830 shost_for_each_device(sdev, ioc->shost) {
5831 if (sdev->id == id && sdev->channel == channel) {
5832 if (current_depth > sdev->queue_depth) {
5833 if (ioc->logging_level &
5834 MPT_DEBUG_TASK_SET_FULL)
5835 sdev_printk(KERN_INFO, sdev, "strange "
5836 "observation, the queue depth is"
5837 " (%d) meanwhile fw queue depth "
5838 "is (%d)\n", sdev->queue_depth,
5839 current_depth);
5840 continue;
5841 }
5842 depth = scsi_track_queue_full(sdev,
5843 current_depth - 1);
5844 if (depth > 0)
5845 sdev_printk(KERN_INFO, sdev, "Queue depth "
5846 "reduced to (%d)\n", depth);
5847 else if (depth < 0)
5848 sdev_printk(KERN_INFO, sdev, "Tagged Command "
5849 "Queueing is being disabled\n");
5850 else if (depth == 0)
5851 if (ioc->logging_level &
5852 MPT_DEBUG_TASK_SET_FULL)
5853 sdev_printk(KERN_INFO, sdev,
5854 "Queue depth not changed yet\n");
5855 }
5856 }
5857}
5858
5859/**
5860 * _scsih_prep_device_scan - initialize parameters prior to device scan 5839 * _scsih_prep_device_scan - initialize parameters prior to device scan
5861 * @ioc: per adapter object 5840 * @ioc: per adapter object
5862 * 5841 *
@@ -6219,7 +6198,7 @@ _scsih_remove_unresponding_sas_devices(struct MPT2SAS_ADAPTER *ioc)
6219 sas_expander->responding = 0; 6198 sas_expander->responding = 0;
6220 continue; 6199 continue;
6221 } 6200 }
6222 _scsih_expander_remove(ioc, sas_expander->sas_address); 6201 mpt2sas_expander_remove(ioc, sas_expander->sas_address);
6223 goto retry_expander_search; 6202 goto retry_expander_search;
6224 } 6203 }
6225} 6204}
@@ -6343,9 +6322,6 @@ _firmware_event_work(struct work_struct *work)
6343 case MPI2_EVENT_IR_OPERATION_STATUS: 6322 case MPI2_EVENT_IR_OPERATION_STATUS:
6344 _scsih_sas_ir_operation_status_event(ioc, fw_event); 6323 _scsih_sas_ir_operation_status_event(ioc, fw_event);
6345 break; 6324 break;
6346 case MPI2_EVENT_TASK_SET_FULL:
6347 _scsih_task_set_full(ioc, fw_event);
6348 break;
6349 } 6325 }
6350 _scsih_fw_event_free(ioc, fw_event); 6326 _scsih_fw_event_free(ioc, fw_event);
6351} 6327}
@@ -6415,7 +6391,6 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
6415 case MPI2_EVENT_SAS_DISCOVERY: 6391 case MPI2_EVENT_SAS_DISCOVERY:
6416 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE: 6392 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
6417 case MPI2_EVENT_IR_PHYSICAL_DISK: 6393 case MPI2_EVENT_IR_PHYSICAL_DISK:
6418 case MPI2_EVENT_TASK_SET_FULL:
6419 break; 6394 break;
6420 6395
6421 default: /* ignore the rest */ 6396 default: /* ignore the rest */
@@ -6490,56 +6465,23 @@ static void
6490_scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc, 6465_scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc,
6491 struct _sas_node *sas_expander) 6466 struct _sas_node *sas_expander)
6492{ 6467{
6493 struct _sas_port *mpt2sas_port; 6468 struct _sas_port *mpt2sas_port, *next;
6494 struct _sas_device *sas_device;
6495 struct _sas_node *expander_sibling;
6496 unsigned long flags;
6497
6498 if (!sas_expander)
6499 return;
6500 6469
6501 /* remove sibling ports attached to this expander */ 6470 /* remove sibling ports attached to this expander */
6502 retry_device_search: 6471 list_for_each_entry_safe(mpt2sas_port, next,
6503 list_for_each_entry(mpt2sas_port,
6504 &sas_expander->sas_port_list, port_list) { 6472 &sas_expander->sas_port_list, port_list) {
6473 if (ioc->shost_recovery)
6474 return;
6505 if (mpt2sas_port->remote_identify.device_type == 6475 if (mpt2sas_port->remote_identify.device_type ==
6506 SAS_END_DEVICE) { 6476 SAS_END_DEVICE)
6507 spin_lock_irqsave(&ioc->sas_device_lock, flags); 6477 mpt2sas_device_remove(ioc,
6508 sas_device = 6478 mpt2sas_port->remote_identify.sas_address);
6509 mpt2sas_scsih_sas_device_find_by_sas_address(ioc, 6479 else if (mpt2sas_port->remote_identify.device_type ==
6510 mpt2sas_port->remote_identify.sas_address); 6480 SAS_EDGE_EXPANDER_DEVICE ||
6511 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
6512 if (!sas_device)
6513 continue;
6514 _scsih_remove_device(ioc, sas_device);
6515 if (ioc->shost_recovery)
6516 return;
6517 goto retry_device_search;
6518 }
6519 }
6520
6521 retry_expander_search:
6522 list_for_each_entry(mpt2sas_port,
6523 &sas_expander->sas_port_list, port_list) {
6524
6525 if (mpt2sas_port->remote_identify.device_type ==
6526 MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER ||
6527 mpt2sas_port->remote_identify.device_type == 6481 mpt2sas_port->remote_identify.device_type ==
6528 MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER) { 6482 SAS_FANOUT_EXPANDER_DEVICE)
6529 6483 mpt2sas_expander_remove(ioc,
6530 spin_lock_irqsave(&ioc->sas_node_lock, flags); 6484 mpt2sas_port->remote_identify.sas_address);
6531 expander_sibling =
6532 mpt2sas_scsih_expander_find_by_sas_address(
6533 ioc, mpt2sas_port->remote_identify.sas_address);
6534 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6535 if (!expander_sibling)
6536 continue;
6537 _scsih_expander_remove(ioc,
6538 expander_sibling->sas_address);
6539 if (ioc->shost_recovery)
6540 return;
6541 goto retry_expander_search;
6542 }
6543 } 6485 }
6544 6486
6545 mpt2sas_transport_port_remove(ioc, sas_expander->sas_address, 6487 mpt2sas_transport_port_remove(ioc, sas_expander->sas_address,
@@ -6550,7 +6492,6 @@ _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc,
6550 sas_expander->handle, (unsigned long long) 6492 sas_expander->handle, (unsigned long long)
6551 sas_expander->sas_address); 6493 sas_expander->sas_address);
6552 6494
6553 list_del(&sas_expander->list);
6554 kfree(sas_expander->phy); 6495 kfree(sas_expander->phy);
6555 kfree(sas_expander); 6496 kfree(sas_expander);
6556} 6497}
@@ -6668,9 +6609,7 @@ _scsih_remove(struct pci_dev *pdev)
6668{ 6609{
6669 struct Scsi_Host *shost = pci_get_drvdata(pdev); 6610 struct Scsi_Host *shost = pci_get_drvdata(pdev);
6670 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); 6611 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
6671 struct _sas_port *mpt2sas_port; 6612 struct _sas_port *mpt2sas_port, *next_port;
6672 struct _sas_device *sas_device;
6673 struct _sas_node *expander_sibling;
6674 struct _raid_device *raid_device, *next; 6613 struct _raid_device *raid_device, *next;
6675 struct MPT2SAS_TARGET *sas_target_priv_data; 6614 struct MPT2SAS_TARGET *sas_target_priv_data;
6676 struct workqueue_struct *wq; 6615 struct workqueue_struct *wq;
@@ -6702,28 +6641,18 @@ _scsih_remove(struct pci_dev *pdev)
6702 } 6641 }
6703 6642
6704 /* free ports attached to the sas_host */ 6643 /* free ports attached to the sas_host */
6705 retry_again: 6644 list_for_each_entry_safe(mpt2sas_port, next_port,
6706 list_for_each_entry(mpt2sas_port,
6707 &ioc->sas_hba.sas_port_list, port_list) { 6645 &ioc->sas_hba.sas_port_list, port_list) {
6708 if (mpt2sas_port->remote_identify.device_type == 6646 if (mpt2sas_port->remote_identify.device_type ==
6709 SAS_END_DEVICE) { 6647 SAS_END_DEVICE)
6710 sas_device = 6648 mpt2sas_device_remove(ioc,
6711 mpt2sas_scsih_sas_device_find_by_sas_address(ioc, 6649 mpt2sas_port->remote_identify.sas_address);
6712 mpt2sas_port->remote_identify.sas_address); 6650 else if (mpt2sas_port->remote_identify.device_type ==
6713 if (sas_device) { 6651 SAS_EDGE_EXPANDER_DEVICE ||
6714 _scsih_remove_device(ioc, sas_device); 6652 mpt2sas_port->remote_identify.device_type ==
6715 goto retry_again; 6653 SAS_FANOUT_EXPANDER_DEVICE)
6716 } 6654 mpt2sas_expander_remove(ioc,
6717 } else {
6718 expander_sibling =
6719 mpt2sas_scsih_expander_find_by_sas_address(ioc,
6720 mpt2sas_port->remote_identify.sas_address); 6655 mpt2sas_port->remote_identify.sas_address);
6721 if (expander_sibling) {
6722 _scsih_expander_remove(ioc,
6723 expander_sibling->sas_address);
6724 goto retry_again;
6725 }
6726 }
6727 } 6656 }
6728 6657
6729 /* free phys attached to the sas_host */ 6658 /* free phys attached to the sas_host */
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index b55c6dc07470..cb1cdecbe0f8 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -465,62 +465,149 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
465 return rc; 465 return rc;
466} 466}
467 467
468/**
469 * _transport_delete_port - helper function to removing a port
470 * @ioc: per adapter object
471 * @mpt2sas_port: mpt2sas per port object
472 *
473 * Returns nothing.
474 */
475static void
476_transport_delete_port(struct MPT2SAS_ADAPTER *ioc,
477 struct _sas_port *mpt2sas_port)
478{
479 u64 sas_address = mpt2sas_port->remote_identify.sas_address;
480 enum sas_device_type device_type =
481 mpt2sas_port->remote_identify.device_type;
482
483 dev_printk(KERN_INFO, &mpt2sas_port->port->dev,
484 "remove: sas_addr(0x%016llx)\n",
485 (unsigned long long) sas_address);
486
487 ioc->logging_level |= MPT_DEBUG_TRANSPORT;
488 if (device_type == SAS_END_DEVICE)
489 mpt2sas_device_remove(ioc, sas_address);
490 else if (device_type == SAS_EDGE_EXPANDER_DEVICE ||
491 device_type == SAS_FANOUT_EXPANDER_DEVICE)
492 mpt2sas_expander_remove(ioc, sas_address);
493 ioc->logging_level &= ~MPT_DEBUG_TRANSPORT;
494}
468 495
469/** 496/**
470 * _transport_delete_duplicate_port - (see below description) 497 * _transport_delete_phy - helper function to removing single phy from port
471 * @ioc: per adapter object 498 * @ioc: per adapter object
472 * @sas_node: sas node object (either expander or sas host) 499 * @mpt2sas_port: mpt2sas per port object
473 * @sas_address: sas address of device being added 500 * @mpt2sas_phy: mpt2sas per phy object
474 * @phy_num: phy number
475 * 501 *
476 * This function is called when attempting to add a new port that is claiming 502 * Returns nothing.
477 * the same phy resources already in use by another port. If we don't release 503 */
478 * the claimed phy resources, the sas transport layer will hang from the BUG 504static void
479 * in sas_port_add_phy. 505_transport_delete_phy(struct MPT2SAS_ADAPTER *ioc,
506 struct _sas_port *mpt2sas_port, struct _sas_phy *mpt2sas_phy)
507{
508 u64 sas_address = mpt2sas_port->remote_identify.sas_address;
509
510 dev_printk(KERN_INFO, &mpt2sas_phy->phy->dev,
511 "remove: sas_addr(0x%016llx), phy(%d)\n",
512 (unsigned long long) sas_address, mpt2sas_phy->phy_id);
513
514 list_del(&mpt2sas_phy->port_siblings);
515 mpt2sas_port->num_phys--;
516 sas_port_delete_phy(mpt2sas_port->port, mpt2sas_phy->phy);
517 mpt2sas_phy->phy_belongs_to_port = 0;
518}
519
520/**
521 * _transport_add_phy - helper function to adding single phy to port
522 * @ioc: per adapter object
523 * @mpt2sas_port: mpt2sas per port object
524 * @mpt2sas_phy: mpt2sas per phy object
480 * 525 *
481 * The reason we would hit this issue is becuase someone is changing the 526 * Returns nothing.
482 * sas address of a device on the fly, meanwhile controller firmware sends
483 * EVENTs out of order when removing the previous instance of the device.
484 */ 527 */
485static void 528static void
486_transport_delete_duplicate_port(struct MPT2SAS_ADAPTER *ioc, 529_transport_add_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_port *mpt2sas_port,
487 struct _sas_node *sas_node, u64 sas_address, int phy_num) 530 struct _sas_phy *mpt2sas_phy)
488{ 531{
489 struct _sas_port *mpt2sas_port, *mpt2sas_port_duplicate; 532 u64 sas_address = mpt2sas_port->remote_identify.sas_address;
490 struct _sas_phy *mpt2sas_phy;
491 533
492 printk(MPT2SAS_ERR_FMT "new device located at sas_addr(0x%016llx), " 534 dev_printk(KERN_INFO, &mpt2sas_phy->phy->dev,
493 "phy_id(%d)\n", ioc->name, (unsigned long long)sas_address, 535 "add: sas_addr(0x%016llx), phy(%d)\n", (unsigned long long)
494 phy_num); 536 sas_address, mpt2sas_phy->phy_id);
495 537
496 mpt2sas_port_duplicate = NULL; 538 list_add_tail(&mpt2sas_phy->port_siblings, &mpt2sas_port->phy_list);
497 list_for_each_entry(mpt2sas_port, &sas_node->sas_port_list, port_list) { 539 mpt2sas_port->num_phys++;
498 dev_printk(KERN_ERR, &mpt2sas_port->port->dev, 540 sas_port_add_phy(mpt2sas_port->port, mpt2sas_phy->phy);
499 "existing device at sas_addr(0x%016llx), num_phys(%d)\n", 541 mpt2sas_phy->phy_belongs_to_port = 1;
500 (unsigned long long) 542}
501 mpt2sas_port->remote_identify.sas_address, 543
502 mpt2sas_port->num_phys); 544/**
503 list_for_each_entry(mpt2sas_phy, &mpt2sas_port->phy_list, 545 * _transport_add_phy_to_an_existing_port - adding new phy to existing port
546 * @ioc: per adapter object
547 * @sas_node: sas node object (either expander or sas host)
548 * @mpt2sas_phy: mpt2sas per phy object
549 * @sas_address: sas address of device/expander were phy needs to be added to
550 *
551 * Returns nothing.
552 */
553static void
554_transport_add_phy_to_an_existing_port(struct MPT2SAS_ADAPTER *ioc,
555struct _sas_node *sas_node, struct _sas_phy *mpt2sas_phy, u64 sas_address)
556{
557 struct _sas_port *mpt2sas_port;
558 struct _sas_phy *phy_srch;
559
560 if (mpt2sas_phy->phy_belongs_to_port == 1)
561 return;
562
563 list_for_each_entry(mpt2sas_port, &sas_node->sas_port_list,
564 port_list) {
565 if (mpt2sas_port->remote_identify.sas_address !=
566 sas_address)
567 continue;
568 list_for_each_entry(phy_srch, &mpt2sas_port->phy_list,
504 port_siblings) { 569 port_siblings) {
505 dev_printk(KERN_ERR, &mpt2sas_phy->phy->dev, 570 if (phy_srch == mpt2sas_phy)
506 "phy_number(%d)\n", mpt2sas_phy->phy_id); 571 return;
507 if (mpt2sas_phy->phy_id == phy_num)
508 mpt2sas_port_duplicate = mpt2sas_port;
509 } 572 }
573 _transport_add_phy(ioc, mpt2sas_port, mpt2sas_phy);
574 return;
510 } 575 }
511 576
512 if (!mpt2sas_port_duplicate) 577}
578
579/**
580 * _transport_del_phy_from_an_existing_port - delete phy from existing port
581 * @ioc: per adapter object
582 * @sas_node: sas node object (either expander or sas host)
583 * @mpt2sas_phy: mpt2sas per phy object
584 *
585 * Returns nothing.
586 */
587static void
588_transport_del_phy_from_an_existing_port(struct MPT2SAS_ADAPTER *ioc,
589 struct _sas_node *sas_node, struct _sas_phy *mpt2sas_phy)
590{
591 struct _sas_port *mpt2sas_port, *next;
592 struct _sas_phy *phy_srch;
593
594 if (mpt2sas_phy->phy_belongs_to_port == 0)
513 return; 595 return;
514 596
515 dev_printk(KERN_ERR, &mpt2sas_port_duplicate->port->dev, 597 list_for_each_entry_safe(mpt2sas_port, next, &sas_node->sas_port_list,
516 "deleting duplicate device at sas_addr(0x%016llx), phy(%d)!!!!\n", 598 port_list) {
517 (unsigned long long) 599 list_for_each_entry(phy_srch, &mpt2sas_port->phy_list,
518 mpt2sas_port_duplicate->remote_identify.sas_address, phy_num); 600 port_siblings) {
519 ioc->logging_level |= MPT_DEBUG_TRANSPORT; 601 if (phy_srch != mpt2sas_phy)
520 mpt2sas_transport_port_remove(ioc, 602 continue;
521 mpt2sas_port_duplicate->remote_identify.sas_address, 603 if (mpt2sas_port->num_phys == 1)
522 sas_node->sas_address); 604 _transport_delete_port(ioc, mpt2sas_port);
523 ioc->logging_level &= ~MPT_DEBUG_TRANSPORT; 605 else
606 _transport_delete_phy(ioc, mpt2sas_port,
607 mpt2sas_phy);
608 return;
609 }
610 }
524} 611}
525 612
526/** 613/**
@@ -537,11 +624,13 @@ _transport_sanity_check(struct MPT2SAS_ADAPTER *ioc, struct _sas_node *sas_node,
537{ 624{
538 int i; 625 int i;
539 626
540 for (i = 0; i < sas_node->num_phys; i++) 627 for (i = 0; i < sas_node->num_phys; i++) {
541 if (sas_node->phy[i].remote_identify.sas_address == sas_address) 628 if (sas_node->phy[i].remote_identify.sas_address != sas_address)
542 if (sas_node->phy[i].phy_belongs_to_port) 629 continue;
543 _transport_delete_duplicate_port(ioc, sas_node, 630 if (sas_node->phy[i].phy_belongs_to_port == 1)
544 sas_address, i); 631 _transport_del_phy_from_an_existing_port(ioc, sas_node,
632 &sas_node->phy[i]);
633 }
545} 634}
546 635
547/** 636/**
@@ -905,10 +994,12 @@ mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc,
905 994
906 mpt2sas_phy = &sas_node->phy[phy_number]; 995 mpt2sas_phy = &sas_node->phy[phy_number];
907 mpt2sas_phy->attached_handle = handle; 996 mpt2sas_phy->attached_handle = handle;
908 if (handle && (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) 997 if (handle && (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) {
909 _transport_set_identify(ioc, handle, 998 _transport_set_identify(ioc, handle,
910 &mpt2sas_phy->remote_identify); 999 &mpt2sas_phy->remote_identify);
911 else 1000 _transport_add_phy_to_an_existing_port(ioc, sas_node,
1001 mpt2sas_phy, mpt2sas_phy->remote_identify.sas_address);
1002 } else
912 memset(&mpt2sas_phy->remote_identify, 0 , sizeof(struct 1003 memset(&mpt2sas_phy->remote_identify, 0 , sizeof(struct
913 sas_identify)); 1004 sas_identify));
914 1005
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index bc8194f74625..44578b56ad0a 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1309,6 +1309,31 @@ qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr,
1309} 1309}
1310 1310
1311static ssize_t 1311static ssize_t
1312qla2x00_thermal_temp_show(struct device *dev,
1313 struct device_attribute *attr, char *buf)
1314{
1315 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1316 int rval = QLA_FUNCTION_FAILED;
1317 uint16_t temp, frac;
1318
1319 if (!vha->hw->flags.thermal_supported)
1320 return snprintf(buf, PAGE_SIZE, "\n");
1321
1322 temp = frac = 0;
1323 if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1324 test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
1325 DEBUG2_3_11(printk(KERN_WARNING
1326 "%s(%ld): isp reset in progress.\n",
1327 __func__, vha->host_no));
1328 else if (!vha->hw->flags.eeh_busy)
1329 rval = qla2x00_get_thermal_temp(vha, &temp, &frac);
1330 if (rval != QLA_SUCCESS)
1331 temp = frac = 0;
1332
1333 return snprintf(buf, PAGE_SIZE, "%d.%02d\n", temp, frac);
1334}
1335
1336static ssize_t
1312qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr, 1337qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
1313 char *buf) 1338 char *buf)
1314{ 1339{
@@ -1366,6 +1391,7 @@ static DEVICE_ATTR(vn_port_mac_address, S_IRUGO,
1366 qla2x00_vn_port_mac_address_show, NULL); 1391 qla2x00_vn_port_mac_address_show, NULL);
1367static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL); 1392static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL);
1368static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL); 1393static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL);
1394static DEVICE_ATTR(thermal_temp, S_IRUGO, qla2x00_thermal_temp_show, NULL);
1369 1395
1370struct device_attribute *qla2x00_host_attrs[] = { 1396struct device_attribute *qla2x00_host_attrs[] = {
1371 &dev_attr_driver_version, 1397 &dev_attr_driver_version,
@@ -1394,6 +1420,7 @@ struct device_attribute *qla2x00_host_attrs[] = {
1394 &dev_attr_fabric_param, 1420 &dev_attr_fabric_param,
1395 &dev_attr_fw_state, 1421 &dev_attr_fw_state,
1396 &dev_attr_optrom_gold_fw_version, 1422 &dev_attr_optrom_gold_fw_version,
1423 &dev_attr_thermal_temp,
1397 NULL, 1424 NULL,
1398}; 1425};
1399 1426
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 31a4121a2be1..903b0586ded3 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -103,7 +103,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
103 103
104 bsg_job->reply->reply_payload_rcv_len = 0; 104 bsg_job->reply->reply_payload_rcv_len = 0;
105 105
106 if (!IS_QLA24XX_TYPE(ha) || !IS_QLA25XX(ha)) { 106 if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))) {
107 ret = -EINVAL; 107 ret = -EINVAL;
108 goto exit_fcp_prio_cfg; 108 goto exit_fcp_prio_cfg;
109 } 109 }
@@ -753,7 +753,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
753 command_sent = INT_DEF_LB_LOOPBACK_CMD; 753 command_sent = INT_DEF_LB_LOOPBACK_CMD;
754 rval = qla2x00_loopback_test(vha, &elreq, response); 754 rval = qla2x00_loopback_test(vha, &elreq, response);
755 755
756 if (new_config[1]) { 756 if (new_config[0]) {
757 /* Revert back to original port config 757 /* Revert back to original port config
758 * Also clear internal loopback 758 * Also clear internal loopback
759 */ 759 */
@@ -1512,6 +1512,7 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
1512 if (((sp_bsg->type == SRB_CT_CMD) || 1512 if (((sp_bsg->type == SRB_CT_CMD) ||
1513 (sp_bsg->type == SRB_ELS_CMD_HST)) 1513 (sp_bsg->type == SRB_ELS_CMD_HST))
1514 && (sp_bsg->u.bsg_job == bsg_job)) { 1514 && (sp_bsg->u.bsg_job == bsg_job)) {
1515 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1515 if (ha->isp_ops->abort_command(sp)) { 1516 if (ha->isp_ops->abort_command(sp)) {
1516 DEBUG2(qla_printk(KERN_INFO, ha, 1517 DEBUG2(qla_printk(KERN_INFO, ha,
1517 "scsi(%ld): mbx " 1518 "scsi(%ld): mbx "
@@ -1527,6 +1528,7 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
1527 bsg_job->req->errors = 1528 bsg_job->req->errors =
1528 bsg_job->reply->result = 0; 1529 bsg_job->reply->result = 0;
1529 } 1530 }
1531 spin_lock_irqsave(&ha->hardware_lock, flags);
1530 goto done; 1532 goto done;
1531 } 1533 }
1532 } 1534 }
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 9ce539d4557e..ccfc8e78be21 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2425,6 +2425,9 @@ struct qla_hw_data {
2425 uint32_t disable_msix_handshake :1; 2425 uint32_t disable_msix_handshake :1;
2426 uint32_t fcp_prio_enabled :1; 2426 uint32_t fcp_prio_enabled :1;
2427 uint32_t fw_hung :1; 2427 uint32_t fw_hung :1;
2428 uint32_t quiesce_owner:1;
2429 uint32_t thermal_supported:1;
2430 /* 26 bits */
2428 } flags; 2431 } flags;
2429 2432
2430 /* This spinlock is used to protect "io transactions", you must 2433 /* This spinlock is used to protect "io transactions", you must
@@ -2863,6 +2866,7 @@ typedef struct scsi_qla_host {
2863#define ISP_UNRECOVERABLE 17 2866#define ISP_UNRECOVERABLE 17
2864#define FCOE_CTX_RESET_NEEDED 18 /* Initiate FCoE context reset */ 2867#define FCOE_CTX_RESET_NEEDED 18 /* Initiate FCoE context reset */
2865#define MPI_RESET_NEEDED 19 /* Initiate MPI FW reset */ 2868#define MPI_RESET_NEEDED 19 /* Initiate MPI FW reset */
2869#define ISP_QUIESCE_NEEDED 20 /* Driver need some quiescence */
2866 2870
2867 uint32_t device_flags; 2871 uint32_t device_flags;
2868#define SWITCH_FOUND BIT_0 2872#define SWITCH_FOUND BIT_0
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 9382a816c133..89e900adb679 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -36,6 +36,7 @@ extern int qla2x00_load_risc(struct scsi_qla_host *, uint32_t *);
36extern int qla24xx_load_risc(scsi_qla_host_t *, uint32_t *); 36extern int qla24xx_load_risc(scsi_qla_host_t *, uint32_t *);
37extern int qla81xx_load_risc(scsi_qla_host_t *, uint32_t *); 37extern int qla81xx_load_risc(scsi_qla_host_t *, uint32_t *);
38 38
39extern int qla2x00_perform_loop_resync(scsi_qla_host_t *);
39extern int qla2x00_loop_resync(scsi_qla_host_t *); 40extern int qla2x00_loop_resync(scsi_qla_host_t *);
40 41
41extern int qla2x00_fabric_login(scsi_qla_host_t *, fc_port_t *, uint16_t *); 42extern int qla2x00_fabric_login(scsi_qla_host_t *, fc_port_t *, uint16_t *);
@@ -45,12 +46,15 @@ extern void qla2x00_update_fcports(scsi_qla_host_t *);
45 46
46extern int qla2x00_abort_isp(scsi_qla_host_t *); 47extern int qla2x00_abort_isp(scsi_qla_host_t *);
47extern void qla2x00_abort_isp_cleanup(scsi_qla_host_t *); 48extern void qla2x00_abort_isp_cleanup(scsi_qla_host_t *);
49extern void qla82xx_quiescent_state_cleanup(scsi_qla_host_t *);
48 50
49extern void qla2x00_update_fcport(scsi_qla_host_t *, fc_port_t *); 51extern void qla2x00_update_fcport(scsi_qla_host_t *, fc_port_t *);
50 52
51extern void qla2x00_alloc_fw_dump(scsi_qla_host_t *); 53extern void qla2x00_alloc_fw_dump(scsi_qla_host_t *);
52extern void qla2x00_try_to_stop_firmware(scsi_qla_host_t *); 54extern void qla2x00_try_to_stop_firmware(scsi_qla_host_t *);
53 55
56extern int qla2x00_get_thermal_temp(scsi_qla_host_t *, uint16_t *, uint16_t *);
57
54extern void qla84xx_put_chip(struct scsi_qla_host *); 58extern void qla84xx_put_chip(struct scsi_qla_host *);
55 59
56extern int qla2x00_async_login(struct scsi_qla_host *, fc_port_t *, 60extern int qla2x00_async_login(struct scsi_qla_host *, fc_port_t *,
@@ -68,6 +72,7 @@ extern void qla2x00_async_adisc_done(struct scsi_qla_host *, fc_port_t *,
68extern void qla2x00_async_tm_cmd_done(struct scsi_qla_host *, fc_port_t *, 72extern void qla2x00_async_tm_cmd_done(struct scsi_qla_host *, fc_port_t *,
69 struct srb_iocb *); 73 struct srb_iocb *);
70extern void *qla2x00_alloc_iocbs(struct scsi_qla_host *, srb_t *); 74extern void *qla2x00_alloc_iocbs(struct scsi_qla_host *, srb_t *);
75extern int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *, fc_port_t *);
71 76
72extern fc_port_t * 77extern fc_port_t *
73qla2x00_alloc_fcport(scsi_qla_host_t *, gfp_t ); 78qla2x00_alloc_fcport(scsi_qla_host_t *, gfp_t );
@@ -90,7 +95,6 @@ extern int ql2xfwloadbin;
90extern int ql2xetsenable; 95extern int ql2xetsenable;
91extern int ql2xshiftctondsd; 96extern int ql2xshiftctondsd;
92extern int ql2xdbwr; 97extern int ql2xdbwr;
93extern int ql2xdontresethba;
94extern int ql2xasynctmfenable; 98extern int ql2xasynctmfenable;
95extern int ql2xgffidenable; 99extern int ql2xgffidenable;
96extern int ql2xenabledif; 100extern int ql2xenabledif;
@@ -549,9 +553,11 @@ extern void qla82xx_rom_unlock(struct qla_hw_data *);
549 553
550/* ISP 8021 IDC */ 554/* ISP 8021 IDC */
551extern void qla82xx_clear_drv_active(struct qla_hw_data *); 555extern void qla82xx_clear_drv_active(struct qla_hw_data *);
556extern uint32_t qla82xx_wait_for_state_change(scsi_qla_host_t *, uint32_t);
552extern int qla82xx_idc_lock(struct qla_hw_data *); 557extern int qla82xx_idc_lock(struct qla_hw_data *);
553extern void qla82xx_idc_unlock(struct qla_hw_data *); 558extern void qla82xx_idc_unlock(struct qla_hw_data *);
554extern int qla82xx_device_state_handler(scsi_qla_host_t *); 559extern int qla82xx_device_state_handler(scsi_qla_host_t *);
560extern void qla82xx_clear_qsnt_ready(scsi_qla_host_t *);
555 561
556extern void qla2x00_set_model_info(scsi_qla_host_t *, uint8_t *, 562extern void qla2x00_set_model_info(scsi_qla_host_t *, uint8_t *,
557 size_t, char *); 563 size_t, char *);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 259f51137493..f948e1a73aec 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -498,6 +498,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
498 vha->flags.reset_active = 0; 498 vha->flags.reset_active = 0;
499 ha->flags.pci_channel_io_perm_failure = 0; 499 ha->flags.pci_channel_io_perm_failure = 0;
500 ha->flags.eeh_busy = 0; 500 ha->flags.eeh_busy = 0;
501 ha->flags.thermal_supported = 1;
501 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 502 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
502 atomic_set(&vha->loop_state, LOOP_DOWN); 503 atomic_set(&vha->loop_state, LOOP_DOWN);
503 vha->device_flags = DFLG_NO_CABLE; 504 vha->device_flags = DFLG_NO_CABLE;
@@ -2023,6 +2024,7 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
2023 &loop_id, &al_pa, &area, &domain, &topo, &sw_cap); 2024 &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
2024 if (rval != QLA_SUCCESS) { 2025 if (rval != QLA_SUCCESS) {
2025 if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) || 2026 if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
2027 IS_QLA8XXX_TYPE(ha) ||
2026 (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) { 2028 (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
2027 DEBUG2(printk("%s(%ld) Loop is in a transition state\n", 2029 DEBUG2(printk("%s(%ld) Loop is in a transition state\n",
2028 __func__, vha->host_no)); 2030 __func__, vha->host_no));
@@ -2928,6 +2930,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2928 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); 2930 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
2929 2931
2930 qla2x00_iidma_fcport(vha, fcport); 2932 qla2x00_iidma_fcport(vha, fcport);
2933 qla24xx_update_fcport_fcp_prio(vha, fcport);
2931 qla2x00_reg_remote_port(vha, fcport); 2934 qla2x00_reg_remote_port(vha, fcport);
2932 atomic_set(&fcport->state, FCS_ONLINE); 2935 atomic_set(&fcport->state, FCS_ONLINE);
2933} 2936}
@@ -3844,6 +3847,37 @@ qla2x00_loop_resync(scsi_qla_host_t *vha)
3844 return (rval); 3847 return (rval);
3845} 3848}
3846 3849
3850/*
3851* qla2x00_perform_loop_resync
3852* Description: This function will set the appropriate flags and call
3853* qla2x00_loop_resync. If successful loop will be resynced
3854* Arguments : scsi_qla_host_t pointer
3855* returm : Success or Failure
3856*/
3857
3858int qla2x00_perform_loop_resync(scsi_qla_host_t *ha)
3859{
3860 int32_t rval = 0;
3861
3862 if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags)) {
3863 /*Configure the flags so that resync happens properly*/
3864 atomic_set(&ha->loop_down_timer, 0);
3865 if (!(ha->device_flags & DFLG_NO_CABLE)) {
3866 atomic_set(&ha->loop_state, LOOP_UP);
3867 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
3868 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
3869 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
3870
3871 rval = qla2x00_loop_resync(ha);
3872 } else
3873 atomic_set(&ha->loop_state, LOOP_DEAD);
3874
3875 clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);
3876 }
3877
3878 return rval;
3879}
3880
3847void 3881void
3848qla2x00_update_fcports(scsi_qla_host_t *base_vha) 3882qla2x00_update_fcports(scsi_qla_host_t *base_vha)
3849{ 3883{
@@ -3857,7 +3891,7 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha)
3857 list_for_each_entry(vha, &base_vha->hw->vp_list, list) { 3891 list_for_each_entry(vha, &base_vha->hw->vp_list, list) {
3858 atomic_inc(&vha->vref_count); 3892 atomic_inc(&vha->vref_count);
3859 list_for_each_entry(fcport, &vha->vp_fcports, list) { 3893 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3860 if (fcport && fcport->drport && 3894 if (fcport->drport &&
3861 atomic_read(&fcport->state) != FCS_UNCONFIGURED) { 3895 atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
3862 spin_unlock_irqrestore(&ha->vport_slock, flags); 3896 spin_unlock_irqrestore(&ha->vport_slock, flags);
3863 3897
@@ -3871,11 +3905,43 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha)
3871 spin_unlock_irqrestore(&ha->vport_slock, flags); 3905 spin_unlock_irqrestore(&ha->vport_slock, flags);
3872} 3906}
3873 3907
3908/*
3909* qla82xx_quiescent_state_cleanup
3910* Description: This function will block the new I/Os
3911* Its not aborting any I/Os as context
3912* is not destroyed during quiescence
3913* Arguments: scsi_qla_host_t
3914* return : void
3915*/
3916void
3917qla82xx_quiescent_state_cleanup(scsi_qla_host_t *vha)
3918{
3919 struct qla_hw_data *ha = vha->hw;
3920 struct scsi_qla_host *vp;
3921
3922 qla_printk(KERN_INFO, ha,
3923 "Performing ISP error recovery - ha= %p.\n", ha);
3924
3925 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
3926 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
3927 atomic_set(&vha->loop_state, LOOP_DOWN);
3928 qla2x00_mark_all_devices_lost(vha, 0);
3929 list_for_each_entry(vp, &ha->vp_list, list)
3930 qla2x00_mark_all_devices_lost(vha, 0);
3931 } else {
3932 if (!atomic_read(&vha->loop_down_timer))
3933 atomic_set(&vha->loop_down_timer,
3934 LOOP_DOWN_TIME);
3935 }
3936 /* Wait for pending cmds to complete */
3937 qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST);
3938}
3939
3874void 3940void
3875qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) 3941qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
3876{ 3942{
3877 struct qla_hw_data *ha = vha->hw; 3943 struct qla_hw_data *ha = vha->hw;
3878 struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev); 3944 struct scsi_qla_host *vp;
3879 unsigned long flags; 3945 unsigned long flags;
3880 3946
3881 vha->flags.online = 0; 3947 vha->flags.online = 0;
@@ -3896,7 +3962,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
3896 qla2x00_mark_all_devices_lost(vha, 0); 3962 qla2x00_mark_all_devices_lost(vha, 0);
3897 3963
3898 spin_lock_irqsave(&ha->vport_slock, flags); 3964 spin_lock_irqsave(&ha->vport_slock, flags);
3899 list_for_each_entry(vp, &base_vha->hw->vp_list, list) { 3965 list_for_each_entry(vp, &ha->vp_list, list) {
3900 atomic_inc(&vp->vref_count); 3966 atomic_inc(&vp->vref_count);
3901 spin_unlock_irqrestore(&ha->vport_slock, flags); 3967 spin_unlock_irqrestore(&ha->vport_slock, flags);
3902 3968
@@ -5410,7 +5476,7 @@ qla81xx_update_fw_options(scsi_qla_host_t *vha)
5410 * the tag (priority) value is returned. 5476 * the tag (priority) value is returned.
5411 * 5477 *
5412 * Input: 5478 * Input:
5413 * ha = adapter block po 5479 * vha = scsi host structure pointer.
5414 * fcport = port structure pointer. 5480 * fcport = port structure pointer.
5415 * 5481 *
5416 * Return: 5482 * Return:
@@ -5504,7 +5570,7 @@ qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
5504 * Activates fcp priority for the logged in fc port 5570 * Activates fcp priority for the logged in fc port
5505 * 5571 *
5506 * Input: 5572 * Input:
5507 * ha = adapter block pointer. 5573 * vha = scsi host structure pointer.
5508 * fcp = port structure pointer. 5574 * fcp = port structure pointer.
5509 * 5575 *
5510 * Return: 5576 * Return:
@@ -5514,25 +5580,24 @@ qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
5514 * Kernel context. 5580 * Kernel context.
5515 */ 5581 */
5516int 5582int
5517qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *ha, fc_port_t *fcport) 5583qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
5518{ 5584{
5519 int ret; 5585 int ret;
5520 uint8_t priority; 5586 uint8_t priority;
5521 uint16_t mb[5]; 5587 uint16_t mb[5];
5522 5588
5523 if (atomic_read(&fcport->state) == FCS_UNCONFIGURED || 5589 if (fcport->port_type != FCT_TARGET ||
5524 fcport->port_type != FCT_TARGET || 5590 fcport->loop_id == FC_NO_LOOP_ID)
5525 fcport->loop_id == FC_NO_LOOP_ID)
5526 return QLA_FUNCTION_FAILED; 5591 return QLA_FUNCTION_FAILED;
5527 5592
5528 priority = qla24xx_get_fcp_prio(ha, fcport); 5593 priority = qla24xx_get_fcp_prio(vha, fcport);
5529 ret = qla24xx_set_fcp_prio(ha, fcport->loop_id, priority, mb); 5594 ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb);
5530 if (ret == QLA_SUCCESS) 5595 if (ret == QLA_SUCCESS)
5531 fcport->fcp_prio = priority; 5596 fcport->fcp_prio = priority;
5532 else 5597 else
5533 DEBUG2(printk(KERN_WARNING 5598 DEBUG2(printk(KERN_WARNING
5534 "scsi(%ld): Unable to activate fcp priority, " 5599 "scsi(%ld): Unable to activate fcp priority, "
5535 " ret=0x%x\n", ha->host_no, ret)); 5600 " ret=0x%x\n", vha->host_no, ret));
5536 5601
5537 return ret; 5602 return ret;
5538} 5603}
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 7f77898486a9..d17ed9a94a0c 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -321,6 +321,7 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
321 struct qla_hw_data *ha = vha->hw; 321 struct qla_hw_data *ha = vha->hw;
322 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 322 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
323 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; 323 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
324 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
324 uint32_t rscn_entry, host_pid; 325 uint32_t rscn_entry, host_pid;
325 uint8_t rscn_queue_index; 326 uint8_t rscn_queue_index;
326 unsigned long flags; 327 unsigned long flags;
@@ -498,6 +499,7 @@ skip_rio:
498 499
499 case MBA_LOOP_DOWN: /* Loop Down Event */ 500 case MBA_LOOP_DOWN: /* Loop Down Event */
500 mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox4) : 0; 501 mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox4) : 0;
502 mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx;
501 DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN " 503 DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN "
502 "(%x %x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3], 504 "(%x %x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3],
503 mbx)); 505 mbx));
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index effd8a1403d9..e473e9fb363c 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -4125,7 +4125,7 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
4125 return QLA_FUNCTION_FAILED; 4125 return QLA_FUNCTION_FAILED;
4126 4126
4127 DEBUG11(printk(KERN_INFO 4127 DEBUG11(printk(KERN_INFO
4128 "%s(%ld): entered.\n", __func__, ha->host_no)); 4128 "%s(%ld): entered.\n", __func__, vha->host_no));
4129 4129
4130 mcp->mb[0] = MBC_PORT_PARAMS; 4130 mcp->mb[0] = MBC_PORT_PARAMS;
4131 mcp->mb[1] = loop_id; 4131 mcp->mb[1] = loop_id;
@@ -4160,6 +4160,71 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
4160} 4160}
4161 4161
4162int 4162int
4163qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
4164{
4165 int rval;
4166 mbx_cmd_t mc;
4167 mbx_cmd_t *mcp = &mc;
4168 struct qla_hw_data *ha = vha->hw;
4169
4170 DEBUG11(printk(KERN_INFO "%s(%ld): entered.\n", __func__, ha->host_no));
4171
4172 /* High bits. */
4173 mcp->mb[0] = MBC_READ_SFP;
4174 mcp->mb[1] = 0x98;
4175 mcp->mb[2] = 0;
4176 mcp->mb[3] = 0;
4177 mcp->mb[6] = 0;
4178 mcp->mb[7] = 0;
4179 mcp->mb[8] = 1;
4180 mcp->mb[9] = 0x01;
4181 mcp->mb[10] = BIT_13|BIT_0;
4182 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4183 mcp->in_mb = MBX_1|MBX_0;
4184 mcp->tov = MBX_TOV_SECONDS;
4185 mcp->flags = 0;
4186 rval = qla2x00_mailbox_command(vha, mcp);
4187 if (rval != QLA_SUCCESS) {
4188 DEBUG2_3_11(printk(KERN_WARNING
4189 "%s(%ld): failed=%x (%x).\n", __func__,
4190 vha->host_no, rval, mcp->mb[0]));
4191 ha->flags.thermal_supported = 0;
4192 goto fail;
4193 }
4194 *temp = mcp->mb[1] & 0xFF;
4195
4196 /* Low bits. */
4197 mcp->mb[0] = MBC_READ_SFP;
4198 mcp->mb[1] = 0x98;
4199 mcp->mb[2] = 0;
4200 mcp->mb[3] = 0;
4201 mcp->mb[6] = 0;
4202 mcp->mb[7] = 0;
4203 mcp->mb[8] = 1;
4204 mcp->mb[9] = 0x10;
4205 mcp->mb[10] = BIT_13|BIT_0;
4206 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4207 mcp->in_mb = MBX_1|MBX_0;
4208 mcp->tov = MBX_TOV_SECONDS;
4209 mcp->flags = 0;
4210 rval = qla2x00_mailbox_command(vha, mcp);
4211 if (rval != QLA_SUCCESS) {
4212 DEBUG2_3_11(printk(KERN_WARNING
4213 "%s(%ld): failed=%x (%x).\n", __func__,
4214 vha->host_no, rval, mcp->mb[0]));
4215 ha->flags.thermal_supported = 0;
4216 goto fail;
4217 }
4218 *frac = ((mcp->mb[1] & 0xFF) >> 6) * 25;
4219
4220 if (rval == QLA_SUCCESS)
4221 DEBUG11(printk(KERN_INFO
4222 "%s(%ld): done.\n", __func__, ha->host_no));
4223fail:
4224 return rval;
4225}
4226
4227int
4163qla82xx_mbx_intr_enable(scsi_qla_host_t *vha) 4228qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
4164{ 4229{
4165 int rval; 4230 int rval;
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index ae2acacc0003..fdb96a3584a5 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1079,11 +1079,55 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
1079 1079
1080 /* Halt all the indiviual PEGs and other blocks of the ISP */ 1080 /* Halt all the indiviual PEGs and other blocks of the ISP */
1081 qla82xx_rom_lock(ha); 1081 qla82xx_rom_lock(ha);
1082
1083 /* mask all niu interrupts */
1084 qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff);
1085 /* disable xge rx/tx */
1086 qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00);
1087 /* disable xg1 rx/tx */
1088 qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00);
1089
1090 /* halt sre */
1091 val = qla82xx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000);
1092 qla82xx_wr_32(ha, QLA82XX_CRB_SRE + 0x1000, val & (~(0x1)));
1093
1094 /* halt epg */
1095 qla82xx_wr_32(ha, QLA82XX_CRB_EPG + 0x1300, 0x1);
1096
1097 /* halt timers */
1098 qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x0, 0x0);
1099 qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x8, 0x0);
1100 qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0);
1101 qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0);
1102 qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0);
1103
1104 /* halt pegs */
1105 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1);
1106 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c, 1);
1107 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1);
1108 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1);
1109 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1);
1110
1111 /* big hammer */
1112 msleep(1000);
1082 if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) 1113 if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
1083 /* don't reset CAM block on reset */ 1114 /* don't reset CAM block on reset */
1084 qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff); 1115 qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff);
1085 else 1116 else
1086 qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff); 1117 qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff);
1118
1119 /* reset ms */
1120 val = qla82xx_rd_32(ha, QLA82XX_CRB_QDR_NET + 0xe4);
1121 val |= (1 << 1);
1122 qla82xx_wr_32(ha, QLA82XX_CRB_QDR_NET + 0xe4, val);
1123 msleep(20);
1124
1125 /* unreset ms */
1126 val = qla82xx_rd_32(ha, QLA82XX_CRB_QDR_NET + 0xe4);
1127 val &= ~(1 << 1);
1128 qla82xx_wr_32(ha, QLA82XX_CRB_QDR_NET + 0xe4, val);
1129 msleep(20);
1130
1087 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); 1131 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
1088 1132
1089 /* Read the signature value from the flash. 1133 /* Read the signature value from the flash.
@@ -1210,25 +1254,6 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
1210} 1254}
1211 1255
1212static int 1256static int
1213qla82xx_check_for_bad_spd(struct qla_hw_data *ha)
1214{
1215 u32 val = 0;
1216 val = qla82xx_rd_32(ha, BOOT_LOADER_DIMM_STATUS);
1217 val &= QLA82XX_BOOT_LOADER_MN_ISSUE;
1218 if (val & QLA82XX_PEG_TUNE_MN_SPD_ZEROED) {
1219 qla_printk(KERN_INFO, ha,
1220 "Memory DIMM SPD not programmed. "
1221 " Assumed valid.\n");
1222 return 1;
1223 } else if (val) {
1224 qla_printk(KERN_INFO, ha,
1225 "Memory DIMM type incorrect.Info:%08X.\n", val);
1226 return 2;
1227 }
1228 return 0;
1229}
1230
1231static int
1232qla82xx_pci_mem_write_2M(struct qla_hw_data *ha, 1257qla82xx_pci_mem_write_2M(struct qla_hw_data *ha,
1233 u64 off, void *data, int size) 1258 u64 off, void *data, int size)
1234{ 1259{
@@ -1293,11 +1318,6 @@ qla82xx_pci_mem_write_2M(struct qla_hw_data *ha,
1293 word[startword+1] |= tmpw >> (sz[0] * 8); 1318 word[startword+1] |= tmpw >> (sz[0] * 8);
1294 } 1319 }
1295 1320
1296 /*
1297 * don't lock here - write_wx gets the lock if each time
1298 * write_lock_irqsave(&adapter->adapter_lock, flags);
1299 * netxen_nic_pci_change_crbwindow_128M(adapter, 0);
1300 */
1301 for (i = 0; i < loop; i++) { 1321 for (i = 0; i < loop; i++) {
1302 temp = off8 + (i << shift_amount); 1322 temp = off8 + (i << shift_amount);
1303 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp); 1323 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp);
@@ -1399,12 +1419,6 @@ qla82xx_pci_mem_read_2M(struct qla_hw_data *ha,
1399 off0[1] = 0; 1419 off0[1] = 0;
1400 sz[1] = size - sz[0]; 1420 sz[1] = size - sz[0];
1401 1421
1402 /*
1403 * don't lock here - write_wx gets the lock if each time
1404 * write_lock_irqsave(&adapter->adapter_lock, flags);
1405 * netxen_nic_pci_change_crbwindow_128M(adapter, 0);
1406 */
1407
1408 for (i = 0; i < loop; i++) { 1422 for (i = 0; i < loop; i++) {
1409 temp = off8 + (i << shift_amount); 1423 temp = off8 + (i << shift_amount);
1410 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp); 1424 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp);
@@ -1437,11 +1451,6 @@ qla82xx_pci_mem_read_2M(struct qla_hw_data *ha,
1437 } 1451 }
1438 } 1452 }
1439 1453
1440 /*
1441 * netxen_nic_pci_change_crbwindow_128M(adapter, 1);
1442 * write_unlock_irqrestore(&adapter->adapter_lock, flags);
1443 */
1444
1445 if (j >= MAX_CTL_CHECK) 1454 if (j >= MAX_CTL_CHECK)
1446 return -1; 1455 return -1;
1447 1456
@@ -1872,7 +1881,6 @@ qla82xx_check_cmdpeg_state(struct qla_hw_data *ha)
1872 qla_printk(KERN_INFO, ha, 1881 qla_printk(KERN_INFO, ha,
1873 "Cmd Peg initialization failed: 0x%x.\n", val); 1882 "Cmd Peg initialization failed: 0x%x.\n", val);
1874 1883
1875 qla82xx_check_for_bad_spd(ha);
1876 val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_PEGTUNE_DONE); 1884 val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_PEGTUNE_DONE);
1877 read_lock(&ha->hw_lock); 1885 read_lock(&ha->hw_lock);
1878 qla82xx_wr_32(ha, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED); 1886 qla82xx_wr_32(ha, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
@@ -2343,6 +2351,17 @@ qla82xx_set_qsnt_ready(struct qla_hw_data *ha)
2343 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state); 2351 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state);
2344} 2352}
2345 2353
2354void
2355qla82xx_clear_qsnt_ready(scsi_qla_host_t *vha)
2356{
2357 struct qla_hw_data *ha = vha->hw;
2358 uint32_t qsnt_state;
2359
2360 qsnt_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2361 qsnt_state &= ~(QLA82XX_DRVST_QSNT_RDY << (ha->portnum * 4));
2362 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state);
2363}
2364
2346static int 2365static int
2347qla82xx_load_fw(scsi_qla_host_t *vha) 2366qla82xx_load_fw(scsi_qla_host_t *vha)
2348{ 2367{
@@ -2542,7 +2561,7 @@ qla2xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
2542 *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); 2561 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2543 *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); 2562 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2544 *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg)); 2563 *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
2545 cur_seg++; 2564 cur_seg = sg_next(cur_seg);
2546 avail_dsds--; 2565 avail_dsds--;
2547 } 2566 }
2548 } 2567 }
@@ -3261,6 +3280,104 @@ dev_ready:
3261 return QLA_SUCCESS; 3280 return QLA_SUCCESS;
3262} 3281}
3263 3282
3283/*
3284* qla82xx_need_qsnt_handler
3285* Code to start quiescence sequence
3286*
3287* Note:
3288* IDC lock must be held upon entry
3289*
3290* Return: void
3291*/
3292
3293static void
3294qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
3295{
3296 struct qla_hw_data *ha = vha->hw;
3297 uint32_t dev_state, drv_state, drv_active;
3298 unsigned long reset_timeout;
3299
3300 if (vha->flags.online) {
3301 /*Block any further I/O and wait for pending cmnds to complete*/
3302 qla82xx_quiescent_state_cleanup(vha);
3303 }
3304
3305 /* Set the quiescence ready bit */
3306 qla82xx_set_qsnt_ready(ha);
3307
3308 /*wait for 30 secs for other functions to ack */
3309 reset_timeout = jiffies + (30 * HZ);
3310
3311 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3312 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3313 /* Its 2 that is written when qsnt is acked, moving one bit */
3314 drv_active = drv_active << 0x01;
3315
3316 while (drv_state != drv_active) {
3317
3318 if (time_after_eq(jiffies, reset_timeout)) {
3319 /* quiescence timeout, other functions didn't ack
3320 * changing the state to DEV_READY
3321 */
3322 qla_printk(KERN_INFO, ha,
3323 "%s: QUIESCENT TIMEOUT\n", QLA2XXX_DRIVER_NAME);
3324 qla_printk(KERN_INFO, ha,
3325 "DRV_ACTIVE:%d DRV_STATE:%d\n", drv_active,
3326 drv_state);
3327 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3328 QLA82XX_DEV_READY);
3329 qla_printk(KERN_INFO, ha,
3330 "HW State: DEV_READY\n");
3331 qla82xx_idc_unlock(ha);
3332 qla2x00_perform_loop_resync(vha);
3333 qla82xx_idc_lock(ha);
3334
3335 qla82xx_clear_qsnt_ready(vha);
3336 return;
3337 }
3338
3339 qla82xx_idc_unlock(ha);
3340 msleep(1000);
3341 qla82xx_idc_lock(ha);
3342
3343 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3344 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3345 drv_active = drv_active << 0x01;
3346 }
3347 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3348 /* everyone acked so set the state to DEV_QUIESCENCE */
3349 if (dev_state == QLA82XX_DEV_NEED_QUIESCENT) {
3350 qla_printk(KERN_INFO, ha, "HW State: DEV_QUIESCENT\n");
3351 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_QUIESCENT);
3352 }
3353}
3354
3355/*
3356* qla82xx_wait_for_state_change
3357* Wait for device state to change from given current state
3358*
3359* Note:
3360* IDC lock must not be held upon entry
3361*
3362* Return:
3363* Changed device state.
3364*/
3365uint32_t
3366qla82xx_wait_for_state_change(scsi_qla_host_t *vha, uint32_t curr_state)
3367{
3368 struct qla_hw_data *ha = vha->hw;
3369 uint32_t dev_state;
3370
3371 do {
3372 msleep(1000);
3373 qla82xx_idc_lock(ha);
3374 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3375 qla82xx_idc_unlock(ha);
3376 } while (dev_state == curr_state);
3377
3378 return dev_state;
3379}
3380
3264static void 3381static void
3265qla82xx_dev_failed_handler(scsi_qla_host_t *vha) 3382qla82xx_dev_failed_handler(scsi_qla_host_t *vha)
3266{ 3383{
@@ -3439,15 +3556,28 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
3439 qla82xx_idc_lock(ha); 3556 qla82xx_idc_lock(ha);
3440 break; 3557 break;
3441 case QLA82XX_DEV_NEED_RESET: 3558 case QLA82XX_DEV_NEED_RESET:
3442 if (!ql2xdontresethba) 3559 qla82xx_need_reset_handler(vha);
3443 qla82xx_need_reset_handler(vha);
3444 break; 3560 break;
3445 case QLA82XX_DEV_NEED_QUIESCENT: 3561 case QLA82XX_DEV_NEED_QUIESCENT:
3446 qla82xx_set_qsnt_ready(ha); 3562 qla82xx_need_qsnt_handler(vha);
3563 /* Reset timeout value after quiescence handler */
3564 dev_init_timeout = jiffies + (ha->nx_dev_init_timeout\
3565 * HZ);
3566 break;
3447 case QLA82XX_DEV_QUIESCENT: 3567 case QLA82XX_DEV_QUIESCENT:
3568 /* Owner will exit and other will wait for the state
3569 * to get changed
3570 */
3571 if (ha->flags.quiesce_owner)
3572 goto exit;
3573
3448 qla82xx_idc_unlock(ha); 3574 qla82xx_idc_unlock(ha);
3449 msleep(1000); 3575 msleep(1000);
3450 qla82xx_idc_lock(ha); 3576 qla82xx_idc_lock(ha);
3577
3578 /* Reset timeout value after quiescence handler */
3579 dev_init_timeout = jiffies + (ha->nx_dev_init_timeout\
3580 * HZ);
3451 break; 3581 break;
3452 case QLA82XX_DEV_FAILED: 3582 case QLA82XX_DEV_FAILED:
3453 qla82xx_dev_failed_handler(vha); 3583 qla82xx_dev_failed_handler(vha);
@@ -3490,6 +3620,13 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
3490 &ha->mbx_cmd_flags)) 3620 &ha->mbx_cmd_flags))
3491 complete(&ha->mbx_intr_comp); 3621 complete(&ha->mbx_intr_comp);
3492 } 3622 }
3623 } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
3624 !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
3625 DEBUG(qla_printk(KERN_INFO, ha,
3626 "scsi(%ld) %s - detected quiescence needed\n",
3627 vha->host_no, __func__));
3628 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
3629 qla2xxx_wake_dpc(vha);
3493 } else { 3630 } else {
3494 qla82xx_check_fw_alive(vha); 3631 qla82xx_check_fw_alive(vha);
3495 } 3632 }
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 51ec0c5380e8..ed5883f1778a 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -523,8 +523,6 @@
523# define QLA82XX_CAM_RAM_BASE (QLA82XX_CRB_CAM + 0x02000) 523# define QLA82XX_CAM_RAM_BASE (QLA82XX_CRB_CAM + 0x02000)
524# define QLA82XX_CAM_RAM(reg) (QLA82XX_CAM_RAM_BASE + (reg)) 524# define QLA82XX_CAM_RAM(reg) (QLA82XX_CAM_RAM_BASE + (reg))
525 525
526#define QLA82XX_PEG_TUNE_MN_SPD_ZEROED 0x80000000
527#define QLA82XX_BOOT_LOADER_MN_ISSUE 0xff00ffff
528#define QLA82XX_PORT_MODE_ADDR (QLA82XX_CAM_RAM(0x24)) 526#define QLA82XX_PORT_MODE_ADDR (QLA82XX_CAM_RAM(0x24))
529#define QLA82XX_PEG_HALT_STATUS1 (QLA82XX_CAM_RAM(0xa8)) 527#define QLA82XX_PEG_HALT_STATUS1 (QLA82XX_CAM_RAM(0xa8))
530#define QLA82XX_PEG_HALT_STATUS2 (QLA82XX_CAM_RAM(0xac)) 528#define QLA82XX_PEG_HALT_STATUS2 (QLA82XX_CAM_RAM(0xac))
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 2c0876c81a3f..c194c23ca1fb 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -37,12 +37,12 @@ static struct kmem_cache *srb_cachep;
37static struct kmem_cache *ctx_cachep; 37static struct kmem_cache *ctx_cachep;
38 38
39int ql2xlogintimeout = 20; 39int ql2xlogintimeout = 20;
40module_param(ql2xlogintimeout, int, S_IRUGO|S_IRUSR); 40module_param(ql2xlogintimeout, int, S_IRUGO);
41MODULE_PARM_DESC(ql2xlogintimeout, 41MODULE_PARM_DESC(ql2xlogintimeout,
42 "Login timeout value in seconds."); 42 "Login timeout value in seconds.");
43 43
44int qlport_down_retry; 44int qlport_down_retry;
45module_param(qlport_down_retry, int, S_IRUGO|S_IRUSR); 45module_param(qlport_down_retry, int, S_IRUGO);
46MODULE_PARM_DESC(qlport_down_retry, 46MODULE_PARM_DESC(qlport_down_retry,
47 "Maximum number of command retries to a port that returns " 47 "Maximum number of command retries to a port that returns "
48 "a PORT-DOWN status."); 48 "a PORT-DOWN status.");
@@ -55,12 +55,12 @@ MODULE_PARM_DESC(ql2xplogiabsentdevice,
55 "Default is 0 - no PLOGI. 1 - perfom PLOGI."); 55 "Default is 0 - no PLOGI. 1 - perfom PLOGI.");
56 56
57int ql2xloginretrycount = 0; 57int ql2xloginretrycount = 0;
58module_param(ql2xloginretrycount, int, S_IRUGO|S_IRUSR); 58module_param(ql2xloginretrycount, int, S_IRUGO);
59MODULE_PARM_DESC(ql2xloginretrycount, 59MODULE_PARM_DESC(ql2xloginretrycount,
60 "Specify an alternate value for the NVRAM login retry count."); 60 "Specify an alternate value for the NVRAM login retry count.");
61 61
62int ql2xallocfwdump = 1; 62int ql2xallocfwdump = 1;
63module_param(ql2xallocfwdump, int, S_IRUGO|S_IRUSR); 63module_param(ql2xallocfwdump, int, S_IRUGO);
64MODULE_PARM_DESC(ql2xallocfwdump, 64MODULE_PARM_DESC(ql2xallocfwdump,
65 "Option to enable allocation of memory for a firmware dump " 65 "Option to enable allocation of memory for a firmware dump "
66 "during HBA initialization. Memory allocation requirements " 66 "during HBA initialization. Memory allocation requirements "
@@ -73,7 +73,7 @@ MODULE_PARM_DESC(ql2xextended_error_logging,
73 "Default is 0 - no logging. 1 - log errors."); 73 "Default is 0 - no logging. 1 - log errors.");
74 74
75int ql2xshiftctondsd = 6; 75int ql2xshiftctondsd = 6;
76module_param(ql2xshiftctondsd, int, S_IRUGO|S_IRUSR); 76module_param(ql2xshiftctondsd, int, S_IRUGO);
77MODULE_PARM_DESC(ql2xshiftctondsd, 77MODULE_PARM_DESC(ql2xshiftctondsd,
78 "Set to control shifting of command type processing " 78 "Set to control shifting of command type processing "
79 "based on total number of SG elements."); 79 "based on total number of SG elements.");
@@ -81,7 +81,7 @@ MODULE_PARM_DESC(ql2xshiftctondsd,
81static void qla2x00_free_device(scsi_qla_host_t *); 81static void qla2x00_free_device(scsi_qla_host_t *);
82 82
83int ql2xfdmienable=1; 83int ql2xfdmienable=1;
84module_param(ql2xfdmienable, int, S_IRUGO|S_IRUSR); 84module_param(ql2xfdmienable, int, S_IRUGO);
85MODULE_PARM_DESC(ql2xfdmienable, 85MODULE_PARM_DESC(ql2xfdmienable,
86 "Enables FDMI registrations. " 86 "Enables FDMI registrations. "
87 "0 - no FDMI. Default is 1 - perform FDMI."); 87 "0 - no FDMI. Default is 1 - perform FDMI.");
@@ -106,27 +106,27 @@ MODULE_PARM_DESC(ql2xenablehba_err_chk,
106 " Default is 0 - Error isolation disabled, 1 - Enable it"); 106 " Default is 0 - Error isolation disabled, 1 - Enable it");
107 107
108int ql2xiidmaenable=1; 108int ql2xiidmaenable=1;
109module_param(ql2xiidmaenable, int, S_IRUGO|S_IRUSR); 109module_param(ql2xiidmaenable, int, S_IRUGO);
110MODULE_PARM_DESC(ql2xiidmaenable, 110MODULE_PARM_DESC(ql2xiidmaenable,
111 "Enables iIDMA settings " 111 "Enables iIDMA settings "
112 "Default is 1 - perform iIDMA. 0 - no iIDMA."); 112 "Default is 1 - perform iIDMA. 0 - no iIDMA.");
113 113
114int ql2xmaxqueues = 1; 114int ql2xmaxqueues = 1;
115module_param(ql2xmaxqueues, int, S_IRUGO|S_IRUSR); 115module_param(ql2xmaxqueues, int, S_IRUGO);
116MODULE_PARM_DESC(ql2xmaxqueues, 116MODULE_PARM_DESC(ql2xmaxqueues,
117 "Enables MQ settings " 117 "Enables MQ settings "
118 "Default is 1 for single queue. Set it to number " 118 "Default is 1 for single queue. Set it to number "
119 "of queues in MQ mode."); 119 "of queues in MQ mode.");
120 120
121int ql2xmultique_tag; 121int ql2xmultique_tag;
122module_param(ql2xmultique_tag, int, S_IRUGO|S_IRUSR); 122module_param(ql2xmultique_tag, int, S_IRUGO);
123MODULE_PARM_DESC(ql2xmultique_tag, 123MODULE_PARM_DESC(ql2xmultique_tag,
124 "Enables CPU affinity settings for the driver " 124 "Enables CPU affinity settings for the driver "
125 "Default is 0 for no affinity of request and response IO. " 125 "Default is 0 for no affinity of request and response IO. "
126 "Set it to 1 to turn on the cpu affinity."); 126 "Set it to 1 to turn on the cpu affinity.");
127 127
128int ql2xfwloadbin; 128int ql2xfwloadbin;
129module_param(ql2xfwloadbin, int, S_IRUGO|S_IRUSR); 129module_param(ql2xfwloadbin, int, S_IRUGO);
130MODULE_PARM_DESC(ql2xfwloadbin, 130MODULE_PARM_DESC(ql2xfwloadbin,
131 "Option to specify location from which to load ISP firmware:\n" 131 "Option to specify location from which to load ISP firmware:\n"
132 " 2 -- load firmware via the request_firmware() (hotplug)\n" 132 " 2 -- load firmware via the request_firmware() (hotplug)\n"
@@ -135,39 +135,32 @@ MODULE_PARM_DESC(ql2xfwloadbin,
135 " 0 -- use default semantics.\n"); 135 " 0 -- use default semantics.\n");
136 136
137int ql2xetsenable; 137int ql2xetsenable;
138module_param(ql2xetsenable, int, S_IRUGO|S_IRUSR); 138module_param(ql2xetsenable, int, S_IRUGO);
139MODULE_PARM_DESC(ql2xetsenable, 139MODULE_PARM_DESC(ql2xetsenable,
140 "Enables firmware ETS burst." 140 "Enables firmware ETS burst."
141 "Default is 0 - skip ETS enablement."); 141 "Default is 0 - skip ETS enablement.");
142 142
143int ql2xdbwr = 1; 143int ql2xdbwr = 1;
144module_param(ql2xdbwr, int, S_IRUGO|S_IRUSR); 144module_param(ql2xdbwr, int, S_IRUGO);
145MODULE_PARM_DESC(ql2xdbwr, 145MODULE_PARM_DESC(ql2xdbwr,
146 "Option to specify scheme for request queue posting\n" 146 "Option to specify scheme for request queue posting\n"
147 " 0 -- Regular doorbell.\n" 147 " 0 -- Regular doorbell.\n"
148 " 1 -- CAMRAM doorbell (faster).\n"); 148 " 1 -- CAMRAM doorbell (faster).\n");
149 149
150int ql2xdontresethba;
151module_param(ql2xdontresethba, int, S_IRUGO|S_IRUSR);
152MODULE_PARM_DESC(ql2xdontresethba,
153 "Option to specify reset behaviour\n"
154 " 0 (Default) -- Reset on failure.\n"
155 " 1 -- Do not reset on failure.\n");
156
157int ql2xtargetreset = 1; 150int ql2xtargetreset = 1;
158module_param(ql2xtargetreset, int, S_IRUGO|S_IRUSR); 151module_param(ql2xtargetreset, int, S_IRUGO);
159MODULE_PARM_DESC(ql2xtargetreset, 152MODULE_PARM_DESC(ql2xtargetreset,
160 "Enable target reset." 153 "Enable target reset."
161 "Default is 1 - use hw defaults."); 154 "Default is 1 - use hw defaults.");
162 155
163int ql2xgffidenable; 156int ql2xgffidenable;
164module_param(ql2xgffidenable, int, S_IRUGO|S_IRUSR); 157module_param(ql2xgffidenable, int, S_IRUGO);
165MODULE_PARM_DESC(ql2xgffidenable, 158MODULE_PARM_DESC(ql2xgffidenable,
166 "Enables GFF_ID checks of port type. " 159 "Enables GFF_ID checks of port type. "
167 "Default is 0 - Do not use GFF_ID information."); 160 "Default is 0 - Do not use GFF_ID information.");
168 161
169int ql2xasynctmfenable; 162int ql2xasynctmfenable;
170module_param(ql2xasynctmfenable, int, S_IRUGO|S_IRUSR); 163module_param(ql2xasynctmfenable, int, S_IRUGO);
171MODULE_PARM_DESC(ql2xasynctmfenable, 164MODULE_PARM_DESC(ql2xasynctmfenable,
172 "Enables issue of TM IOCBs asynchronously via IOCB mechanism" 165 "Enables issue of TM IOCBs asynchronously via IOCB mechanism"
173 "Default is 0 - Issue TM IOCBs via mailbox mechanism."); 166 "Default is 0 - Issue TM IOCBs via mailbox mechanism.");
@@ -2371,7 +2364,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
2371 list_for_each_entry(vha, &ha->vp_list, list) { 2364 list_for_each_entry(vha, &ha->vp_list, list) {
2372 atomic_inc(&vha->vref_count); 2365 atomic_inc(&vha->vref_count);
2373 2366
2374 if (vha && vha->fc_vport) { 2367 if (vha->fc_vport) {
2375 spin_unlock_irqrestore(&ha->vport_slock, flags); 2368 spin_unlock_irqrestore(&ha->vport_slock, flags);
2376 2369
2377 fc_vport_terminate(vha->fc_vport); 2370 fc_vport_terminate(vha->fc_vport);
@@ -3386,6 +3379,21 @@ qla2x00_do_dpc(void *data)
3386 clear_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags); 3379 clear_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
3387 } 3380 }
3388 3381
3382 if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
3383 DEBUG(printk(KERN_INFO "scsi(%ld): dpc: sched "
3384 "qla2x00_quiesce_needed ha = %p\n",
3385 base_vha->host_no, ha));
3386 qla82xx_device_state_handler(base_vha);
3387 clear_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags);
3388 if (!ha->flags.quiesce_owner) {
3389 qla2x00_perform_loop_resync(base_vha);
3390
3391 qla82xx_idc_lock(ha);
3392 qla82xx_clear_qsnt_ready(base_vha);
3393 qla82xx_idc_unlock(ha);
3394 }
3395 }
3396
3389 if (test_and_clear_bit(RESET_MARKER_NEEDED, 3397 if (test_and_clear_bit(RESET_MARKER_NEEDED,
3390 &base_vha->dpc_flags) && 3398 &base_vha->dpc_flags) &&
3391 (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) { 3399 (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) {
@@ -3589,13 +3597,16 @@ qla2x00_timer(scsi_qla_host_t *vha)
3589 return; 3597 return;
3590 } 3598 }
3591 3599
3592 if (IS_QLA82XX(ha))
3593 qla82xx_watchdog(vha);
3594
3595 /* Hardware read to raise pending EEH errors during mailbox waits. */ 3600 /* Hardware read to raise pending EEH errors during mailbox waits. */
3596 if (!pci_channel_offline(ha->pdev)) 3601 if (!pci_channel_offline(ha->pdev))
3597 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); 3602 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
3598 3603
3604 if (IS_QLA82XX(ha)) {
3605 if (test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags))
3606 start_dpc++;
3607 qla82xx_watchdog(vha);
3608 }
3609
3599 /* Loop down handler. */ 3610 /* Loop down handler. */
3600 if (atomic_read(&vha->loop_down_timer) > 0 && 3611 if (atomic_read(&vha->loop_down_timer) > 0 &&
3601 !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) 3612 !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 76de9574b385..22070621206c 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -669,6 +669,13 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
669 def = 1; 669 def = 1;
670 else if (IS_QLA81XX(ha)) 670 else if (IS_QLA81XX(ha))
671 def = 2; 671 def = 2;
672
673 /* Assign FCP prio region since older adapters may not have FLT, or
674 FCP prio region in it's FLT.
675 */
676 ha->flt_region_fcp_prio = ha->flags.port0 ?
677 fcp_prio_cfg0[def] : fcp_prio_cfg1[def];
678
672 ha->flt_region_flt = flt_addr; 679 ha->flt_region_flt = flt_addr;
673 wptr = (uint16_t *)req->ring; 680 wptr = (uint16_t *)req->ring;
674 flt = (struct qla_flt_header *)req->ring; 681 flt = (struct qla_flt_header *)req->ring;
@@ -696,10 +703,6 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
696 goto no_flash_data; 703 goto no_flash_data;
697 } 704 }
698 705
699 /* Assign FCP prio region since older FLT's may not have it */
700 ha->flt_region_fcp_prio = ha->flags.port0 ?
701 fcp_prio_cfg0[def] : fcp_prio_cfg1[def];
702
703 loc = locations[1]; 706 loc = locations[1];
704 cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region); 707 cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region);
705 for ( ; cnt; cnt--, region++) { 708 for ( ; cnt; cnt--, region++) {
diff --git a/drivers/scsi/qla4xxx/ql4_dbg.c b/drivers/scsi/qla4xxx/ql4_dbg.c
index edcf048215dd..af62c3cf8752 100644
--- a/drivers/scsi/qla4xxx/ql4_dbg.c
+++ b/drivers/scsi/qla4xxx/ql4_dbg.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation 3 * Copyright (c) 2003-2010 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla4xxx/ql4_dbg.h b/drivers/scsi/qla4xxx/ql4_dbg.h
index d861c3b411c8..abd83602cdda 100644
--- a/drivers/scsi/qla4xxx/ql4_dbg.h
+++ b/drivers/scsi/qla4xxx/ql4_dbg.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation 3 * Copyright (c) 2003-2010 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 0f3bfc3da5cf..2fc0045b1a52 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation 3 * Copyright (c) 2003-2010 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
@@ -175,7 +175,7 @@
175struct srb { 175struct srb {
176 struct list_head list; /* (8) */ 176 struct list_head list; /* (8) */
177 struct scsi_qla_host *ha; /* HA the SP is queued on */ 177 struct scsi_qla_host *ha; /* HA the SP is queued on */
178 struct ddb_entry *ddb; 178 struct ddb_entry *ddb;
179 uint16_t flags; /* (1) Status flags. */ 179 uint16_t flags; /* (1) Status flags. */
180 180
181#define SRB_DMA_VALID BIT_3 /* DMA Buffer mapped. */ 181#define SRB_DMA_VALID BIT_3 /* DMA Buffer mapped. */
@@ -191,7 +191,6 @@ struct srb {
191 struct scsi_cmnd *cmd; /* (4) SCSI command block */ 191 struct scsi_cmnd *cmd; /* (4) SCSI command block */
192 dma_addr_t dma_handle; /* (4) for unmap of single transfers */ 192 dma_addr_t dma_handle; /* (4) for unmap of single transfers */
193 struct kref srb_ref; /* reference count for this srb */ 193 struct kref srb_ref; /* reference count for this srb */
194 uint32_t fw_ddb_index;
195 uint8_t err_id; /* error id */ 194 uint8_t err_id; /* error id */
196#define SRB_ERR_PORT 1 /* Request failed because "port down" */ 195#define SRB_ERR_PORT 1 /* Request failed because "port down" */
197#define SRB_ERR_LOOP 2 /* Request failed because "loop down" */ 196#define SRB_ERR_LOOP 2 /* Request failed because "loop down" */
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 5e757d7fff7d..c1985792f034 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation 3 * Copyright (c) 2003-2010 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 6575a47501e5..8fad99b7eef4 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation 3 * Copyright (c) 2003-2010 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index dc01fa3da5d1..1629c48c35ef 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation 3 * Copyright (c) 2003-2010 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla4xxx/ql4_inline.h b/drivers/scsi/qla4xxx/ql4_inline.h
index 9471ac755000..62f90bdec5d5 100644
--- a/drivers/scsi/qla4xxx/ql4_inline.h
+++ b/drivers/scsi/qla4xxx/ql4_inline.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation 3 * Copyright (c) 2003-2010 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index 5ae49fd87846..75fcd82a8fca 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation 3 * Copyright (c) 2003-2010 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 7c33fd5943d5..6ffbe9727dff 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation 3 * Copyright (c) 2003-2010 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
@@ -554,7 +554,8 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
554 /* mbox_sts[2] = Old ACB state 554 /* mbox_sts[2] = Old ACB state
555 * mbox_sts[3] = new ACB state */ 555 * mbox_sts[3] = new ACB state */
556 if ((mbox_sts[3] == ACB_STATE_VALID) && 556 if ((mbox_sts[3] == ACB_STATE_VALID) &&
557 (mbox_sts[2] == ACB_STATE_TENTATIVE)) 557 ((mbox_sts[2] == ACB_STATE_TENTATIVE) ||
558 (mbox_sts[2] == ACB_STATE_ACQUIRING)))
558 set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags); 559 set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
559 else if ((mbox_sts[3] == ACB_STATE_ACQUIRING) && 560 else if ((mbox_sts[3] == ACB_STATE_ACQUIRING) &&
560 (mbox_sts[2] == ACB_STATE_VALID)) 561 (mbox_sts[2] == ACB_STATE_VALID))
@@ -1077,7 +1078,7 @@ try_msi:
1077 ret = pci_enable_msi(ha->pdev); 1078 ret = pci_enable_msi(ha->pdev);
1078 if (!ret) { 1079 if (!ret) {
1079 ret = request_irq(ha->pdev->irq, qla4_8xxx_msi_handler, 1080 ret = request_irq(ha->pdev->irq, qla4_8xxx_msi_handler,
1080 IRQF_DISABLED|IRQF_SHARED, DRIVER_NAME, ha); 1081 0, DRIVER_NAME, ha);
1081 if (!ret) { 1082 if (!ret) {
1082 DEBUG2(ql4_printk(KERN_INFO, ha, "MSI: Enabled.\n")); 1083 DEBUG2(ql4_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
1083 set_bit(AF_MSI_ENABLED, &ha->flags); 1084 set_bit(AF_MSI_ENABLED, &ha->flags);
@@ -1095,7 +1096,7 @@ try_msi:
1095try_intx: 1096try_intx:
1096 /* Trying INTx */ 1097 /* Trying INTx */
1097 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, 1098 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
1098 IRQF_DISABLED|IRQF_SHARED, DRIVER_NAME, ha); 1099 IRQF_SHARED, DRIVER_NAME, ha);
1099 if (!ret) { 1100 if (!ret) {
1100 DEBUG2(ql4_printk(KERN_INFO, ha, "INTx: Enabled.\n")); 1101 DEBUG2(ql4_printk(KERN_INFO, ha, "INTx: Enabled.\n"));
1101 set_bit(AF_INTx_ENABLED, &ha->flags); 1102 set_bit(AF_INTx_ENABLED, &ha->flags);
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 2d2f9c879bfd..f65626aec7c1 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation 3 * Copyright (c) 2003-2010 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
@@ -81,23 +81,7 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
81 */ 81 */
82 spin_lock_irqsave(&ha->hardware_lock, flags); 82 spin_lock_irqsave(&ha->hardware_lock, flags);
83 83
84 if (is_qla8022(ha)) { 84 if (!is_qla8022(ha)) {
85 intr_status = readl(&ha->qla4_8xxx_reg->host_int);
86 if (intr_status & ISRX_82XX_RISC_INT) {
87 /* Service existing interrupt */
88 DEBUG2(printk("scsi%ld: %s: "
89 "servicing existing interrupt\n",
90 ha->host_no, __func__));
91 intr_status = readl(&ha->qla4_8xxx_reg->host_status);
92 ha->isp_ops->interrupt_service_routine(ha, intr_status);
93 clear_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
94 if (test_bit(AF_INTERRUPTS_ON, &ha->flags) &&
95 test_bit(AF_INTx_ENABLED, &ha->flags))
96 qla4_8xxx_wr_32(ha,
97 ha->nx_legacy_intr.tgt_mask_reg,
98 0xfbff);
99 }
100 } else {
101 intr_status = readl(&ha->reg->ctrl_status); 85 intr_status = readl(&ha->reg->ctrl_status);
102 if (intr_status & CSR_SCSI_PROCESSOR_INTR) { 86 if (intr_status & CSR_SCSI_PROCESSOR_INTR) {
103 /* Service existing interrupt */ 87 /* Service existing interrupt */
@@ -934,7 +918,7 @@ int qla4xxx_abort_task(struct scsi_qla_host *ha, struct srb *srb)
934 return status; 918 return status;
935 919
936 mbox_cmd[0] = MBOX_CMD_ABORT_TASK; 920 mbox_cmd[0] = MBOX_CMD_ABORT_TASK;
937 mbox_cmd[1] = srb->fw_ddb_index; 921 mbox_cmd[1] = srb->ddb->fw_ddb_index;
938 mbox_cmd[2] = index; 922 mbox_cmd[2] = index;
939 /* Immediate Command Enable */ 923 /* Immediate Command Enable */
940 mbox_cmd[5] = 0x01; 924 mbox_cmd[5] = 0x01;
diff --git a/drivers/scsi/qla4xxx/ql4_nvram.c b/drivers/scsi/qla4xxx/ql4_nvram.c
index f0d0fbf88aa2..b4b859b2d47e 100644
--- a/drivers/scsi/qla4xxx/ql4_nvram.c
+++ b/drivers/scsi/qla4xxx/ql4_nvram.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation 3 * Copyright (c) 2003-2010 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla4xxx/ql4_nvram.h b/drivers/scsi/qla4xxx/ql4_nvram.h
index 7a8fc66a760d..b3831bd29479 100644
--- a/drivers/scsi/qla4xxx/ql4_nvram.h
+++ b/drivers/scsi/qla4xxx/ql4_nvram.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation 3 * Copyright (c) 2003-2010 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 474b10d71364..3d5ef2df4134 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2009 QLogic Corporation 3 * Copyright (c) 2003-2010 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
@@ -942,12 +942,55 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
942 942
943 /* Halt all the indiviual PEGs and other blocks of the ISP */ 943 /* Halt all the indiviual PEGs and other blocks of the ISP */
944 qla4_8xxx_rom_lock(ha); 944 qla4_8xxx_rom_lock(ha);
945
946 /* mask all niu interrupts */
947 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff);
948 /* disable xge rx/tx */
949 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00);
950 /* disable xg1 rx/tx */
951 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00);
952
953 /* halt sre */
954 val = qla4_8xxx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000);
955 qla4_8xxx_wr_32(ha, QLA82XX_CRB_SRE + 0x1000, val & (~(0x1)));
956
957 /* halt epg */
958 qla4_8xxx_wr_32(ha, QLA82XX_CRB_EPG + 0x1300, 0x1);
959
960 /* halt timers */
961 qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x0, 0x0);
962 qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x8, 0x0);
963 qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0);
964 qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0);
965 qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0);
966
967 /* halt pegs */
968 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1);
969 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c, 1);
970 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1);
971 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1);
972 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1);
973
974 /* big hammer */
975 msleep(1000);
945 if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) 976 if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
946 /* don't reset CAM block on reset */ 977 /* don't reset CAM block on reset */
947 qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff); 978 qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff);
948 else 979 else
949 qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff); 980 qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff);
950 981
982 /* reset ms */
983 val = qla4_8xxx_rd_32(ha, QLA82XX_CRB_QDR_NET + 0xe4);
984 val |= (1 << 1);
985 qla4_8xxx_wr_32(ha, QLA82XX_CRB_QDR_NET + 0xe4, val);
986
987 msleep(20);
988 /* unreset ms */
989 val = qla4_8xxx_rd_32(ha, QLA82XX_CRB_QDR_NET + 0xe4);
990 val &= ~(1 << 1);
991 qla4_8xxx_wr_32(ha, QLA82XX_CRB_QDR_NET + 0xe4, val);
992 msleep(20);
993
951 qla4_8xxx_rom_unlock(ha); 994 qla4_8xxx_rom_unlock(ha);
952 995
953 /* Read the signature value from the flash. 996 /* Read the signature value from the flash.
@@ -1084,14 +1127,14 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
1084static int 1127static int
1085qla4_8xxx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start) 1128qla4_8xxx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start)
1086{ 1129{
1087 int i; 1130 int i, rval = 0;
1088 long size = 0; 1131 long size = 0;
1089 long flashaddr, memaddr; 1132 long flashaddr, memaddr;
1090 u64 data; 1133 u64 data;
1091 u32 high, low; 1134 u32 high, low;
1092 1135
1093 flashaddr = memaddr = ha->hw.flt_region_bootload; 1136 flashaddr = memaddr = ha->hw.flt_region_bootload;
1094 size = (image_start - flashaddr)/8; 1137 size = (image_start - flashaddr) / 8;
1095 1138
1096 DEBUG2(printk("scsi%ld: %s: bootldr=0x%lx, fw_image=0x%x\n", 1139 DEBUG2(printk("scsi%ld: %s: bootldr=0x%lx, fw_image=0x%x\n",
1097 ha->host_no, __func__, flashaddr, image_start)); 1140 ha->host_no, __func__, flashaddr, image_start));
@@ -1100,14 +1143,18 @@ qla4_8xxx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start)
1100 if ((qla4_8xxx_rom_fast_read(ha, flashaddr, (int *)&low)) || 1143 if ((qla4_8xxx_rom_fast_read(ha, flashaddr, (int *)&low)) ||
1101 (qla4_8xxx_rom_fast_read(ha, flashaddr + 4, 1144 (qla4_8xxx_rom_fast_read(ha, flashaddr + 4,
1102 (int *)&high))) { 1145 (int *)&high))) {
1103 return -1; 1146 rval = -1;
1147 goto exit_load_from_flash;
1104 } 1148 }
1105 data = ((u64)high << 32) | low ; 1149 data = ((u64)high << 32) | low ;
1106 qla4_8xxx_pci_mem_write_2M(ha, memaddr, &data, 8); 1150 rval = qla4_8xxx_pci_mem_write_2M(ha, memaddr, &data, 8);
1151 if (rval)
1152 goto exit_load_from_flash;
1153
1107 flashaddr += 8; 1154 flashaddr += 8;
1108 memaddr += 8; 1155 memaddr += 8;
1109 1156
1110 if (i%0x1000 == 0) 1157 if (i % 0x1000 == 0)
1111 msleep(1); 1158 msleep(1);
1112 1159
1113 } 1160 }
@@ -1119,7 +1166,8 @@ qla4_8xxx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start)
1119 qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e); 1166 qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
1120 read_unlock(&ha->hw_lock); 1167 read_unlock(&ha->hw_lock);
1121 1168
1122 return 0; 1169exit_load_from_flash:
1170 return rval;
1123} 1171}
1124 1172
1125static int qla4_8xxx_load_fw(struct scsi_qla_host *ha, uint32_t image_start) 1173static int qla4_8xxx_load_fw(struct scsi_qla_host *ha, uint32_t image_start)
diff --git a/drivers/scsi/qla4xxx/ql4_nx.h b/drivers/scsi/qla4xxx/ql4_nx.h
index ff689bf53007..35376a1c3f1b 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.h
+++ b/drivers/scsi/qla4xxx/ql4_nx.h
@@ -1,8 +1,8 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation 3 * Copyright (c) 2003-2010 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
7#ifndef __QLA_NX_H 7#ifndef __QLA_NX_H
8#define __QLA_NX_H 8#define __QLA_NX_H
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 0d48fb4d1044..3fc1d256636f 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation 3 * Copyright (c) 2003-2010 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
@@ -706,18 +706,22 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
706 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 706 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
707 707
708 /* don't poll if reset is going on */ 708 /* don't poll if reset is going on */
709 if (!test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags)) { 709 if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
710 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
711 test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags))) {
710 if (dev_state == QLA82XX_DEV_NEED_RESET && 712 if (dev_state == QLA82XX_DEV_NEED_RESET &&
711 !test_bit(DPC_RESET_HA, &ha->dpc_flags)) { 713 !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
712 printk("scsi%ld: %s: HW State: NEED RESET!\n", 714 if (!ql4xdontresethba) {
713 ha->host_no, __func__); 715 ql4_printk(KERN_INFO, ha, "%s: HW State: "
714 set_bit(DPC_RESET_HA, &ha->dpc_flags); 716 "NEED RESET!\n", __func__);
715 qla4xxx_wake_dpc(ha); 717 set_bit(DPC_RESET_HA, &ha->dpc_flags);
716 qla4xxx_mailbox_premature_completion(ha); 718 qla4xxx_wake_dpc(ha);
719 qla4xxx_mailbox_premature_completion(ha);
720 }
717 } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT && 721 } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
718 !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) { 722 !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
719 printk("scsi%ld: %s: HW State: NEED QUIES!\n", 723 ql4_printk(KERN_INFO, ha, "%s: HW State: NEED QUIES!\n",
720 ha->host_no, __func__); 724 __func__);
721 set_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags); 725 set_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags);
722 qla4xxx_wake_dpc(ha); 726 qla4xxx_wake_dpc(ha);
723 } else { 727 } else {
@@ -1721,6 +1725,14 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
1721 if (!test_bit(AF_ONLINE, &ha->flags)) { 1725 if (!test_bit(AF_ONLINE, &ha->flags)) {
1722 ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n"); 1726 ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n");
1723 1727
1728 if (is_qla8022(ha) && ql4xdontresethba) {
1729 /* Put the device in failed state. */
1730 DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n"));
1731 qla4_8xxx_idc_lock(ha);
1732 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
1733 QLA82XX_DEV_FAILED);
1734 qla4_8xxx_idc_unlock(ha);
1735 }
1724 ret = -ENODEV; 1736 ret = -ENODEV;
1725 goto probe_failed; 1737 goto probe_failed;
1726 } 1738 }
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 9bfacf4ed137..8475b308e01b 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -1,8 +1,8 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation 3 * Copyright (c) 2003-2010 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
7 7
8#define QLA4XXX_DRIVER_VERSION "5.02.00-k4" 8#define QLA4XXX_DRIVER_VERSION "5.02.00-k5"
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 2f1f9b079b10..7b310934efed 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1805,6 +1805,7 @@ static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba,
1805 devip->sense_buff[5] = (ret >> 8) & 0xff; 1805 devip->sense_buff[5] = (ret >> 8) & 0xff;
1806 devip->sense_buff[6] = ret & 0xff; 1806 devip->sense_buff[6] = ret & 0xff;
1807 } 1807 }
1808 scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
1808 return check_condition_result; 1809 return check_condition_result;
1809 } 1810 }
1810 1811
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 30ac116186f5..45c75649b9e0 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1124,51 +1124,40 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
1124 struct list_head *work_q, 1124 struct list_head *work_q,
1125 struct list_head *done_q) 1125 struct list_head *done_q)
1126{ 1126{
1127 struct scsi_cmnd *scmd, *tgtr_scmd, *next; 1127 LIST_HEAD(tmp_list);
1128 unsigned int id = 0;
1129 int rtn;
1130 1128
1131 do { 1129 list_splice_init(work_q, &tmp_list);
1132 tgtr_scmd = NULL; 1130
1133 list_for_each_entry(scmd, work_q, eh_entry) { 1131 while (!list_empty(&tmp_list)) {
1134 if (id == scmd_id(scmd)) { 1132 struct scsi_cmnd *next, *scmd;
1135 tgtr_scmd = scmd; 1133 int rtn;
1136 break; 1134 unsigned int id;
1137 } 1135
1138 } 1136 scmd = list_entry(tmp_list.next, struct scsi_cmnd, eh_entry);
1139 if (!tgtr_scmd) { 1137 id = scmd_id(scmd);
1140 /* not one exactly equal; find the next highest */
1141 list_for_each_entry(scmd, work_q, eh_entry) {
1142 if (scmd_id(scmd) > id &&
1143 (!tgtr_scmd ||
1144 scmd_id(tgtr_scmd) > scmd_id(scmd)))
1145 tgtr_scmd = scmd;
1146 }
1147 }
1148 if (!tgtr_scmd)
1149 /* no more commands, that's it */
1150 break;
1151 1138
1152 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending target reset " 1139 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending target reset "
1153 "to target %d\n", 1140 "to target %d\n",
1154 current->comm, id)); 1141 current->comm, id));
1155 rtn = scsi_try_target_reset(tgtr_scmd); 1142 rtn = scsi_try_target_reset(scmd);
1156 if (rtn == SUCCESS || rtn == FAST_IO_FAIL) { 1143 if (rtn != SUCCESS && rtn != FAST_IO_FAIL)
1157 list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
1158 if (id == scmd_id(scmd))
1159 if (!scsi_device_online(scmd->device) ||
1160 rtn == FAST_IO_FAIL ||
1161 !scsi_eh_tur(tgtr_scmd))
1162 scsi_eh_finish_cmd(scmd,
1163 done_q);
1164 }
1165 } else
1166 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Target reset" 1144 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Target reset"
1167 " failed target: " 1145 " failed target: "
1168 "%d\n", 1146 "%d\n",
1169 current->comm, id)); 1147 current->comm, id));
1170 id++; 1148 list_for_each_entry_safe(scmd, next, &tmp_list, eh_entry) {
1171 } while(id != 0); 1149 if (scmd_id(scmd) != id)
1150 continue;
1151
1152 if ((rtn == SUCCESS || rtn == FAST_IO_FAIL)
1153 && (!scsi_device_online(scmd->device) ||
1154 rtn == FAST_IO_FAIL || !scsi_eh_tur(scmd)))
1155 scsi_eh_finish_cmd(scmd, done_q);
1156 else
1157 /* push back on work queue for further processing */
1158 list_move(&scmd->eh_entry, work_q);
1159 }
1160 }
1172 1161
1173 return list_empty(work_q); 1162 return list_empty(work_q);
1174} 1163}
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 4a3842212c50..501f67bef719 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1278,11 +1278,10 @@ static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
1278 } 1278 }
1279 1279
1280 if (scsi_target_is_busy(starget)) { 1280 if (scsi_target_is_busy(starget)) {
1281 if (list_empty(&sdev->starved_entry)) { 1281 if (list_empty(&sdev->starved_entry))
1282 list_add_tail(&sdev->starved_entry, 1282 list_add_tail(&sdev->starved_entry,
1283 &shost->starved_list); 1283 &shost->starved_list);
1284 return 0; 1284 return 0;
1285 }
1286 } 1285 }
1287 1286
1288 /* We're OK to process the command, so we can't be starved */ 1287 /* We're OK to process the command, so we can't be starved */
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 76ee2e784f75..4c68d36f9ac2 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -993,16 +993,14 @@ static int __remove_child (struct device * dev, void * data)
993 */ 993 */
994void scsi_remove_target(struct device *dev) 994void scsi_remove_target(struct device *dev)
995{ 995{
996 struct device *rdev;
997
998 if (scsi_is_target_device(dev)) { 996 if (scsi_is_target_device(dev)) {
999 __scsi_remove_target(to_scsi_target(dev)); 997 __scsi_remove_target(to_scsi_target(dev));
1000 return; 998 return;
1001 } 999 }
1002 1000
1003 rdev = get_device(dev); 1001 get_device(dev);
1004 device_for_each_child(dev, NULL, __remove_child); 1002 device_for_each_child(dev, NULL, __remove_child);
1005 put_device(rdev); 1003 put_device(dev);
1006} 1004}
1007EXPORT_SYMBOL(scsi_remove_target); 1005EXPORT_SYMBOL(scsi_remove_target);
1008 1006
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 332387a6bc25..f905ecb5704d 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -2200,3 +2200,4 @@ MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>, "
2200MODULE_DESCRIPTION("iSCSI Transport Interface"); 2200MODULE_DESCRIPTION("iSCSI Transport Interface");
2201MODULE_LICENSE("GPL"); 2201MODULE_LICENSE("GPL");
2202MODULE_VERSION(ISCSI_TRANSPORT_VERSION); 2202MODULE_VERSION(ISCSI_TRANSPORT_VERSION);
2203MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_ISCSI);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 956496182c80..365024b0c407 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -583,7 +583,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
583 * quietly refuse to do anything to a changed disc until 583 * quietly refuse to do anything to a changed disc until
584 * the changed bit has been reset 584 * the changed bit has been reset
585 */ 585 */
586 /* printk("SCSI disk has been changed. Prohibiting further I/O.\n"); */ 586 /* printk("SCSI disk has been changed or is not present. Prohibiting further I/O.\n"); */
587 goto out; 587 goto out;
588 } 588 }
589 589
@@ -1023,7 +1023,6 @@ static int sd_media_changed(struct gendisk *disk)
1023 */ 1023 */
1024 if (!scsi_device_online(sdp)) { 1024 if (!scsi_device_online(sdp)) {
1025 set_media_not_present(sdkp); 1025 set_media_not_present(sdkp);
1026 retval = 1;
1027 goto out; 1026 goto out;
1028 } 1027 }
1029 1028
@@ -1054,7 +1053,6 @@ static int sd_media_changed(struct gendisk *disk)
1054 /* 0x3a is medium not present */ 1053 /* 0x3a is medium not present */
1055 sshdr->asc == 0x3a)) { 1054 sshdr->asc == 0x3a)) {
1056 set_media_not_present(sdkp); 1055 set_media_not_present(sdkp);
1057 retval = 1;
1058 goto out; 1056 goto out;
1059 } 1057 }
1060 1058
@@ -1065,12 +1063,27 @@ static int sd_media_changed(struct gendisk *disk)
1065 */ 1063 */
1066 sdkp->media_present = 1; 1064 sdkp->media_present = 1;
1067 1065
1068 retval = sdp->changed;
1069 sdp->changed = 0;
1070out: 1066out:
1071 if (retval != sdkp->previous_state) 1067 /*
1068 * Report a media change under the following conditions:
1069 *
1070 * Medium is present now and wasn't present before.
1071 * Medium wasn't present before and is present now.
1072 * Medium was present at all times, but it changed while
1073 * we weren't looking (sdp->changed is set).
1074 *
1075 * If there was no medium before and there is no medium now then
1076 * don't report a change, even if a medium was inserted and removed
1077 * while we weren't looking.
1078 */
1079 retval = (sdkp->media_present != sdkp->previous_state ||
1080 (sdkp->media_present && sdp->changed));
1081 if (retval)
1072 sdev_evt_send_simple(sdp, SDEV_EVT_MEDIA_CHANGE, GFP_KERNEL); 1082 sdev_evt_send_simple(sdp, SDEV_EVT_MEDIA_CHANGE, GFP_KERNEL);
1073 sdkp->previous_state = retval; 1083 sdkp->previous_state = sdkp->media_present;
1084
1085 /* sdp->changed indicates medium was changed or is not present */
1086 sdp->changed = !sdkp->media_present;
1074 kfree(sshdr); 1087 kfree(sshdr);
1075 return retval; 1088 return retval;
1076} 1089}
@@ -1175,6 +1188,12 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
1175 u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512); 1188 u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512);
1176 u64 bad_lba; 1189 u64 bad_lba;
1177 int info_valid; 1190 int info_valid;
1191 /*
1192 * resid is optional but mostly filled in. When it's unused,
1193 * its value is zero, so we assume the whole buffer transferred
1194 */
1195 unsigned int transferred = scsi_bufflen(scmd) - scsi_get_resid(scmd);
1196 unsigned int good_bytes;
1178 1197
1179 if (scmd->request->cmd_type != REQ_TYPE_FS) 1198 if (scmd->request->cmd_type != REQ_TYPE_FS)
1180 return 0; 1199 return 0;
@@ -1208,7 +1227,8 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
1208 /* This computation should always be done in terms of 1227 /* This computation should always be done in terms of
1209 * the resolution of the device's medium. 1228 * the resolution of the device's medium.
1210 */ 1229 */
1211 return (bad_lba - start_lba) * scmd->device->sector_size; 1230 good_bytes = (bad_lba - start_lba) * scmd->device->sector_size;
1231 return min(good_bytes, transferred);
1212} 1232}
1213 1233
1214/** 1234/**
@@ -1902,10 +1922,14 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
1902 int old_rcd = sdkp->RCD; 1922 int old_rcd = sdkp->RCD;
1903 int old_dpofua = sdkp->DPOFUA; 1923 int old_dpofua = sdkp->DPOFUA;
1904 1924
1905 if (sdp->skip_ms_page_8) 1925 if (sdp->skip_ms_page_8) {
1906 goto defaults; 1926 if (sdp->type == TYPE_RBC)
1907 1927 goto defaults;
1908 if (sdp->type == TYPE_RBC) { 1928 else {
1929 modepage = 0x3F;
1930 dbd = 0;
1931 }
1932 } else if (sdp->type == TYPE_RBC) {
1909 modepage = 6; 1933 modepage = 6;
1910 dbd = 8; 1934 dbd = 8;
1911 } else { 1935 } else {
@@ -1933,13 +1957,11 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
1933 */ 1957 */
1934 if (len < 3) 1958 if (len < 3)
1935 goto bad_sense; 1959 goto bad_sense;
1936 if (len > 20) 1960 else if (len > SD_BUF_SIZE) {
1937 len = 20; 1961 sd_printk(KERN_NOTICE, sdkp, "Truncating mode parameter "
1938 1962 "data from %d to %d bytes\n", len, SD_BUF_SIZE);
1939 /* Take headers and block descriptors into account */ 1963 len = SD_BUF_SIZE;
1940 len += data.header_length + data.block_descriptor_length; 1964 }
1941 if (len > SD_BUF_SIZE)
1942 goto bad_sense;
1943 1965
1944 /* Get the data */ 1966 /* Get the data */
1945 res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len, &data, &sshdr); 1967 res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len, &data, &sshdr);
@@ -1947,16 +1969,45 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
1947 if (scsi_status_is_good(res)) { 1969 if (scsi_status_is_good(res)) {
1948 int offset = data.header_length + data.block_descriptor_length; 1970 int offset = data.header_length + data.block_descriptor_length;
1949 1971
1950 if (offset >= SD_BUF_SIZE - 2) { 1972 while (offset < len) {
1951 sd_printk(KERN_ERR, sdkp, "Malformed MODE SENSE response\n"); 1973 u8 page_code = buffer[offset] & 0x3F;
1952 goto defaults; 1974 u8 spf = buffer[offset] & 0x40;
1975
1976 if (page_code == 8 || page_code == 6) {
1977 /* We're interested only in the first 3 bytes.
1978 */
1979 if (len - offset <= 2) {
1980 sd_printk(KERN_ERR, sdkp, "Incomplete "
1981 "mode parameter data\n");
1982 goto defaults;
1983 } else {
1984 modepage = page_code;
1985 goto Page_found;
1986 }
1987 } else {
1988 /* Go to the next page */
1989 if (spf && len - offset > 3)
1990 offset += 4 + (buffer[offset+2] << 8) +
1991 buffer[offset+3];
1992 else if (!spf && len - offset > 1)
1993 offset += 2 + buffer[offset+1];
1994 else {
1995 sd_printk(KERN_ERR, sdkp, "Incomplete "
1996 "mode parameter data\n");
1997 goto defaults;
1998 }
1999 }
1953 } 2000 }
1954 2001
1955 if ((buffer[offset] & 0x3f) != modepage) { 2002 if (modepage == 0x3F) {
2003 sd_printk(KERN_ERR, sdkp, "No Caching mode page "
2004 "present\n");
2005 goto defaults;
2006 } else if ((buffer[offset] & 0x3f) != modepage) {
1956 sd_printk(KERN_ERR, sdkp, "Got wrong page\n"); 2007 sd_printk(KERN_ERR, sdkp, "Got wrong page\n");
1957 goto defaults; 2008 goto defaults;
1958 } 2009 }
1959 2010 Page_found:
1960 if (modepage == 8) { 2011 if (modepage == 8) {
1961 sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0); 2012 sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0);
1962 sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0); 2013 sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0);
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 5b7388f1c835..1871b8ae83ae 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -17,7 +17,7 @@
17 Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support 17 Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
18 */ 18 */
19 19
20static const char *verstr = "20100829"; 20static const char *verstr = "20101219";
21 21
22#include <linux/module.h> 22#include <linux/module.h>
23 23
@@ -3729,9 +3729,11 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm
3729 b_size = PAGE_SIZE << order; 3729 b_size = PAGE_SIZE << order;
3730 } else { 3730 } else {
3731 for (b_size = PAGE_SIZE, order = 0; 3731 for (b_size = PAGE_SIZE, order = 0;
3732 order < ST_MAX_ORDER && b_size < new_size; 3732 order < ST_MAX_ORDER &&
3733 max_segs * (PAGE_SIZE << order) < new_size;
3733 order++, b_size *= 2) 3734 order++, b_size *= 2)
3734 ; /* empty */ 3735 ; /* empty */
3736 STbuffer->reserved_page_order = order;
3735 } 3737 }
3736 if (max_segs * (PAGE_SIZE << order) < new_size) { 3738 if (max_segs * (PAGE_SIZE << order) < new_size) {
3737 if (order == ST_MAX_ORDER) 3739 if (order == ST_MAX_ORDER)
@@ -3758,7 +3760,6 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm
3758 segs++; 3760 segs++;
3759 } 3761 }
3760 STbuffer->b_data = page_address(STbuffer->reserved_pages[0]); 3762 STbuffer->b_data = page_address(STbuffer->reserved_pages[0]);
3761 STbuffer->reserved_page_order = order;
3762 3763
3763 return 1; 3764 return 1;
3764} 3765}
diff --git a/drivers/staging/autofs/root.c b/drivers/staging/autofs/root.c
index 0fdec4befd84..bf0e9755da67 100644
--- a/drivers/staging/autofs/root.c
+++ b/drivers/staging/autofs/root.c
@@ -154,13 +154,16 @@ static int try_to_fill_dentry(struct dentry *dentry, struct super_block *sb, str
154 * yet completely filled in, and revalidate has to delay such 154 * yet completely filled in, and revalidate has to delay such
155 * lookups.. 155 * lookups..
156 */ 156 */
157static int autofs_revalidate(struct dentry * dentry, struct nameidata *nd) 157static int autofs_revalidate(struct dentry *dentry, struct nameidata *nd)
158{ 158{
159 struct inode * dir; 159 struct inode * dir;
160 struct autofs_sb_info *sbi; 160 struct autofs_sb_info *sbi;
161 struct autofs_dir_ent *ent; 161 struct autofs_dir_ent *ent;
162 int res; 162 int res;
163 163
164 if (nd->flags & LOOKUP_RCU)
165 return -ECHILD;
166
164 lock_kernel(); 167 lock_kernel();
165 dir = dentry->d_parent->d_inode; 168 dir = dentry->d_parent->d_inode;
166 sbi = autofs_sbi(dir->i_sb); 169 sbi = autofs_sbi(dir->i_sb);
@@ -237,7 +240,7 @@ static struct dentry *autofs_root_lookup(struct inode *dir, struct dentry *dentr
237 * 240 *
238 * We need to do this before we release the directory semaphore. 241 * We need to do this before we release the directory semaphore.
239 */ 242 */
240 dentry->d_op = &autofs_dentry_operations; 243 d_set_d_op(dentry, &autofs_dentry_operations);
241 dentry->d_flags |= DCACHE_AUTOFS_PENDING; 244 dentry->d_flags |= DCACHE_AUTOFS_PENDING;
242 d_add(dentry, NULL); 245 d_add(dentry, NULL);
243 246
diff --git a/drivers/staging/bcm/InterfaceInit.c b/drivers/staging/bcm/InterfaceInit.c
index 824f9a45007a..e97ad99b1bb4 100644
--- a/drivers/staging/bcm/InterfaceInit.c
+++ b/drivers/staging/bcm/InterfaceInit.c
@@ -277,7 +277,7 @@ usbbcm_device_probe(struct usb_interface *intf, const struct usb_device_id *id)
277 if(psAdapter->bDoSuspend) 277 if(psAdapter->bDoSuspend)
278 { 278 {
279#ifdef CONFIG_PM 279#ifdef CONFIG_PM
280 udev->autosuspend_delay = 0; 280 pm_runtime_set_autosuspend_delay(&udev->dev, 0);
281 intf->needs_remote_wakeup = 1; 281 intf->needs_remote_wakeup = 1;
282#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35) 282#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
283 udev->autosuspend_disabled = 0; 283 udev->autosuspend_disabled = 0;
diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
index 61685ccceda8..cc8d2840f9b6 100644
--- a/drivers/staging/pohmelfs/inode.c
+++ b/drivers/staging/pohmelfs/inode.c
@@ -826,6 +826,13 @@ const struct address_space_operations pohmelfs_aops = {
826 .set_page_dirty = __set_page_dirty_nobuffers, 826 .set_page_dirty = __set_page_dirty_nobuffers,
827}; 827};
828 828
829static void pohmelfs_i_callback(struct rcu_head *head)
830{
831 struct inode *inode = container_of(head, struct inode, i_rcu);
832 INIT_LIST_HEAD(&inode->i_dentry);
833 kmem_cache_free(pohmelfs_inode_cache, POHMELFS_I(inode));
834}
835
829/* 836/*
830 * ->detroy_inode() callback. Deletes inode from the caches 837 * ->detroy_inode() callback. Deletes inode from the caches
831 * and frees private data. 838 * and frees private data.
@@ -842,8 +849,8 @@ static void pohmelfs_destroy_inode(struct inode *inode)
842 849
843 dprintk("%s: pi: %p, inode: %p, ino: %llu.\n", 850 dprintk("%s: pi: %p, inode: %p, ino: %llu.\n",
844 __func__, pi, &pi->vfs_inode, pi->ino); 851 __func__, pi, &pi->vfs_inode, pi->ino);
845 kmem_cache_free(pohmelfs_inode_cache, pi);
846 atomic_long_dec(&psb->total_inodes); 852 atomic_long_dec(&psb->total_inodes);
853 call_rcu(&inode->i_rcu, pohmelfs_i_callback);
847} 854}
848 855
849/* 856/*
diff --git a/drivers/staging/pohmelfs/path_entry.c b/drivers/staging/pohmelfs/path_entry.c
index 8ec83d2dffb7..400a9fc386ad 100644
--- a/drivers/staging/pohmelfs/path_entry.c
+++ b/drivers/staging/pohmelfs/path_entry.c
@@ -83,10 +83,11 @@ out:
83int pohmelfs_path_length(struct pohmelfs_inode *pi) 83int pohmelfs_path_length(struct pohmelfs_inode *pi)
84{ 84{
85 struct dentry *d, *root, *first; 85 struct dentry *d, *root, *first;
86 int len = 1; /* Root slash */ 86 int len;
87 unsigned seq;
87 88
88 first = d = d_find_alias(&pi->vfs_inode); 89 first = d_find_alias(&pi->vfs_inode);
89 if (!d) { 90 if (!first) {
90 dprintk("%s: ino: %llu, mode: %o.\n", __func__, pi->ino, pi->vfs_inode.i_mode); 91 dprintk("%s: ino: %llu, mode: %o.\n", __func__, pi->ino, pi->vfs_inode.i_mode);
91 return -ENOENT; 92 return -ENOENT;
92 } 93 }
@@ -95,7 +96,11 @@ int pohmelfs_path_length(struct pohmelfs_inode *pi)
95 root = dget(current->fs->root.dentry); 96 root = dget(current->fs->root.dentry);
96 spin_unlock(&current->fs->lock); 97 spin_unlock(&current->fs->lock);
97 98
98 spin_lock(&dcache_lock); 99rename_retry:
100 len = 1; /* Root slash */
101 d = first;
102 seq = read_seqbegin(&rename_lock);
103 rcu_read_lock();
99 104
100 if (!IS_ROOT(d) && d_unhashed(d)) 105 if (!IS_ROOT(d) && d_unhashed(d))
101 len += UNHASHED_OBSCURE_STRING_SIZE; /* Obscure " (deleted)" string */ 106 len += UNHASHED_OBSCURE_STRING_SIZE; /* Obscure " (deleted)" string */
@@ -104,7 +109,9 @@ int pohmelfs_path_length(struct pohmelfs_inode *pi)
104 len += d->d_name.len + 1; /* Plus slash */ 109 len += d->d_name.len + 1; /* Plus slash */
105 d = d->d_parent; 110 d = d->d_parent;
106 } 111 }
107 spin_unlock(&dcache_lock); 112 rcu_read_unlock();
113 if (read_seqretry(&rename_lock, seq))
114 goto rename_retry;
108 115
109 dput(root); 116 dput(root);
110 dput(first); 117 dput(first);
diff --git a/drivers/staging/smbfs/cache.c b/drivers/staging/smbfs/cache.c
index dbb98658148b..f2a1323ca827 100644
--- a/drivers/staging/smbfs/cache.c
+++ b/drivers/staging/smbfs/cache.c
@@ -62,7 +62,7 @@ smb_invalidate_dircache_entries(struct dentry *parent)
62 struct list_head *next; 62 struct list_head *next;
63 struct dentry *dentry; 63 struct dentry *dentry;
64 64
65 spin_lock(&dcache_lock); 65 spin_lock(&parent->d_lock);
66 next = parent->d_subdirs.next; 66 next = parent->d_subdirs.next;
67 while (next != &parent->d_subdirs) { 67 while (next != &parent->d_subdirs) {
68 dentry = list_entry(next, struct dentry, d_u.d_child); 68 dentry = list_entry(next, struct dentry, d_u.d_child);
@@ -70,7 +70,7 @@ smb_invalidate_dircache_entries(struct dentry *parent)
70 smb_age_dentry(server, dentry); 70 smb_age_dentry(server, dentry);
71 next = next->next; 71 next = next->next;
72 } 72 }
73 spin_unlock(&dcache_lock); 73 spin_unlock(&parent->d_lock);
74} 74}
75 75
76/* 76/*
@@ -96,13 +96,13 @@ smb_dget_fpos(struct dentry *dentry, struct dentry *parent, unsigned long fpos)
96 } 96 }
97 97
98 /* If a pointer is invalid, we search the dentry. */ 98 /* If a pointer is invalid, we search the dentry. */
99 spin_lock(&dcache_lock); 99 spin_lock(&parent->d_lock);
100 next = parent->d_subdirs.next; 100 next = parent->d_subdirs.next;
101 while (next != &parent->d_subdirs) { 101 while (next != &parent->d_subdirs) {
102 dent = list_entry(next, struct dentry, d_u.d_child); 102 dent = list_entry(next, struct dentry, d_u.d_child);
103 if ((unsigned long)dent->d_fsdata == fpos) { 103 if ((unsigned long)dent->d_fsdata == fpos) {
104 if (dent->d_inode) 104 if (dent->d_inode)
105 dget_locked(dent); 105 dget(dent);
106 else 106 else
107 dent = NULL; 107 dent = NULL;
108 goto out_unlock; 108 goto out_unlock;
@@ -111,7 +111,7 @@ smb_dget_fpos(struct dentry *dentry, struct dentry *parent, unsigned long fpos)
111 } 111 }
112 dent = NULL; 112 dent = NULL;
113out_unlock: 113out_unlock:
114 spin_unlock(&dcache_lock); 114 spin_unlock(&parent->d_lock);
115 return dent; 115 return dent;
116} 116}
117 117
@@ -134,7 +134,7 @@ smb_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
134 qname->hash = full_name_hash(qname->name, qname->len); 134 qname->hash = full_name_hash(qname->name, qname->len);
135 135
136 if (dentry->d_op && dentry->d_op->d_hash) 136 if (dentry->d_op && dentry->d_op->d_hash)
137 if (dentry->d_op->d_hash(dentry, qname) != 0) 137 if (dentry->d_op->d_hash(dentry, inode, qname) != 0)
138 goto end_advance; 138 goto end_advance;
139 139
140 newdent = d_lookup(dentry, qname); 140 newdent = d_lookup(dentry, qname);
@@ -145,8 +145,8 @@ smb_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
145 goto end_advance; 145 goto end_advance;
146 } else { 146 } else {
147 hashed = 1; 147 hashed = 1;
148 memcpy((char *) newdent->d_name.name, qname->name, 148 /* dir i_mutex is locked because we're in readdir */
149 newdent->d_name.len); 149 dentry_update_name_case(newdent, qname);
150 } 150 }
151 151
152 if (!newdent->d_inode) { 152 if (!newdent->d_inode) {
diff --git a/drivers/staging/smbfs/dir.c b/drivers/staging/smbfs/dir.c
index f088ea2f6ac9..dd612f50749f 100644
--- a/drivers/staging/smbfs/dir.c
+++ b/drivers/staging/smbfs/dir.c
@@ -14,6 +14,7 @@
14#include <linux/ctype.h> 14#include <linux/ctype.h>
15#include <linux/net.h> 15#include <linux/net.h>
16#include <linux/sched.h> 16#include <linux/sched.h>
17#include <linux/namei.h>
17 18
18#include "smb_fs.h" 19#include "smb_fs.h"
19#include "smb_mount.h" 20#include "smb_mount.h"
@@ -274,9 +275,13 @@ smb_dir_open(struct inode *dir, struct file *file)
274 * Dentry operations routines 275 * Dentry operations routines
275 */ 276 */
276static int smb_lookup_validate(struct dentry *, struct nameidata *); 277static int smb_lookup_validate(struct dentry *, struct nameidata *);
277static int smb_hash_dentry(struct dentry *, struct qstr *); 278static int smb_hash_dentry(const struct dentry *, const struct inode *,
278static int smb_compare_dentry(struct dentry *, struct qstr *, struct qstr *); 279 struct qstr *);
279static int smb_delete_dentry(struct dentry *); 280static int smb_compare_dentry(const struct dentry *,
281 const struct inode *,
282 const struct dentry *, const struct inode *,
283 unsigned int, const char *, const struct qstr *);
284static int smb_delete_dentry(const struct dentry *);
280 285
281static const struct dentry_operations smbfs_dentry_operations = 286static const struct dentry_operations smbfs_dentry_operations =
282{ 287{
@@ -297,13 +302,20 @@ static const struct dentry_operations smbfs_dentry_operations_case =
297 * This is the callback when the dcache has a lookup hit. 302 * This is the callback when the dcache has a lookup hit.
298 */ 303 */
299static int 304static int
300smb_lookup_validate(struct dentry * dentry, struct nameidata *nd) 305smb_lookup_validate(struct dentry *dentry, struct nameidata *nd)
301{ 306{
302 struct smb_sb_info *server = server_from_dentry(dentry); 307 struct smb_sb_info *server;
303 struct inode * inode = dentry->d_inode; 308 struct inode *inode;
304 unsigned long age = jiffies - dentry->d_time; 309 unsigned long age;
305 int valid; 310 int valid;
306 311
312 if (nd->flags & LOOKUP_RCU)
313 return -ECHILD;
314
315 server = server_from_dentry(dentry);
316 inode = dentry->d_inode;
317 age = jiffies - dentry->d_time;
318
307 /* 319 /*
308 * The default validation is based on dentry age: 320 * The default validation is based on dentry age:
309 * we believe in dentries for a few seconds. (But each 321 * we believe in dentries for a few seconds. (But each
@@ -333,7 +345,8 @@ smb_lookup_validate(struct dentry * dentry, struct nameidata *nd)
333} 345}
334 346
335static int 347static int
336smb_hash_dentry(struct dentry *dir, struct qstr *this) 348smb_hash_dentry(const struct dentry *dir, const struct inode *inode,
349 struct qstr *this)
337{ 350{
338 unsigned long hash; 351 unsigned long hash;
339 int i; 352 int i;
@@ -347,14 +360,17 @@ smb_hash_dentry(struct dentry *dir, struct qstr *this)
347} 360}
348 361
349static int 362static int
350smb_compare_dentry(struct dentry *dir, struct qstr *a, struct qstr *b) 363smb_compare_dentry(const struct dentry *parent,
364 const struct inode *pinode,
365 const struct dentry *dentry, const struct inode *inode,
366 unsigned int len, const char *str, const struct qstr *name)
351{ 367{
352 int i, result = 1; 368 int i, result = 1;
353 369
354 if (a->len != b->len) 370 if (len != name->len)
355 goto out; 371 goto out;
356 for (i=0; i < a->len; i++) { 372 for (i=0; i < len; i++) {
357 if (tolower(a->name[i]) != tolower(b->name[i])) 373 if (tolower(str[i]) != tolower(name->name[i]))
358 goto out; 374 goto out;
359 } 375 }
360 result = 0; 376 result = 0;
@@ -367,7 +383,7 @@ out:
367 * We use this to unhash dentries with bad inodes. 383 * We use this to unhash dentries with bad inodes.
368 */ 384 */
369static int 385static int
370smb_delete_dentry(struct dentry * dentry) 386smb_delete_dentry(const struct dentry *dentry)
371{ 387{
372 if (dentry->d_inode) { 388 if (dentry->d_inode) {
373 if (is_bad_inode(dentry->d_inode)) { 389 if (is_bad_inode(dentry->d_inode)) {
@@ -390,9 +406,9 @@ smb_new_dentry(struct dentry *dentry)
390 struct smb_sb_info *server = server_from_dentry(dentry); 406 struct smb_sb_info *server = server_from_dentry(dentry);
391 407
392 if (server->mnt->flags & SMB_MOUNT_CASE) 408 if (server->mnt->flags & SMB_MOUNT_CASE)
393 dentry->d_op = &smbfs_dentry_operations_case; 409 d_set_d_op(dentry, &smbfs_dentry_operations_case);
394 else 410 else
395 dentry->d_op = &smbfs_dentry_operations; 411 d_set_d_op(dentry, &smbfs_dentry_operations);
396 dentry->d_time = jiffies; 412 dentry->d_time = jiffies;
397} 413}
398 414
@@ -454,9 +470,9 @@ smb_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
454 add_entry: 470 add_entry:
455 server = server_from_dentry(dentry); 471 server = server_from_dentry(dentry);
456 if (server->mnt->flags & SMB_MOUNT_CASE) 472 if (server->mnt->flags & SMB_MOUNT_CASE)
457 dentry->d_op = &smbfs_dentry_operations_case; 473 d_set_d_op(dentry, &smbfs_dentry_operations_case);
458 else 474 else
459 dentry->d_op = &smbfs_dentry_operations; 475 d_set_d_op(dentry, &smbfs_dentry_operations);
460 476
461 d_add(dentry, inode); 477 d_add(dentry, inode);
462 smb_renew_times(dentry); 478 smb_renew_times(dentry);
diff --git a/drivers/staging/smbfs/file.c b/drivers/staging/smbfs/file.c
index 5dcd19c60eb9..31372e7b12de 100644
--- a/drivers/staging/smbfs/file.c
+++ b/drivers/staging/smbfs/file.c
@@ -407,11 +407,14 @@ smb_file_release(struct inode *inode, struct file * file)
407 * privileges, so we need our own check for this. 407 * privileges, so we need our own check for this.
408 */ 408 */
409static int 409static int
410smb_file_permission(struct inode *inode, int mask) 410smb_file_permission(struct inode *inode, int mask, unsigned int flags)
411{ 411{
412 int mode = inode->i_mode; 412 int mode = inode->i_mode;
413 int error = 0; 413 int error = 0;
414 414
415 if (flags & IPERM_FLAG_RCU)
416 return -ECHILD;
417
415 VERBOSE("mode=%x, mask=%x\n", mode, mask); 418 VERBOSE("mode=%x, mask=%x\n", mode, mask);
416 419
417 /* Look at user permissions */ 420 /* Look at user permissions */
diff --git a/drivers/staging/smbfs/inode.c b/drivers/staging/smbfs/inode.c
index 540a984bb516..244319dc9702 100644
--- a/drivers/staging/smbfs/inode.c
+++ b/drivers/staging/smbfs/inode.c
@@ -62,11 +62,18 @@ static struct inode *smb_alloc_inode(struct super_block *sb)
62 return &ei->vfs_inode; 62 return &ei->vfs_inode;
63} 63}
64 64
65static void smb_destroy_inode(struct inode *inode) 65static void smb_i_callback(struct rcu_head *head)
66{ 66{
67 struct inode *inode = container_of(head, struct inode, i_rcu);
68 INIT_LIST_HEAD(&inode->i_dentry);
67 kmem_cache_free(smb_inode_cachep, SMB_I(inode)); 69 kmem_cache_free(smb_inode_cachep, SMB_I(inode));
68} 70}
69 71
72static void smb_destroy_inode(struct inode *inode)
73{
74 call_rcu(&inode->i_rcu, smb_i_callback);
75}
76
70static void init_once(void *foo) 77static void init_once(void *foo)
71{ 78{
72 struct smb_inode_info *ei = (struct smb_inode_info *) foo; 79 struct smb_inode_info *ei = (struct smb_inode_info *) foo;
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 5a7c8f1d76c6..fceea5e4e02f 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -42,17 +42,13 @@ config USB_ARCH_HAS_OHCI
42 default y if ARCH_W90X900 42 default y if ARCH_W90X900
43 default y if ARCH_DAVINCI_DA8XX 43 default y if ARCH_DAVINCI_DA8XX
44 default y if ARCH_CNS3XXX 44 default y if ARCH_CNS3XXX
45 default y if PLAT_SPEAR
45 # PPC: 46 # PPC:
46 default y if STB03xxx 47 default y if STB03xxx
47 default y if PPC_MPC52xx 48 default y if PPC_MPC52xx
48 # MIPS: 49 # MIPS:
49 default y if MIPS_ALCHEMY 50 default y if MIPS_ALCHEMY
50 default y if MACH_JZ4740 51 default y if MACH_JZ4740
51 # SH:
52 default y if CPU_SUBTYPE_SH7720
53 default y if CPU_SUBTYPE_SH7721
54 default y if CPU_SUBTYPE_SH7763
55 default y if CPU_SUBTYPE_SH7786
56 # more: 52 # more:
57 default PCI 53 default PCI
58 54
@@ -68,6 +64,9 @@ config USB_ARCH_HAS_EHCI
68 default y if ARCH_MXC 64 default y if ARCH_MXC
69 default y if ARCH_OMAP3 65 default y if ARCH_OMAP3
70 default y if ARCH_CNS3XXX 66 default y if ARCH_CNS3XXX
67 default y if ARCH_VT8500
68 default y if PLAT_SPEAR
69 default y if ARCH_MSM
71 default PCI 70 default PCI
72 71
73# ARM SA1111 chips have a non-PCI based "OHCI-compatible" USB host interface. 72# ARM SA1111 chips have a non-PCI based "OHCI-compatible" USB host interface.
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index c0e60fbcb048..b9278a1fb9e5 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -27,7 +27,6 @@
27#include <linux/usb.h> 27#include <linux/usb.h>
28#include <linux/usb/quirks.h> 28#include <linux/usb/quirks.h>
29#include <linux/usb/hcd.h> 29#include <linux/usb/hcd.h>
30#include <linux/pm_runtime.h>
31 30
32#include "usb.h" 31#include "usb.h"
33 32
@@ -1262,6 +1261,7 @@ static int usb_resume_both(struct usb_device *udev, pm_message_t msg)
1262 udev->reset_resume); 1261 udev->reset_resume);
1263 } 1262 }
1264 } 1263 }
1264 usb_mark_last_busy(udev);
1265 1265
1266 done: 1266 done:
1267 dev_vdbg(&udev->dev, "%s: status %d\n", __func__, status); 1267 dev_vdbg(&udev->dev, "%s: status %d\n", __func__, status);
@@ -1329,7 +1329,6 @@ int usb_resume(struct device *dev, pm_message_t msg)
1329 pm_runtime_disable(dev); 1329 pm_runtime_disable(dev);
1330 pm_runtime_set_active(dev); 1330 pm_runtime_set_active(dev);
1331 pm_runtime_enable(dev); 1331 pm_runtime_enable(dev);
1332 udev->last_busy = jiffies;
1333 do_unbind_rebind(udev, DO_REBIND); 1332 do_unbind_rebind(udev, DO_REBIND);
1334 } 1333 }
1335 } 1334 }
@@ -1397,33 +1396,8 @@ void usb_autosuspend_device(struct usb_device *udev)
1397{ 1396{
1398 int status; 1397 int status;
1399 1398
1400 udev->last_busy = jiffies; 1399 usb_mark_last_busy(udev);
1401 status = pm_runtime_put_sync(&udev->dev); 1400 status = pm_runtime_put_sync_autosuspend(&udev->dev);
1402 dev_vdbg(&udev->dev, "%s: cnt %d -> %d\n",
1403 __func__, atomic_read(&udev->dev.power.usage_count),
1404 status);
1405}
1406
1407/**
1408 * usb_try_autosuspend_device - attempt an autosuspend of a USB device and its interfaces
1409 * @udev: the usb_device to autosuspend
1410 *
1411 * This routine should be called when a core subsystem thinks @udev may
1412 * be ready to autosuspend.
1413 *
1414 * @udev's usage counter left unchanged. If it is 0 and all the interfaces
1415 * are inactive then an autosuspend will be attempted. The attempt may
1416 * fail or be delayed.
1417 *
1418 * The caller must hold @udev's device lock.
1419 *
1420 * This routine can run only in process context.
1421 */
1422void usb_try_autosuspend_device(struct usb_device *udev)
1423{
1424 int status;
1425
1426 status = pm_runtime_idle(&udev->dev);
1427 dev_vdbg(&udev->dev, "%s: cnt %d -> %d\n", 1401 dev_vdbg(&udev->dev, "%s: cnt %d -> %d\n",
1428 __func__, atomic_read(&udev->dev.power.usage_count), 1402 __func__, atomic_read(&udev->dev.power.usage_count),
1429 status); 1403 status);
@@ -1482,7 +1456,7 @@ void usb_autopm_put_interface(struct usb_interface *intf)
1482 struct usb_device *udev = interface_to_usbdev(intf); 1456 struct usb_device *udev = interface_to_usbdev(intf);
1483 int status; 1457 int status;
1484 1458
1485 udev->last_busy = jiffies; 1459 usb_mark_last_busy(udev);
1486 atomic_dec(&intf->pm_usage_cnt); 1460 atomic_dec(&intf->pm_usage_cnt);
1487 status = pm_runtime_put_sync(&intf->dev); 1461 status = pm_runtime_put_sync(&intf->dev);
1488 dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n", 1462 dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
@@ -1509,32 +1483,11 @@ EXPORT_SYMBOL_GPL(usb_autopm_put_interface);
1509void usb_autopm_put_interface_async(struct usb_interface *intf) 1483void usb_autopm_put_interface_async(struct usb_interface *intf)
1510{ 1484{
1511 struct usb_device *udev = interface_to_usbdev(intf); 1485 struct usb_device *udev = interface_to_usbdev(intf);
1512 unsigned long last_busy; 1486 int status;
1513 int status = 0;
1514 1487
1515 last_busy = udev->last_busy; 1488 usb_mark_last_busy(udev);
1516 udev->last_busy = jiffies;
1517 atomic_dec(&intf->pm_usage_cnt); 1489 atomic_dec(&intf->pm_usage_cnt);
1518 pm_runtime_put_noidle(&intf->dev); 1490 status = pm_runtime_put(&intf->dev);
1519
1520 if (udev->dev.power.runtime_auto) {
1521 /* Optimization: Don't schedule a delayed autosuspend if
1522 * the timer is already running and the expiration time
1523 * wouldn't change.
1524 *
1525 * We have to use the interface's timer. Attempts to
1526 * schedule a suspend for the device would fail because
1527 * the interface is still active.
1528 */
1529 if (intf->dev.power.timer_expires == 0 ||
1530 round_jiffies_up(last_busy) !=
1531 round_jiffies_up(jiffies)) {
1532 status = pm_schedule_suspend(&intf->dev,
1533 jiffies_to_msecs(
1534 round_jiffies_up_relative(
1535 udev->autosuspend_delay)));
1536 }
1537 }
1538 dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n", 1491 dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
1539 __func__, atomic_read(&intf->dev.power.usage_count), 1492 __func__, atomic_read(&intf->dev.power.usage_count),
1540 status); 1493 status);
@@ -1554,7 +1507,7 @@ void usb_autopm_put_interface_no_suspend(struct usb_interface *intf)
1554{ 1507{
1555 struct usb_device *udev = interface_to_usbdev(intf); 1508 struct usb_device *udev = interface_to_usbdev(intf);
1556 1509
1557 udev->last_busy = jiffies; 1510 usb_mark_last_busy(udev);
1558 atomic_dec(&intf->pm_usage_cnt); 1511 atomic_dec(&intf->pm_usage_cnt);
1559 pm_runtime_put_noidle(&intf->dev); 1512 pm_runtime_put_noidle(&intf->dev);
1560} 1513}
@@ -1612,18 +1565,9 @@ EXPORT_SYMBOL_GPL(usb_autopm_get_interface);
1612 */ 1565 */
1613int usb_autopm_get_interface_async(struct usb_interface *intf) 1566int usb_autopm_get_interface_async(struct usb_interface *intf)
1614{ 1567{
1615 int status = 0; 1568 int status;
1616 enum rpm_status s;
1617
1618 /* Don't request a resume unless the interface is already suspending
1619 * or suspended. Doing so would force a running suspend timer to be
1620 * cancelled.
1621 */
1622 pm_runtime_get_noresume(&intf->dev);
1623 s = ACCESS_ONCE(intf->dev.power.runtime_status);
1624 if (s == RPM_SUSPENDING || s == RPM_SUSPENDED)
1625 status = pm_request_resume(&intf->dev);
1626 1569
1570 status = pm_runtime_get(&intf->dev);
1627 if (status < 0 && status != -EINPROGRESS) 1571 if (status < 0 && status != -EINPROGRESS)
1628 pm_runtime_put_noidle(&intf->dev); 1572 pm_runtime_put_noidle(&intf->dev);
1629 else 1573 else
@@ -1650,7 +1594,7 @@ void usb_autopm_get_interface_no_resume(struct usb_interface *intf)
1650{ 1594{
1651 struct usb_device *udev = interface_to_usbdev(intf); 1595 struct usb_device *udev = interface_to_usbdev(intf);
1652 1596
1653 udev->last_busy = jiffies; 1597 usb_mark_last_busy(udev);
1654 atomic_inc(&intf->pm_usage_cnt); 1598 atomic_inc(&intf->pm_usage_cnt);
1655 pm_runtime_get_noresume(&intf->dev); 1599 pm_runtime_get_noresume(&intf->dev);
1656} 1600}
@@ -1661,7 +1605,6 @@ static int autosuspend_check(struct usb_device *udev)
1661{ 1605{
1662 int w, i; 1606 int w, i;
1663 struct usb_interface *intf; 1607 struct usb_interface *intf;
1664 unsigned long suspend_time, j;
1665 1608
1666 /* Fail if autosuspend is disabled, or any interfaces are in use, or 1609 /* Fail if autosuspend is disabled, or any interfaces are in use, or
1667 * any interface drivers require remote wakeup but it isn't available. 1610 * any interface drivers require remote wakeup but it isn't available.
@@ -1701,87 +1644,46 @@ static int autosuspend_check(struct usb_device *udev)
1701 return -EOPNOTSUPP; 1644 return -EOPNOTSUPP;
1702 } 1645 }
1703 udev->do_remote_wakeup = w; 1646 udev->do_remote_wakeup = w;
1704
1705 /* If everything is okay but the device hasn't been idle for long
1706 * enough, queue a delayed autosuspend request.
1707 */
1708 j = ACCESS_ONCE(jiffies);
1709 suspend_time = udev->last_busy + udev->autosuspend_delay;
1710 if (time_before(j, suspend_time)) {
1711 pm_schedule_suspend(&udev->dev, jiffies_to_msecs(
1712 round_jiffies_up_relative(suspend_time - j)));
1713 return -EAGAIN;
1714 }
1715 return 0; 1647 return 0;
1716} 1648}
1717 1649
1718static int usb_runtime_suspend(struct device *dev) 1650static int usb_runtime_suspend(struct device *dev)
1719{ 1651{
1720 int status = 0; 1652 struct usb_device *udev = to_usb_device(dev);
1653 int status;
1721 1654
1722 /* A USB device can be suspended if it passes the various autosuspend 1655 /* A USB device can be suspended if it passes the various autosuspend
1723 * checks. Runtime suspend for a USB device means suspending all the 1656 * checks. Runtime suspend for a USB device means suspending all the
1724 * interfaces and then the device itself. 1657 * interfaces and then the device itself.
1725 */ 1658 */
1726 if (is_usb_device(dev)) { 1659 if (autosuspend_check(udev) != 0)
1727 struct usb_device *udev = to_usb_device(dev); 1660 return -EAGAIN;
1728
1729 if (autosuspend_check(udev) != 0)
1730 return -EAGAIN;
1731
1732 status = usb_suspend_both(udev, PMSG_AUTO_SUSPEND);
1733
1734 /* If an interface fails the suspend, adjust the last_busy
1735 * time so that we don't get another suspend attempt right
1736 * away.
1737 */
1738 if (status) {
1739 udev->last_busy = jiffies +
1740 (udev->autosuspend_delay == 0 ?
1741 HZ/2 : 0);
1742 }
1743
1744 /* Prevent the parent from suspending immediately after */
1745 else if (udev->parent)
1746 udev->parent->last_busy = jiffies;
1747 }
1748 1661
1749 /* Runtime suspend for a USB interface doesn't mean anything. */ 1662 status = usb_suspend_both(udev, PMSG_AUTO_SUSPEND);
1750 return status; 1663 return status;
1751} 1664}
1752 1665
1753static int usb_runtime_resume(struct device *dev) 1666static int usb_runtime_resume(struct device *dev)
1754{ 1667{
1668 struct usb_device *udev = to_usb_device(dev);
1669 int status;
1670
1755 /* Runtime resume for a USB device means resuming both the device 1671 /* Runtime resume for a USB device means resuming both the device
1756 * and all its interfaces. 1672 * and all its interfaces.
1757 */ 1673 */
1758 if (is_usb_device(dev)) { 1674 status = usb_resume_both(udev, PMSG_AUTO_RESUME);
1759 struct usb_device *udev = to_usb_device(dev); 1675 return status;
1760 int status;
1761
1762 status = usb_resume_both(udev, PMSG_AUTO_RESUME);
1763 udev->last_busy = jiffies;
1764 return status;
1765 }
1766
1767 /* Runtime resume for a USB interface doesn't mean anything. */
1768 return 0;
1769} 1676}
1770 1677
1771static int usb_runtime_idle(struct device *dev) 1678static int usb_runtime_idle(struct device *dev)
1772{ 1679{
1680 struct usb_device *udev = to_usb_device(dev);
1681
1773 /* An idle USB device can be suspended if it passes the various 1682 /* An idle USB device can be suspended if it passes the various
1774 * autosuspend checks. An idle interface can be suspended at 1683 * autosuspend checks.
1775 * any time.
1776 */ 1684 */
1777 if (is_usb_device(dev)) { 1685 if (autosuspend_check(udev) == 0)
1778 struct usb_device *udev = to_usb_device(dev); 1686 pm_runtime_autosuspend(dev);
1779
1780 if (autosuspend_check(udev) != 0)
1781 return 0;
1782 }
1783
1784 pm_runtime_suspend(dev);
1785 return 0; 1687 return 0;
1786} 1688}
1787 1689
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 3799573bd385..b55d46070a25 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -19,7 +19,6 @@
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/pci.h> 21#include <linux/pci.h>
22#include <linux/pm_runtime.h>
23#include <linux/usb.h> 22#include <linux/usb.h>
24#include <linux/usb/hcd.h> 23#include <linux/usb/hcd.h>
25 24
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index ced846ac4141..6a95017fa62b 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -38,7 +38,6 @@
38#include <asm/unaligned.h> 38#include <asm/unaligned.h>
39#include <linux/platform_device.h> 39#include <linux/platform_device.h>
40#include <linux/workqueue.h> 40#include <linux/workqueue.h>
41#include <linux/pm_runtime.h>
42 41
43#include <linux/usb.h> 42#include <linux/usb.h>
44#include <linux/usb/hcd.h> 43#include <linux/usb/hcd.h>
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 27115b45edc5..b98efae6a1cf 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -24,7 +24,6 @@
24#include <linux/kthread.h> 24#include <linux/kthread.h>
25#include <linux/mutex.h> 25#include <linux/mutex.h>
26#include <linux/freezer.h> 26#include <linux/freezer.h>
27#include <linux/pm_runtime.h>
28 27
29#include <asm/uaccess.h> 28#include <asm/uaccess.h>
30#include <asm/byteorder.h> 29#include <asm/byteorder.h>
@@ -1804,8 +1803,15 @@ int usb_new_device(struct usb_device *udev)
1804 1803
1805 /* Tell the runtime-PM framework the device is active */ 1804 /* Tell the runtime-PM framework the device is active */
1806 pm_runtime_set_active(&udev->dev); 1805 pm_runtime_set_active(&udev->dev);
1806 pm_runtime_get_noresume(&udev->dev);
1807 pm_runtime_use_autosuspend(&udev->dev);
1807 pm_runtime_enable(&udev->dev); 1808 pm_runtime_enable(&udev->dev);
1808 1809
1810 /* By default, forbid autosuspend for all devices. It will be
1811 * allowed for hubs during binding.
1812 */
1813 usb_disable_autosuspend(udev);
1814
1809 err = usb_enumerate_device(udev); /* Read descriptors */ 1815 err = usb_enumerate_device(udev); /* Read descriptors */
1810 if (err < 0) 1816 if (err < 0)
1811 goto fail; 1817 goto fail;
@@ -1831,6 +1837,8 @@ int usb_new_device(struct usb_device *udev)
1831 } 1837 }
1832 1838
1833 (void) usb_create_ep_devs(&udev->dev, &udev->ep0, udev); 1839 (void) usb_create_ep_devs(&udev->dev, &udev->ep0, udev);
1840 usb_mark_last_busy(udev);
1841 pm_runtime_put_sync_autosuspend(&udev->dev);
1834 return err; 1842 return err;
1835 1843
1836fail: 1844fail:
@@ -2221,6 +2229,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
2221 usb_set_device_state(udev, USB_STATE_SUSPENDED); 2229 usb_set_device_state(udev, USB_STATE_SUSPENDED);
2222 msleep(10); 2230 msleep(10);
2223 } 2231 }
2232 usb_mark_last_busy(hub->hdev);
2224 return status; 2233 return status;
2225} 2234}
2226 2235
diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c
index b690aa35df9a..1b125c224dcf 100644
--- a/drivers/usb/core/inode.c
+++ b/drivers/usb/core/inode.c
@@ -343,17 +343,19 @@ static int usbfs_empty (struct dentry *dentry)
343{ 343{
344 struct list_head *list; 344 struct list_head *list;
345 345
346 spin_lock(&dcache_lock); 346 spin_lock(&dentry->d_lock);
347
348 list_for_each(list, &dentry->d_subdirs) { 347 list_for_each(list, &dentry->d_subdirs) {
349 struct dentry *de = list_entry(list, struct dentry, d_u.d_child); 348 struct dentry *de = list_entry(list, struct dentry, d_u.d_child);
349
350 spin_lock_nested(&de->d_lock, DENTRY_D_LOCK_NESTED);
350 if (usbfs_positive(de)) { 351 if (usbfs_positive(de)) {
351 spin_unlock(&dcache_lock); 352 spin_unlock(&de->d_lock);
353 spin_unlock(&dentry->d_lock);
352 return 0; 354 return 0;
353 } 355 }
356 spin_unlock(&de->d_lock);
354 } 357 }
355 358 spin_unlock(&dentry->d_lock);
356 spin_unlock(&dcache_lock);
357 return 1; 359 return 1;
358} 360}
359 361
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index d6e3e410477e..832487423826 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1804,6 +1804,7 @@ free_interfaces:
1804 INIT_WORK(&intf->reset_ws, __usb_queue_reset_device); 1804 INIT_WORK(&intf->reset_ws, __usb_queue_reset_device);
1805 intf->minor = -1; 1805 intf->minor = -1;
1806 device_initialize(&intf->dev); 1806 device_initialize(&intf->dev);
1807 pm_runtime_no_callbacks(&intf->dev);
1807 dev_set_name(&intf->dev, "%d-%s:%d.%d", 1808 dev_set_name(&intf->dev, "%d-%s:%d.%d",
1808 dev->bus->busnum, dev->devpath, 1809 dev->bus->busnum, dev->devpath,
1809 configuration, alt->desc.bInterfaceNumber); 1810 configuration, alt->desc.bInterfaceNumber);
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 25719da45e33..44c595432d6f 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -117,21 +117,6 @@ void usb_detect_quirks(struct usb_device *udev)
117 dev_dbg(&udev->dev, "USB quirks for this device: %x\n", 117 dev_dbg(&udev->dev, "USB quirks for this device: %x\n",
118 udev->quirks); 118 udev->quirks);
119 119
120#ifdef CONFIG_USB_SUSPEND
121
122 /* By default, disable autosuspend for all devices. The hub driver
123 * will enable it for hubs.
124 */
125 usb_disable_autosuspend(udev);
126
127 /* Autosuspend can also be disabled if the initial autosuspend_delay
128 * is negative.
129 */
130 if (udev->autosuspend_delay < 0)
131 usb_autoresume_device(udev);
132
133#endif
134
135 /* For the present, all devices default to USB-PERSIST enabled */ 120 /* For the present, all devices default to USB-PERSIST enabled */
136#if 0 /* was: #ifdef CONFIG_PM */ 121#if 0 /* was: #ifdef CONFIG_PM */
137 /* Hubs are automatically enabled for USB-PERSIST */ 122 /* Hubs are automatically enabled for USB-PERSIST */
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 448f5b47fc48..6781c369ce2d 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -233,8 +233,6 @@ static DEVICE_ATTR(urbnum, S_IRUGO, show_urbnum, NULL);
233 233
234#ifdef CONFIG_PM 234#ifdef CONFIG_PM
235 235
236static const char power_group[] = "power";
237
238static ssize_t 236static ssize_t
239show_persist(struct device *dev, struct device_attribute *attr, char *buf) 237show_persist(struct device *dev, struct device_attribute *attr, char *buf)
240{ 238{
@@ -278,7 +276,7 @@ static int add_persist_attributes(struct device *dev)
278 if (udev->descriptor.bDeviceClass != USB_CLASS_HUB) 276 if (udev->descriptor.bDeviceClass != USB_CLASS_HUB)
279 rc = sysfs_add_file_to_group(&dev->kobj, 277 rc = sysfs_add_file_to_group(&dev->kobj,
280 &dev_attr_persist.attr, 278 &dev_attr_persist.attr,
281 power_group); 279 power_group_name);
282 } 280 }
283 return rc; 281 return rc;
284} 282}
@@ -287,7 +285,7 @@ static void remove_persist_attributes(struct device *dev)
287{ 285{
288 sysfs_remove_file_from_group(&dev->kobj, 286 sysfs_remove_file_from_group(&dev->kobj,
289 &dev_attr_persist.attr, 287 &dev_attr_persist.attr,
290 power_group); 288 power_group_name);
291} 289}
292#else 290#else
293 291
@@ -336,44 +334,20 @@ static DEVICE_ATTR(active_duration, S_IRUGO, show_active_duration, NULL);
336static ssize_t 334static ssize_t
337show_autosuspend(struct device *dev, struct device_attribute *attr, char *buf) 335show_autosuspend(struct device *dev, struct device_attribute *attr, char *buf)
338{ 336{
339 struct usb_device *udev = to_usb_device(dev); 337 return sprintf(buf, "%d\n", dev->power.autosuspend_delay / 1000);
340
341 return sprintf(buf, "%d\n", udev->autosuspend_delay / HZ);
342} 338}
343 339
344static ssize_t 340static ssize_t
345set_autosuspend(struct device *dev, struct device_attribute *attr, 341set_autosuspend(struct device *dev, struct device_attribute *attr,
346 const char *buf, size_t count) 342 const char *buf, size_t count)
347{ 343{
348 struct usb_device *udev = to_usb_device(dev); 344 int value;
349 int value, old_delay;
350 int rc;
351 345
352 if (sscanf(buf, "%d", &value) != 1 || value >= INT_MAX/HZ || 346 if (sscanf(buf, "%d", &value) != 1 || value >= INT_MAX/1000 ||
353 value <= - INT_MAX/HZ) 347 value <= -INT_MAX/1000)
354 return -EINVAL; 348 return -EINVAL;
355 value *= HZ;
356
357 usb_lock_device(udev);
358 old_delay = udev->autosuspend_delay;
359 udev->autosuspend_delay = value;
360
361 if (old_delay < 0) { /* Autosuspend wasn't allowed */
362 if (value >= 0)
363 usb_autosuspend_device(udev);
364 } else { /* Autosuspend was allowed */
365 if (value < 0) {
366 rc = usb_autoresume_device(udev);
367 if (rc < 0) {
368 count = rc;
369 udev->autosuspend_delay = old_delay;
370 }
371 } else {
372 usb_try_autosuspend_device(udev);
373 }
374 }
375 349
376 usb_unlock_device(udev); 350 pm_runtime_set_autosuspend_delay(dev, value * 1000);
377 return count; 351 return count;
378} 352}
379 353
@@ -438,44 +412,30 @@ set_level(struct device *dev, struct device_attribute *attr,
438 412
439static DEVICE_ATTR(level, S_IRUGO | S_IWUSR, show_level, set_level); 413static DEVICE_ATTR(level, S_IRUGO | S_IWUSR, show_level, set_level);
440 414
415static struct attribute *power_attrs[] = {
416 &dev_attr_autosuspend.attr,
417 &dev_attr_level.attr,
418 &dev_attr_connected_duration.attr,
419 &dev_attr_active_duration.attr,
420 NULL,
421};
422static struct attribute_group power_attr_group = {
423 .name = power_group_name,
424 .attrs = power_attrs,
425};
426
441static int add_power_attributes(struct device *dev) 427static int add_power_attributes(struct device *dev)
442{ 428{
443 int rc = 0; 429 int rc = 0;
444 430
445 if (is_usb_device(dev)) { 431 if (is_usb_device(dev))
446 rc = sysfs_add_file_to_group(&dev->kobj, 432 rc = sysfs_merge_group(&dev->kobj, &power_attr_group);
447 &dev_attr_autosuspend.attr,
448 power_group);
449 if (rc == 0)
450 rc = sysfs_add_file_to_group(&dev->kobj,
451 &dev_attr_level.attr,
452 power_group);
453 if (rc == 0)
454 rc = sysfs_add_file_to_group(&dev->kobj,
455 &dev_attr_connected_duration.attr,
456 power_group);
457 if (rc == 0)
458 rc = sysfs_add_file_to_group(&dev->kobj,
459 &dev_attr_active_duration.attr,
460 power_group);
461 }
462 return rc; 433 return rc;
463} 434}
464 435
465static void remove_power_attributes(struct device *dev) 436static void remove_power_attributes(struct device *dev)
466{ 437{
467 sysfs_remove_file_from_group(&dev->kobj, 438 sysfs_unmerge_group(&dev->kobj, &power_attr_group);
468 &dev_attr_active_duration.attr,
469 power_group);
470 sysfs_remove_file_from_group(&dev->kobj,
471 &dev_attr_connected_duration.attr,
472 power_group);
473 sysfs_remove_file_from_group(&dev->kobj,
474 &dev_attr_level.attr,
475 power_group);
476 sysfs_remove_file_from_group(&dev->kobj,
477 &dev_attr_autosuspend.attr,
478 power_group);
479} 439}
480 440
481#else 441#else
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index fdd4130fbb7d..079cb57bab4f 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -445,7 +445,8 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
445 INIT_LIST_HEAD(&dev->filelist); 445 INIT_LIST_HEAD(&dev->filelist);
446 446
447#ifdef CONFIG_PM 447#ifdef CONFIG_PM
448 dev->autosuspend_delay = usb_autosuspend_delay * HZ; 448 pm_runtime_set_autosuspend_delay(&dev->dev,
449 usb_autosuspend_delay * 1000);
449 dev->connect_time = jiffies; 450 dev->connect_time = jiffies;
450 dev->active_duration = -jiffies; 451 dev->active_duration = -jiffies;
451#endif 452#endif
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index cd882203ad34..b975450f403e 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -75,14 +75,12 @@ static inline int usb_port_resume(struct usb_device *udev, pm_message_t msg)
75#ifdef CONFIG_USB_SUSPEND 75#ifdef CONFIG_USB_SUSPEND
76 76
77extern void usb_autosuspend_device(struct usb_device *udev); 77extern void usb_autosuspend_device(struct usb_device *udev);
78extern void usb_try_autosuspend_device(struct usb_device *udev);
79extern int usb_autoresume_device(struct usb_device *udev); 78extern int usb_autoresume_device(struct usb_device *udev);
80extern int usb_remote_wakeup(struct usb_device *dev); 79extern int usb_remote_wakeup(struct usb_device *dev);
81 80
82#else 81#else
83 82
84#define usb_autosuspend_device(udev) do {} while (0) 83#define usb_autosuspend_device(udev) do {} while (0)
85#define usb_try_autosuspend_device(udev) do {} while (0)
86static inline int usb_autoresume_device(struct usb_device *udev) 84static inline int usb_autoresume_device(struct usb_device *udev)
87{ 85{
88 return 0; 86 return 0;
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 607d0db4a988..1dc9739277b4 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -338,6 +338,19 @@ config USB_S3C2410_DEBUG
338 boolean "S3C2410 udc debug messages" 338 boolean "S3C2410 udc debug messages"
339 depends on USB_GADGET_S3C2410 339 depends on USB_GADGET_S3C2410
340 340
341config USB_GADGET_PXA_U2O
342 boolean "PXA9xx Processor USB2.0 controller"
343 select USB_GADGET_DUALSPEED
344 help
345 PXA9xx Processor series include a high speed USB2.0 device
346 controller, which support high speed and full speed USB peripheral.
347
348config USB_PXA_U2O
349 tristate
350 depends on USB_GADGET_PXA_U2O
351 default USB_GADGET
352 select USB_GADGET_SELECTED
353
341# 354#
342# Controllers available in both integrated and discrete versions 355# Controllers available in both integrated and discrete versions
343# 356#
@@ -414,8 +427,8 @@ config USB_FSL_QE
414 default USB_GADGET 427 default USB_GADGET
415 select USB_GADGET_SELECTED 428 select USB_GADGET_SELECTED
416 429
417config USB_GADGET_CI13XXX 430config USB_GADGET_CI13XXX_PCI
418 boolean "MIPS USB CI13xxx" 431 boolean "MIPS USB CI13xxx PCI UDC"
419 depends on PCI 432 depends on PCI
420 select USB_GADGET_DUALSPEED 433 select USB_GADGET_DUALSPEED
421 help 434 help
@@ -426,9 +439,9 @@ config USB_GADGET_CI13XXX
426 dynamically linked module called "ci13xxx_udc" and force all 439 dynamically linked module called "ci13xxx_udc" and force all
427 gadget drivers to also be dynamically linked. 440 gadget drivers to also be dynamically linked.
428 441
429config USB_CI13XXX 442config USB_CI13XXX_PCI
430 tristate 443 tristate
431 depends on USB_GADGET_CI13XXX 444 depends on USB_GADGET_CI13XXX_PCI
432 default USB_GADGET 445 default USB_GADGET
433 select USB_GADGET_SELECTED 446 select USB_GADGET_SELECTED
434 447
@@ -495,6 +508,49 @@ config USB_LANGWELL
495 default USB_GADGET 508 default USB_GADGET
496 select USB_GADGET_SELECTED 509 select USB_GADGET_SELECTED
497 510
511config USB_GADGET_EG20T
512 boolean "Intel EG20T(Topcliff) USB Device controller"
513 depends on PCI
514 select USB_GADGET_DUALSPEED
515 help
516 This is a USB device driver for EG20T PCH.
517 EG20T PCH is the platform controller hub that is used in Intel's
518 general embedded platform. EG20T PCH has USB device interface.
519 Using this interface, it is able to access system devices connected
520 to USB device.
521 This driver enables USB device function.
522 USB device is a USB peripheral controller which
523 supports both full and high speed USB 2.0 data transfers.
524 This driver supports both control transfer and bulk transfer modes.
525 This driver dose not support interrupt transfer or isochronous
526 transfer modes.
527
528config USB_EG20T
529 tristate
530 depends on USB_GADGET_EG20T
531 default USB_GADGET
532 select USB_GADGET_SELECTED
533
534config USB_GADGET_CI13XXX_MSM
535 boolean "MIPS USB CI13xxx for MSM"
536 depends on ARCH_MSM
537 select USB_GADGET_DUALSPEED
538 select USB_MSM_OTG_72K
539 help
540 MSM SoC has chipidea USB controller. This driver uses
541 ci13xxx_udc core.
542 This driver depends on OTG driver for PHY initialization,
543 clock management, powering up VBUS, and power management.
544
545 Say "y" to link the driver statically, or "m" to build a
546 dynamically linked module called "ci13xxx_msm" and force all
547 gadget drivers to also be dynamically linked.
548
549config USB_CI13XXX_MSM
550 tristate
551 depends on USB_GADGET_CI13XXX_MSM
552 default USB_GADGET
553 select USB_GADGET_SELECTED
498 554
499# 555#
500# LAST -- dummy/emulated controller 556# LAST -- dummy/emulated controller
@@ -685,6 +741,19 @@ config USB_ETH_EEM
685 If you say "y" here, the Ethernet gadget driver will use the EEM 741 If you say "y" here, the Ethernet gadget driver will use the EEM
686 protocol rather than ECM. If unsure, say "n". 742 protocol rather than ECM. If unsure, say "n".
687 743
744config USB_G_NCM
745 tristate "Network Control Model (NCM) support"
746 depends on NET
747 select CRC32
748 help
749 This driver implements USB CDC NCM subclass standard. NCM is
750 an advanced protocol for Ethernet encapsulation, allows grouping
751 of several ethernet frames into one USB transfer and diffferent
752 alignment possibilities.
753
754 Say "y" to link the driver statically, or "m" to build a
755 dynamically linked module called "g_ncm".
756
688config USB_GADGETFS 757config USB_GADGETFS
689 tristate "Gadget Filesystem (EXPERIMENTAL)" 758 tristate "Gadget Filesystem (EXPERIMENTAL)"
690 depends on EXPERIMENTAL 759 depends on EXPERIMENTAL
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index 5780db42417b..55f5e8ae5924 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -21,9 +21,13 @@ fsl_usb2_udc-$(CONFIG_ARCH_MXC) += fsl_mxc_udc.o
21obj-$(CONFIG_USB_M66592) += m66592-udc.o 21obj-$(CONFIG_USB_M66592) += m66592-udc.o
22obj-$(CONFIG_USB_R8A66597) += r8a66597-udc.o 22obj-$(CONFIG_USB_R8A66597) += r8a66597-udc.o
23obj-$(CONFIG_USB_FSL_QE) += fsl_qe_udc.o 23obj-$(CONFIG_USB_FSL_QE) += fsl_qe_udc.o
24obj-$(CONFIG_USB_CI13XXX) += ci13xxx_udc.o 24obj-$(CONFIG_USB_CI13XXX_PCI) += ci13xxx_pci.o
25obj-$(CONFIG_USB_S3C_HSOTG) += s3c-hsotg.o 25obj-$(CONFIG_USB_S3C_HSOTG) += s3c-hsotg.o
26obj-$(CONFIG_USB_LANGWELL) += langwell_udc.o 26obj-$(CONFIG_USB_LANGWELL) += langwell_udc.o
27obj-$(CONFIG_USB_EG20T) += pch_udc.o
28obj-$(CONFIG_USB_PXA_U2O) += mv_udc.o
29mv_udc-y := mv_udc_core.o mv_udc_phy.o
30obj-$(CONFIG_USB_CI13XXX_MSM) += ci13xxx_msm.o
27 31
28# 32#
29# USB gadget drivers 33# USB gadget drivers
@@ -43,6 +47,7 @@ g_hid-y := hid.o
43g_dbgp-y := dbgp.o 47g_dbgp-y := dbgp.o
44g_nokia-y := nokia.o 48g_nokia-y := nokia.o
45g_webcam-y := webcam.o 49g_webcam-y := webcam.o
50g_ncm-y := ncm.o
46 51
47obj-$(CONFIG_USB_ZERO) += g_zero.o 52obj-$(CONFIG_USB_ZERO) += g_zero.o
48obj-$(CONFIG_USB_AUDIO) += g_audio.o 53obj-$(CONFIG_USB_AUDIO) += g_audio.o
@@ -60,3 +65,4 @@ obj-$(CONFIG_USB_G_DBGP) += g_dbgp.o
60obj-$(CONFIG_USB_G_MULTI) += g_multi.o 65obj-$(CONFIG_USB_G_MULTI) += g_multi.o
61obj-$(CONFIG_USB_G_NOKIA) += g_nokia.o 66obj-$(CONFIG_USB_G_NOKIA) += g_nokia.o
62obj-$(CONFIG_USB_G_WEBCAM) += g_webcam.o 67obj-$(CONFIG_USB_G_WEBCAM) += g_webcam.o
68obj-$(CONFIG_USB_G_NCM) += g_ncm.o
diff --git a/drivers/usb/gadget/amd5536udc.c b/drivers/usb/gadget/amd5536udc.c
index 9034e0344723..f8dd7269d79c 100644
--- a/drivers/usb/gadget/amd5536udc.c
+++ b/drivers/usb/gadget/amd5536udc.c
@@ -3359,7 +3359,6 @@ static int udc_probe(struct udc *dev)
3359 dev_set_name(&dev->gadget.dev, "gadget"); 3359 dev_set_name(&dev->gadget.dev, "gadget");
3360 dev->gadget.dev.release = gadget_release; 3360 dev->gadget.dev.release = gadget_release;
3361 dev->gadget.name = name; 3361 dev->gadget.name = name;
3362 dev->gadget.name = name;
3363 dev->gadget.is_dualspeed = 1; 3362 dev->gadget.is_dualspeed = 1;
3364 3363
3365 /* init registers, interrupts, ... */ 3364 /* init registers, interrupts, ... */
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
index 717ff653fa23..e7c65a4408fb 100644
--- a/drivers/usb/gadget/atmel_usba_udc.c
+++ b/drivers/usb/gadget/atmel_usba_udc.c
@@ -2057,8 +2057,10 @@ static int __exit usba_udc_remove(struct platform_device *pdev)
2057 usba_ep_cleanup_debugfs(&usba_ep[i]); 2057 usba_ep_cleanup_debugfs(&usba_ep[i]);
2058 usba_cleanup_debugfs(udc); 2058 usba_cleanup_debugfs(udc);
2059 2059
2060 if (gpio_is_valid(udc->vbus_pin)) 2060 if (gpio_is_valid(udc->vbus_pin)) {
2061 free_irq(gpio_to_irq(udc->vbus_pin), udc);
2061 gpio_free(udc->vbus_pin); 2062 gpio_free(udc->vbus_pin);
2063 }
2062 2064
2063 free_irq(udc->irq, udc); 2065 free_irq(udc->irq, udc);
2064 kfree(usba_ep); 2066 kfree(usba_ep);
diff --git a/drivers/usb/gadget/ci13xxx_msm.c b/drivers/usb/gadget/ci13xxx_msm.c
new file mode 100644
index 000000000000..139ac9419597
--- /dev/null
+++ b/drivers/usb/gadget/ci13xxx_msm.c
@@ -0,0 +1,134 @@
1/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
15 * 02110-1301, USA.
16 *
17 */
18
19#include <linux/module.h>
20#include <linux/platform_device.h>
21#include <linux/pm_runtime.h>
22#include <linux/usb/msm_hsusb_hw.h>
23#include <linux/usb/ulpi.h>
24
25#include "ci13xxx_udc.c"
26
27#define MSM_USB_BASE (udc->regs)
28
29static irqreturn_t msm_udc_irq(int irq, void *data)
30{
31 return udc_irq();
32}
33
34static void ci13xxx_msm_notify_event(struct ci13xxx *udc, unsigned event)
35{
36 struct device *dev = udc->gadget.dev.parent;
37 int val;
38
39 switch (event) {
40 case CI13XXX_CONTROLLER_RESET_EVENT:
41 dev_dbg(dev, "CI13XXX_CONTROLLER_RESET_EVENT received\n");
42 writel(0, USB_AHBBURST);
43 writel(0, USB_AHBMODE);
44 break;
45 case CI13XXX_CONTROLLER_STOPPED_EVENT:
46 dev_dbg(dev, "CI13XXX_CONTROLLER_STOPPED_EVENT received\n");
47 /*
48 * Put the transceiver in non-driving mode. Otherwise host
49 * may not detect soft-disconnection.
50 */
51 val = otg_io_read(udc->transceiver, ULPI_FUNC_CTRL);
52 val &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
53 val |= ULPI_FUNC_CTRL_OPMODE_NONDRIVING;
54 otg_io_write(udc->transceiver, val, ULPI_FUNC_CTRL);
55 break;
56 default:
57 dev_dbg(dev, "unknown ci13xxx_udc event\n");
58 break;
59 }
60}
61
62static struct ci13xxx_udc_driver ci13xxx_msm_udc_driver = {
63 .name = "ci13xxx_msm",
64 .flags = CI13XXX_REGS_SHARED |
65 CI13XXX_REQUIRE_TRANSCEIVER |
66 CI13XXX_PULLUP_ON_VBUS |
67 CI13XXX_DISABLE_STREAMING,
68
69 .notify_event = ci13xxx_msm_notify_event,
70};
71
72static int ci13xxx_msm_probe(struct platform_device *pdev)
73{
74 struct resource *res;
75 void __iomem *regs;
76 int irq;
77 int ret;
78
79 dev_dbg(&pdev->dev, "ci13xxx_msm_probe\n");
80
81 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
82 if (!res) {
83 dev_err(&pdev->dev, "failed to get platform resource mem\n");
84 return -ENXIO;
85 }
86
87 regs = ioremap(res->start, resource_size(res));
88 if (!regs) {
89 dev_err(&pdev->dev, "ioremap failed\n");
90 return -ENOMEM;
91 }
92
93 ret = udc_probe(&ci13xxx_msm_udc_driver, &pdev->dev, regs);
94 if (ret < 0) {
95 dev_err(&pdev->dev, "udc_probe failed\n");
96 goto iounmap;
97 }
98
99 irq = platform_get_irq(pdev, 0);
100 if (irq < 0) {
101 dev_err(&pdev->dev, "IRQ not found\n");
102 ret = -ENXIO;
103 goto udc_remove;
104 }
105
106 ret = request_irq(irq, msm_udc_irq, IRQF_SHARED, pdev->name, pdev);
107 if (ret < 0) {
108 dev_err(&pdev->dev, "request_irq failed\n");
109 goto udc_remove;
110 }
111
112 pm_runtime_no_callbacks(&pdev->dev);
113 pm_runtime_enable(&pdev->dev);
114
115 return 0;
116
117udc_remove:
118 udc_remove();
119iounmap:
120 iounmap(regs);
121
122 return ret;
123}
124
125static struct platform_driver ci13xxx_msm_driver = {
126 .probe = ci13xxx_msm_probe,
127 .driver = { .name = "msm_hsusb", },
128};
129
130static int __init ci13xxx_msm_init(void)
131{
132 return platform_driver_register(&ci13xxx_msm_driver);
133}
134module_init(ci13xxx_msm_init);
diff --git a/drivers/usb/gadget/ci13xxx_pci.c b/drivers/usb/gadget/ci13xxx_pci.c
new file mode 100644
index 000000000000..883ab5e832d1
--- /dev/null
+++ b/drivers/usb/gadget/ci13xxx_pci.c
@@ -0,0 +1,176 @@
1/*
2 * ci13xxx_pci.c - MIPS USB IP core family device controller
3 *
4 * Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
5 *
6 * Author: David Lopo
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/module.h>
14#include <linux/pci.h>
15
16#include "ci13xxx_udc.c"
17
18/* driver name */
19#define UDC_DRIVER_NAME "ci13xxx_pci"
20
21/******************************************************************************
22 * PCI block
23 *****************************************************************************/
24/**
25 * ci13xxx_pci_irq: interrut handler
26 * @irq: irq number
27 * @pdev: USB Device Controller interrupt source
28 *
29 * This function returns IRQ_HANDLED if the IRQ has been handled
30 * This is an ISR don't trace, use attribute interface instead
31 */
32static irqreturn_t ci13xxx_pci_irq(int irq, void *pdev)
33{
34 if (irq == 0) {
35 dev_err(&((struct pci_dev *)pdev)->dev, "Invalid IRQ0 usage!");
36 return IRQ_HANDLED;
37 }
38 return udc_irq();
39}
40
41static struct ci13xxx_udc_driver ci13xxx_pci_udc_driver = {
42 .name = UDC_DRIVER_NAME,
43};
44
45/**
46 * ci13xxx_pci_probe: PCI probe
47 * @pdev: USB device controller being probed
48 * @id: PCI hotplug ID connecting controller to UDC framework
49 *
50 * This function returns an error code
51 * Allocates basic PCI resources for this USB device controller, and then
52 * invokes the udc_probe() method to start the UDC associated with it
53 */
54static int __devinit ci13xxx_pci_probe(struct pci_dev *pdev,
55 const struct pci_device_id *id)
56{
57 void __iomem *regs = NULL;
58 int retval = 0;
59
60 if (id == NULL)
61 return -EINVAL;
62
63 retval = pci_enable_device(pdev);
64 if (retval)
65 goto done;
66
67 if (!pdev->irq) {
68 dev_err(&pdev->dev, "No IRQ, check BIOS/PCI setup!");
69 retval = -ENODEV;
70 goto disable_device;
71 }
72
73 retval = pci_request_regions(pdev, UDC_DRIVER_NAME);
74 if (retval)
75 goto disable_device;
76
77 /* BAR 0 holds all the registers */
78 regs = pci_iomap(pdev, 0, 0);
79 if (!regs) {
80 dev_err(&pdev->dev, "Error mapping memory!");
81 retval = -EFAULT;
82 goto release_regions;
83 }
84 pci_set_drvdata(pdev, (__force void *)regs);
85
86 pci_set_master(pdev);
87 pci_try_set_mwi(pdev);
88
89 retval = udc_probe(&ci13xxx_pci_udc_driver, &pdev->dev, regs);
90 if (retval)
91 goto iounmap;
92
93 /* our device does not have MSI capability */
94
95 retval = request_irq(pdev->irq, ci13xxx_pci_irq, IRQF_SHARED,
96 UDC_DRIVER_NAME, pdev);
97 if (retval)
98 goto gadget_remove;
99
100 return 0;
101
102 gadget_remove:
103 udc_remove();
104 iounmap:
105 pci_iounmap(pdev, regs);
106 release_regions:
107 pci_release_regions(pdev);
108 disable_device:
109 pci_disable_device(pdev);
110 done:
111 return retval;
112}
113
114/**
115 * ci13xxx_pci_remove: PCI remove
116 * @pdev: USB Device Controller being removed
117 *
118 * Reverses the effect of ci13xxx_pci_probe(),
119 * first invoking the udc_remove() and then releases
120 * all PCI resources allocated for this USB device controller
121 */
122static void __devexit ci13xxx_pci_remove(struct pci_dev *pdev)
123{
124 free_irq(pdev->irq, pdev);
125 udc_remove();
126 pci_iounmap(pdev, (__force void __iomem *)pci_get_drvdata(pdev));
127 pci_release_regions(pdev);
128 pci_disable_device(pdev);
129}
130
131/**
132 * PCI device table
133 * PCI device structure
134 *
135 * Check "pci.h" for details
136 */
137static DEFINE_PCI_DEVICE_TABLE(ci13xxx_pci_id_table) = {
138 { PCI_DEVICE(0x153F, 0x1004) },
139 { PCI_DEVICE(0x153F, 0x1006) },
140 { 0, 0, 0, 0, 0, 0, 0 /* end: all zeroes */ }
141};
142MODULE_DEVICE_TABLE(pci, ci13xxx_pci_id_table);
143
144static struct pci_driver ci13xxx_pci_driver = {
145 .name = UDC_DRIVER_NAME,
146 .id_table = ci13xxx_pci_id_table,
147 .probe = ci13xxx_pci_probe,
148 .remove = __devexit_p(ci13xxx_pci_remove),
149};
150
151/**
152 * ci13xxx_pci_init: module init
153 *
154 * Driver load
155 */
156static int __init ci13xxx_pci_init(void)
157{
158 return pci_register_driver(&ci13xxx_pci_driver);
159}
160module_init(ci13xxx_pci_init);
161
162/**
163 * ci13xxx_pci_exit: module exit
164 *
165 * Driver unload
166 */
167static void __exit ci13xxx_pci_exit(void)
168{
169 pci_unregister_driver(&ci13xxx_pci_driver);
170}
171module_exit(ci13xxx_pci_exit);
172
173MODULE_AUTHOR("MIPS - David Lopo <dlopo@chipidea.mips.com>");
174MODULE_DESCRIPTION("MIPS CI13XXX USB Peripheral Controller");
175MODULE_LICENSE("GPL");
176MODULE_VERSION("June 2008");
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index 98b36fc88c77..31656a2b4ab4 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -22,7 +22,6 @@
22 * - ENDPT: endpoint operations (Gadget API) 22 * - ENDPT: endpoint operations (Gadget API)
23 * - GADGET: gadget operations (Gadget API) 23 * - GADGET: gadget operations (Gadget API)
24 * - BUS: bus glue code, bus abstraction layer 24 * - BUS: bus glue code, bus abstraction layer
25 * - PCI: PCI core interface and PCI resources (interrupts, memory...)
26 * 25 *
27 * Compile Options 26 * Compile Options
28 * - CONFIG_USB_GADGET_DEBUG_FILES: enable debug facilities 27 * - CONFIG_USB_GADGET_DEBUG_FILES: enable debug facilities
@@ -60,11 +59,11 @@
60#include <linux/io.h> 59#include <linux/io.h>
61#include <linux/irq.h> 60#include <linux/irq.h>
62#include <linux/kernel.h> 61#include <linux/kernel.h>
63#include <linux/module.h>
64#include <linux/pci.h>
65#include <linux/slab.h> 62#include <linux/slab.h>
63#include <linux/pm_runtime.h>
66#include <linux/usb/ch9.h> 64#include <linux/usb/ch9.h>
67#include <linux/usb/gadget.h> 65#include <linux/usb/gadget.h>
66#include <linux/usb/otg.h>
68 67
69#include "ci13xxx_udc.h" 68#include "ci13xxx_udc.h"
70 69
@@ -75,9 +74,6 @@
75/* ctrl register bank access */ 74/* ctrl register bank access */
76static DEFINE_SPINLOCK(udc_lock); 75static DEFINE_SPINLOCK(udc_lock);
77 76
78/* driver name */
79#define UDC_DRIVER_NAME "ci13xxx_udc"
80
81/* control endpoint description */ 77/* control endpoint description */
82static const struct usb_endpoint_descriptor 78static const struct usb_endpoint_descriptor
83ctrl_endpt_desc = { 79ctrl_endpt_desc = {
@@ -132,6 +128,9 @@ static struct {
132 size_t size; /* bank size */ 128 size_t size; /* bank size */
133} hw_bank; 129} hw_bank;
134 130
131/* MSM specific */
132#define ABS_AHBBURST (0x0090UL)
133#define ABS_AHBMODE (0x0098UL)
135/* UDC register map */ 134/* UDC register map */
136#define ABS_CAPLENGTH (0x100UL) 135#define ABS_CAPLENGTH (0x100UL)
137#define ABS_HCCPARAMS (0x108UL) 136#define ABS_HCCPARAMS (0x108UL)
@@ -248,13 +247,7 @@ static u32 hw_ctest_and_write(u32 addr, u32 mask, u32 data)
248 return (reg & mask) >> ffs_nr(mask); 247 return (reg & mask) >> ffs_nr(mask);
249} 248}
250 249
251/** 250static int hw_device_init(void __iomem *base)
252 * hw_device_reset: resets chip (execute without interruption)
253 * @base: register base address
254 *
255 * This function returns an error code
256 */
257static int hw_device_reset(void __iomem *base)
258{ 251{
259 u32 reg; 252 u32 reg;
260 253
@@ -271,6 +264,28 @@ static int hw_device_reset(void __iomem *base)
271 hw_bank.size += CAP_LAST; 264 hw_bank.size += CAP_LAST;
272 hw_bank.size /= sizeof(u32); 265 hw_bank.size /= sizeof(u32);
273 266
267 reg = hw_aread(ABS_DCCPARAMS, DCCPARAMS_DEN) >> ffs_nr(DCCPARAMS_DEN);
268 if (reg == 0 || reg > ENDPT_MAX)
269 return -ENODEV;
270
271 hw_ep_max = reg; /* cache hw ENDPT_MAX */
272
273 /* setup lock mode ? */
274
275 /* ENDPTSETUPSTAT is '0' by default */
276
277 /* HCSPARAMS.bf.ppc SHOULD BE zero for device */
278
279 return 0;
280}
281/**
282 * hw_device_reset: resets chip (execute without interruption)
283 * @base: register base address
284 *
285 * This function returns an error code
286 */
287static int hw_device_reset(struct ci13xxx *udc)
288{
274 /* should flush & stop before reset */ 289 /* should flush & stop before reset */
275 hw_cwrite(CAP_ENDPTFLUSH, ~0, ~0); 290 hw_cwrite(CAP_ENDPTFLUSH, ~0, ~0);
276 hw_cwrite(CAP_USBCMD, USBCMD_RS, 0); 291 hw_cwrite(CAP_USBCMD, USBCMD_RS, 0);
@@ -279,6 +294,14 @@ static int hw_device_reset(void __iomem *base)
279 while (hw_cread(CAP_USBCMD, USBCMD_RST)) 294 while (hw_cread(CAP_USBCMD, USBCMD_RST))
280 udelay(10); /* not RTOS friendly */ 295 udelay(10); /* not RTOS friendly */
281 296
297
298 if (udc->udc_driver->notify_event)
299 udc->udc_driver->notify_event(udc,
300 CI13XXX_CONTROLLER_RESET_EVENT);
301
302 if (udc->udc_driver->flags && CI13XXX_DISABLE_STREAMING)
303 hw_cwrite(CAP_USBMODE, USBMODE_SDIS, USBMODE_SDIS);
304
282 /* USBMODE should be configured step by step */ 305 /* USBMODE should be configured step by step */
283 hw_cwrite(CAP_USBMODE, USBMODE_CM, USBMODE_CM_IDLE); 306 hw_cwrite(CAP_USBMODE, USBMODE_CM, USBMODE_CM_IDLE);
284 hw_cwrite(CAP_USBMODE, USBMODE_CM, USBMODE_CM_DEVICE); 307 hw_cwrite(CAP_USBMODE, USBMODE_CM, USBMODE_CM_DEVICE);
@@ -290,18 +313,6 @@ static int hw_device_reset(void __iomem *base)
290 return -ENODEV; 313 return -ENODEV;
291 } 314 }
292 315
293 reg = hw_aread(ABS_DCCPARAMS, DCCPARAMS_DEN) >> ffs_nr(DCCPARAMS_DEN);
294 if (reg == 0 || reg > ENDPT_MAX)
295 return -ENODEV;
296
297 hw_ep_max = reg; /* cache hw ENDPT_MAX */
298
299 /* setup lock mode ? */
300
301 /* ENDPTSETUPSTAT is '0' by default */
302
303 /* HCSPARAMS.bf.ppc SHOULD BE zero for device */
304
305 return 0; 316 return 0;
306} 317}
307 318
@@ -1449,7 +1460,7 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
1449 mReq->ptr->page[0] = mReq->req.dma; 1460 mReq->ptr->page[0] = mReq->req.dma;
1450 for (i = 1; i < 5; i++) 1461 for (i = 1; i < 5; i++)
1451 mReq->ptr->page[i] = 1462 mReq->ptr->page[i] =
1452 (mReq->req.dma + i * PAGE_SIZE) & ~TD_RESERVED_MASK; 1463 (mReq->req.dma + i * CI13XXX_PAGE_SIZE) & ~TD_RESERVED_MASK;
1453 1464
1454 /* 1465 /*
1455 * QH configuration 1466 * QH configuration
@@ -1540,7 +1551,7 @@ __acquires(mEp->lock)
1540 list_del_init(&mReq->queue); 1551 list_del_init(&mReq->queue);
1541 mReq->req.status = -ESHUTDOWN; 1552 mReq->req.status = -ESHUTDOWN;
1542 1553
1543 if (!mReq->req.no_interrupt && mReq->req.complete != NULL) { 1554 if (mReq->req.complete != NULL) {
1544 spin_unlock(mEp->lock); 1555 spin_unlock(mEp->lock);
1545 mReq->req.complete(&mEp->ep, &mReq->req); 1556 mReq->req.complete(&mEp->ep, &mReq->req);
1546 spin_lock(mEp->lock); 1557 spin_lock(mEp->lock);
@@ -1557,8 +1568,6 @@ __acquires(mEp->lock)
1557 * Caller must hold lock 1568 * Caller must hold lock
1558 */ 1569 */
1559static int _gadget_stop_activity(struct usb_gadget *gadget) 1570static int _gadget_stop_activity(struct usb_gadget *gadget)
1560__releases(udc->lock)
1561__acquires(udc->lock)
1562{ 1571{
1563 struct usb_ep *ep; 1572 struct usb_ep *ep;
1564 struct ci13xxx *udc = container_of(gadget, struct ci13xxx, gadget); 1573 struct ci13xxx *udc = container_of(gadget, struct ci13xxx, gadget);
@@ -1570,8 +1579,6 @@ __acquires(udc->lock)
1570 if (gadget == NULL) 1579 if (gadget == NULL)
1571 return -EINVAL; 1580 return -EINVAL;
1572 1581
1573 spin_unlock(udc->lock);
1574
1575 /* flush all endpoints */ 1582 /* flush all endpoints */
1576 gadget_for_each_ep(ep, gadget) { 1583 gadget_for_each_ep(ep, gadget) {
1577 usb_ep_fifo_flush(ep); 1584 usb_ep_fifo_flush(ep);
@@ -1591,8 +1598,6 @@ __acquires(udc->lock)
1591 mEp->status = NULL; 1598 mEp->status = NULL;
1592 } 1599 }
1593 1600
1594 spin_lock(udc->lock);
1595
1596 return 0; 1601 return 0;
1597} 1602}
1598 1603
@@ -1621,6 +1626,7 @@ __acquires(udc->lock)
1621 1626
1622 dbg_event(0xFF, "BUS RST", 0); 1627 dbg_event(0xFF, "BUS RST", 0);
1623 1628
1629 spin_unlock(udc->lock);
1624 retval = _gadget_stop_activity(&udc->gadget); 1630 retval = _gadget_stop_activity(&udc->gadget);
1625 if (retval) 1631 if (retval)
1626 goto done; 1632 goto done;
@@ -1629,10 +1635,9 @@ __acquires(udc->lock)
1629 if (retval) 1635 if (retval)
1630 goto done; 1636 goto done;
1631 1637
1632 spin_unlock(udc->lock);
1633 retval = usb_ep_enable(&mEp->ep, &ctrl_endpt_desc); 1638 retval = usb_ep_enable(&mEp->ep, &ctrl_endpt_desc);
1634 if (!retval) { 1639 if (!retval) {
1635 mEp->status = usb_ep_alloc_request(&mEp->ep, GFP_KERNEL); 1640 mEp->status = usb_ep_alloc_request(&mEp->ep, GFP_ATOMIC);
1636 if (mEp->status == NULL) { 1641 if (mEp->status == NULL) {
1637 usb_ep_disable(&mEp->ep); 1642 usb_ep_disable(&mEp->ep);
1638 retval = -ENOMEM; 1643 retval = -ENOMEM;
@@ -1789,18 +1794,20 @@ __acquires(mEp->lock)
1789 1794
1790 dbg_done(_usb_addr(mEp), mReq->ptr->token, retval); 1795 dbg_done(_usb_addr(mEp), mReq->ptr->token, retval);
1791 1796
1792 if (!mReq->req.no_interrupt && mReq->req.complete != NULL) { 1797 if (!list_empty(&mEp->qh[mEp->dir].queue)) {
1798 struct ci13xxx_req* mReqEnq;
1799
1800 mReqEnq = list_entry(mEp->qh[mEp->dir].queue.next,
1801 struct ci13xxx_req, queue);
1802 _hardware_enqueue(mEp, mReqEnq);
1803 }
1804
1805 if (mReq->req.complete != NULL) {
1793 spin_unlock(mEp->lock); 1806 spin_unlock(mEp->lock);
1794 mReq->req.complete(&mEp->ep, &mReq->req); 1807 mReq->req.complete(&mEp->ep, &mReq->req);
1795 spin_lock(mEp->lock); 1808 spin_lock(mEp->lock);
1796 } 1809 }
1797 1810
1798 if (!list_empty(&mEp->qh[mEp->dir].queue)) {
1799 mReq = list_entry(mEp->qh[mEp->dir].queue.next,
1800 struct ci13xxx_req, queue);
1801 _hardware_enqueue(mEp, mReq);
1802 }
1803
1804 done: 1811 done:
1805 return retval; 1812 return retval;
1806} 1813}
@@ -2061,7 +2068,6 @@ static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
2061{ 2068{
2062 struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep); 2069 struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
2063 struct ci13xxx_req *mReq = NULL; 2070 struct ci13xxx_req *mReq = NULL;
2064 unsigned long flags;
2065 2071
2066 trace("%p, %i", ep, gfp_flags); 2072 trace("%p, %i", ep, gfp_flags);
2067 2073
@@ -2070,8 +2076,6 @@ static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
2070 return NULL; 2076 return NULL;
2071 } 2077 }
2072 2078
2073 spin_lock_irqsave(mEp->lock, flags);
2074
2075 mReq = kzalloc(sizeof(struct ci13xxx_req), gfp_flags); 2079 mReq = kzalloc(sizeof(struct ci13xxx_req), gfp_flags);
2076 if (mReq != NULL) { 2080 if (mReq != NULL) {
2077 INIT_LIST_HEAD(&mReq->queue); 2081 INIT_LIST_HEAD(&mReq->queue);
@@ -2086,8 +2090,6 @@ static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
2086 2090
2087 dbg_event(_usb_addr(mEp), "ALLOC", mReq == NULL); 2091 dbg_event(_usb_addr(mEp), "ALLOC", mReq == NULL);
2088 2092
2089 spin_unlock_irqrestore(mEp->lock, flags);
2090
2091 return (mReq == NULL) ? NULL : &mReq->req; 2093 return (mReq == NULL) ? NULL : &mReq->req;
2092} 2094}
2093 2095
@@ -2157,8 +2159,8 @@ static int ep_queue(struct usb_ep *ep, struct usb_request *req,
2157 goto done; 2159 goto done;
2158 } 2160 }
2159 2161
2160 if (req->length > (4 * PAGE_SIZE)) { 2162 if (req->length > (4 * CI13XXX_PAGE_SIZE)) {
2161 req->length = (4 * PAGE_SIZE); 2163 req->length = (4 * CI13XXX_PAGE_SIZE);
2162 retval = -EMSGSIZE; 2164 retval = -EMSGSIZE;
2163 warn("request length truncated"); 2165 warn("request length truncated");
2164 } 2166 }
@@ -2170,8 +2172,10 @@ static int ep_queue(struct usb_ep *ep, struct usb_request *req,
2170 mReq->req.actual = 0; 2172 mReq->req.actual = 0;
2171 list_add_tail(&mReq->queue, &mEp->qh[mEp->dir].queue); 2173 list_add_tail(&mReq->queue, &mEp->qh[mEp->dir].queue);
2172 2174
2173 retval = _hardware_enqueue(mEp, mReq); 2175 if (list_is_singular(&mEp->qh[mEp->dir].queue))
2174 if (retval == -EALREADY || retval == -EBUSY) { 2176 retval = _hardware_enqueue(mEp, mReq);
2177
2178 if (retval == -EALREADY) {
2175 dbg_event(_usb_addr(mEp), "QUEUE", retval); 2179 dbg_event(_usb_addr(mEp), "QUEUE", retval);
2176 retval = 0; 2180 retval = 0;
2177 } 2181 }
@@ -2209,7 +2213,7 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
2209 list_del_init(&mReq->queue); 2213 list_del_init(&mReq->queue);
2210 req->status = -ECONNRESET; 2214 req->status = -ECONNRESET;
2211 2215
2212 if (!mReq->req.no_interrupt && mReq->req.complete != NULL) { 2216 if (mReq->req.complete != NULL) {
2213 spin_unlock(mEp->lock); 2217 spin_unlock(mEp->lock);
2214 mReq->req.complete(&mEp->ep, &mReq->req); 2218 mReq->req.complete(&mEp->ep, &mReq->req);
2215 spin_lock(mEp->lock); 2219 spin_lock(mEp->lock);
@@ -2332,12 +2336,47 @@ static const struct usb_ep_ops usb_ep_ops = {
2332/****************************************************************************** 2336/******************************************************************************
2333 * GADGET block 2337 * GADGET block
2334 *****************************************************************************/ 2338 *****************************************************************************/
2339static int ci13xxx_vbus_session(struct usb_gadget *_gadget, int is_active)
2340{
2341 struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
2342 unsigned long flags;
2343 int gadget_ready = 0;
2344
2345 if (!(udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS))
2346 return -EOPNOTSUPP;
2347
2348 spin_lock_irqsave(udc->lock, flags);
2349 udc->vbus_active = is_active;
2350 if (udc->driver)
2351 gadget_ready = 1;
2352 spin_unlock_irqrestore(udc->lock, flags);
2353
2354 if (gadget_ready) {
2355 if (is_active) {
2356 pm_runtime_get_sync(&_gadget->dev);
2357 hw_device_reset(udc);
2358 hw_device_state(udc->ci13xxx_ep[0].qh[RX].dma);
2359 } else {
2360 hw_device_state(0);
2361 if (udc->udc_driver->notify_event)
2362 udc->udc_driver->notify_event(udc,
2363 CI13XXX_CONTROLLER_STOPPED_EVENT);
2364 _gadget_stop_activity(&udc->gadget);
2365 pm_runtime_put_sync(&_gadget->dev);
2366 }
2367 }
2368
2369 return 0;
2370}
2371
2335/** 2372/**
2336 * Device operations part of the API to the USB controller hardware, 2373 * Device operations part of the API to the USB controller hardware,
2337 * which don't involve endpoints (or i/o) 2374 * which don't involve endpoints (or i/o)
2338 * Check "usb_gadget.h" for details 2375 * Check "usb_gadget.h" for details
2339 */ 2376 */
2340static const struct usb_gadget_ops usb_gadget_ops; 2377static const struct usb_gadget_ops usb_gadget_ops = {
2378 .vbus_session = ci13xxx_vbus_session,
2379};
2341 2380
2342/** 2381/**
2343 * usb_gadget_probe_driver: register a gadget driver 2382 * usb_gadget_probe_driver: register a gadget driver
@@ -2358,7 +2397,6 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
2358 2397
2359 if (driver == NULL || 2398 if (driver == NULL ||
2360 bind == NULL || 2399 bind == NULL ||
2361 driver->unbind == NULL ||
2362 driver->setup == NULL || 2400 driver->setup == NULL ||
2363 driver->disconnect == NULL || 2401 driver->disconnect == NULL ||
2364 driver->suspend == NULL || 2402 driver->suspend == NULL ||
@@ -2372,13 +2410,13 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
2372 /* alloc resources */ 2410 /* alloc resources */
2373 udc->qh_pool = dma_pool_create("ci13xxx_qh", &udc->gadget.dev, 2411 udc->qh_pool = dma_pool_create("ci13xxx_qh", &udc->gadget.dev,
2374 sizeof(struct ci13xxx_qh), 2412 sizeof(struct ci13xxx_qh),
2375 64, PAGE_SIZE); 2413 64, CI13XXX_PAGE_SIZE);
2376 if (udc->qh_pool == NULL) 2414 if (udc->qh_pool == NULL)
2377 return -ENOMEM; 2415 return -ENOMEM;
2378 2416
2379 udc->td_pool = dma_pool_create("ci13xxx_td", &udc->gadget.dev, 2417 udc->td_pool = dma_pool_create("ci13xxx_td", &udc->gadget.dev,
2380 sizeof(struct ci13xxx_td), 2418 sizeof(struct ci13xxx_td),
2381 64, PAGE_SIZE); 2419 64, CI13XXX_PAGE_SIZE);
2382 if (udc->td_pool == NULL) { 2420 if (udc->td_pool == NULL) {
2383 dma_pool_destroy(udc->qh_pool); 2421 dma_pool_destroy(udc->qh_pool);
2384 udc->qh_pool = NULL; 2422 udc->qh_pool = NULL;
@@ -2390,7 +2428,6 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
2390 info("hw_ep_max = %d", hw_ep_max); 2428 info("hw_ep_max = %d", hw_ep_max);
2391 2429
2392 udc->driver = driver; 2430 udc->driver = driver;
2393 udc->gadget.ops = NULL;
2394 udc->gadget.dev.driver = NULL; 2431 udc->gadget.dev.driver = NULL;
2395 2432
2396 retval = 0; 2433 retval = 0;
@@ -2410,9 +2447,11 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
2410 /* this allocation cannot be random */ 2447 /* this allocation cannot be random */
2411 for (k = RX; k <= TX; k++) { 2448 for (k = RX; k <= TX; k++) {
2412 INIT_LIST_HEAD(&mEp->qh[k].queue); 2449 INIT_LIST_HEAD(&mEp->qh[k].queue);
2450 spin_unlock_irqrestore(udc->lock, flags);
2413 mEp->qh[k].ptr = dma_pool_alloc(udc->qh_pool, 2451 mEp->qh[k].ptr = dma_pool_alloc(udc->qh_pool,
2414 GFP_KERNEL, 2452 GFP_KERNEL,
2415 &mEp->qh[k].dma); 2453 &mEp->qh[k].dma);
2454 spin_lock_irqsave(udc->lock, flags);
2416 if (mEp->qh[k].ptr == NULL) 2455 if (mEp->qh[k].ptr == NULL)
2417 retval = -ENOMEM; 2456 retval = -ENOMEM;
2418 else 2457 else
@@ -2429,7 +2468,6 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
2429 2468
2430 /* bind gadget */ 2469 /* bind gadget */
2431 driver->driver.bus = NULL; 2470 driver->driver.bus = NULL;
2432 udc->gadget.ops = &usb_gadget_ops;
2433 udc->gadget.dev.driver = &driver->driver; 2471 udc->gadget.dev.driver = &driver->driver;
2434 2472
2435 spin_unlock_irqrestore(udc->lock, flags); 2473 spin_unlock_irqrestore(udc->lock, flags);
@@ -2437,12 +2475,24 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
2437 spin_lock_irqsave(udc->lock, flags); 2475 spin_lock_irqsave(udc->lock, flags);
2438 2476
2439 if (retval) { 2477 if (retval) {
2440 udc->gadget.ops = NULL;
2441 udc->gadget.dev.driver = NULL; 2478 udc->gadget.dev.driver = NULL;
2442 goto done; 2479 goto done;
2443 } 2480 }
2444 2481
2482 pm_runtime_get_sync(&udc->gadget.dev);
2483 if (udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) {
2484 if (udc->vbus_active) {
2485 if (udc->udc_driver->flags & CI13XXX_REGS_SHARED)
2486 hw_device_reset(udc);
2487 } else {
2488 pm_runtime_put_sync(&udc->gadget.dev);
2489 goto done;
2490 }
2491 }
2492
2445 retval = hw_device_state(udc->ci13xxx_ep[0].qh[RX].dma); 2493 retval = hw_device_state(udc->ci13xxx_ep[0].qh[RX].dma);
2494 if (retval)
2495 pm_runtime_put_sync(&udc->gadget.dev);
2446 2496
2447 done: 2497 done:
2448 spin_unlock_irqrestore(udc->lock, flags); 2498 spin_unlock_irqrestore(udc->lock, flags);
@@ -2475,19 +2525,22 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
2475 2525
2476 spin_lock_irqsave(udc->lock, flags); 2526 spin_lock_irqsave(udc->lock, flags);
2477 2527
2478 hw_device_state(0); 2528 if (!(udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) ||
2479 2529 udc->vbus_active) {
2480 /* unbind gadget */ 2530 hw_device_state(0);
2481 if (udc->gadget.ops != NULL) { 2531 if (udc->udc_driver->notify_event)
2532 udc->udc_driver->notify_event(udc,
2533 CI13XXX_CONTROLLER_STOPPED_EVENT);
2482 _gadget_stop_activity(&udc->gadget); 2534 _gadget_stop_activity(&udc->gadget);
2535 pm_runtime_put(&udc->gadget.dev);
2536 }
2483 2537
2484 spin_unlock_irqrestore(udc->lock, flags); 2538 /* unbind gadget */
2485 driver->unbind(&udc->gadget); /* MAY SLEEP */ 2539 spin_unlock_irqrestore(udc->lock, flags);
2486 spin_lock_irqsave(udc->lock, flags); 2540 driver->unbind(&udc->gadget); /* MAY SLEEP */
2541 spin_lock_irqsave(udc->lock, flags);
2487 2542
2488 udc->gadget.ops = NULL; 2543 udc->gadget.dev.driver = NULL;
2489 udc->gadget.dev.driver = NULL;
2490 }
2491 2544
2492 /* free resources */ 2545 /* free resources */
2493 for (i = 0; i < hw_ep_max; i++) { 2546 for (i = 0; i < hw_ep_max; i++) {
@@ -2544,6 +2597,14 @@ static irqreturn_t udc_irq(void)
2544 } 2597 }
2545 2598
2546 spin_lock(udc->lock); 2599 spin_lock(udc->lock);
2600
2601 if (udc->udc_driver->flags & CI13XXX_REGS_SHARED) {
2602 if (hw_cread(CAP_USBMODE, USBMODE_CM) !=
2603 USBMODE_CM_DEVICE) {
2604 spin_unlock(udc->lock);
2605 return IRQ_NONE;
2606 }
2607 }
2547 intr = hw_test_and_clear_intr_active(); 2608 intr = hw_test_and_clear_intr_active();
2548 if (intr) { 2609 if (intr) {
2549 isr_statistics.hndl.buf[isr_statistics.hndl.idx++] = intr; 2610 isr_statistics.hndl.buf[isr_statistics.hndl.idx++] = intr;
@@ -2602,14 +2663,16 @@ static void udc_release(struct device *dev)
2602 * No interrupts active, the IRQ has not been requested yet 2663 * No interrupts active, the IRQ has not been requested yet
2603 * Kernel assumes 32-bit DMA operations by default, no need to dma_set_mask 2664 * Kernel assumes 32-bit DMA operations by default, no need to dma_set_mask
2604 */ 2665 */
2605static int udc_probe(struct device *dev, void __iomem *regs, const char *name) 2666static int udc_probe(struct ci13xxx_udc_driver *driver, struct device *dev,
2667 void __iomem *regs)
2606{ 2668{
2607 struct ci13xxx *udc; 2669 struct ci13xxx *udc;
2608 int retval = 0; 2670 int retval = 0;
2609 2671
2610 trace("%p, %p, %p", dev, regs, name); 2672 trace("%p, %p, %p", dev, regs, name);
2611 2673
2612 if (dev == NULL || regs == NULL || name == NULL) 2674 if (dev == NULL || regs == NULL || driver == NULL ||
2675 driver->name == NULL)
2613 return -EINVAL; 2676 return -EINVAL;
2614 2677
2615 udc = kzalloc(sizeof(struct ci13xxx), GFP_KERNEL); 2678 udc = kzalloc(sizeof(struct ci13xxx), GFP_KERNEL);
@@ -2617,42 +2680,77 @@ static int udc_probe(struct device *dev, void __iomem *regs, const char *name)
2617 return -ENOMEM; 2680 return -ENOMEM;
2618 2681
2619 udc->lock = &udc_lock; 2682 udc->lock = &udc_lock;
2683 udc->regs = regs;
2684 udc->udc_driver = driver;
2620 2685
2621 retval = hw_device_reset(regs); 2686 udc->gadget.ops = &usb_gadget_ops;
2622 if (retval)
2623 goto done;
2624
2625 udc->gadget.ops = NULL;
2626 udc->gadget.speed = USB_SPEED_UNKNOWN; 2687 udc->gadget.speed = USB_SPEED_UNKNOWN;
2627 udc->gadget.is_dualspeed = 1; 2688 udc->gadget.is_dualspeed = 1;
2628 udc->gadget.is_otg = 0; 2689 udc->gadget.is_otg = 0;
2629 udc->gadget.name = name; 2690 udc->gadget.name = driver->name;
2630 2691
2631 INIT_LIST_HEAD(&udc->gadget.ep_list); 2692 INIT_LIST_HEAD(&udc->gadget.ep_list);
2632 udc->gadget.ep0 = NULL; 2693 udc->gadget.ep0 = NULL;
2633 2694
2634 dev_set_name(&udc->gadget.dev, "gadget"); 2695 dev_set_name(&udc->gadget.dev, "gadget");
2635 udc->gadget.dev.dma_mask = dev->dma_mask; 2696 udc->gadget.dev.dma_mask = dev->dma_mask;
2697 udc->gadget.dev.coherent_dma_mask = dev->coherent_dma_mask;
2636 udc->gadget.dev.parent = dev; 2698 udc->gadget.dev.parent = dev;
2637 udc->gadget.dev.release = udc_release; 2699 udc->gadget.dev.release = udc_release;
2638 2700
2701 retval = hw_device_init(regs);
2702 if (retval < 0)
2703 goto free_udc;
2704
2705 udc->transceiver = otg_get_transceiver();
2706
2707 if (udc->udc_driver->flags & CI13XXX_REQUIRE_TRANSCEIVER) {
2708 if (udc->transceiver == NULL) {
2709 retval = -ENODEV;
2710 goto free_udc;
2711 }
2712 }
2713
2714 if (!(udc->udc_driver->flags & CI13XXX_REGS_SHARED)) {
2715 retval = hw_device_reset(udc);
2716 if (retval)
2717 goto put_transceiver;
2718 }
2719
2639 retval = device_register(&udc->gadget.dev); 2720 retval = device_register(&udc->gadget.dev);
2640 if (retval) 2721 if (retval) {
2641 goto done; 2722 put_device(&udc->gadget.dev);
2723 goto put_transceiver;
2724 }
2642 2725
2643#ifdef CONFIG_USB_GADGET_DEBUG_FILES 2726#ifdef CONFIG_USB_GADGET_DEBUG_FILES
2644 retval = dbg_create_files(&udc->gadget.dev); 2727 retval = dbg_create_files(&udc->gadget.dev);
2645#endif 2728#endif
2646 if (retval) { 2729 if (retval)
2647 device_unregister(&udc->gadget.dev); 2730 goto unreg_device;
2648 goto done; 2731
2732 if (udc->transceiver) {
2733 retval = otg_set_peripheral(udc->transceiver, &udc->gadget);
2734 if (retval)
2735 goto remove_dbg;
2649 } 2736 }
2737 pm_runtime_no_callbacks(&udc->gadget.dev);
2738 pm_runtime_enable(&udc->gadget.dev);
2650 2739
2651 _udc = udc; 2740 _udc = udc;
2652 return retval; 2741 return retval;
2653 2742
2654 done:
2655 err("error = %i", retval); 2743 err("error = %i", retval);
2744remove_dbg:
2745#ifdef CONFIG_USB_GADGET_DEBUG_FILES
2746 dbg_remove_files(&udc->gadget.dev);
2747#endif
2748unreg_device:
2749 device_unregister(&udc->gadget.dev);
2750put_transceiver:
2751 if (udc->transceiver)
2752 otg_put_transceiver(udc->transceiver);
2753free_udc:
2656 kfree(udc); 2754 kfree(udc);
2657 _udc = NULL; 2755 _udc = NULL;
2658 return retval; 2756 return retval;
@@ -2672,6 +2770,10 @@ static void udc_remove(void)
2672 return; 2770 return;
2673 } 2771 }
2674 2772
2773 if (udc->transceiver) {
2774 otg_set_peripheral(udc->transceiver, &udc->gadget);
2775 otg_put_transceiver(udc->transceiver);
2776 }
2675#ifdef CONFIG_USB_GADGET_DEBUG_FILES 2777#ifdef CONFIG_USB_GADGET_DEBUG_FILES
2676 dbg_remove_files(&udc->gadget.dev); 2778 dbg_remove_files(&udc->gadget.dev);
2677#endif 2779#endif
@@ -2680,156 +2782,3 @@ static void udc_remove(void)
2680 kfree(udc); 2782 kfree(udc);
2681 _udc = NULL; 2783 _udc = NULL;
2682} 2784}
2683
2684/******************************************************************************
2685 * PCI block
2686 *****************************************************************************/
2687/**
2688 * ci13xxx_pci_irq: interrut handler
2689 * @irq: irq number
2690 * @pdev: USB Device Controller interrupt source
2691 *
2692 * This function returns IRQ_HANDLED if the IRQ has been handled
2693 * This is an ISR don't trace, use attribute interface instead
2694 */
2695static irqreturn_t ci13xxx_pci_irq(int irq, void *pdev)
2696{
2697 if (irq == 0) {
2698 dev_err(&((struct pci_dev *)pdev)->dev, "Invalid IRQ0 usage!");
2699 return IRQ_HANDLED;
2700 }
2701 return udc_irq();
2702}
2703
2704/**
2705 * ci13xxx_pci_probe: PCI probe
2706 * @pdev: USB device controller being probed
2707 * @id: PCI hotplug ID connecting controller to UDC framework
2708 *
2709 * This function returns an error code
2710 * Allocates basic PCI resources for this USB device controller, and then
2711 * invokes the udc_probe() method to start the UDC associated with it
2712 */
2713static int __devinit ci13xxx_pci_probe(struct pci_dev *pdev,
2714 const struct pci_device_id *id)
2715{
2716 void __iomem *regs = NULL;
2717 int retval = 0;
2718
2719 if (id == NULL)
2720 return -EINVAL;
2721
2722 retval = pci_enable_device(pdev);
2723 if (retval)
2724 goto done;
2725
2726 if (!pdev->irq) {
2727 dev_err(&pdev->dev, "No IRQ, check BIOS/PCI setup!");
2728 retval = -ENODEV;
2729 goto disable_device;
2730 }
2731
2732 retval = pci_request_regions(pdev, UDC_DRIVER_NAME);
2733 if (retval)
2734 goto disable_device;
2735
2736 /* BAR 0 holds all the registers */
2737 regs = pci_iomap(pdev, 0, 0);
2738 if (!regs) {
2739 dev_err(&pdev->dev, "Error mapping memory!");
2740 retval = -EFAULT;
2741 goto release_regions;
2742 }
2743 pci_set_drvdata(pdev, (__force void *)regs);
2744
2745 pci_set_master(pdev);
2746 pci_try_set_mwi(pdev);
2747
2748 retval = udc_probe(&pdev->dev, regs, UDC_DRIVER_NAME);
2749 if (retval)
2750 goto iounmap;
2751
2752 /* our device does not have MSI capability */
2753
2754 retval = request_irq(pdev->irq, ci13xxx_pci_irq, IRQF_SHARED,
2755 UDC_DRIVER_NAME, pdev);
2756 if (retval)
2757 goto gadget_remove;
2758
2759 return 0;
2760
2761 gadget_remove:
2762 udc_remove();
2763 iounmap:
2764 pci_iounmap(pdev, regs);
2765 release_regions:
2766 pci_release_regions(pdev);
2767 disable_device:
2768 pci_disable_device(pdev);
2769 done:
2770 return retval;
2771}
2772
2773/**
2774 * ci13xxx_pci_remove: PCI remove
2775 * @pdev: USB Device Controller being removed
2776 *
2777 * Reverses the effect of ci13xxx_pci_probe(),
2778 * first invoking the udc_remove() and then releases
2779 * all PCI resources allocated for this USB device controller
2780 */
2781static void __devexit ci13xxx_pci_remove(struct pci_dev *pdev)
2782{
2783 free_irq(pdev->irq, pdev);
2784 udc_remove();
2785 pci_iounmap(pdev, (__force void __iomem *)pci_get_drvdata(pdev));
2786 pci_release_regions(pdev);
2787 pci_disable_device(pdev);
2788}
2789
2790/**
2791 * PCI device table
2792 * PCI device structure
2793 *
2794 * Check "pci.h" for details
2795 */
2796static DEFINE_PCI_DEVICE_TABLE(ci13xxx_pci_id_table) = {
2797 { PCI_DEVICE(0x153F, 0x1004) },
2798 { PCI_DEVICE(0x153F, 0x1006) },
2799 { 0, 0, 0, 0, 0, 0, 0 /* end: all zeroes */ }
2800};
2801MODULE_DEVICE_TABLE(pci, ci13xxx_pci_id_table);
2802
2803static struct pci_driver ci13xxx_pci_driver = {
2804 .name = UDC_DRIVER_NAME,
2805 .id_table = ci13xxx_pci_id_table,
2806 .probe = ci13xxx_pci_probe,
2807 .remove = __devexit_p(ci13xxx_pci_remove),
2808};
2809
2810/**
2811 * ci13xxx_pci_init: module init
2812 *
2813 * Driver load
2814 */
2815static int __init ci13xxx_pci_init(void)
2816{
2817 return pci_register_driver(&ci13xxx_pci_driver);
2818}
2819module_init(ci13xxx_pci_init);
2820
2821/**
2822 * ci13xxx_pci_exit: module exit
2823 *
2824 * Driver unload
2825 */
2826static void __exit ci13xxx_pci_exit(void)
2827{
2828 pci_unregister_driver(&ci13xxx_pci_driver);
2829}
2830module_exit(ci13xxx_pci_exit);
2831
2832MODULE_AUTHOR("MIPS - David Lopo <dlopo@chipidea.mips.com>");
2833MODULE_DESCRIPTION("MIPS CI13XXX USB Peripheral Controller");
2834MODULE_LICENSE("GPL");
2835MODULE_VERSION("June 2008");
diff --git a/drivers/usb/gadget/ci13xxx_udc.h b/drivers/usb/gadget/ci13xxx_udc.h
index 4026e9cede34..f61fed07f76b 100644
--- a/drivers/usb/gadget/ci13xxx_udc.h
+++ b/drivers/usb/gadget/ci13xxx_udc.h
@@ -19,6 +19,7 @@
19/****************************************************************************** 19/******************************************************************************
20 * DEFINE 20 * DEFINE
21 *****************************************************************************/ 21 *****************************************************************************/
22#define CI13XXX_PAGE_SIZE 4096ul /* page size for TD's */
22#define ENDPT_MAX (16) 23#define ENDPT_MAX (16)
23#define CTRL_PAYLOAD_MAX (64) 24#define CTRL_PAYLOAD_MAX (64)
24#define RX (0) /* similar to USB_DIR_OUT but can be used as an index */ 25#define RX (0) /* similar to USB_DIR_OUT but can be used as an index */
@@ -97,9 +98,24 @@ struct ci13xxx_ep {
97 struct dma_pool *td_pool; 98 struct dma_pool *td_pool;
98}; 99};
99 100
101struct ci13xxx;
102struct ci13xxx_udc_driver {
103 const char *name;
104 unsigned long flags;
105#define CI13XXX_REGS_SHARED BIT(0)
106#define CI13XXX_REQUIRE_TRANSCEIVER BIT(1)
107#define CI13XXX_PULLUP_ON_VBUS BIT(2)
108#define CI13XXX_DISABLE_STREAMING BIT(3)
109
110#define CI13XXX_CONTROLLER_RESET_EVENT 0
111#define CI13XXX_CONTROLLER_STOPPED_EVENT 1
112 void (*notify_event) (struct ci13xxx *udc, unsigned event);
113};
114
100/* CI13XXX UDC descriptor & global resources */ 115/* CI13XXX UDC descriptor & global resources */
101struct ci13xxx { 116struct ci13xxx {
102 spinlock_t *lock; /* ctrl register bank access */ 117 spinlock_t *lock; /* ctrl register bank access */
118 void __iomem *regs; /* registers address space */
103 119
104 struct dma_pool *qh_pool; /* DMA pool for queue heads */ 120 struct dma_pool *qh_pool; /* DMA pool for queue heads */
105 struct dma_pool *td_pool; /* DMA pool for transfer descs */ 121 struct dma_pool *td_pool; /* DMA pool for transfer descs */
@@ -108,6 +124,9 @@ struct ci13xxx {
108 struct ci13xxx_ep ci13xxx_ep[ENDPT_MAX]; /* extended endpts */ 124 struct ci13xxx_ep ci13xxx_ep[ENDPT_MAX]; /* extended endpts */
109 125
110 struct usb_gadget_driver *driver; /* 3rd party gadget driver */ 126 struct usb_gadget_driver *driver; /* 3rd party gadget driver */
127 struct ci13xxx_udc_driver *udc_driver; /* device controller driver */
128 int vbus_active; /* is VBUS active */
129 struct otg_transceiver *transceiver; /* Transceiver struct */
111}; 130};
112 131
113/****************************************************************************** 132/******************************************************************************
@@ -157,6 +176,7 @@ struct ci13xxx {
157#define USBMODE_CM_DEVICE (0x02UL << 0) 176#define USBMODE_CM_DEVICE (0x02UL << 0)
158#define USBMODE_CM_HOST (0x03UL << 0) 177#define USBMODE_CM_HOST (0x03UL << 0)
159#define USBMODE_SLOM BIT(3) 178#define USBMODE_SLOM BIT(3)
179#define USBMODE_SDIS BIT(4)
160 180
161/* ENDPTCTRL */ 181/* ENDPTCTRL */
162#define ENDPTCTRL_RXS BIT(0) 182#define ENDPTCTRL_RXS BIT(0)
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 8572dad5ecbb..f6ff8456d52d 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1126,7 +1126,7 @@ static int composite_bind(struct usb_gadget *gadget)
1126 if (bcdDevice) 1126 if (bcdDevice)
1127 cdev->desc.bcdDevice = cpu_to_le16(bcdDevice); 1127 cdev->desc.bcdDevice = cpu_to_le16(bcdDevice);
1128 1128
1129 /* stirng overrides */ 1129 /* string overrides */
1130 if (iManufacturer || !cdev->desc.iManufacturer) { 1130 if (iManufacturer || !cdev->desc.iManufacturer) {
1131 if (!iManufacturer && !composite->iManufacturer && 1131 if (!iManufacturer && !composite->iManufacturer &&
1132 !*composite_manufacturer) 1132 !*composite_manufacturer)
@@ -1188,6 +1188,8 @@ composite_suspend(struct usb_gadget *gadget)
1188 composite->suspend(cdev); 1188 composite->suspend(cdev);
1189 1189
1190 cdev->suspended = 1; 1190 cdev->suspended = 1;
1191
1192 usb_gadget_vbus_draw(gadget, 2);
1191} 1193}
1192 1194
1193static void 1195static void
@@ -1195,6 +1197,7 @@ composite_resume(struct usb_gadget *gadget)
1195{ 1197{
1196 struct usb_composite_dev *cdev = get_gadget_data(gadget); 1198 struct usb_composite_dev *cdev = get_gadget_data(gadget);
1197 struct usb_function *f; 1199 struct usb_function *f;
1200 u8 maxpower;
1198 1201
1199 /* REVISIT: should we have config level 1202 /* REVISIT: should we have config level
1200 * suspend/resume callbacks? 1203 * suspend/resume callbacks?
@@ -1207,6 +1210,11 @@ composite_resume(struct usb_gadget *gadget)
1207 if (f->resume) 1210 if (f->resume)
1208 f->resume(f); 1211 f->resume(f);
1209 } 1212 }
1213
1214 maxpower = cdev->config->bMaxPower;
1215
1216 usb_gadget_vbus_draw(gadget, maxpower ?
1217 (2 * maxpower) : CONFIG_USB_GADGET_VBUS_DRAW);
1210 } 1218 }
1211 1219
1212 cdev->suspended = 0; 1220 cdev->suspended = 0;
diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
index 1d2a2abbfa80..13b9f47feecd 100644
--- a/drivers/usb/gadget/dummy_hcd.c
+++ b/drivers/usb/gadget/dummy_hcd.c
@@ -1197,6 +1197,139 @@ static struct dummy_ep *find_endpoint (struct dummy *dum, u8 address)
1197#define Ep_Request (USB_TYPE_STANDARD | USB_RECIP_ENDPOINT) 1197#define Ep_Request (USB_TYPE_STANDARD | USB_RECIP_ENDPOINT)
1198#define Ep_InRequest (Ep_Request | USB_DIR_IN) 1198#define Ep_InRequest (Ep_Request | USB_DIR_IN)
1199 1199
1200
1201/**
1202 * handle_control_request() - handles all control transfers
1203 * @dum: pointer to dummy (the_controller)
1204 * @urb: the urb request to handle
1205 * @setup: pointer to the setup data for a USB device control
1206 * request
1207 * @status: pointer to request handling status
1208 *
1209 * Return 0 - if the request was handled
1210 * 1 - if the request wasn't handles
1211 * error code on error
1212 */
1213static int handle_control_request(struct dummy *dum, struct urb *urb,
1214 struct usb_ctrlrequest *setup,
1215 int *status)
1216{
1217 struct dummy_ep *ep2;
1218 int ret_val = 1;
1219 unsigned w_index;
1220 unsigned w_value;
1221
1222 w_index = le16_to_cpu(setup->wIndex);
1223 w_value = le16_to_cpu(setup->wValue);
1224 switch (setup->bRequest) {
1225 case USB_REQ_SET_ADDRESS:
1226 if (setup->bRequestType != Dev_Request)
1227 break;
1228 dum->address = w_value;
1229 *status = 0;
1230 dev_dbg(udc_dev(dum), "set_address = %d\n",
1231 w_value);
1232 ret_val = 0;
1233 break;
1234 case USB_REQ_SET_FEATURE:
1235 if (setup->bRequestType == Dev_Request) {
1236 ret_val = 0;
1237 switch (w_value) {
1238 case USB_DEVICE_REMOTE_WAKEUP:
1239 break;
1240 case USB_DEVICE_B_HNP_ENABLE:
1241 dum->gadget.b_hnp_enable = 1;
1242 break;
1243 case USB_DEVICE_A_HNP_SUPPORT:
1244 dum->gadget.a_hnp_support = 1;
1245 break;
1246 case USB_DEVICE_A_ALT_HNP_SUPPORT:
1247 dum->gadget.a_alt_hnp_support = 1;
1248 break;
1249 default:
1250 ret_val = -EOPNOTSUPP;
1251 }
1252 if (ret_val == 0) {
1253 dum->devstatus |= (1 << w_value);
1254 *status = 0;
1255 }
1256 } else if (setup->bRequestType == Ep_Request) {
1257 /* endpoint halt */
1258 ep2 = find_endpoint(dum, w_index);
1259 if (!ep2 || ep2->ep.name == ep0name) {
1260 ret_val = -EOPNOTSUPP;
1261 break;
1262 }
1263 ep2->halted = 1;
1264 ret_val = 0;
1265 *status = 0;
1266 }
1267 break;
1268 case USB_REQ_CLEAR_FEATURE:
1269 if (setup->bRequestType == Dev_Request) {
1270 ret_val = 0;
1271 switch (w_value) {
1272 case USB_DEVICE_REMOTE_WAKEUP:
1273 w_value = USB_DEVICE_REMOTE_WAKEUP;
1274 break;
1275 default:
1276 ret_val = -EOPNOTSUPP;
1277 break;
1278 }
1279 if (ret_val == 0) {
1280 dum->devstatus &= ~(1 << w_value);
1281 *status = 0;
1282 }
1283 } else if (setup->bRequestType == Ep_Request) {
1284 /* endpoint halt */
1285 ep2 = find_endpoint(dum, w_index);
1286 if (!ep2) {
1287 ret_val = -EOPNOTSUPP;
1288 break;
1289 }
1290 if (!ep2->wedged)
1291 ep2->halted = 0;
1292 ret_val = 0;
1293 *status = 0;
1294 }
1295 break;
1296 case USB_REQ_GET_STATUS:
1297 if (setup->bRequestType == Dev_InRequest
1298 || setup->bRequestType == Intf_InRequest
1299 || setup->bRequestType == Ep_InRequest) {
1300 char *buf;
1301 /*
1302 * device: remote wakeup, selfpowered
1303 * interface: nothing
1304 * endpoint: halt
1305 */
1306 buf = (char *)urb->transfer_buffer;
1307 if (urb->transfer_buffer_length > 0) {
1308 if (setup->bRequestType == Ep_InRequest) {
1309 ep2 = find_endpoint(dum, w_index);
1310 if (!ep2) {
1311 ret_val = -EOPNOTSUPP;
1312 break;
1313 }
1314 buf[0] = ep2->halted;
1315 } else if (setup->bRequestType ==
1316 Dev_InRequest) {
1317 buf[0] = (u8)dum->devstatus;
1318 } else
1319 buf[0] = 0;
1320 }
1321 if (urb->transfer_buffer_length > 1)
1322 buf[1] = 0;
1323 urb->actual_length = min_t(u32, 2,
1324 urb->transfer_buffer_length);
1325 ret_val = 0;
1326 *status = 0;
1327 }
1328 break;
1329 }
1330 return ret_val;
1331}
1332
1200/* drive both sides of the transfers; looks like irq handlers to 1333/* drive both sides of the transfers; looks like irq handlers to
1201 * both drivers except the callbacks aren't in_irq(). 1334 * both drivers except the callbacks aren't in_irq().
1202 */ 1335 */
@@ -1299,14 +1432,8 @@ restart:
1299 if (ep == &dum->ep [0] && ep->setup_stage) { 1432 if (ep == &dum->ep [0] && ep->setup_stage) {
1300 struct usb_ctrlrequest setup; 1433 struct usb_ctrlrequest setup;
1301 int value = 1; 1434 int value = 1;
1302 struct dummy_ep *ep2;
1303 unsigned w_index;
1304 unsigned w_value;
1305 1435
1306 setup = *(struct usb_ctrlrequest*) urb->setup_packet; 1436 setup = *(struct usb_ctrlrequest*) urb->setup_packet;
1307 w_index = le16_to_cpu(setup.wIndex);
1308 w_value = le16_to_cpu(setup.wValue);
1309
1310 /* paranoia, in case of stale queued data */ 1437 /* paranoia, in case of stale queued data */
1311 list_for_each_entry (req, &ep->queue, queue) { 1438 list_for_each_entry (req, &ep->queue, queue) {
1312 list_del_init (&req->queue); 1439 list_del_init (&req->queue);
@@ -1328,117 +1455,9 @@ restart:
1328 ep->last_io = jiffies; 1455 ep->last_io = jiffies;
1329 ep->setup_stage = 0; 1456 ep->setup_stage = 0;
1330 ep->halted = 0; 1457 ep->halted = 0;
1331 switch (setup.bRequest) {
1332 case USB_REQ_SET_ADDRESS:
1333 if (setup.bRequestType != Dev_Request)
1334 break;
1335 dum->address = w_value;
1336 status = 0;
1337 dev_dbg (udc_dev(dum), "set_address = %d\n",
1338 w_value);
1339 value = 0;
1340 break;
1341 case USB_REQ_SET_FEATURE:
1342 if (setup.bRequestType == Dev_Request) {
1343 value = 0;
1344 switch (w_value) {
1345 case USB_DEVICE_REMOTE_WAKEUP:
1346 break;
1347 case USB_DEVICE_B_HNP_ENABLE:
1348 dum->gadget.b_hnp_enable = 1;
1349 break;
1350 case USB_DEVICE_A_HNP_SUPPORT:
1351 dum->gadget.a_hnp_support = 1;
1352 break;
1353 case USB_DEVICE_A_ALT_HNP_SUPPORT:
1354 dum->gadget.a_alt_hnp_support
1355 = 1;
1356 break;
1357 default:
1358 value = -EOPNOTSUPP;
1359 }
1360 if (value == 0) {
1361 dum->devstatus |=
1362 (1 << w_value);
1363 status = 0;
1364 }
1365 1458
1366 } else if (setup.bRequestType == Ep_Request) { 1459 value = handle_control_request(dum, urb, &setup,
1367 // endpoint halt 1460 &status);
1368 ep2 = find_endpoint (dum, w_index);
1369 if (!ep2 || ep2->ep.name == ep0name) {
1370 value = -EOPNOTSUPP;
1371 break;
1372 }
1373 ep2->halted = 1;
1374 value = 0;
1375 status = 0;
1376 }
1377 break;
1378 case USB_REQ_CLEAR_FEATURE:
1379 if (setup.bRequestType == Dev_Request) {
1380 switch (w_value) {
1381 case USB_DEVICE_REMOTE_WAKEUP:
1382 dum->devstatus &= ~(1 <<
1383 USB_DEVICE_REMOTE_WAKEUP);
1384 value = 0;
1385 status = 0;
1386 break;
1387 default:
1388 value = -EOPNOTSUPP;
1389 break;
1390 }
1391 } else if (setup.bRequestType == Ep_Request) {
1392 // endpoint halt
1393 ep2 = find_endpoint (dum, w_index);
1394 if (!ep2) {
1395 value = -EOPNOTSUPP;
1396 break;
1397 }
1398 if (!ep2->wedged)
1399 ep2->halted = 0;
1400 value = 0;
1401 status = 0;
1402 }
1403 break;
1404 case USB_REQ_GET_STATUS:
1405 if (setup.bRequestType == Dev_InRequest
1406 || setup.bRequestType
1407 == Intf_InRequest
1408 || setup.bRequestType
1409 == Ep_InRequest
1410 ) {
1411 char *buf;
1412
1413 // device: remote wakeup, selfpowered
1414 // interface: nothing
1415 // endpoint: halt
1416 buf = (char *)urb->transfer_buffer;
1417 if (urb->transfer_buffer_length > 0) {
1418 if (setup.bRequestType ==
1419 Ep_InRequest) {
1420 ep2 = find_endpoint (dum, w_index);
1421 if (!ep2) {
1422 value = -EOPNOTSUPP;
1423 break;
1424 }
1425 buf [0] = ep2->halted;
1426 } else if (setup.bRequestType ==
1427 Dev_InRequest) {
1428 buf [0] = (u8)
1429 dum->devstatus;
1430 } else
1431 buf [0] = 0;
1432 }
1433 if (urb->transfer_buffer_length > 1)
1434 buf [1] = 0;
1435 urb->actual_length = min_t(u32, 2,
1436 urb->transfer_buffer_length);
1437 value = 0;
1438 status = 0;
1439 }
1440 break;
1441 }
1442 1461
1443 /* gadget driver handles all other requests. block 1462 /* gadget driver handles all other requests. block
1444 * until setup() returns; no reentrancy issues etc. 1463 * until setup() returns; no reentrancy issues etc.
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index 484c5ba5450e..1499f9e4afa8 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -1,10 +1,10 @@
1/* 1/*
2 * f_fs.c -- user mode filesystem api for usb composite funtcion controllers 2 * f_fs.c -- user mode file system API for USB composite function controllers
3 * 3 *
4 * Copyright (C) 2010 Samsung Electronics 4 * Copyright (C) 2010 Samsung Electronics
5 * Author: Michal Nazarewicz <m.nazarewicz@samsung.com> 5 * Author: Michal Nazarewicz <m.nazarewicz@samsung.com>
6 * 6 *
7 * Based on inode.c (GadgetFS): 7 * Based on inode.c (GadgetFS) which was:
8 * Copyright (C) 2003-2004 David Brownell 8 * Copyright (C) 2003-2004 David Brownell
9 * Copyright (C) 2003 Agilent Technologies 9 * Copyright (C) 2003 Agilent Technologies
10 * 10 *
@@ -38,62 +38,56 @@
38#define FUNCTIONFS_MAGIC 0xa647361 /* Chosen by a honest dice roll ;) */ 38#define FUNCTIONFS_MAGIC 0xa647361 /* Chosen by a honest dice roll ;) */
39 39
40 40
41/* Debuging *****************************************************************/ 41/* Debugging ****************************************************************/
42
43#define ffs_printk(level, fmt, args...) printk(level "f_fs: " fmt "\n", ## args)
44
45#define FERR(...) ffs_printk(KERN_ERR, __VA_ARGS__)
46#define FINFO(...) ffs_printk(KERN_INFO, __VA_ARGS__)
47
48#ifdef DEBUG
49# define FDBG(...) ffs_printk(KERN_DEBUG, __VA_ARGS__)
50#else
51# define FDBG(...) do { } while (0)
52#endif /* DEBUG */
53
54#ifdef VERBOSE_DEBUG
55# define FVDBG FDBG
56#else
57# define FVDBG(...) do { } while (0)
58#endif /* VERBOSE_DEBUG */
59
60#define ENTER() FVDBG("%s()", __func__)
61 42
62#ifdef VERBOSE_DEBUG 43#ifdef VERBOSE_DEBUG
44# define pr_vdebug pr_debug
63# define ffs_dump_mem(prefix, ptr, len) \ 45# define ffs_dump_mem(prefix, ptr, len) \
64 print_hex_dump_bytes("f_fs" prefix ": ", DUMP_PREFIX_NONE, ptr, len) 46 print_hex_dump_bytes(pr_fmt(prefix ": "), DUMP_PREFIX_NONE, ptr, len)
65#else 47#else
48# define pr_vdebug(...) do { } while (0)
66# define ffs_dump_mem(prefix, ptr, len) do { } while (0) 49# define ffs_dump_mem(prefix, ptr, len) do { } while (0)
67#endif 50#endif /* VERBOSE_DEBUG */
51
52#define ENTER() pr_vdebug("%s()\n", __func__)
68 53
69 54
70/* The data structure and setup file ****************************************/ 55/* The data structure and setup file ****************************************/
71 56
72enum ffs_state { 57enum ffs_state {
73 /* Waiting for descriptors and strings. */ 58 /*
74 /* In this state no open(2), read(2) or write(2) on epfiles 59 * Waiting for descriptors and strings.
60 *
61 * In this state no open(2), read(2) or write(2) on epfiles
75 * may succeed (which should not be the problem as there 62 * may succeed (which should not be the problem as there
76 * should be no such files opened in the firts place). */ 63 * should be no such files opened in the first place).
64 */
77 FFS_READ_DESCRIPTORS, 65 FFS_READ_DESCRIPTORS,
78 FFS_READ_STRINGS, 66 FFS_READ_STRINGS,
79 67
80 /* We've got descriptors and strings. We are or have called 68 /*
69 * We've got descriptors and strings. We are or have called
81 * functionfs_ready_callback(). functionfs_bind() may have 70 * functionfs_ready_callback(). functionfs_bind() may have
82 * been called but we don't know. */ 71 * been called but we don't know.
83 /* This is the only state in which operations on epfiles may 72 *
84 * succeed. */ 73 * This is the only state in which operations on epfiles may
74 * succeed.
75 */
85 FFS_ACTIVE, 76 FFS_ACTIVE,
86 77
87 /* All endpoints have been closed. This state is also set if 78 /*
79 * All endpoints have been closed. This state is also set if
88 * we encounter an unrecoverable error. The only 80 * we encounter an unrecoverable error. The only
89 * unrecoverable error is situation when after reading strings 81 * unrecoverable error is situation when after reading strings
90 * from user space we fail to initialise EP files or 82 * from user space we fail to initialise epfiles or
91 * functionfs_ready_callback() returns with error (<0). */ 83 * functionfs_ready_callback() returns with error (<0).
92 /* In this state no open(2), read(2) or write(2) (both on ep0 84 *
85 * In this state no open(2), read(2) or write(2) (both on ep0
93 * as well as epfile) may succeed (at this point epfiles are 86 * as well as epfile) may succeed (at this point epfiles are
94 * unlinked and all closed so this is not a problem; ep0 is 87 * unlinked and all closed so this is not a problem; ep0 is
95 * also closed but ep0 file exists and so open(2) on ep0 must 88 * also closed but ep0 file exists and so open(2) on ep0 must
96 * fail). */ 89 * fail).
90 */
97 FFS_CLOSING 91 FFS_CLOSING
98}; 92};
99 93
@@ -101,14 +95,18 @@ enum ffs_state {
101enum ffs_setup_state { 95enum ffs_setup_state {
102 /* There is no setup request pending. */ 96 /* There is no setup request pending. */
103 FFS_NO_SETUP, 97 FFS_NO_SETUP,
104 /* User has read events and there was a setup request event 98 /*
99 * User has read events and there was a setup request event
105 * there. The next read/write on ep0 will handle the 100 * there. The next read/write on ep0 will handle the
106 * request. */ 101 * request.
102 */
107 FFS_SETUP_PENDING, 103 FFS_SETUP_PENDING,
108 /* There was event pending but before user space handled it 104 /*
105 * There was event pending but before user space handled it
109 * some other event was introduced which canceled existing 106 * some other event was introduced which canceled existing
110 * setup. If this state is set read/write on ep0 return 107 * setup. If this state is set read/write on ep0 return
111 * -EIDRM. This state is only set when adding event. */ 108 * -EIDRM. This state is only set when adding event.
109 */
112 FFS_SETUP_CANCELED 110 FFS_SETUP_CANCELED
113}; 111};
114 112
@@ -120,23 +118,29 @@ struct ffs_function;
120struct ffs_data { 118struct ffs_data {
121 struct usb_gadget *gadget; 119 struct usb_gadget *gadget;
122 120
123 /* Protect access read/write operations, only one read/write 121 /*
122 * Protect access read/write operations, only one read/write
124 * at a time. As a consequence protects ep0req and company. 123 * at a time. As a consequence protects ep0req and company.
125 * While setup request is being processed (queued) this is 124 * While setup request is being processed (queued) this is
126 * held. */ 125 * held.
126 */
127 struct mutex mutex; 127 struct mutex mutex;
128 128
129 /* Protect access to enpoint related structures (basically 129 /*
130 * Protect access to endpoint related structures (basically
130 * usb_ep_queue(), usb_ep_dequeue(), etc. calls) except for 131 * usb_ep_queue(), usb_ep_dequeue(), etc. calls) except for
131 * endpint zero. */ 132 * endpoint zero.
133 */
132 spinlock_t eps_lock; 134 spinlock_t eps_lock;
133 135
134 /* XXX REVISIT do we need our own request? Since we are not 136 /*
135 * handling setup requests immidiatelly user space may be so 137 * XXX REVISIT do we need our own request? Since we are not
138 * handling setup requests immediately user space may be so
136 * slow that another setup will be sent to the gadget but this 139 * slow that another setup will be sent to the gadget but this
137 * time not to us but another function and then there could be 140 * time not to us but another function and then there could be
138 * a race. Is that the case? Or maybe we can use cdev->req 141 * a race. Is that the case? Or maybe we can use cdev->req
139 * after all, maybe we just need some spinlock for that? */ 142 * after all, maybe we just need some spinlock for that?
143 */
140 struct usb_request *ep0req; /* P: mutex */ 144 struct usb_request *ep0req; /* P: mutex */
141 struct completion ep0req_completion; /* P: mutex */ 145 struct completion ep0req_completion; /* P: mutex */
142 int ep0req_status; /* P: mutex */ 146 int ep0req_status; /* P: mutex */
@@ -150,7 +154,7 @@ struct ffs_data {
150 enum ffs_state state; 154 enum ffs_state state;
151 155
152 /* 156 /*
153 * Possible transations: 157 * Possible transitions:
154 * + FFS_NO_SETUP -> FFS_SETUP_PENDING -- P: ev.waitq.lock 158 * + FFS_NO_SETUP -> FFS_SETUP_PENDING -- P: ev.waitq.lock
155 * happens only in ep0 read which is P: mutex 159 * happens only in ep0 read which is P: mutex
156 * + FFS_SETUP_PENDING -> FFS_NO_SETUP -- P: ev.waitq.lock 160 * + FFS_SETUP_PENDING -> FFS_NO_SETUP -- P: ev.waitq.lock
@@ -183,18 +187,21 @@ struct ffs_data {
183 /* Active function */ 187 /* Active function */
184 struct ffs_function *func; 188 struct ffs_function *func;
185 189
186 /* Device name, write once when file system is mounted. 190 /*
187 * Intendet for user to read if she wants. */ 191 * Device name, write once when file system is mounted.
192 * Intended for user to read if she wants.
193 */
188 const char *dev_name; 194 const char *dev_name;
189 /* Private data for our user (ie. gadget). Managed by 195 /* Private data for our user (ie. gadget). Managed by user. */
190 * user. */
191 void *private_data; 196 void *private_data;
192 197
193 /* filled by __ffs_data_got_descs() */ 198 /* filled by __ffs_data_got_descs() */
194 /* real descriptors are 16 bytes after raw_descs (so you need 199 /*
200 * Real descriptors are 16 bytes after raw_descs (so you need
195 * to skip 16 bytes (ie. ffs->raw_descs + 16) to get to the 201 * to skip 16 bytes (ie. ffs->raw_descs + 16) to get to the
196 * first full speed descriptor). raw_descs_length and 202 * first full speed descriptor). raw_descs_length and
197 * raw_fs_descs_length do not have those 16 bytes added. */ 203 * raw_fs_descs_length do not have those 16 bytes added.
204 */
198 const void *raw_descs; 205 const void *raw_descs;
199 unsigned raw_descs_length; 206 unsigned raw_descs_length;
200 unsigned raw_fs_descs_length; 207 unsigned raw_fs_descs_length;
@@ -211,18 +218,23 @@ struct ffs_data {
211 const void *raw_strings; 218 const void *raw_strings;
212 struct usb_gadget_strings **stringtabs; 219 struct usb_gadget_strings **stringtabs;
213 220
214 /* File system's super block, write once when file system is mounted. */ 221 /*
222 * File system's super block, write once when file system is
223 * mounted.
224 */
215 struct super_block *sb; 225 struct super_block *sb;
216 226
217 /* File permissions, written once when fs is mounted*/ 227 /* File permissions, written once when fs is mounted */
218 struct ffs_file_perms { 228 struct ffs_file_perms {
219 umode_t mode; 229 umode_t mode;
220 uid_t uid; 230 uid_t uid;
221 gid_t gid; 231 gid_t gid;
222 } file_perms; 232 } file_perms;
223 233
224 /* The endpoint files, filled by ffs_epfiles_create(), 234 /*
225 * destroyed by ffs_epfiles_destroy(). */ 235 * The endpoint files, filled by ffs_epfiles_create(),
236 * destroyed by ffs_epfiles_destroy().
237 */
226 struct ffs_epfile *epfiles; 238 struct ffs_epfile *epfiles;
227}; 239};
228 240
@@ -236,7 +248,7 @@ static struct ffs_data *__must_check ffs_data_new(void) __attribute__((malloc));
236static void ffs_data_opened(struct ffs_data *ffs); 248static void ffs_data_opened(struct ffs_data *ffs);
237static void ffs_data_closed(struct ffs_data *ffs); 249static void ffs_data_closed(struct ffs_data *ffs);
238 250
239/* Called with ffs->mutex held; take over ownerrship of data. */ 251/* Called with ffs->mutex held; take over ownership of data. */
240static int __must_check 252static int __must_check
241__ffs_data_got_descs(struct ffs_data *ffs, char *data, size_t len); 253__ffs_data_got_descs(struct ffs_data *ffs, char *data, size_t len);
242static int __must_check 254static int __must_check
@@ -267,11 +279,9 @@ static struct ffs_function *ffs_func_from_usb(struct usb_function *f)
267 279
268static void ffs_func_free(struct ffs_function *func); 280static void ffs_func_free(struct ffs_function *func);
269 281
270
271static void ffs_func_eps_disable(struct ffs_function *func); 282static void ffs_func_eps_disable(struct ffs_function *func);
272static int __must_check ffs_func_eps_enable(struct ffs_function *func); 283static int __must_check ffs_func_eps_enable(struct ffs_function *func);
273 284
274
275static int ffs_func_bind(struct usb_configuration *, 285static int ffs_func_bind(struct usb_configuration *,
276 struct usb_function *); 286 struct usb_function *);
277static void ffs_func_unbind(struct usb_configuration *, 287static void ffs_func_unbind(struct usb_configuration *,
@@ -288,7 +298,6 @@ static int ffs_func_revmap_ep(struct ffs_function *func, u8 num);
288static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf); 298static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf);
289 299
290 300
291
292/* The endpoints structures *************************************************/ 301/* The endpoints structures *************************************************/
293 302
294struct ffs_ep { 303struct ffs_ep {
@@ -321,7 +330,6 @@ struct ffs_epfile {
321 unsigned char _pad; 330 unsigned char _pad;
322}; 331};
323 332
324
325static int __must_check ffs_epfiles_create(struct ffs_data *ffs); 333static int __must_check ffs_epfiles_create(struct ffs_data *ffs);
326static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count); 334static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count);
327 335
@@ -348,7 +356,6 @@ static void ffs_ep0_complete(struct usb_ep *ep, struct usb_request *req)
348 complete_all(&ffs->ep0req_completion); 356 complete_all(&ffs->ep0req_completion);
349} 357}
350 358
351
352static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len) 359static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
353{ 360{
354 struct usb_request *req = ffs->ep0req; 361 struct usb_request *req = ffs->ep0req;
@@ -380,17 +387,16 @@ static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
380static int __ffs_ep0_stall(struct ffs_data *ffs) 387static int __ffs_ep0_stall(struct ffs_data *ffs)
381{ 388{
382 if (ffs->ev.can_stall) { 389 if (ffs->ev.can_stall) {
383 FVDBG("ep0 stall\n"); 390 pr_vdebug("ep0 stall\n");
384 usb_ep_set_halt(ffs->gadget->ep0); 391 usb_ep_set_halt(ffs->gadget->ep0);
385 ffs->setup_state = FFS_NO_SETUP; 392 ffs->setup_state = FFS_NO_SETUP;
386 return -EL2HLT; 393 return -EL2HLT;
387 } else { 394 } else {
388 FDBG("bogus ep0 stall!\n"); 395 pr_debug("bogus ep0 stall!\n");
389 return -ESRCH; 396 return -ESRCH;
390 } 397 }
391} 398}
392 399
393
394static ssize_t ffs_ep0_write(struct file *file, const char __user *buf, 400static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
395 size_t len, loff_t *ptr) 401 size_t len, loff_t *ptr)
396{ 402{
@@ -409,7 +415,6 @@ static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
409 if (unlikely(ret < 0)) 415 if (unlikely(ret < 0))
410 return ret; 416 return ret;
411 417
412
413 /* Check state */ 418 /* Check state */
414 switch (ffs->state) { 419 switch (ffs->state) {
415 case FFS_READ_DESCRIPTORS: 420 case FFS_READ_DESCRIPTORS:
@@ -421,14 +426,14 @@ static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
421 } 426 }
422 427
423 data = ffs_prepare_buffer(buf, len); 428 data = ffs_prepare_buffer(buf, len);
424 if (unlikely(IS_ERR(data))) { 429 if (IS_ERR(data)) {
425 ret = PTR_ERR(data); 430 ret = PTR_ERR(data);
426 break; 431 break;
427 } 432 }
428 433
429 /* Handle data */ 434 /* Handle data */
430 if (ffs->state == FFS_READ_DESCRIPTORS) { 435 if (ffs->state == FFS_READ_DESCRIPTORS) {
431 FINFO("read descriptors"); 436 pr_info("read descriptors\n");
432 ret = __ffs_data_got_descs(ffs, data, len); 437 ret = __ffs_data_got_descs(ffs, data, len);
433 if (unlikely(ret < 0)) 438 if (unlikely(ret < 0))
434 break; 439 break;
@@ -436,7 +441,7 @@ static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
436 ffs->state = FFS_READ_STRINGS; 441 ffs->state = FFS_READ_STRINGS;
437 ret = len; 442 ret = len;
438 } else { 443 } else {
439 FINFO("read strings"); 444 pr_info("read strings\n");
440 ret = __ffs_data_got_strings(ffs, data, len); 445 ret = __ffs_data_got_strings(ffs, data, len);
441 if (unlikely(ret < 0)) 446 if (unlikely(ret < 0))
442 break; 447 break;
@@ -461,11 +466,12 @@ static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
461 } 466 }
462 break; 467 break;
463 468
464
465 case FFS_ACTIVE: 469 case FFS_ACTIVE:
466 data = NULL; 470 data = NULL;
467 /* We're called from user space, we can use _irq 471 /*
468 * rather then _irqsave */ 472 * We're called from user space, we can use _irq
473 * rather then _irqsave
474 */
469 spin_lock_irq(&ffs->ev.waitq.lock); 475 spin_lock_irq(&ffs->ev.waitq.lock);
470 switch (FFS_SETUP_STATE(ffs)) { 476 switch (FFS_SETUP_STATE(ffs)) {
471 case FFS_SETUP_CANCELED: 477 case FFS_SETUP_CANCELED:
@@ -493,23 +499,25 @@ static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
493 spin_unlock_irq(&ffs->ev.waitq.lock); 499 spin_unlock_irq(&ffs->ev.waitq.lock);
494 500
495 data = ffs_prepare_buffer(buf, len); 501 data = ffs_prepare_buffer(buf, len);
496 if (unlikely(IS_ERR(data))) { 502 if (IS_ERR(data)) {
497 ret = PTR_ERR(data); 503 ret = PTR_ERR(data);
498 break; 504 break;
499 } 505 }
500 506
501 spin_lock_irq(&ffs->ev.waitq.lock); 507 spin_lock_irq(&ffs->ev.waitq.lock);
502 508
503 /* We are guaranteed to be still in FFS_ACTIVE state 509 /*
510 * We are guaranteed to be still in FFS_ACTIVE state
504 * but the state of setup could have changed from 511 * but the state of setup could have changed from
505 * FFS_SETUP_PENDING to FFS_SETUP_CANCELED so we need 512 * FFS_SETUP_PENDING to FFS_SETUP_CANCELED so we need
506 * to check for that. If that happened we copied data 513 * to check for that. If that happened we copied data
507 * from user space in vain but it's unlikely. */ 514 * from user space in vain but it's unlikely.
508 /* For sure we are not in FFS_NO_SETUP since this is 515 *
516 * For sure we are not in FFS_NO_SETUP since this is
509 * the only place FFS_SETUP_PENDING -> FFS_NO_SETUP 517 * the only place FFS_SETUP_PENDING -> FFS_NO_SETUP
510 * transition can be performed and it's protected by 518 * transition can be performed and it's protected by
511 * mutex. */ 519 * mutex.
512 520 */
513 if (FFS_SETUP_STATE(ffs) == FFS_SETUP_CANCELED) { 521 if (FFS_SETUP_STATE(ffs) == FFS_SETUP_CANCELED) {
514 ret = -EIDRM; 522 ret = -EIDRM;
515done_spin: 523done_spin:
@@ -521,25 +529,22 @@ done_spin:
521 kfree(data); 529 kfree(data);
522 break; 530 break;
523 531
524
525 default: 532 default:
526 ret = -EBADFD; 533 ret = -EBADFD;
527 break; 534 break;
528 } 535 }
529 536
530
531 mutex_unlock(&ffs->mutex); 537 mutex_unlock(&ffs->mutex);
532 return ret; 538 return ret;
533} 539}
534 540
535
536
537static ssize_t __ffs_ep0_read_events(struct ffs_data *ffs, char __user *buf, 541static ssize_t __ffs_ep0_read_events(struct ffs_data *ffs, char __user *buf,
538 size_t n) 542 size_t n)
539{ 543{
540 /* We are holding ffs->ev.waitq.lock and ffs->mutex and we need 544 /*
541 * to release them. */ 545 * We are holding ffs->ev.waitq.lock and ffs->mutex and we need
542 546 * to release them.
547 */
543 struct usb_functionfs_event events[n]; 548 struct usb_functionfs_event events[n];
544 unsigned i = 0; 549 unsigned i = 0;
545 550
@@ -568,7 +573,6 @@ static ssize_t __ffs_ep0_read_events(struct ffs_data *ffs, char __user *buf,
568 ? -EFAULT : sizeof events; 573 ? -EFAULT : sizeof events;
569} 574}
570 575
571
572static ssize_t ffs_ep0_read(struct file *file, char __user *buf, 576static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
573 size_t len, loff_t *ptr) 577 size_t len, loff_t *ptr)
574{ 578{
@@ -588,16 +592,16 @@ static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
588 if (unlikely(ret < 0)) 592 if (unlikely(ret < 0))
589 return ret; 593 return ret;
590 594
591
592 /* Check state */ 595 /* Check state */
593 if (ffs->state != FFS_ACTIVE) { 596 if (ffs->state != FFS_ACTIVE) {
594 ret = -EBADFD; 597 ret = -EBADFD;
595 goto done_mutex; 598 goto done_mutex;
596 } 599 }
597 600
598 601 /*
599 /* We're called from user space, we can use _irq rather then 602 * We're called from user space, we can use _irq rather then
600 * _irqsave */ 603 * _irqsave
604 */
601 spin_lock_irq(&ffs->ev.waitq.lock); 605 spin_lock_irq(&ffs->ev.waitq.lock);
602 606
603 switch (FFS_SETUP_STATE(ffs)) { 607 switch (FFS_SETUP_STATE(ffs)) {
@@ -617,7 +621,8 @@ static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
617 break; 621 break;
618 } 622 }
619 623
620 if (unlikely(wait_event_interruptible_exclusive_locked_irq(ffs->ev.waitq, ffs->ev.count))) { 624 if (wait_event_interruptible_exclusive_locked_irq(ffs->ev.waitq,
625 ffs->ev.count)) {
621 ret = -EINTR; 626 ret = -EINTR;
622 break; 627 break;
623 } 628 }
@@ -625,7 +630,6 @@ static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
625 return __ffs_ep0_read_events(ffs, buf, 630 return __ffs_ep0_read_events(ffs, buf,
626 min(n, (size_t)ffs->ev.count)); 631 min(n, (size_t)ffs->ev.count));
627 632
628
629 case FFS_SETUP_PENDING: 633 case FFS_SETUP_PENDING:
630 if (ffs->ev.setup.bRequestType & USB_DIR_IN) { 634 if (ffs->ev.setup.bRequestType & USB_DIR_IN) {
631 spin_unlock_irq(&ffs->ev.waitq.lock); 635 spin_unlock_irq(&ffs->ev.waitq.lock);
@@ -671,8 +675,6 @@ done_mutex:
671 return ret; 675 return ret;
672} 676}
673 677
674
675
676static int ffs_ep0_open(struct inode *inode, struct file *file) 678static int ffs_ep0_open(struct inode *inode, struct file *file)
677{ 679{
678 struct ffs_data *ffs = inode->i_private; 680 struct ffs_data *ffs = inode->i_private;
@@ -688,7 +690,6 @@ static int ffs_ep0_open(struct inode *inode, struct file *file)
688 return 0; 690 return 0;
689} 691}
690 692
691
692static int ffs_ep0_release(struct inode *inode, struct file *file) 693static int ffs_ep0_release(struct inode *inode, struct file *file)
693{ 694{
694 struct ffs_data *ffs = file->private_data; 695 struct ffs_data *ffs = file->private_data;
@@ -700,7 +701,6 @@ static int ffs_ep0_release(struct inode *inode, struct file *file)
700 return 0; 701 return 0;
701} 702}
702 703
703
704static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value) 704static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value)
705{ 705{
706 struct ffs_data *ffs = file->private_data; 706 struct ffs_data *ffs = file->private_data;
@@ -721,7 +721,6 @@ static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value)
721 return ret; 721 return ret;
722} 722}
723 723
724
725static const struct file_operations ffs_ep0_operations = { 724static const struct file_operations ffs_ep0_operations = {
726 .owner = THIS_MODULE, 725 .owner = THIS_MODULE,
727 .llseek = no_llseek, 726 .llseek = no_llseek,
@@ -736,7 +735,6 @@ static const struct file_operations ffs_ep0_operations = {
736 735
737/* "Normal" endpoints operations ********************************************/ 736/* "Normal" endpoints operations ********************************************/
738 737
739
740static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req) 738static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req)
741{ 739{
742 ENTER(); 740 ENTER();
@@ -747,7 +745,6 @@ static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req)
747 } 745 }
748} 746}
749 747
750
751static ssize_t ffs_epfile_io(struct file *file, 748static ssize_t ffs_epfile_io(struct file *file,
752 char __user *buf, size_t len, int read) 749 char __user *buf, size_t len, int read)
753{ 750{
@@ -777,8 +774,8 @@ first_try:
777 goto error; 774 goto error;
778 } 775 }
779 776
780 if (unlikely(wait_event_interruptible 777 if (wait_event_interruptible(epfile->wait,
781 (epfile->wait, (ep = epfile->ep)))) { 778 (ep = epfile->ep))) {
782 ret = -EINTR; 779 ret = -EINTR;
783 goto error; 780 goto error;
784 } 781 }
@@ -810,12 +807,16 @@ first_try:
810 if (unlikely(ret)) 807 if (unlikely(ret))
811 goto error; 808 goto error;
812 809
813 /* We're called from user space, we can use _irq rather then 810 /*
814 * _irqsave */ 811 * We're called from user space, we can use _irq rather then
812 * _irqsave
813 */
815 spin_lock_irq(&epfile->ffs->eps_lock); 814 spin_lock_irq(&epfile->ffs->eps_lock);
816 815
817 /* While we were acquiring mutex endpoint got disabled 816 /*
818 * or changed? */ 817 * While we were acquiring mutex endpoint got disabled
818 * or changed?
819 */
819 } while (unlikely(epfile->ep != ep)); 820 } while (unlikely(epfile->ep != ep));
820 821
821 /* Halt */ 822 /* Halt */
@@ -857,7 +858,6 @@ error:
857 return ret; 858 return ret;
858} 859}
859 860
860
861static ssize_t 861static ssize_t
862ffs_epfile_write(struct file *file, const char __user *buf, size_t len, 862ffs_epfile_write(struct file *file, const char __user *buf, size_t len,
863 loff_t *ptr) 863 loff_t *ptr)
@@ -903,7 +903,6 @@ ffs_epfile_release(struct inode *inode, struct file *file)
903 return 0; 903 return 0;
904} 904}
905 905
906
907static long ffs_epfile_ioctl(struct file *file, unsigned code, 906static long ffs_epfile_ioctl(struct file *file, unsigned code,
908 unsigned long value) 907 unsigned long value)
909{ 908{
@@ -942,7 +941,6 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code,
942 return ret; 941 return ret;
943} 942}
944 943
945
946static const struct file_operations ffs_epfile_operations = { 944static const struct file_operations ffs_epfile_operations = {
947 .owner = THIS_MODULE, 945 .owner = THIS_MODULE,
948 .llseek = no_llseek, 946 .llseek = no_llseek,
@@ -955,15 +953,13 @@ static const struct file_operations ffs_epfile_operations = {
955}; 953};
956 954
957 955
958
959/* File system and super block operations ***********************************/ 956/* File system and super block operations ***********************************/
960 957
961/* 958/*
962 * Mounting the filesystem creates a controller file, used first for 959 * Mounting the file system creates a controller file, used first for
963 * function configuration then later for event monitoring. 960 * function configuration then later for event monitoring.
964 */ 961 */
965 962
966
967static struct inode *__must_check 963static struct inode *__must_check
968ffs_sb_make_inode(struct super_block *sb, void *data, 964ffs_sb_make_inode(struct super_block *sb, void *data,
969 const struct file_operations *fops, 965 const struct file_operations *fops,
@@ -996,9 +992,7 @@ ffs_sb_make_inode(struct super_block *sb, void *data,
996 return inode; 992 return inode;
997} 993}
998 994
999
1000/* Create "regular" file */ 995/* Create "regular" file */
1001
1002static struct inode *ffs_sb_create_file(struct super_block *sb, 996static struct inode *ffs_sb_create_file(struct super_block *sb,
1003 const char *name, void *data, 997 const char *name, void *data,
1004 const struct file_operations *fops, 998 const struct file_operations *fops,
@@ -1027,9 +1021,7 @@ static struct inode *ffs_sb_create_file(struct super_block *sb,
1027 return inode; 1021 return inode;
1028} 1022}
1029 1023
1030
1031/* Super block */ 1024/* Super block */
1032
1033static const struct super_operations ffs_sb_operations = { 1025static const struct super_operations ffs_sb_operations = {
1034 .statfs = simple_statfs, 1026 .statfs = simple_statfs,
1035 .drop_inode = generic_delete_inode, 1027 .drop_inode = generic_delete_inode,
@@ -1050,7 +1042,7 @@ static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
1050 1042
1051 ENTER(); 1043 ENTER();
1052 1044
1053 /* Initialize data */ 1045 /* Initialise data */
1054 ffs = ffs_data_new(); 1046 ffs = ffs_data_new();
1055 if (unlikely(!ffs)) 1047 if (unlikely(!ffs))
1056 goto enomem0; 1048 goto enomem0;
@@ -1096,7 +1088,6 @@ enomem0:
1096 return -ENOMEM; 1088 return -ENOMEM;
1097} 1089}
1098 1090
1099
1100static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts) 1091static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts)
1101{ 1092{
1102 ENTER(); 1093 ENTER();
@@ -1116,7 +1107,7 @@ static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts)
1116 /* Value limit */ 1107 /* Value limit */
1117 eq = strchr(opts, '='); 1108 eq = strchr(opts, '=');
1118 if (unlikely(!eq)) { 1109 if (unlikely(!eq)) {
1119 FERR("'=' missing in %s", opts); 1110 pr_err("'=' missing in %s\n", opts);
1120 return -EINVAL; 1111 return -EINVAL;
1121 } 1112 }
1122 *eq = 0; 1113 *eq = 0;
@@ -1124,7 +1115,7 @@ static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts)
1124 /* Parse value */ 1115 /* Parse value */
1125 value = simple_strtoul(eq + 1, &end, 0); 1116 value = simple_strtoul(eq + 1, &end, 0);
1126 if (unlikely(*end != ',' && *end != 0)) { 1117 if (unlikely(*end != ',' && *end != 0)) {
1127 FERR("%s: invalid value: %s", opts, eq + 1); 1118 pr_err("%s: invalid value: %s\n", opts, eq + 1);
1128 return -EINVAL; 1119 return -EINVAL;
1129 } 1120 }
1130 1121
@@ -1159,7 +1150,7 @@ static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts)
1159 1150
1160 default: 1151 default:
1161invalid: 1152invalid:
1162 FERR("%s: invalid option", opts); 1153 pr_err("%s: invalid option\n", opts);
1163 return -EINVAL; 1154 return -EINVAL;
1164 } 1155 }
1165 1156
@@ -1172,7 +1163,6 @@ invalid:
1172 return 0; 1163 return 0;
1173} 1164}
1174 1165
1175
1176/* "mount -t functionfs dev_name /dev/function" ends up here */ 1166/* "mount -t functionfs dev_name /dev/function" ends up here */
1177 1167
1178static struct dentry * 1168static struct dentry *
@@ -1224,10 +1214,8 @@ static struct file_system_type ffs_fs_type = {
1224}; 1214};
1225 1215
1226 1216
1227
1228/* Driver's main init/cleanup functions *************************************/ 1217/* Driver's main init/cleanup functions *************************************/
1229 1218
1230
1231static int functionfs_init(void) 1219static int functionfs_init(void)
1232{ 1220{
1233 int ret; 1221 int ret;
@@ -1236,9 +1224,9 @@ static int functionfs_init(void)
1236 1224
1237 ret = register_filesystem(&ffs_fs_type); 1225 ret = register_filesystem(&ffs_fs_type);
1238 if (likely(!ret)) 1226 if (likely(!ret))
1239 FINFO("file system registered"); 1227 pr_info("file system registered\n");
1240 else 1228 else
1241 FERR("failed registering file system (%d)", ret); 1229 pr_err("failed registering file system (%d)\n", ret);
1242 1230
1243 return ret; 1231 return ret;
1244} 1232}
@@ -1247,18 +1235,16 @@ static void functionfs_cleanup(void)
1247{ 1235{
1248 ENTER(); 1236 ENTER();
1249 1237
1250 FINFO("unloading"); 1238 pr_info("unloading\n");
1251 unregister_filesystem(&ffs_fs_type); 1239 unregister_filesystem(&ffs_fs_type);
1252} 1240}
1253 1241
1254 1242
1255
1256/* ffs_data and ffs_function construction and destruction code **************/ 1243/* ffs_data and ffs_function construction and destruction code **************/
1257 1244
1258static void ffs_data_clear(struct ffs_data *ffs); 1245static void ffs_data_clear(struct ffs_data *ffs);
1259static void ffs_data_reset(struct ffs_data *ffs); 1246static void ffs_data_reset(struct ffs_data *ffs);
1260 1247
1261
1262static void ffs_data_get(struct ffs_data *ffs) 1248static void ffs_data_get(struct ffs_data *ffs)
1263{ 1249{
1264 ENTER(); 1250 ENTER();
@@ -1279,7 +1265,7 @@ static void ffs_data_put(struct ffs_data *ffs)
1279 ENTER(); 1265 ENTER();
1280 1266
1281 if (unlikely(atomic_dec_and_test(&ffs->ref))) { 1267 if (unlikely(atomic_dec_and_test(&ffs->ref))) {
1282 FINFO("%s(): freeing", __func__); 1268 pr_info("%s(): freeing\n", __func__);
1283 ffs_data_clear(ffs); 1269 ffs_data_clear(ffs);
1284 BUG_ON(mutex_is_locked(&ffs->mutex) || 1270 BUG_ON(mutex_is_locked(&ffs->mutex) ||
1285 spin_is_locked(&ffs->ev.waitq.lock) || 1271 spin_is_locked(&ffs->ev.waitq.lock) ||
@@ -1289,8 +1275,6 @@ static void ffs_data_put(struct ffs_data *ffs)
1289 } 1275 }
1290} 1276}
1291 1277
1292
1293
1294static void ffs_data_closed(struct ffs_data *ffs) 1278static void ffs_data_closed(struct ffs_data *ffs)
1295{ 1279{
1296 ENTER(); 1280 ENTER();
@@ -1303,7 +1287,6 @@ static void ffs_data_closed(struct ffs_data *ffs)
1303 ffs_data_put(ffs); 1287 ffs_data_put(ffs);
1304} 1288}
1305 1289
1306
1307static struct ffs_data *ffs_data_new(void) 1290static struct ffs_data *ffs_data_new(void)
1308{ 1291{
1309 struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL); 1292 struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL);
@@ -1326,7 +1309,6 @@ static struct ffs_data *ffs_data_new(void)
1326 return ffs; 1309 return ffs;
1327} 1310}
1328 1311
1329
1330static void ffs_data_clear(struct ffs_data *ffs) 1312static void ffs_data_clear(struct ffs_data *ffs)
1331{ 1313{
1332 ENTER(); 1314 ENTER();
@@ -1344,7 +1326,6 @@ static void ffs_data_clear(struct ffs_data *ffs)
1344 kfree(ffs->stringtabs); 1326 kfree(ffs->stringtabs);
1345} 1327}
1346 1328
1347
1348static void ffs_data_reset(struct ffs_data *ffs) 1329static void ffs_data_reset(struct ffs_data *ffs)
1349{ 1330{
1350 ENTER(); 1331 ENTER();
@@ -1407,7 +1388,6 @@ static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev)
1407 return 0; 1388 return 0;
1408} 1389}
1409 1390
1410
1411static void functionfs_unbind(struct ffs_data *ffs) 1391static void functionfs_unbind(struct ffs_data *ffs)
1412{ 1392{
1413 ENTER(); 1393 ENTER();
@@ -1420,7 +1400,6 @@ static void functionfs_unbind(struct ffs_data *ffs)
1420 } 1400 }
1421} 1401}
1422 1402
1423
1424static int ffs_epfiles_create(struct ffs_data *ffs) 1403static int ffs_epfiles_create(struct ffs_data *ffs)
1425{ 1404{
1426 struct ffs_epfile *epfile, *epfiles; 1405 struct ffs_epfile *epfile, *epfiles;
@@ -1451,7 +1430,6 @@ static int ffs_epfiles_create(struct ffs_data *ffs)
1451 return 0; 1430 return 0;
1452} 1431}
1453 1432
1454
1455static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count) 1433static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
1456{ 1434{
1457 struct ffs_epfile *epfile = epfiles; 1435 struct ffs_epfile *epfile = epfiles;
@@ -1471,7 +1449,6 @@ static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
1471 kfree(epfiles); 1449 kfree(epfiles);
1472} 1450}
1473 1451
1474
1475static int functionfs_bind_config(struct usb_composite_dev *cdev, 1452static int functionfs_bind_config(struct usb_composite_dev *cdev,
1476 struct usb_configuration *c, 1453 struct usb_configuration *c,
1477 struct ffs_data *ffs) 1454 struct ffs_data *ffs)
@@ -1491,7 +1468,6 @@ static int functionfs_bind_config(struct usb_composite_dev *cdev,
1491 func->function.bind = ffs_func_bind; 1468 func->function.bind = ffs_func_bind;
1492 func->function.unbind = ffs_func_unbind; 1469 func->function.unbind = ffs_func_unbind;
1493 func->function.set_alt = ffs_func_set_alt; 1470 func->function.set_alt = ffs_func_set_alt;
1494 /*func->function.get_alt = ffs_func_get_alt;*/
1495 func->function.disable = ffs_func_disable; 1471 func->function.disable = ffs_func_disable;
1496 func->function.setup = ffs_func_setup; 1472 func->function.setup = ffs_func_setup;
1497 func->function.suspend = ffs_func_suspend; 1473 func->function.suspend = ffs_func_suspend;
@@ -1516,14 +1492,15 @@ static void ffs_func_free(struct ffs_function *func)
1516 ffs_data_put(func->ffs); 1492 ffs_data_put(func->ffs);
1517 1493
1518 kfree(func->eps); 1494 kfree(func->eps);
1519 /* eps and interfaces_nums are allocated in the same chunk so 1495 /*
1496 * eps and interfaces_nums are allocated in the same chunk so
1520 * only one free is required. Descriptors are also allocated 1497 * only one free is required. Descriptors are also allocated
1521 * in the same chunk. */ 1498 * in the same chunk.
1499 */
1522 1500
1523 kfree(func); 1501 kfree(func);
1524} 1502}
1525 1503
1526
1527static void ffs_func_eps_disable(struct ffs_function *func) 1504static void ffs_func_eps_disable(struct ffs_function *func)
1528{ 1505{
1529 struct ffs_ep *ep = func->eps; 1506 struct ffs_ep *ep = func->eps;
@@ -1581,11 +1558,12 @@ static int ffs_func_eps_enable(struct ffs_function *func)
1581 1558
1582/* Parsing and building descriptors and strings *****************************/ 1559/* Parsing and building descriptors and strings *****************************/
1583 1560
1584 1561/*
1585/* This validates if data pointed by data is a valid USB descriptor as 1562 * This validates if data pointed by data is a valid USB descriptor as
1586 * well as record how many interfaces, endpoints and strings are 1563 * well as record how many interfaces, endpoints and strings are
1587 * required by given configuration. Returns address afther the 1564 * required by given configuration. Returns address after the
1588 * descriptor or NULL if data is invalid. */ 1565 * descriptor or NULL if data is invalid.
1566 */
1589 1567
1590enum ffs_entity_type { 1568enum ffs_entity_type {
1591 FFS_DESCRIPTOR, FFS_INTERFACE, FFS_STRING, FFS_ENDPOINT 1569 FFS_DESCRIPTOR, FFS_INTERFACE, FFS_STRING, FFS_ENDPOINT
@@ -1607,14 +1585,14 @@ static int __must_check ffs_do_desc(char *data, unsigned len,
1607 1585
1608 /* At least two bytes are required: length and type */ 1586 /* At least two bytes are required: length and type */
1609 if (len < 2) { 1587 if (len < 2) {
1610 FVDBG("descriptor too short"); 1588 pr_vdebug("descriptor too short\n");
1611 return -EINVAL; 1589 return -EINVAL;
1612 } 1590 }
1613 1591
1614 /* If we have at least as many bytes as the descriptor takes? */ 1592 /* If we have at least as many bytes as the descriptor takes? */
1615 length = _ds->bLength; 1593 length = _ds->bLength;
1616 if (len < length) { 1594 if (len < length) {
1617 FVDBG("descriptor longer then available data"); 1595 pr_vdebug("descriptor longer then available data\n");
1618 return -EINVAL; 1596 return -EINVAL;
1619 } 1597 }
1620 1598
@@ -1622,15 +1600,15 @@ static int __must_check ffs_do_desc(char *data, unsigned len,
1622#define __entity_check_STRING(val) (val) 1600#define __entity_check_STRING(val) (val)
1623#define __entity_check_ENDPOINT(val) ((val) & USB_ENDPOINT_NUMBER_MASK) 1601#define __entity_check_ENDPOINT(val) ((val) & USB_ENDPOINT_NUMBER_MASK)
1624#define __entity(type, val) do { \ 1602#define __entity(type, val) do { \
1625 FVDBG("entity " #type "(%02x)", (val)); \ 1603 pr_vdebug("entity " #type "(%02x)\n", (val)); \
1626 if (unlikely(!__entity_check_ ##type(val))) { \ 1604 if (unlikely(!__entity_check_ ##type(val))) { \
1627 FVDBG("invalid entity's value"); \ 1605 pr_vdebug("invalid entity's value\n"); \
1628 return -EINVAL; \ 1606 return -EINVAL; \
1629 } \ 1607 } \
1630 ret = entity(FFS_ ##type, &val, _ds, priv); \ 1608 ret = entity(FFS_ ##type, &val, _ds, priv); \
1631 if (unlikely(ret < 0)) { \ 1609 if (unlikely(ret < 0)) { \
1632 FDBG("entity " #type "(%02x); ret = %d", \ 1610 pr_debug("entity " #type "(%02x); ret = %d\n", \
1633 (val), ret); \ 1611 (val), ret); \
1634 return ret; \ 1612 return ret; \
1635 } \ 1613 } \
1636 } while (0) 1614 } while (0)
@@ -1642,12 +1620,13 @@ static int __must_check ffs_do_desc(char *data, unsigned len,
1642 case USB_DT_STRING: 1620 case USB_DT_STRING:
1643 case USB_DT_DEVICE_QUALIFIER: 1621 case USB_DT_DEVICE_QUALIFIER:
1644 /* function can't have any of those */ 1622 /* function can't have any of those */
1645 FVDBG("descriptor reserved for gadget: %d", _ds->bDescriptorType); 1623 pr_vdebug("descriptor reserved for gadget: %d\n",
1624 _ds->bDescriptorType);
1646 return -EINVAL; 1625 return -EINVAL;
1647 1626
1648 case USB_DT_INTERFACE: { 1627 case USB_DT_INTERFACE: {
1649 struct usb_interface_descriptor *ds = (void *)_ds; 1628 struct usb_interface_descriptor *ds = (void *)_ds;
1650 FVDBG("interface descriptor"); 1629 pr_vdebug("interface descriptor\n");
1651 if (length != sizeof *ds) 1630 if (length != sizeof *ds)
1652 goto inv_length; 1631 goto inv_length;
1653 1632
@@ -1659,7 +1638,7 @@ static int __must_check ffs_do_desc(char *data, unsigned len,
1659 1638
1660 case USB_DT_ENDPOINT: { 1639 case USB_DT_ENDPOINT: {
1661 struct usb_endpoint_descriptor *ds = (void *)_ds; 1640 struct usb_endpoint_descriptor *ds = (void *)_ds;
1662 FVDBG("endpoint descriptor"); 1641 pr_vdebug("endpoint descriptor\n");
1663 if (length != USB_DT_ENDPOINT_SIZE && 1642 if (length != USB_DT_ENDPOINT_SIZE &&
1664 length != USB_DT_ENDPOINT_AUDIO_SIZE) 1643 length != USB_DT_ENDPOINT_AUDIO_SIZE)
1665 goto inv_length; 1644 goto inv_length;
@@ -1674,7 +1653,7 @@ static int __must_check ffs_do_desc(char *data, unsigned len,
1674 1653
1675 case USB_DT_INTERFACE_ASSOCIATION: { 1654 case USB_DT_INTERFACE_ASSOCIATION: {
1676 struct usb_interface_assoc_descriptor *ds = (void *)_ds; 1655 struct usb_interface_assoc_descriptor *ds = (void *)_ds;
1677 FVDBG("interface association descriptor"); 1656 pr_vdebug("interface association descriptor\n");
1678 if (length != sizeof *ds) 1657 if (length != sizeof *ds)
1679 goto inv_length; 1658 goto inv_length;
1680 if (ds->iFunction) 1659 if (ds->iFunction)
@@ -1688,17 +1667,17 @@ static int __must_check ffs_do_desc(char *data, unsigned len,
1688 case USB_DT_SECURITY: 1667 case USB_DT_SECURITY:
1689 case USB_DT_CS_RADIO_CONTROL: 1668 case USB_DT_CS_RADIO_CONTROL:
1690 /* TODO */ 1669 /* TODO */
1691 FVDBG("unimplemented descriptor: %d", _ds->bDescriptorType); 1670 pr_vdebug("unimplemented descriptor: %d\n", _ds->bDescriptorType);
1692 return -EINVAL; 1671 return -EINVAL;
1693 1672
1694 default: 1673 default:
1695 /* We should never be here */ 1674 /* We should never be here */
1696 FVDBG("unknown descriptor: %d", _ds->bDescriptorType); 1675 pr_vdebug("unknown descriptor: %d\n", _ds->bDescriptorType);
1697 return -EINVAL; 1676 return -EINVAL;
1698 1677
1699 inv_length: 1678inv_length:
1700 FVDBG("invalid length: %d (descriptor %d)", 1679 pr_vdebug("invalid length: %d (descriptor %d)\n",
1701 _ds->bLength, _ds->bDescriptorType); 1680 _ds->bLength, _ds->bDescriptorType);
1702 return -EINVAL; 1681 return -EINVAL;
1703 } 1682 }
1704 1683
@@ -1711,7 +1690,6 @@ static int __must_check ffs_do_desc(char *data, unsigned len,
1711 return length; 1690 return length;
1712} 1691}
1713 1692
1714
1715static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len, 1693static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len,
1716 ffs_entity_callback entity, void *priv) 1694 ffs_entity_callback entity, void *priv)
1717{ 1695{
@@ -1726,10 +1704,11 @@ static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len,
1726 if (num == count) 1704 if (num == count)
1727 data = NULL; 1705 data = NULL;
1728 1706
1729 /* Record "descriptor" entitny */ 1707 /* Record "descriptor" entity */
1730 ret = entity(FFS_DESCRIPTOR, (u8 *)num, (void *)data, priv); 1708 ret = entity(FFS_DESCRIPTOR, (u8 *)num, (void *)data, priv);
1731 if (unlikely(ret < 0)) { 1709 if (unlikely(ret < 0)) {
1732 FDBG("entity DESCRIPTOR(%02lx); ret = %d", num, ret); 1710 pr_debug("entity DESCRIPTOR(%02lx); ret = %d\n",
1711 num, ret);
1733 return ret; 1712 return ret;
1734 } 1713 }
1735 1714
@@ -1738,7 +1717,7 @@ static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len,
1738 1717
1739 ret = ffs_do_desc(data, len, entity, priv); 1718 ret = ffs_do_desc(data, len, entity, priv);
1740 if (unlikely(ret < 0)) { 1719 if (unlikely(ret < 0)) {
1741 FDBG("%s returns %d", __func__, ret); 1720 pr_debug("%s returns %d\n", __func__, ret);
1742 return ret; 1721 return ret;
1743 } 1722 }
1744 1723
@@ -1748,7 +1727,6 @@ static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len,
1748 } 1727 }
1749} 1728}
1750 1729
1751
1752static int __ffs_data_do_entity(enum ffs_entity_type type, 1730static int __ffs_data_do_entity(enum ffs_entity_type type,
1753 u8 *valuep, struct usb_descriptor_header *desc, 1731 u8 *valuep, struct usb_descriptor_header *desc,
1754 void *priv) 1732 void *priv)
@@ -1762,16 +1740,20 @@ static int __ffs_data_do_entity(enum ffs_entity_type type,
1762 break; 1740 break;
1763 1741
1764 case FFS_INTERFACE: 1742 case FFS_INTERFACE:
1765 /* Interfaces are indexed from zero so if we 1743 /*
1744 * Interfaces are indexed from zero so if we
1766 * encountered interface "n" then there are at least 1745 * encountered interface "n" then there are at least
1767 * "n+1" interfaces. */ 1746 * "n+1" interfaces.
1747 */
1768 if (*valuep >= ffs->interfaces_count) 1748 if (*valuep >= ffs->interfaces_count)
1769 ffs->interfaces_count = *valuep + 1; 1749 ffs->interfaces_count = *valuep + 1;
1770 break; 1750 break;
1771 1751
1772 case FFS_STRING: 1752 case FFS_STRING:
1773 /* Strings are indexed from 1 (0 is magic ;) reserved 1753 /*
1774 * for languages list or some such) */ 1754 * Strings are indexed from 1 (0 is magic ;) reserved
1755 * for languages list or some such)
1756 */
1775 if (*valuep > ffs->strings_count) 1757 if (*valuep > ffs->strings_count)
1776 ffs->strings_count = *valuep; 1758 ffs->strings_count = *valuep;
1777 break; 1759 break;
@@ -1786,7 +1768,6 @@ static int __ffs_data_do_entity(enum ffs_entity_type type,
1786 return 0; 1768 return 0;
1787} 1769}
1788 1770
1789
1790static int __ffs_data_got_descs(struct ffs_data *ffs, 1771static int __ffs_data_got_descs(struct ffs_data *ffs,
1791 char *const _data, size_t len) 1772 char *const _data, size_t len)
1792{ 1773{
@@ -1849,8 +1830,6 @@ error:
1849 return ret; 1830 return ret;
1850} 1831}
1851 1832
1852
1853
1854static int __ffs_data_got_strings(struct ffs_data *ffs, 1833static int __ffs_data_got_strings(struct ffs_data *ffs,
1855 char *const _data, size_t len) 1834 char *const _data, size_t len)
1856{ 1835{
@@ -1876,17 +1855,17 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
1876 if (unlikely(str_count < needed_count)) 1855 if (unlikely(str_count < needed_count))
1877 goto error; 1856 goto error;
1878 1857
1879 /* If we don't need any strings just return and free all 1858 /*
1880 * memory */ 1859 * If we don't need any strings just return and free all
1860 * memory.
1861 */
1881 if (!needed_count) { 1862 if (!needed_count) {
1882 kfree(_data); 1863 kfree(_data);
1883 return 0; 1864 return 0;
1884 } 1865 }
1885 1866
1886 /* Allocate */ 1867 /* Allocate everything in one chunk so there's less maintenance. */
1887 { 1868 {
1888 /* Allocate everything in one chunk so there's less
1889 * maintanance. */
1890 struct { 1869 struct {
1891 struct usb_gadget_strings *stringtabs[lang_count + 1]; 1870 struct usb_gadget_strings *stringtabs[lang_count + 1];
1892 struct usb_gadget_strings stringtab[lang_count]; 1871 struct usb_gadget_strings stringtab[lang_count];
@@ -1937,13 +1916,17 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
1937 if (unlikely(length == len)) 1916 if (unlikely(length == len))
1938 goto error_free; 1917 goto error_free;
1939 1918
1940 /* user may provide more strings then we need, 1919 /*
1941 * if that's the case we simply ingore the 1920 * User may provide more strings then we need,
1942 * rest */ 1921 * if that's the case we simply ignore the
1922 * rest
1923 */
1943 if (likely(needed)) { 1924 if (likely(needed)) {
1944 /* s->id will be set while adding 1925 /*
1926 * s->id will be set while adding
1945 * function to configuration so for 1927 * function to configuration so for
1946 * now just leave garbage here. */ 1928 * now just leave garbage here.
1929 */
1947 s->s = data; 1930 s->s = data;
1948 --needed; 1931 --needed;
1949 ++s; 1932 ++s;
@@ -1977,8 +1960,6 @@ error:
1977} 1960}
1978 1961
1979 1962
1980
1981
1982/* Events handling and management *******************************************/ 1963/* Events handling and management *******************************************/
1983 1964
1984static void __ffs_event_add(struct ffs_data *ffs, 1965static void __ffs_event_add(struct ffs_data *ffs,
@@ -1987,29 +1968,32 @@ static void __ffs_event_add(struct ffs_data *ffs,
1987 enum usb_functionfs_event_type rem_type1, rem_type2 = type; 1968 enum usb_functionfs_event_type rem_type1, rem_type2 = type;
1988 int neg = 0; 1969 int neg = 0;
1989 1970
1990 /* Abort any unhandled setup */ 1971 /*
1991 /* We do not need to worry about some cmpxchg() changing value 1972 * Abort any unhandled setup
1973 *
1974 * We do not need to worry about some cmpxchg() changing value
1992 * of ffs->setup_state without holding the lock because when 1975 * of ffs->setup_state without holding the lock because when
1993 * state is FFS_SETUP_PENDING cmpxchg() in several places in 1976 * state is FFS_SETUP_PENDING cmpxchg() in several places in
1994 * the source does nothing. */ 1977 * the source does nothing.
1978 */
1995 if (ffs->setup_state == FFS_SETUP_PENDING) 1979 if (ffs->setup_state == FFS_SETUP_PENDING)
1996 ffs->setup_state = FFS_SETUP_CANCELED; 1980 ffs->setup_state = FFS_SETUP_CANCELED;
1997 1981
1998 switch (type) { 1982 switch (type) {
1999 case FUNCTIONFS_RESUME: 1983 case FUNCTIONFS_RESUME:
2000 rem_type2 = FUNCTIONFS_SUSPEND; 1984 rem_type2 = FUNCTIONFS_SUSPEND;
2001 /* FALL THGOUTH */ 1985 /* FALL THROUGH */
2002 case FUNCTIONFS_SUSPEND: 1986 case FUNCTIONFS_SUSPEND:
2003 case FUNCTIONFS_SETUP: 1987 case FUNCTIONFS_SETUP:
2004 rem_type1 = type; 1988 rem_type1 = type;
2005 /* discard all similar events */ 1989 /* Discard all similar events */
2006 break; 1990 break;
2007 1991
2008 case FUNCTIONFS_BIND: 1992 case FUNCTIONFS_BIND:
2009 case FUNCTIONFS_UNBIND: 1993 case FUNCTIONFS_UNBIND:
2010 case FUNCTIONFS_DISABLE: 1994 case FUNCTIONFS_DISABLE:
2011 case FUNCTIONFS_ENABLE: 1995 case FUNCTIONFS_ENABLE:
2012 /* discard everything other then power management. */ 1996 /* Discard everything other then power management. */
2013 rem_type1 = FUNCTIONFS_SUSPEND; 1997 rem_type1 = FUNCTIONFS_SUSPEND;
2014 rem_type2 = FUNCTIONFS_RESUME; 1998 rem_type2 = FUNCTIONFS_RESUME;
2015 neg = 1; 1999 neg = 1;
@@ -2026,11 +2010,11 @@ static void __ffs_event_add(struct ffs_data *ffs,
2026 if ((*ev == rem_type1 || *ev == rem_type2) == neg) 2010 if ((*ev == rem_type1 || *ev == rem_type2) == neg)
2027 *out++ = *ev; 2011 *out++ = *ev;
2028 else 2012 else
2029 FVDBG("purging event %d", *ev); 2013 pr_vdebug("purging event %d\n", *ev);
2030 ffs->ev.count = out - ffs->ev.types; 2014 ffs->ev.count = out - ffs->ev.types;
2031 } 2015 }
2032 2016
2033 FVDBG("adding event %d", type); 2017 pr_vdebug("adding event %d\n", type);
2034 ffs->ev.types[ffs->ev.count++] = type; 2018 ffs->ev.types[ffs->ev.count++] = type;
2035 wake_up_locked(&ffs->ev.waitq); 2019 wake_up_locked(&ffs->ev.waitq);
2036} 2020}
@@ -2055,8 +2039,10 @@ static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
2055 struct ffs_function *func = priv; 2039 struct ffs_function *func = priv;
2056 struct ffs_ep *ffs_ep; 2040 struct ffs_ep *ffs_ep;
2057 2041
2058 /* If hs_descriptors is not NULL then we are reading hs 2042 /*
2059 * descriptors now */ 2043 * If hs_descriptors is not NULL then we are reading hs
2044 * descriptors now
2045 */
2060 const int isHS = func->function.hs_descriptors != NULL; 2046 const int isHS = func->function.hs_descriptors != NULL;
2061 unsigned idx; 2047 unsigned idx;
2062 2048
@@ -2075,9 +2061,9 @@ static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
2075 ffs_ep = func->eps + idx; 2061 ffs_ep = func->eps + idx;
2076 2062
2077 if (unlikely(ffs_ep->descs[isHS])) { 2063 if (unlikely(ffs_ep->descs[isHS])) {
2078 FVDBG("two %sspeed descriptors for EP %d", 2064 pr_vdebug("two %sspeed descriptors for EP %d\n",
2079 isHS ? "high" : "full", 2065 isHS ? "high" : "full",
2080 ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); 2066 ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
2081 return -EINVAL; 2067 return -EINVAL;
2082 } 2068 }
2083 ffs_ep->descs[isHS] = ds; 2069 ffs_ep->descs[isHS] = ds;
@@ -2091,11 +2077,11 @@ static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
2091 struct usb_request *req; 2077 struct usb_request *req;
2092 struct usb_ep *ep; 2078 struct usb_ep *ep;
2093 2079
2094 FVDBG("autoconfig"); 2080 pr_vdebug("autoconfig\n");
2095 ep = usb_ep_autoconfig(func->gadget, ds); 2081 ep = usb_ep_autoconfig(func->gadget, ds);
2096 if (unlikely(!ep)) 2082 if (unlikely(!ep))
2097 return -ENOTSUPP; 2083 return -ENOTSUPP;
2098 ep->driver_data = func->eps + idx;; 2084 ep->driver_data = func->eps + idx;
2099 2085
2100 req = usb_ep_alloc_request(ep, GFP_KERNEL); 2086 req = usb_ep_alloc_request(ep, GFP_KERNEL);
2101 if (unlikely(!req)) 2087 if (unlikely(!req))
@@ -2111,7 +2097,6 @@ static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
2111 return 0; 2097 return 0;
2112} 2098}
2113 2099
2114
2115static int __ffs_func_bind_do_nums(enum ffs_entity_type type, u8 *valuep, 2100static int __ffs_func_bind_do_nums(enum ffs_entity_type type, u8 *valuep,
2116 struct usb_descriptor_header *desc, 2101 struct usb_descriptor_header *desc,
2117 void *priv) 2102 void *priv)
@@ -2143,8 +2128,10 @@ static int __ffs_func_bind_do_nums(enum ffs_entity_type type, u8 *valuep,
2143 break; 2128 break;
2144 2129
2145 case FFS_ENDPOINT: 2130 case FFS_ENDPOINT:
2146 /* USB_DT_ENDPOINT are handled in 2131 /*
2147 * __ffs_func_bind_do_descs(). */ 2132 * USB_DT_ENDPOINT are handled in
2133 * __ffs_func_bind_do_descs().
2134 */
2148 if (desc->bDescriptorType == USB_DT_ENDPOINT) 2135 if (desc->bDescriptorType == USB_DT_ENDPOINT)
2149 return 0; 2136 return 0;
2150 2137
@@ -2160,7 +2147,7 @@ static int __ffs_func_bind_do_nums(enum ffs_entity_type type, u8 *valuep,
2160 break; 2147 break;
2161 } 2148 }
2162 2149
2163 FVDBG("%02x -> %02x", *valuep, newValue); 2150 pr_vdebug("%02x -> %02x\n", *valuep, newValue);
2164 *valuep = newValue; 2151 *valuep = newValue;
2165 return 0; 2152 return 0;
2166} 2153}
@@ -2211,9 +2198,11 @@ static int ffs_func_bind(struct usb_configuration *c,
2211 func->eps = data->eps; 2198 func->eps = data->eps;
2212 func->interfaces_nums = data->inums; 2199 func->interfaces_nums = data->inums;
2213 2200
2214 /* Go throught all the endpoint descriptors and allocate 2201 /*
2202 * Go through all the endpoint descriptors and allocate
2215 * endpoints first, so that later we can rewrite the endpoint 2203 * endpoints first, so that later we can rewrite the endpoint
2216 * numbers without worying that it may be described later on. */ 2204 * numbers without worrying that it may be described later on.
2205 */
2217 if (likely(full)) { 2206 if (likely(full)) {
2218 func->function.descriptors = data->fs_descs; 2207 func->function.descriptors = data->fs_descs;
2219 ret = ffs_do_descs(ffs->fs_descs_count, 2208 ret = ffs_do_descs(ffs->fs_descs_count,
@@ -2234,9 +2223,11 @@ static int ffs_func_bind(struct usb_configuration *c,
2234 __ffs_func_bind_do_descs, func); 2223 __ffs_func_bind_do_descs, func);
2235 } 2224 }
2236 2225
2237 /* Now handle interface numbers allocation and interface and 2226 /*
2238 * enpoint numbers rewritting. We can do that in one go 2227 * Now handle interface numbers allocation and interface and
2239 * now. */ 2228 * endpoint numbers rewriting. We can do that in one go
2229 * now.
2230 */
2240 ret = ffs_do_descs(ffs->fs_descs_count + 2231 ret = ffs_do_descs(ffs->fs_descs_count +
2241 (high ? ffs->hs_descs_count : 0), 2232 (high ? ffs->hs_descs_count : 0),
2242 data->raw_descs, sizeof data->raw_descs, 2233 data->raw_descs, sizeof data->raw_descs,
@@ -2274,7 +2265,6 @@ static void ffs_func_unbind(struct usb_configuration *c,
2274 ffs_func_free(func); 2265 ffs_func_free(func);
2275} 2266}
2276 2267
2277
2278static int ffs_func_set_alt(struct usb_function *f, 2268static int ffs_func_set_alt(struct usb_function *f,
2279 unsigned interface, unsigned alt) 2269 unsigned interface, unsigned alt)
2280{ 2270{
@@ -2322,20 +2312,21 @@ static int ffs_func_setup(struct usb_function *f,
2322 2312
2323 ENTER(); 2313 ENTER();
2324 2314
2325 FVDBG("creq->bRequestType = %02x", creq->bRequestType); 2315 pr_vdebug("creq->bRequestType = %02x\n", creq->bRequestType);
2326 FVDBG("creq->bRequest = %02x", creq->bRequest); 2316 pr_vdebug("creq->bRequest = %02x\n", creq->bRequest);
2327 FVDBG("creq->wValue = %04x", le16_to_cpu(creq->wValue)); 2317 pr_vdebug("creq->wValue = %04x\n", le16_to_cpu(creq->wValue));
2328 FVDBG("creq->wIndex = %04x", le16_to_cpu(creq->wIndex)); 2318 pr_vdebug("creq->wIndex = %04x\n", le16_to_cpu(creq->wIndex));
2329 FVDBG("creq->wLength = %04x", le16_to_cpu(creq->wLength)); 2319 pr_vdebug("creq->wLength = %04x\n", le16_to_cpu(creq->wLength));
2330 2320
2331 /* Most requests directed to interface go throught here 2321 /*
2322 * Most requests directed to interface go through here
2332 * (notable exceptions are set/get interface) so we need to 2323 * (notable exceptions are set/get interface) so we need to
2333 * handle them. All other either handled by composite or 2324 * handle them. All other either handled by composite or
2334 * passed to usb_configuration->setup() (if one is set). No 2325 * passed to usb_configuration->setup() (if one is set). No
2335 * matter, we will handle requests directed to endpoint here 2326 * matter, we will handle requests directed to endpoint here
2336 * as well (as it's straightforward) but what to do with any 2327 * as well (as it's straightforward) but what to do with any
2337 * other request? */ 2328 * other request?
2338 2329 */
2339 if (ffs->state != FFS_ACTIVE) 2330 if (ffs->state != FFS_ACTIVE)
2340 return -ENODEV; 2331 return -ENODEV;
2341 2332
@@ -2378,8 +2369,7 @@ static void ffs_func_resume(struct usb_function *f)
2378} 2369}
2379 2370
2380 2371
2381 2372/* Endpoint and interface numbers reverse mapping ***************************/
2382/* Enpoint and interface numbers reverse mapping ****************************/
2383 2373
2384static int ffs_func_revmap_ep(struct ffs_function *func, u8 num) 2374static int ffs_func_revmap_ep(struct ffs_function *func, u8 num)
2385{ 2375{
@@ -2410,7 +2400,6 @@ static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
2410 : mutex_lock_interruptible(mutex); 2400 : mutex_lock_interruptible(mutex);
2411} 2401}
2412 2402
2413
2414static char *ffs_prepare_buffer(const char * __user buf, size_t len) 2403static char *ffs_prepare_buffer(const char * __user buf, size_t len)
2415{ 2404{
2416 char *data; 2405 char *data;
@@ -2427,7 +2416,7 @@ static char *ffs_prepare_buffer(const char * __user buf, size_t len)
2427 return ERR_PTR(-EFAULT); 2416 return ERR_PTR(-EFAULT);
2428 } 2417 }
2429 2418
2430 FVDBG("Buffer from user space:"); 2419 pr_vdebug("Buffer from user space:\n");
2431 ffs_dump_mem("", data, len); 2420 ffs_dump_mem("", data, len);
2432 2421
2433 return data; 2422 return data;
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index 838286b1cd14..b5dbb2308f56 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -37,7 +37,6 @@
37 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 37 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38 */ 38 */
39 39
40
41/* 40/*
42 * The Mass Storage Function acts as a USB Mass Storage device, 41 * The Mass Storage Function acts as a USB Mass Storage device,
43 * appearing to the host as a disk drive or as a CD-ROM drive. In 42 * appearing to the host as a disk drive or as a CD-ROM drive. In
@@ -185,7 +184,6 @@
185 * <http://www.usb.org/developers/devclass_docs/usbmass-ufi10.pdf>. 184 * <http://www.usb.org/developers/devclass_docs/usbmass-ufi10.pdf>.
186 */ 185 */
187 186
188
189/* 187/*
190 * Driver Design 188 * Driver Design
191 * 189 *
@@ -275,7 +273,6 @@
275/* #define VERBOSE_DEBUG */ 273/* #define VERBOSE_DEBUG */
276/* #define DUMP_MSGS */ 274/* #define DUMP_MSGS */
277 275
278
279#include <linux/blkdev.h> 276#include <linux/blkdev.h>
280#include <linux/completion.h> 277#include <linux/completion.h>
281#include <linux/dcache.h> 278#include <linux/dcache.h>
@@ -300,7 +297,6 @@
300#include "gadget_chips.h" 297#include "gadget_chips.h"
301 298
302 299
303
304/*------------------------------------------------------------------------*/ 300/*------------------------------------------------------------------------*/
305 301
306#define FSG_DRIVER_DESC "Mass Storage Function" 302#define FSG_DRIVER_DESC "Mass Storage Function"
@@ -308,7 +304,6 @@
308 304
309static const char fsg_string_interface[] = "Mass Storage"; 305static const char fsg_string_interface[] = "Mass Storage";
310 306
311
312#define FSG_NO_INTR_EP 1 307#define FSG_NO_INTR_EP 1
313#define FSG_NO_DEVICE_STRINGS 1 308#define FSG_NO_DEVICE_STRINGS 1
314#define FSG_NO_OTG 1 309#define FSG_NO_OTG 1
@@ -324,25 +319,30 @@ struct fsg_common;
324 319
325/* FSF callback functions */ 320/* FSF callback functions */
326struct fsg_operations { 321struct fsg_operations {
327 /* Callback function to call when thread exits. If no 322 /*
323 * Callback function to call when thread exits. If no
328 * callback is set or it returns value lower then zero MSF 324 * callback is set or it returns value lower then zero MSF
329 * will force eject all LUNs it operates on (including those 325 * will force eject all LUNs it operates on (including those
330 * marked as non-removable or with prevent_medium_removal flag 326 * marked as non-removable or with prevent_medium_removal flag
331 * set). */ 327 * set).
328 */
332 int (*thread_exits)(struct fsg_common *common); 329 int (*thread_exits)(struct fsg_common *common);
333 330
334 /* Called prior to ejection. Negative return means error, 331 /*
332 * Called prior to ejection. Negative return means error,
335 * zero means to continue with ejection, positive means not to 333 * zero means to continue with ejection, positive means not to
336 * eject. */ 334 * eject.
335 */
337 int (*pre_eject)(struct fsg_common *common, 336 int (*pre_eject)(struct fsg_common *common,
338 struct fsg_lun *lun, int num); 337 struct fsg_lun *lun, int num);
339 /* Called after ejection. Negative return means error, zero 338 /*
340 * or positive is just a success. */ 339 * Called after ejection. Negative return means error, zero
340 * or positive is just a success.
341 */
341 int (*post_eject)(struct fsg_common *common, 342 int (*post_eject)(struct fsg_common *common,
342 struct fsg_lun *lun, int num); 343 struct fsg_lun *lun, int num);
343}; 344};
344 345
345
346/* Data shared by all the FSG instances. */ 346/* Data shared by all the FSG instances. */
347struct fsg_common { 347struct fsg_common {
348 struct usb_gadget *gadget; 348 struct usb_gadget *gadget;
@@ -398,14 +398,15 @@ struct fsg_common {
398 /* Gadget's private data. */ 398 /* Gadget's private data. */
399 void *private_data; 399 void *private_data;
400 400
401 /* Vendor (8 chars), product (16 chars), release (4 401 /*
402 * hexadecimal digits) and NUL byte */ 402 * Vendor (8 chars), product (16 chars), release (4
403 * hexadecimal digits) and NUL byte
404 */
403 char inquiry_string[8 + 16 + 4 + 1]; 405 char inquiry_string[8 + 16 + 4 + 1];
404 406
405 struct kref ref; 407 struct kref ref;
406}; 408};
407 409
408
409struct fsg_config { 410struct fsg_config {
410 unsigned nluns; 411 unsigned nluns;
411 struct fsg_lun_config { 412 struct fsg_lun_config {
@@ -431,7 +432,6 @@ struct fsg_config {
431 char can_stall; 432 char can_stall;
432}; 433};
433 434
434
435struct fsg_dev { 435struct fsg_dev {
436 struct usb_function function; 436 struct usb_function function;
437 struct usb_gadget *gadget; /* Copy of cdev->gadget */ 437 struct usb_gadget *gadget; /* Copy of cdev->gadget */
@@ -449,7 +449,6 @@ struct fsg_dev {
449 struct usb_ep *bulk_out; 449 struct usb_ep *bulk_out;
450}; 450};
451 451
452
453static inline int __fsg_is_set(struct fsg_common *common, 452static inline int __fsg_is_set(struct fsg_common *common,
454 const char *func, unsigned line) 453 const char *func, unsigned line)
455{ 454{
@@ -462,13 +461,11 @@ static inline int __fsg_is_set(struct fsg_common *common,
462 461
463#define fsg_is_set(common) likely(__fsg_is_set(common, __func__, __LINE__)) 462#define fsg_is_set(common) likely(__fsg_is_set(common, __func__, __LINE__))
464 463
465
466static inline struct fsg_dev *fsg_from_func(struct usb_function *f) 464static inline struct fsg_dev *fsg_from_func(struct usb_function *f)
467{ 465{
468 return container_of(f, struct fsg_dev, function); 466 return container_of(f, struct fsg_dev, function);
469} 467}
470 468
471
472typedef void (*fsg_routine_t)(struct fsg_dev *); 469typedef void (*fsg_routine_t)(struct fsg_dev *);
473 470
474static int exception_in_progress(struct fsg_common *common) 471static int exception_in_progress(struct fsg_common *common)
@@ -478,7 +475,7 @@ static int exception_in_progress(struct fsg_common *common)
478 475
479/* Make bulk-out requests be divisible by the maxpacket size */ 476/* Make bulk-out requests be divisible by the maxpacket size */
480static void set_bulk_out_req_length(struct fsg_common *common, 477static void set_bulk_out_req_length(struct fsg_common *common,
481 struct fsg_buffhd *bh, unsigned int length) 478 struct fsg_buffhd *bh, unsigned int length)
482{ 479{
483 unsigned int rem; 480 unsigned int rem;
484 481
@@ -489,6 +486,7 @@ static void set_bulk_out_req_length(struct fsg_common *common,
489 bh->outreq->length = length; 486 bh->outreq->length = length;
490} 487}
491 488
489
492/*-------------------------------------------------------------------------*/ 490/*-------------------------------------------------------------------------*/
493 491
494static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep) 492static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
@@ -519,14 +517,15 @@ static void wakeup_thread(struct fsg_common *common)
519 wake_up_process(common->thread_task); 517 wake_up_process(common->thread_task);
520} 518}
521 519
522
523static void raise_exception(struct fsg_common *common, enum fsg_state new_state) 520static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
524{ 521{
525 unsigned long flags; 522 unsigned long flags;
526 523
527 /* Do nothing if a higher-priority exception is already in progress. 524 /*
525 * Do nothing if a higher-priority exception is already in progress.
528 * If a lower-or-equal priority exception is in progress, preempt it 526 * If a lower-or-equal priority exception is in progress, preempt it
529 * and notify the main thread by sending it a signal. */ 527 * and notify the main thread by sending it a signal.
528 */
530 spin_lock_irqsave(&common->lock, flags); 529 spin_lock_irqsave(&common->lock, flags);
531 if (common->state <= new_state) { 530 if (common->state <= new_state) {
532 common->exception_req_tag = common->ep0_req_tag; 531 common->exception_req_tag = common->ep0_req_tag;
@@ -555,10 +554,10 @@ static int ep0_queue(struct fsg_common *common)
555 return rc; 554 return rc;
556} 555}
557 556
557
558/*-------------------------------------------------------------------------*/ 558/*-------------------------------------------------------------------------*/
559 559
560/* Bulk and interrupt endpoint completion handlers. 560/* Completion handlers. These always run in_irq. */
561 * These always run in_irq. */
562 561
563static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req) 562static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
564{ 563{
@@ -567,7 +566,7 @@ static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
567 566
568 if (req->status || req->actual != req->length) 567 if (req->status || req->actual != req->length)
569 DBG(common, "%s --> %d, %u/%u\n", __func__, 568 DBG(common, "%s --> %d, %u/%u\n", __func__,
570 req->status, req->actual, req->length); 569 req->status, req->actual, req->length);
571 if (req->status == -ECONNRESET) /* Request was cancelled */ 570 if (req->status == -ECONNRESET) /* Request was cancelled */
572 usb_ep_fifo_flush(ep); 571 usb_ep_fifo_flush(ep);
573 572
@@ -588,8 +587,7 @@ static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
588 dump_msg(common, "bulk-out", req->buf, req->actual); 587 dump_msg(common, "bulk-out", req->buf, req->actual);
589 if (req->status || req->actual != bh->bulk_out_intended_length) 588 if (req->status || req->actual != bh->bulk_out_intended_length)
590 DBG(common, "%s --> %d, %u/%u\n", __func__, 589 DBG(common, "%s --> %d, %u/%u\n", __func__,
591 req->status, req->actual, 590 req->status, req->actual, bh->bulk_out_intended_length);
592 bh->bulk_out_intended_length);
593 if (req->status == -ECONNRESET) /* Request was cancelled */ 591 if (req->status == -ECONNRESET) /* Request was cancelled */
594 usb_ep_fifo_flush(ep); 592 usb_ep_fifo_flush(ep);
595 593
@@ -602,13 +600,8 @@ static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
602 spin_unlock(&common->lock); 600 spin_unlock(&common->lock);
603} 601}
604 602
605
606/*-------------------------------------------------------------------------*/
607
608/* Ep0 class-specific handlers. These always run in_irq. */
609
610static int fsg_setup(struct usb_function *f, 603static int fsg_setup(struct usb_function *f,
611 const struct usb_ctrlrequest *ctrl) 604 const struct usb_ctrlrequest *ctrl)
612{ 605{
613 struct fsg_dev *fsg = fsg_from_func(f); 606 struct fsg_dev *fsg = fsg_from_func(f);
614 struct usb_request *req = fsg->common->ep0req; 607 struct usb_request *req = fsg->common->ep0req;
@@ -628,8 +621,10 @@ static int fsg_setup(struct usb_function *f,
628 if (w_index != fsg->interface_number || w_value != 0) 621 if (w_index != fsg->interface_number || w_value != 0)
629 return -EDOM; 622 return -EDOM;
630 623
631 /* Raise an exception to stop the current operation 624 /*
632 * and reinitialize our state. */ 625 * Raise an exception to stop the current operation
626 * and reinitialize our state.
627 */
633 DBG(fsg, "bulk reset request\n"); 628 DBG(fsg, "bulk reset request\n");
634 raise_exception(fsg->common, FSG_STATE_RESET); 629 raise_exception(fsg->common, FSG_STATE_RESET);
635 return DELAYED_STATUS; 630 return DELAYED_STATUS;
@@ -641,7 +636,7 @@ static int fsg_setup(struct usb_function *f,
641 if (w_index != fsg->interface_number || w_value != 0) 636 if (w_index != fsg->interface_number || w_value != 0)
642 return -EDOM; 637 return -EDOM;
643 VDBG(fsg, "get max LUN\n"); 638 VDBG(fsg, "get max LUN\n");
644 *(u8 *) req->buf = fsg->common->nluns - 1; 639 *(u8 *)req->buf = fsg->common->nluns - 1;
645 640
646 /* Respond with data/status */ 641 /* Respond with data/status */
647 req->length = min((u16)1, w_length); 642 req->length = min((u16)1, w_length);
@@ -649,8 +644,7 @@ static int fsg_setup(struct usb_function *f,
649 } 644 }
650 645
651 VDBG(fsg, 646 VDBG(fsg,
652 "unknown class-specific control req " 647 "unknown class-specific control req %02x.%02x v%04x i%04x l%u\n",
653 "%02x.%02x v%04x i%04x l%u\n",
654 ctrl->bRequestType, ctrl->bRequest, 648 ctrl->bRequestType, ctrl->bRequest,
655 le16_to_cpu(ctrl->wValue), w_index, w_length); 649 le16_to_cpu(ctrl->wValue), w_index, w_length);
656 return -EOPNOTSUPP; 650 return -EOPNOTSUPP;
@@ -661,11 +655,10 @@ static int fsg_setup(struct usb_function *f,
661 655
662/* All the following routines run in process context */ 656/* All the following routines run in process context */
663 657
664
665/* Use this for bulk or interrupt transfers, not ep0 */ 658/* Use this for bulk or interrupt transfers, not ep0 */
666static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep, 659static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
667 struct usb_request *req, int *pbusy, 660 struct usb_request *req, int *pbusy,
668 enum fsg_buffer_state *state) 661 enum fsg_buffer_state *state)
669{ 662{
670 int rc; 663 int rc;
671 664
@@ -683,25 +676,34 @@ static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
683 676
684 /* We can't do much more than wait for a reset */ 677 /* We can't do much more than wait for a reset */
685 678
686 /* Note: currently the net2280 driver fails zero-length 679 /*
687 * submissions if DMA is enabled. */ 680 * Note: currently the net2280 driver fails zero-length
688 if (rc != -ESHUTDOWN && !(rc == -EOPNOTSUPP && 681 * submissions if DMA is enabled.
689 req->length == 0)) 682 */
683 if (rc != -ESHUTDOWN &&
684 !(rc == -EOPNOTSUPP && req->length == 0))
690 WARNING(fsg, "error in submission: %s --> %d\n", 685 WARNING(fsg, "error in submission: %s --> %d\n",
691 ep->name, rc); 686 ep->name, rc);
692 } 687 }
693} 688}
694 689
695#define START_TRANSFER_OR(common, ep_name, req, pbusy, state) \ 690static bool start_in_transfer(struct fsg_common *common, struct fsg_buffhd *bh)
696 if (fsg_is_set(common)) \ 691{
697 start_transfer((common)->fsg, (common)->fsg->ep_name, \ 692 if (!fsg_is_set(common))
698 req, pbusy, state); \ 693 return false;
699 else 694 start_transfer(common->fsg, common->fsg->bulk_in,
700 695 bh->inreq, &bh->inreq_busy, &bh->state);
701#define START_TRANSFER(common, ep_name, req, pbusy, state) \ 696 return true;
702 START_TRANSFER_OR(common, ep_name, req, pbusy, state) (void)0 697}
703
704 698
699static bool start_out_transfer(struct fsg_common *common, struct fsg_buffhd *bh)
700{
701 if (!fsg_is_set(common))
702 return false;
703 start_transfer(common->fsg, common->fsg->bulk_out,
704 bh->outreq, &bh->outreq_busy, &bh->state);
705 return true;
706}
705 707
706static int sleep_thread(struct fsg_common *common) 708static int sleep_thread(struct fsg_common *common)
707{ 709{
@@ -739,16 +741,20 @@ static int do_read(struct fsg_common *common)
739 unsigned int partial_page; 741 unsigned int partial_page;
740 ssize_t nread; 742 ssize_t nread;
741 743
742 /* Get the starting Logical Block Address and check that it's 744 /*
743 * not too big */ 745 * Get the starting Logical Block Address and check that it's
746 * not too big.
747 */
744 if (common->cmnd[0] == READ_6) 748 if (common->cmnd[0] == READ_6)
745 lba = get_unaligned_be24(&common->cmnd[1]); 749 lba = get_unaligned_be24(&common->cmnd[1]);
746 else { 750 else {
747 lba = get_unaligned_be32(&common->cmnd[2]); 751 lba = get_unaligned_be32(&common->cmnd[2]);
748 752
749 /* We allow DPO (Disable Page Out = don't save data in the 753 /*
754 * We allow DPO (Disable Page Out = don't save data in the
750 * cache) and FUA (Force Unit Access = don't read from the 755 * cache) and FUA (Force Unit Access = don't read from the
751 * cache), but we don't implement them. */ 756 * cache), but we don't implement them.
757 */
752 if ((common->cmnd[1] & ~0x18) != 0) { 758 if ((common->cmnd[1] & ~0x18) != 0) {
753 curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 759 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
754 return -EINVAL; 760 return -EINVAL;
@@ -766,22 +772,23 @@ static int do_read(struct fsg_common *common)
766 return -EIO; /* No default reply */ 772 return -EIO; /* No default reply */
767 773
768 for (;;) { 774 for (;;) {
769 775 /*
770 /* Figure out how much we need to read: 776 * Figure out how much we need to read:
771 * Try to read the remaining amount. 777 * Try to read the remaining amount.
772 * But don't read more than the buffer size. 778 * But don't read more than the buffer size.
773 * And don't try to read past the end of the file. 779 * And don't try to read past the end of the file.
774 * Finally, if we're not at a page boundary, don't read past 780 * Finally, if we're not at a page boundary, don't read past
775 * the next page. 781 * the next page.
776 * If this means reading 0 then we were asked to read past 782 * If this means reading 0 then we were asked to read past
777 * the end of file. */ 783 * the end of file.
784 */
778 amount = min(amount_left, FSG_BUFLEN); 785 amount = min(amount_left, FSG_BUFLEN);
779 amount = min((loff_t) amount, 786 amount = min((loff_t)amount,
780 curlun->file_length - file_offset); 787 curlun->file_length - file_offset);
781 partial_page = file_offset & (PAGE_CACHE_SIZE - 1); 788 partial_page = file_offset & (PAGE_CACHE_SIZE - 1);
782 if (partial_page > 0) 789 if (partial_page > 0)
783 amount = min(amount, (unsigned int) PAGE_CACHE_SIZE - 790 amount = min(amount, (unsigned int)PAGE_CACHE_SIZE -
784 partial_page); 791 partial_page);
785 792
786 /* Wait for the next buffer to become available */ 793 /* Wait for the next buffer to become available */
787 bh = common->next_buffhd_to_fill; 794 bh = common->next_buffhd_to_fill;
@@ -791,8 +798,10 @@ static int do_read(struct fsg_common *common)
791 return rc; 798 return rc;
792 } 799 }
793 800
794 /* If we were asked to read past the end of file, 801 /*
795 * end with an empty buffer. */ 802 * If we were asked to read past the end of file,
803 * end with an empty buffer.
804 */
796 if (amount == 0) { 805 if (amount == 0) {
797 curlun->sense_data = 806 curlun->sense_data =
798 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; 807 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
@@ -806,21 +815,19 @@ static int do_read(struct fsg_common *common)
806 /* Perform the read */ 815 /* Perform the read */
807 file_offset_tmp = file_offset; 816 file_offset_tmp = file_offset;
808 nread = vfs_read(curlun->filp, 817 nread = vfs_read(curlun->filp,
809 (char __user *) bh->buf, 818 (char __user *)bh->buf,
810 amount, &file_offset_tmp); 819 amount, &file_offset_tmp);
811 VLDBG(curlun, "file read %u @ %llu -> %d\n", amount, 820 VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
812 (unsigned long long) file_offset, 821 (unsigned long long)file_offset, (int)nread);
813 (int) nread);
814 if (signal_pending(current)) 822 if (signal_pending(current))
815 return -EINTR; 823 return -EINTR;
816 824
817 if (nread < 0) { 825 if (nread < 0) {
818 LDBG(curlun, "error in file read: %d\n", 826 LDBG(curlun, "error in file read: %d\n", (int)nread);
819 (int) nread);
820 nread = 0; 827 nread = 0;
821 } else if (nread < amount) { 828 } else if (nread < amount) {
822 LDBG(curlun, "partial file read: %d/%u\n", 829 LDBG(curlun, "partial file read: %d/%u\n",
823 (int) nread, amount); 830 (int)nread, amount);
824 nread -= (nread & 511); /* Round down to a block */ 831 nread -= (nread & 511); /* Round down to a block */
825 } 832 }
826 file_offset += nread; 833 file_offset += nread;
@@ -842,10 +849,8 @@ static int do_read(struct fsg_common *common)
842 849
843 /* Send this buffer and go read some more */ 850 /* Send this buffer and go read some more */
844 bh->inreq->zero = 0; 851 bh->inreq->zero = 0;
845 START_TRANSFER_OR(common, bulk_in, bh->inreq, 852 if (!start_in_transfer(common, bh))
846 &bh->inreq_busy, &bh->state) 853 /* Don't know what to do if common->fsg is NULL */
847 /* Don't know what to do if
848 * common->fsg is NULL */
849 return -EIO; 854 return -EIO;
850 common->next_buffhd_to_fill = bh->next; 855 common->next_buffhd_to_fill = bh->next;
851 } 856 }
@@ -877,17 +882,21 @@ static int do_write(struct fsg_common *common)
877 curlun->filp->f_flags &= ~O_SYNC; /* Default is not to wait */ 882 curlun->filp->f_flags &= ~O_SYNC; /* Default is not to wait */
878 spin_unlock(&curlun->filp->f_lock); 883 spin_unlock(&curlun->filp->f_lock);
879 884
880 /* Get the starting Logical Block Address and check that it's 885 /*
881 * not too big */ 886 * Get the starting Logical Block Address and check that it's
887 * not too big
888 */
882 if (common->cmnd[0] == WRITE_6) 889 if (common->cmnd[0] == WRITE_6)
883 lba = get_unaligned_be24(&common->cmnd[1]); 890 lba = get_unaligned_be24(&common->cmnd[1]);
884 else { 891 else {
885 lba = get_unaligned_be32(&common->cmnd[2]); 892 lba = get_unaligned_be32(&common->cmnd[2]);
886 893
887 /* We allow DPO (Disable Page Out = don't save data in the 894 /*
895 * We allow DPO (Disable Page Out = don't save data in the
888 * cache) and FUA (Force Unit Access = write directly to the 896 * cache) and FUA (Force Unit Access = write directly to the
889 * medium). We don't implement DPO; we implement FUA by 897 * medium). We don't implement DPO; we implement FUA by
890 * performing synchronous output. */ 898 * performing synchronous output.
899 */
891 if (common->cmnd[1] & ~0x18) { 900 if (common->cmnd[1] & ~0x18) {
892 curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 901 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
893 return -EINVAL; 902 return -EINVAL;
@@ -915,7 +924,8 @@ static int do_write(struct fsg_common *common)
915 bh = common->next_buffhd_to_fill; 924 bh = common->next_buffhd_to_fill;
916 if (bh->state == BUF_STATE_EMPTY && get_some_more) { 925 if (bh->state == BUF_STATE_EMPTY && get_some_more) {
917 926
918 /* Figure out how much we want to get: 927 /*
928 * Figure out how much we want to get:
919 * Try to get the remaining amount. 929 * Try to get the remaining amount.
920 * But don't get more than the buffer size. 930 * But don't get more than the buffer size.
921 * And don't try to go past the end of the file. 931 * And don't try to go past the end of the file.
@@ -923,14 +933,15 @@ static int do_write(struct fsg_common *common)
923 * don't go past the next page. 933 * don't go past the next page.
924 * If this means getting 0, then we were asked 934 * If this means getting 0, then we were asked
925 * to write past the end of file. 935 * to write past the end of file.
926 * Finally, round down to a block boundary. */ 936 * Finally, round down to a block boundary.
937 */
927 amount = min(amount_left_to_req, FSG_BUFLEN); 938 amount = min(amount_left_to_req, FSG_BUFLEN);
928 amount = min((loff_t) amount, curlun->file_length - 939 amount = min((loff_t)amount,
929 usb_offset); 940 curlun->file_length - usb_offset);
930 partial_page = usb_offset & (PAGE_CACHE_SIZE - 1); 941 partial_page = usb_offset & (PAGE_CACHE_SIZE - 1);
931 if (partial_page > 0) 942 if (partial_page > 0)
932 amount = min(amount, 943 amount = min(amount,
933 (unsigned int) PAGE_CACHE_SIZE - partial_page); 944 (unsigned int)PAGE_CACHE_SIZE - partial_page);
934 945
935 if (amount == 0) { 946 if (amount == 0) {
936 get_some_more = 0; 947 get_some_more = 0;
@@ -940,11 +951,13 @@ static int do_write(struct fsg_common *common)
940 curlun->info_valid = 1; 951 curlun->info_valid = 1;
941 continue; 952 continue;
942 } 953 }
943 amount -= (amount & 511); 954 amount -= amount & 511;
944 if (amount == 0) { 955 if (amount == 0) {
945 956
946 /* Why were we were asked to transfer a 957 /*
947 * partial block? */ 958 * Why were we were asked to transfer a
959 * partial block?
960 */
948 get_some_more = 0; 961 get_some_more = 0;
949 continue; 962 continue;
950 } 963 }
@@ -956,15 +969,15 @@ static int do_write(struct fsg_common *common)
956 if (amount_left_to_req == 0) 969 if (amount_left_to_req == 0)
957 get_some_more = 0; 970 get_some_more = 0;
958 971
959 /* amount is always divisible by 512, hence by 972 /*
960 * the bulk-out maxpacket size */ 973 * amount is always divisible by 512, hence by
974 * the bulk-out maxpacket size
975 */
961 bh->outreq->length = amount; 976 bh->outreq->length = amount;
962 bh->bulk_out_intended_length = amount; 977 bh->bulk_out_intended_length = amount;
963 bh->outreq->short_not_ok = 1; 978 bh->outreq->short_not_ok = 1;
964 START_TRANSFER_OR(common, bulk_out, bh->outreq, 979 if (!start_out_transfer(common, bh))
965 &bh->outreq_busy, &bh->state) 980 /* Dunno what to do if common->fsg is NULL */
966 /* Don't know what to do if
967 * common->fsg is NULL */
968 return -EIO; 981 return -EIO;
969 common->next_buffhd_to_fill = bh->next; 982 common->next_buffhd_to_fill = bh->next;
970 continue; 983 continue;
@@ -990,30 +1003,29 @@ static int do_write(struct fsg_common *common)
990 amount = bh->outreq->actual; 1003 amount = bh->outreq->actual;
991 if (curlun->file_length - file_offset < amount) { 1004 if (curlun->file_length - file_offset < amount) {
992 LERROR(curlun, 1005 LERROR(curlun,
993 "write %u @ %llu beyond end %llu\n", 1006 "write %u @ %llu beyond end %llu\n",
994 amount, (unsigned long long) file_offset, 1007 amount, (unsigned long long)file_offset,
995 (unsigned long long) curlun->file_length); 1008 (unsigned long long)curlun->file_length);
996 amount = curlun->file_length - file_offset; 1009 amount = curlun->file_length - file_offset;
997 } 1010 }
998 1011
999 /* Perform the write */ 1012 /* Perform the write */
1000 file_offset_tmp = file_offset; 1013 file_offset_tmp = file_offset;
1001 nwritten = vfs_write(curlun->filp, 1014 nwritten = vfs_write(curlun->filp,
1002 (char __user *) bh->buf, 1015 (char __user *)bh->buf,
1003 amount, &file_offset_tmp); 1016 amount, &file_offset_tmp);
1004 VLDBG(curlun, "file write %u @ %llu -> %d\n", amount, 1017 VLDBG(curlun, "file write %u @ %llu -> %d\n", amount,
1005 (unsigned long long) file_offset, 1018 (unsigned long long)file_offset, (int)nwritten);
1006 (int) nwritten);
1007 if (signal_pending(current)) 1019 if (signal_pending(current))
1008 return -EINTR; /* Interrupted! */ 1020 return -EINTR; /* Interrupted! */
1009 1021
1010 if (nwritten < 0) { 1022 if (nwritten < 0) {
1011 LDBG(curlun, "error in file write: %d\n", 1023 LDBG(curlun, "error in file write: %d\n",
1012 (int) nwritten); 1024 (int)nwritten);
1013 nwritten = 0; 1025 nwritten = 0;
1014 } else if (nwritten < amount) { 1026 } else if (nwritten < amount) {
1015 LDBG(curlun, "partial file write: %d/%u\n", 1027 LDBG(curlun, "partial file write: %d/%u\n",
1016 (int) nwritten, amount); 1028 (int)nwritten, amount);
1017 nwritten -= (nwritten & 511); 1029 nwritten -= (nwritten & 511);
1018 /* Round down to a block */ 1030 /* Round down to a block */
1019 } 1031 }
@@ -1086,16 +1098,20 @@ static int do_verify(struct fsg_common *common)
1086 unsigned int amount; 1098 unsigned int amount;
1087 ssize_t nread; 1099 ssize_t nread;
1088 1100
1089 /* Get the starting Logical Block Address and check that it's 1101 /*
1090 * not too big */ 1102 * Get the starting Logical Block Address and check that it's
1103 * not too big.
1104 */
1091 lba = get_unaligned_be32(&common->cmnd[2]); 1105 lba = get_unaligned_be32(&common->cmnd[2]);
1092 if (lba >= curlun->num_sectors) { 1106 if (lba >= curlun->num_sectors) {
1093 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; 1107 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1094 return -EINVAL; 1108 return -EINVAL;
1095 } 1109 }
1096 1110
1097 /* We allow DPO (Disable Page Out = don't save data in the 1111 /*
1098 * cache) but we don't implement it. */ 1112 * We allow DPO (Disable Page Out = don't save data in the
1113 * cache) but we don't implement it.
1114 */
1099 if (common->cmnd[1] & ~0x10) { 1115 if (common->cmnd[1] & ~0x10) {
1100 curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 1116 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1101 return -EINVAL; 1117 return -EINVAL;
@@ -1120,16 +1136,17 @@ static int do_verify(struct fsg_common *common)
1120 1136
1121 /* Just try to read the requested blocks */ 1137 /* Just try to read the requested blocks */
1122 while (amount_left > 0) { 1138 while (amount_left > 0) {
1123 1139 /*
1124 /* Figure out how much we need to read: 1140 * Figure out how much we need to read:
1125 * Try to read the remaining amount, but not more than 1141 * Try to read the remaining amount, but not more than
1126 * the buffer size. 1142 * the buffer size.
1127 * And don't try to read past the end of the file. 1143 * And don't try to read past the end of the file.
1128 * If this means reading 0 then we were asked to read 1144 * If this means reading 0 then we were asked to read
1129 * past the end of file. */ 1145 * past the end of file.
1146 */
1130 amount = min(amount_left, FSG_BUFLEN); 1147 amount = min(amount_left, FSG_BUFLEN);
1131 amount = min((loff_t) amount, 1148 amount = min((loff_t)amount,
1132 curlun->file_length - file_offset); 1149 curlun->file_length - file_offset);
1133 if (amount == 0) { 1150 if (amount == 0) {
1134 curlun->sense_data = 1151 curlun->sense_data =
1135 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; 1152 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
@@ -1150,13 +1167,12 @@ static int do_verify(struct fsg_common *common)
1150 return -EINTR; 1167 return -EINTR;
1151 1168
1152 if (nread < 0) { 1169 if (nread < 0) {
1153 LDBG(curlun, "error in file verify: %d\n", 1170 LDBG(curlun, "error in file verify: %d\n", (int)nread);
1154 (int) nread);
1155 nread = 0; 1171 nread = 0;
1156 } else if (nread < amount) { 1172 } else if (nread < amount) {
1157 LDBG(curlun, "partial file verify: %d/%u\n", 1173 LDBG(curlun, "partial file verify: %d/%u\n",
1158 (int) nread, amount); 1174 (int)nread, amount);
1159 nread -= (nread & 511); /* Round down to a sector */ 1175 nread -= nread & 511; /* Round down to a sector */
1160 } 1176 }
1161 if (nread == 0) { 1177 if (nread == 0) {
1162 curlun->sense_data = SS_UNRECOVERED_READ_ERROR; 1178 curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
@@ -1198,7 +1214,6 @@ static int do_inquiry(struct fsg_common *common, struct fsg_buffhd *bh)
1198 return 36; 1214 return 36;
1199} 1215}
1200 1216
1201
1202static int do_request_sense(struct fsg_common *common, struct fsg_buffhd *bh) 1217static int do_request_sense(struct fsg_common *common, struct fsg_buffhd *bh)
1203{ 1218{
1204 struct fsg_lun *curlun = common->curlun; 1219 struct fsg_lun *curlun = common->curlun;
@@ -1252,13 +1267,12 @@ static int do_request_sense(struct fsg_common *common, struct fsg_buffhd *bh)
1252 return 18; 1267 return 18;
1253} 1268}
1254 1269
1255
1256static int do_read_capacity(struct fsg_common *common, struct fsg_buffhd *bh) 1270static int do_read_capacity(struct fsg_common *common, struct fsg_buffhd *bh)
1257{ 1271{
1258 struct fsg_lun *curlun = common->curlun; 1272 struct fsg_lun *curlun = common->curlun;
1259 u32 lba = get_unaligned_be32(&common->cmnd[2]); 1273 u32 lba = get_unaligned_be32(&common->cmnd[2]);
1260 int pmi = common->cmnd[8]; 1274 int pmi = common->cmnd[8];
1261 u8 *buf = (u8 *) bh->buf; 1275 u8 *buf = (u8 *)bh->buf;
1262 1276
1263 /* Check the PMI and LBA fields */ 1277 /* Check the PMI and LBA fields */
1264 if (pmi > 1 || (pmi == 0 && lba != 0)) { 1278 if (pmi > 1 || (pmi == 0 && lba != 0)) {
@@ -1272,13 +1286,12 @@ static int do_read_capacity(struct fsg_common *common, struct fsg_buffhd *bh)
1272 return 8; 1286 return 8;
1273} 1287}
1274 1288
1275
1276static int do_read_header(struct fsg_common *common, struct fsg_buffhd *bh) 1289static int do_read_header(struct fsg_common *common, struct fsg_buffhd *bh)
1277{ 1290{
1278 struct fsg_lun *curlun = common->curlun; 1291 struct fsg_lun *curlun = common->curlun;
1279 int msf = common->cmnd[1] & 0x02; 1292 int msf = common->cmnd[1] & 0x02;
1280 u32 lba = get_unaligned_be32(&common->cmnd[2]); 1293 u32 lba = get_unaligned_be32(&common->cmnd[2]);
1281 u8 *buf = (u8 *) bh->buf; 1294 u8 *buf = (u8 *)bh->buf;
1282 1295
1283 if (common->cmnd[1] & ~0x02) { /* Mask away MSF */ 1296 if (common->cmnd[1] & ~0x02) { /* Mask away MSF */
1284 curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 1297 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
@@ -1295,13 +1308,12 @@ static int do_read_header(struct fsg_common *common, struct fsg_buffhd *bh)
1295 return 8; 1308 return 8;
1296} 1309}
1297 1310
1298
1299static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh) 1311static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh)
1300{ 1312{
1301 struct fsg_lun *curlun = common->curlun; 1313 struct fsg_lun *curlun = common->curlun;
1302 int msf = common->cmnd[1] & 0x02; 1314 int msf = common->cmnd[1] & 0x02;
1303 int start_track = common->cmnd[6]; 1315 int start_track = common->cmnd[6];
1304 u8 *buf = (u8 *) bh->buf; 1316 u8 *buf = (u8 *)bh->buf;
1305 1317
1306 if ((common->cmnd[1] & ~0x02) != 0 || /* Mask away MSF */ 1318 if ((common->cmnd[1] & ~0x02) != 0 || /* Mask away MSF */
1307 start_track > 1) { 1319 start_track > 1) {
@@ -1323,7 +1335,6 @@ static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh)
1323 return 20; 1335 return 20;
1324} 1336}
1325 1337
1326
1327static int do_mode_sense(struct fsg_common *common, struct fsg_buffhd *bh) 1338static int do_mode_sense(struct fsg_common *common, struct fsg_buffhd *bh)
1328{ 1339{
1329 struct fsg_lun *curlun = common->curlun; 1340 struct fsg_lun *curlun = common->curlun;
@@ -1348,10 +1359,12 @@ static int do_mode_sense(struct fsg_common *common, struct fsg_buffhd *bh)
1348 changeable_values = (pc == 1); 1359 changeable_values = (pc == 1);
1349 all_pages = (page_code == 0x3f); 1360 all_pages = (page_code == 0x3f);
1350 1361
1351 /* Write the mode parameter header. Fixed values are: default 1362 /*
1363 * Write the mode parameter header. Fixed values are: default
1352 * medium type, no cache control (DPOFUA), and no block descriptors. 1364 * medium type, no cache control (DPOFUA), and no block descriptors.
1353 * The only variable value is the WriteProtect bit. We will fill in 1365 * The only variable value is the WriteProtect bit. We will fill in
1354 * the mode data length later. */ 1366 * the mode data length later.
1367 */
1355 memset(buf, 0, 8); 1368 memset(buf, 0, 8);
1356 if (mscmnd == MODE_SENSE) { 1369 if (mscmnd == MODE_SENSE) {
1357 buf[2] = (curlun->ro ? 0x80 : 0x00); /* WP, DPOFUA */ 1370 buf[2] = (curlun->ro ? 0x80 : 0x00); /* WP, DPOFUA */
@@ -1365,8 +1378,10 @@ static int do_mode_sense(struct fsg_common *common, struct fsg_buffhd *bh)
1365 1378
1366 /* No block descriptors */ 1379 /* No block descriptors */
1367 1380
1368 /* The mode pages, in numerical order. The only page we support 1381 /*
1369 * is the Caching page. */ 1382 * The mode pages, in numerical order. The only page we support
1383 * is the Caching page.
1384 */
1370 if (page_code == 0x08 || all_pages) { 1385 if (page_code == 0x08 || all_pages) {
1371 valid_page = 1; 1386 valid_page = 1;
1372 buf[0] = 0x08; /* Page code */ 1387 buf[0] = 0x08; /* Page code */
@@ -1388,8 +1403,10 @@ static int do_mode_sense(struct fsg_common *common, struct fsg_buffhd *bh)
1388 buf += 12; 1403 buf += 12;
1389 } 1404 }
1390 1405
1391 /* Check that a valid page was requested and the mode data length 1406 /*
1392 * isn't too long. */ 1407 * Check that a valid page was requested and the mode data length
1408 * isn't too long.
1409 */
1393 len = buf - buf0; 1410 len = buf - buf0;
1394 if (!valid_page || len > limit) { 1411 if (!valid_page || len > limit) {
1395 curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 1412 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
@@ -1404,7 +1421,6 @@ static int do_mode_sense(struct fsg_common *common, struct fsg_buffhd *bh)
1404 return len; 1421 return len;
1405} 1422}
1406 1423
1407
1408static int do_start_stop(struct fsg_common *common) 1424static int do_start_stop(struct fsg_common *common)
1409{ 1425{
1410 struct fsg_lun *curlun = common->curlun; 1426 struct fsg_lun *curlun = common->curlun;
@@ -1424,8 +1440,10 @@ static int do_start_stop(struct fsg_common *common)
1424 loej = common->cmnd[4] & 0x02; 1440 loej = common->cmnd[4] & 0x02;
1425 start = common->cmnd[4] & 0x01; 1441 start = common->cmnd[4] & 0x01;
1426 1442
1427 /* Our emulation doesn't support mounting; the medium is 1443 /*
1428 * available for use as soon as it is loaded. */ 1444 * Our emulation doesn't support mounting; the medium is
1445 * available for use as soon as it is loaded.
1446 */
1429 if (start) { 1447 if (start) {
1430 if (!fsg_lun_is_open(curlun)) { 1448 if (!fsg_lun_is_open(curlun)) {
1431 curlun->sense_data = SS_MEDIUM_NOT_PRESENT; 1449 curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
@@ -1466,7 +1484,6 @@ static int do_start_stop(struct fsg_common *common)
1466 : 0; 1484 : 0;
1467} 1485}
1468 1486
1469
1470static int do_prevent_allow(struct fsg_common *common) 1487static int do_prevent_allow(struct fsg_common *common)
1471{ 1488{
1472 struct fsg_lun *curlun = common->curlun; 1489 struct fsg_lun *curlun = common->curlun;
@@ -1491,7 +1508,6 @@ static int do_prevent_allow(struct fsg_common *common)
1491 return 0; 1508 return 0;
1492} 1509}
1493 1510
1494
1495static int do_read_format_capacities(struct fsg_common *common, 1511static int do_read_format_capacities(struct fsg_common *common,
1496 struct fsg_buffhd *bh) 1512 struct fsg_buffhd *bh)
1497{ 1513{
@@ -1509,7 +1525,6 @@ static int do_read_format_capacities(struct fsg_common *common,
1509 return 12; 1525 return 12;
1510} 1526}
1511 1527
1512
1513static int do_mode_select(struct fsg_common *common, struct fsg_buffhd *bh) 1528static int do_mode_select(struct fsg_common *common, struct fsg_buffhd *bh)
1514{ 1529{
1515 struct fsg_lun *curlun = common->curlun; 1530 struct fsg_lun *curlun = common->curlun;
@@ -1591,7 +1606,7 @@ static int pad_with_zeros(struct fsg_dev *fsg)
1591 bh->inreq->length = nsend; 1606 bh->inreq->length = nsend;
1592 bh->inreq->zero = 0; 1607 bh->inreq->zero = 0;
1593 start_transfer(fsg, fsg->bulk_in, bh->inreq, 1608 start_transfer(fsg, fsg->bulk_in, bh->inreq,
1594 &bh->inreq_busy, &bh->state); 1609 &bh->inreq_busy, &bh->state);
1595 bh = fsg->common->next_buffhd_to_fill = bh->next; 1610 bh = fsg->common->next_buffhd_to_fill = bh->next;
1596 fsg->common->usb_amount_left -= nsend; 1611 fsg->common->usb_amount_left -= nsend;
1597 nkeep = 0; 1612 nkeep = 0;
@@ -1617,7 +1632,7 @@ static int throw_away_data(struct fsg_common *common)
1617 1632
1618 /* A short packet or an error ends everything */ 1633 /* A short packet or an error ends everything */
1619 if (bh->outreq->actual != bh->outreq->length || 1634 if (bh->outreq->actual != bh->outreq->length ||
1620 bh->outreq->status != 0) { 1635 bh->outreq->status != 0) {
1621 raise_exception(common, 1636 raise_exception(common,
1622 FSG_STATE_ABORT_BULK_OUT); 1637 FSG_STATE_ABORT_BULK_OUT);
1623 return -EINTR; 1638 return -EINTR;
@@ -1631,15 +1646,15 @@ static int throw_away_data(struct fsg_common *common)
1631 && common->usb_amount_left > 0) { 1646 && common->usb_amount_left > 0) {
1632 amount = min(common->usb_amount_left, FSG_BUFLEN); 1647 amount = min(common->usb_amount_left, FSG_BUFLEN);
1633 1648
1634 /* amount is always divisible by 512, hence by 1649 /*
1635 * the bulk-out maxpacket size */ 1650 * amount is always divisible by 512, hence by
1651 * the bulk-out maxpacket size.
1652 */
1636 bh->outreq->length = amount; 1653 bh->outreq->length = amount;
1637 bh->bulk_out_intended_length = amount; 1654 bh->bulk_out_intended_length = amount;
1638 bh->outreq->short_not_ok = 1; 1655 bh->outreq->short_not_ok = 1;
1639 START_TRANSFER_OR(common, bulk_out, bh->outreq, 1656 if (!start_out_transfer(common, bh))
1640 &bh->outreq_busy, &bh->state) 1657 /* Dunno what to do if common->fsg is NULL */
1641 /* Don't know what to do if
1642 * common->fsg is NULL */
1643 return -EIO; 1658 return -EIO;
1644 common->next_buffhd_to_fill = bh->next; 1659 common->next_buffhd_to_fill = bh->next;
1645 common->usb_amount_left -= amount; 1660 common->usb_amount_left -= amount;
@@ -1654,7 +1669,6 @@ static int throw_away_data(struct fsg_common *common)
1654 return 0; 1669 return 0;
1655} 1670}
1656 1671
1657
1658static int finish_reply(struct fsg_common *common) 1672static int finish_reply(struct fsg_common *common)
1659{ 1673{
1660 struct fsg_buffhd *bh = common->next_buffhd_to_fill; 1674 struct fsg_buffhd *bh = common->next_buffhd_to_fill;
@@ -1664,10 +1678,12 @@ static int finish_reply(struct fsg_common *common)
1664 case DATA_DIR_NONE: 1678 case DATA_DIR_NONE:
1665 break; /* Nothing to send */ 1679 break; /* Nothing to send */
1666 1680
1667 /* If we don't know whether the host wants to read or write, 1681 /*
1682 * If we don't know whether the host wants to read or write,
1668 * this must be CB or CBI with an unknown command. We mustn't 1683 * this must be CB or CBI with an unknown command. We mustn't
1669 * try to send or receive any data. So stall both bulk pipes 1684 * try to send or receive any data. So stall both bulk pipes
1670 * if we can and wait for a reset. */ 1685 * if we can and wait for a reset.
1686 */
1671 case DATA_DIR_UNKNOWN: 1687 case DATA_DIR_UNKNOWN:
1672 if (!common->can_stall) { 1688 if (!common->can_stall) {
1673 /* Nothing */ 1689 /* Nothing */
@@ -1688,18 +1704,18 @@ static int finish_reply(struct fsg_common *common)
1688 /* If there's no residue, simply send the last buffer */ 1704 /* If there's no residue, simply send the last buffer */
1689 } else if (common->residue == 0) { 1705 } else if (common->residue == 0) {
1690 bh->inreq->zero = 0; 1706 bh->inreq->zero = 0;
1691 START_TRANSFER_OR(common, bulk_in, bh->inreq, 1707 if (!start_in_transfer(common, bh))
1692 &bh->inreq_busy, &bh->state)
1693 return -EIO; 1708 return -EIO;
1694 common->next_buffhd_to_fill = bh->next; 1709 common->next_buffhd_to_fill = bh->next;
1695 1710
1696 /* For Bulk-only, if we're allowed to stall then send the 1711 /*
1712 * For Bulk-only, if we're allowed to stall then send the
1697 * short packet and halt the bulk-in endpoint. If we can't 1713 * short packet and halt the bulk-in endpoint. If we can't
1698 * stall, pad out the remaining data with 0's. */ 1714 * stall, pad out the remaining data with 0's.
1715 */
1699 } else if (common->can_stall) { 1716 } else if (common->can_stall) {
1700 bh->inreq->zero = 1; 1717 bh->inreq->zero = 1;
1701 START_TRANSFER_OR(common, bulk_in, bh->inreq, 1718 if (!start_in_transfer(common, bh))
1702 &bh->inreq_busy, &bh->state)
1703 /* Don't know what to do if 1719 /* Don't know what to do if
1704 * common->fsg is NULL */ 1720 * common->fsg is NULL */
1705 rc = -EIO; 1721 rc = -EIO;
@@ -1714,8 +1730,10 @@ static int finish_reply(struct fsg_common *common)
1714 } 1730 }
1715 break; 1731 break;
1716 1732
1717 /* We have processed all we want from the data the host has sent. 1733 /*
1718 * There may still be outstanding bulk-out requests. */ 1734 * We have processed all we want from the data the host has sent.
1735 * There may still be outstanding bulk-out requests.
1736 */
1719 case DATA_DIR_FROM_HOST: 1737 case DATA_DIR_FROM_HOST:
1720 if (common->residue == 0) { 1738 if (common->residue == 0) {
1721 /* Nothing to receive */ 1739 /* Nothing to receive */
@@ -1725,12 +1743,14 @@ static int finish_reply(struct fsg_common *common)
1725 raise_exception(common, FSG_STATE_ABORT_BULK_OUT); 1743 raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
1726 rc = -EINTR; 1744 rc = -EINTR;
1727 1745
1728 /* We haven't processed all the incoming data. Even though 1746 /*
1747 * We haven't processed all the incoming data. Even though
1729 * we may be allowed to stall, doing so would cause a race. 1748 * we may be allowed to stall, doing so would cause a race.
1730 * The controller may already have ACK'ed all the remaining 1749 * The controller may already have ACK'ed all the remaining
1731 * bulk-out packets, in which case the host wouldn't see a 1750 * bulk-out packets, in which case the host wouldn't see a
1732 * STALL. Not realizing the endpoint was halted, it wouldn't 1751 * STALL. Not realizing the endpoint was halted, it wouldn't
1733 * clear the halt -- leading to problems later on. */ 1752 * clear the halt -- leading to problems later on.
1753 */
1734#if 0 1754#if 0
1735 } else if (common->can_stall) { 1755 } else if (common->can_stall) {
1736 if (fsg_is_set(common)) 1756 if (fsg_is_set(common))
@@ -1740,8 +1760,10 @@ static int finish_reply(struct fsg_common *common)
1740 rc = -EINTR; 1760 rc = -EINTR;
1741#endif 1761#endif
1742 1762
1743 /* We can't stall. Read in the excess data and throw it 1763 /*
1744 * all away. */ 1764 * We can't stall. Read in the excess data and throw it
1765 * all away.
1766 */
1745 } else { 1767 } else {
1746 rc = throw_away_data(common); 1768 rc = throw_away_data(common);
1747 } 1769 }
@@ -1750,7 +1772,6 @@ static int finish_reply(struct fsg_common *common)
1750 return rc; 1772 return rc;
1751} 1773}
1752 1774
1753
1754static int send_status(struct fsg_common *common) 1775static int send_status(struct fsg_common *common)
1755{ 1776{
1756 struct fsg_lun *curlun = common->curlun; 1777 struct fsg_lun *curlun = common->curlun;
@@ -1798,8 +1819,7 @@ static int send_status(struct fsg_common *common)
1798 1819
1799 bh->inreq->length = USB_BULK_CS_WRAP_LEN; 1820 bh->inreq->length = USB_BULK_CS_WRAP_LEN;
1800 bh->inreq->zero = 0; 1821 bh->inreq->zero = 0;
1801 START_TRANSFER_OR(common, bulk_in, bh->inreq, 1822 if (!start_in_transfer(common, bh))
1802 &bh->inreq_busy, &bh->state)
1803 /* Don't know what to do if common->fsg is NULL */ 1823 /* Don't know what to do if common->fsg is NULL */
1804 return -EIO; 1824 return -EIO;
1805 1825
@@ -1810,11 +1830,13 @@ static int send_status(struct fsg_common *common)
1810 1830
1811/*-------------------------------------------------------------------------*/ 1831/*-------------------------------------------------------------------------*/
1812 1832
1813/* Check whether the command is properly formed and whether its data size 1833/*
1814 * and direction agree with the values we already have. */ 1834 * Check whether the command is properly formed and whether its data size
1835 * and direction agree with the values we already have.
1836 */
1815static int check_command(struct fsg_common *common, int cmnd_size, 1837static int check_command(struct fsg_common *common, int cmnd_size,
1816 enum data_direction data_dir, unsigned int mask, 1838 enum data_direction data_dir, unsigned int mask,
1817 int needs_medium, const char *name) 1839 int needs_medium, const char *name)
1818{ 1840{
1819 int i; 1841 int i;
1820 int lun = common->cmnd[1] >> 5; 1842 int lun = common->cmnd[1] >> 5;
@@ -1825,19 +1847,23 @@ static int check_command(struct fsg_common *common, int cmnd_size,
1825 hdlen[0] = 0; 1847 hdlen[0] = 0;
1826 if (common->data_dir != DATA_DIR_UNKNOWN) 1848 if (common->data_dir != DATA_DIR_UNKNOWN)
1827 sprintf(hdlen, ", H%c=%u", dirletter[(int) common->data_dir], 1849 sprintf(hdlen, ", H%c=%u", dirletter[(int) common->data_dir],
1828 common->data_size); 1850 common->data_size);
1829 VDBG(common, "SCSI command: %s; Dc=%d, D%c=%u; Hc=%d%s\n", 1851 VDBG(common, "SCSI command: %s; Dc=%d, D%c=%u; Hc=%d%s\n",
1830 name, cmnd_size, dirletter[(int) data_dir], 1852 name, cmnd_size, dirletter[(int) data_dir],
1831 common->data_size_from_cmnd, common->cmnd_size, hdlen); 1853 common->data_size_from_cmnd, common->cmnd_size, hdlen);
1832 1854
1833 /* We can't reply at all until we know the correct data direction 1855 /*
1834 * and size. */ 1856 * We can't reply at all until we know the correct data direction
1857 * and size.
1858 */
1835 if (common->data_size_from_cmnd == 0) 1859 if (common->data_size_from_cmnd == 0)
1836 data_dir = DATA_DIR_NONE; 1860 data_dir = DATA_DIR_NONE;
1837 if (common->data_size < common->data_size_from_cmnd) { 1861 if (common->data_size < common->data_size_from_cmnd) {
1838 /* Host data size < Device data size is a phase error. 1862 /*
1863 * Host data size < Device data size is a phase error.
1839 * Carry out the command, but only transfer as much as 1864 * Carry out the command, but only transfer as much as
1840 * we are allowed. */ 1865 * we are allowed.
1866 */
1841 common->data_size_from_cmnd = common->data_size; 1867 common->data_size_from_cmnd = common->data_size;
1842 common->phase_error = 1; 1868 common->phase_error = 1;
1843 } 1869 }
@@ -1845,8 +1871,7 @@ static int check_command(struct fsg_common *common, int cmnd_size,
1845 common->usb_amount_left = common->data_size; 1871 common->usb_amount_left = common->data_size;
1846 1872
1847 /* Conflicting data directions is a phase error */ 1873 /* Conflicting data directions is a phase error */
1848 if (common->data_dir != data_dir 1874 if (common->data_dir != data_dir && common->data_size_from_cmnd > 0) {
1849 && common->data_size_from_cmnd > 0) {
1850 common->phase_error = 1; 1875 common->phase_error = 1;
1851 return -EINVAL; 1876 return -EINVAL;
1852 } 1877 }
@@ -1854,7 +1879,8 @@ static int check_command(struct fsg_common *common, int cmnd_size,
1854 /* Verify the length of the command itself */ 1879 /* Verify the length of the command itself */
1855 if (cmnd_size != common->cmnd_size) { 1880 if (cmnd_size != common->cmnd_size) {
1856 1881
1857 /* Special case workaround: There are plenty of buggy SCSI 1882 /*
1883 * Special case workaround: There are plenty of buggy SCSI
1858 * implementations. Many have issues with cbw->Length 1884 * implementations. Many have issues with cbw->Length
1859 * field passing a wrong command size. For those cases we 1885 * field passing a wrong command size. For those cases we
1860 * always try to work around the problem by using the length 1886 * always try to work around the problem by using the length
@@ -1896,8 +1922,10 @@ static int check_command(struct fsg_common *common, int cmnd_size,
1896 curlun = NULL; 1922 curlun = NULL;
1897 common->bad_lun_okay = 0; 1923 common->bad_lun_okay = 0;
1898 1924
1899 /* INQUIRY and REQUEST SENSE commands are explicitly allowed 1925 /*
1900 * to use unsupported LUNs; all others may not. */ 1926 * INQUIRY and REQUEST SENSE commands are explicitly allowed
1927 * to use unsupported LUNs; all others may not.
1928 */
1901 if (common->cmnd[0] != INQUIRY && 1929 if (common->cmnd[0] != INQUIRY &&
1902 common->cmnd[0] != REQUEST_SENSE) { 1930 common->cmnd[0] != REQUEST_SENSE) {
1903 DBG(common, "unsupported LUN %d\n", common->lun); 1931 DBG(common, "unsupported LUN %d\n", common->lun);
@@ -1905,11 +1933,13 @@ static int check_command(struct fsg_common *common, int cmnd_size,
1905 } 1933 }
1906 } 1934 }
1907 1935
1908 /* If a unit attention condition exists, only INQUIRY and 1936 /*
1909 * REQUEST SENSE commands are allowed; anything else must fail. */ 1937 * If a unit attention condition exists, only INQUIRY and
1938 * REQUEST SENSE commands are allowed; anything else must fail.
1939 */
1910 if (curlun && curlun->unit_attention_data != SS_NO_SENSE && 1940 if (curlun && curlun->unit_attention_data != SS_NO_SENSE &&
1911 common->cmnd[0] != INQUIRY && 1941 common->cmnd[0] != INQUIRY &&
1912 common->cmnd[0] != REQUEST_SENSE) { 1942 common->cmnd[0] != REQUEST_SENSE) {
1913 curlun->sense_data = curlun->unit_attention_data; 1943 curlun->sense_data = curlun->unit_attention_data;
1914 curlun->unit_attention_data = SS_NO_SENSE; 1944 curlun->unit_attention_data = SS_NO_SENSE;
1915 return -EINVAL; 1945 return -EINVAL;
@@ -1935,7 +1965,6 @@ static int check_command(struct fsg_common *common, int cmnd_size,
1935 return 0; 1965 return 0;
1936} 1966}
1937 1967
1938
1939static int do_scsi_command(struct fsg_common *common) 1968static int do_scsi_command(struct fsg_common *common)
1940{ 1969{
1941 struct fsg_buffhd *bh; 1970 struct fsg_buffhd *bh;
@@ -2123,8 +2152,10 @@ static int do_scsi_command(struct fsg_common *common)
2123 "TEST UNIT READY"); 2152 "TEST UNIT READY");
2124 break; 2153 break;
2125 2154
2126 /* Although optional, this command is used by MS-Windows. We 2155 /*
2127 * support a minimal version: BytChk must be 0. */ 2156 * Although optional, this command is used by MS-Windows. We
2157 * support a minimal version: BytChk must be 0.
2158 */
2128 case VERIFY: 2159 case VERIFY:
2129 common->data_size_from_cmnd = 0; 2160 common->data_size_from_cmnd = 0;
2130 reply = check_command(common, 10, DATA_DIR_NONE, 2161 reply = check_command(common, 10, DATA_DIR_NONE,
@@ -2164,10 +2195,12 @@ static int do_scsi_command(struct fsg_common *common)
2164 reply = do_write(common); 2195 reply = do_write(common);
2165 break; 2196 break;
2166 2197
2167 /* Some mandatory commands that we recognize but don't implement. 2198 /*
2199 * Some mandatory commands that we recognize but don't implement.
2168 * They don't mean much in this setting. It's left as an exercise 2200 * They don't mean much in this setting. It's left as an exercise
2169 * for anyone interested to implement RESERVE and RELEASE in terms 2201 * for anyone interested to implement RESERVE and RELEASE in terms
2170 * of Posix locks. */ 2202 * of Posix locks.
2203 */
2171 case FORMAT_UNIT: 2204 case FORMAT_UNIT:
2172 case RELEASE: 2205 case RELEASE:
2173 case RESERVE: 2206 case RESERVE:
@@ -2195,7 +2228,7 @@ unknown_cmnd:
2195 if (reply == -EINVAL) 2228 if (reply == -EINVAL)
2196 reply = 0; /* Error reply length */ 2229 reply = 0; /* Error reply length */
2197 if (reply >= 0 && common->data_dir == DATA_DIR_TO_HOST) { 2230 if (reply >= 0 && common->data_dir == DATA_DIR_TO_HOST) {
2198 reply = min((u32) reply, common->data_size_from_cmnd); 2231 reply = min((u32)reply, common->data_size_from_cmnd);
2199 bh->inreq->length = reply; 2232 bh->inreq->length = reply;
2200 bh->state = BUF_STATE_FULL; 2233 bh->state = BUF_STATE_FULL;
2201 common->residue -= reply; 2234 common->residue -= reply;
@@ -2225,7 +2258,8 @@ static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2225 req->actual, 2258 req->actual,
2226 le32_to_cpu(cbw->Signature)); 2259 le32_to_cpu(cbw->Signature));
2227 2260
2228 /* The Bulk-only spec says we MUST stall the IN endpoint 2261 /*
2262 * The Bulk-only spec says we MUST stall the IN endpoint
2229 * (6.6.1), so it's unavoidable. It also says we must 2263 * (6.6.1), so it's unavoidable. It also says we must
2230 * retain this state until the next reset, but there's 2264 * retain this state until the next reset, but there's
2231 * no way to tell the controller driver it should ignore 2265 * no way to tell the controller driver it should ignore
@@ -2233,7 +2267,8 @@ static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2233 * 2267 *
2234 * We aren't required to halt the OUT endpoint; instead 2268 * We aren't required to halt the OUT endpoint; instead
2235 * we can simply accept and discard any data received 2269 * we can simply accept and discard any data received
2236 * until the next reset. */ 2270 * until the next reset.
2271 */
2237 wedge_bulk_in_endpoint(fsg); 2272 wedge_bulk_in_endpoint(fsg);
2238 set_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags); 2273 set_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
2239 return -EINVAL; 2274 return -EINVAL;
@@ -2246,8 +2281,10 @@ static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2246 "cmdlen %u\n", 2281 "cmdlen %u\n",
2247 cbw->Lun, cbw->Flags, cbw->Length); 2282 cbw->Lun, cbw->Flags, cbw->Length);
2248 2283
2249 /* We can do anything we want here, so let's stall the 2284 /*
2250 * bulk pipes if we are allowed to. */ 2285 * We can do anything we want here, so let's stall the
2286 * bulk pipes if we are allowed to.
2287 */
2251 if (common->can_stall) { 2288 if (common->can_stall) {
2252 fsg_set_halt(fsg, fsg->bulk_out); 2289 fsg_set_halt(fsg, fsg->bulk_out);
2253 halt_bulk_in_endpoint(fsg); 2290 halt_bulk_in_endpoint(fsg);
@@ -2270,7 +2307,6 @@ static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2270 return 0; 2307 return 0;
2271} 2308}
2272 2309
2273
2274static int get_next_command(struct fsg_common *common) 2310static int get_next_command(struct fsg_common *common)
2275{ 2311{
2276 struct fsg_buffhd *bh; 2312 struct fsg_buffhd *bh;
@@ -2287,14 +2323,15 @@ static int get_next_command(struct fsg_common *common)
2287 /* Queue a request to read a Bulk-only CBW */ 2323 /* Queue a request to read a Bulk-only CBW */
2288 set_bulk_out_req_length(common, bh, USB_BULK_CB_WRAP_LEN); 2324 set_bulk_out_req_length(common, bh, USB_BULK_CB_WRAP_LEN);
2289 bh->outreq->short_not_ok = 1; 2325 bh->outreq->short_not_ok = 1;
2290 START_TRANSFER_OR(common, bulk_out, bh->outreq, 2326 if (!start_out_transfer(common, bh))
2291 &bh->outreq_busy, &bh->state)
2292 /* Don't know what to do if common->fsg is NULL */ 2327 /* Don't know what to do if common->fsg is NULL */
2293 return -EIO; 2328 return -EIO;
2294 2329
2295 /* We will drain the buffer in software, which means we 2330 /*
2331 * We will drain the buffer in software, which means we
2296 * can reuse it for the next filling. No need to advance 2332 * can reuse it for the next filling. No need to advance
2297 * next_buffhd_to_fill. */ 2333 * next_buffhd_to_fill.
2334 */
2298 2335
2299 /* Wait for the CBW to arrive */ 2336 /* Wait for the CBW to arrive */
2300 while (bh->state != BUF_STATE_FULL) { 2337 while (bh->state != BUF_STATE_FULL) {
@@ -2425,7 +2462,6 @@ reset:
2425 2462
2426/****************************** ALT CONFIGS ******************************/ 2463/****************************** ALT CONFIGS ******************************/
2427 2464
2428
2429static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt) 2465static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
2430{ 2466{
2431 struct fsg_dev *fsg = fsg_from_func(f); 2467 struct fsg_dev *fsg = fsg_from_func(f);
@@ -2453,8 +2489,10 @@ static void handle_exception(struct fsg_common *common)
2453 struct fsg_lun *curlun; 2489 struct fsg_lun *curlun;
2454 unsigned int exception_req_tag; 2490 unsigned int exception_req_tag;
2455 2491
2456 /* Clear the existing signals. Anything but SIGUSR1 is converted 2492 /*
2457 * into a high-priority EXIT exception. */ 2493 * Clear the existing signals. Anything but SIGUSR1 is converted
2494 * into a high-priority EXIT exception.
2495 */
2458 for (;;) { 2496 for (;;) {
2459 int sig = 2497 int sig =
2460 dequeue_signal_lock(current, &current->blocked, &info); 2498 dequeue_signal_lock(current, &current->blocked, &info);
@@ -2498,8 +2536,10 @@ static void handle_exception(struct fsg_common *common)
2498 usb_ep_fifo_flush(common->fsg->bulk_out); 2536 usb_ep_fifo_flush(common->fsg->bulk_out);
2499 } 2537 }
2500 2538
2501 /* Reset the I/O buffer states and pointers, the SCSI 2539 /*
2502 * state, and the exception. Then invoke the handler. */ 2540 * Reset the I/O buffer states and pointers, the SCSI
2541 * state, and the exception. Then invoke the handler.
2542 */
2503 spin_lock_irq(&common->lock); 2543 spin_lock_irq(&common->lock);
2504 2544
2505 for (i = 0; i < FSG_NUM_BUFFERS; ++i) { 2545 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
@@ -2537,9 +2577,11 @@ static void handle_exception(struct fsg_common *common)
2537 break; 2577 break;
2538 2578
2539 case FSG_STATE_RESET: 2579 case FSG_STATE_RESET:
2540 /* In case we were forced against our will to halt a 2580 /*
2581 * In case we were forced against our will to halt a
2541 * bulk endpoint, clear the halt now. (The SuperH UDC 2582 * bulk endpoint, clear the halt now. (The SuperH UDC
2542 * requires this.) */ 2583 * requires this.)
2584 */
2543 if (!fsg_is_set(common)) 2585 if (!fsg_is_set(common))
2544 break; 2586 break;
2545 if (test_and_clear_bit(IGNORE_BULK_OUT, 2587 if (test_and_clear_bit(IGNORE_BULK_OUT,
@@ -2549,9 +2591,11 @@ static void handle_exception(struct fsg_common *common)
2549 if (common->ep0_req_tag == exception_req_tag) 2591 if (common->ep0_req_tag == exception_req_tag)
2550 ep0_queue(common); /* Complete the status stage */ 2592 ep0_queue(common); /* Complete the status stage */
2551 2593
2552 /* Technically this should go here, but it would only be 2594 /*
2595 * Technically this should go here, but it would only be
2553 * a waste of time. Ditto for the INTERFACE_CHANGE and 2596 * a waste of time. Ditto for the INTERFACE_CHANGE and
2554 * CONFIG_CHANGE cases. */ 2597 * CONFIG_CHANGE cases.
2598 */
2555 /* for (i = 0; i < common->nluns; ++i) */ 2599 /* for (i = 0; i < common->nluns; ++i) */
2556 /* common->luns[i].unit_attention_data = */ 2600 /* common->luns[i].unit_attention_data = */
2557 /* SS_RESET_OCCURRED; */ 2601 /* SS_RESET_OCCURRED; */
@@ -2586,8 +2630,10 @@ static int fsg_main_thread(void *common_)
2586{ 2630{
2587 struct fsg_common *common = common_; 2631 struct fsg_common *common = common_;
2588 2632
2589 /* Allow the thread to be killed by a signal, but set the signal mask 2633 /*
2590 * to block everything but INT, TERM, KILL, and USR1. */ 2634 * Allow the thread to be killed by a signal, but set the signal mask
2635 * to block everything but INT, TERM, KILL, and USR1.
2636 */
2591 allow_signal(SIGINT); 2637 allow_signal(SIGINT);
2592 allow_signal(SIGTERM); 2638 allow_signal(SIGTERM);
2593 allow_signal(SIGKILL); 2639 allow_signal(SIGKILL);
@@ -2596,9 +2642,11 @@ static int fsg_main_thread(void *common_)
2596 /* Allow the thread to be frozen */ 2642 /* Allow the thread to be frozen */
2597 set_freezable(); 2643 set_freezable();
2598 2644
2599 /* Arrange for userspace references to be interpreted as kernel 2645 /*
2646 * Arrange for userspace references to be interpreted as kernel
2600 * pointers. That way we can pass a kernel pointer to a routine 2647 * pointers. That way we can pass a kernel pointer to a routine
2601 * that expects a __user pointer and it will work okay. */ 2648 * that expects a __user pointer and it will work okay.
2649 */
2602 set_fs(get_ds()); 2650 set_fs(get_ds());
2603 2651
2604 /* The main loop */ 2652 /* The main loop */
@@ -2658,7 +2706,7 @@ static int fsg_main_thread(void *common_)
2658 up_write(&common->filesem); 2706 up_write(&common->filesem);
2659 } 2707 }
2660 2708
2661 /* Let the unbind and cleanup routines know the thread has exited */ 2709 /* Let fsg_unbind() know the thread has exited */
2662 complete_and_exit(&common->thread_notifier, 0); 2710 complete_and_exit(&common->thread_notifier, 0);
2663} 2711}
2664 2712
@@ -2690,7 +2738,6 @@ static inline void fsg_common_put(struct fsg_common *common)
2690 kref_put(&common->ref, fsg_common_release); 2738 kref_put(&common->ref, fsg_common_release);
2691} 2739}
2692 2740
2693
2694static struct fsg_common *fsg_common_init(struct fsg_common *common, 2741static struct fsg_common *fsg_common_init(struct fsg_common *common,
2695 struct usb_composite_dev *cdev, 2742 struct usb_composite_dev *cdev,
2696 struct fsg_config *cfg) 2743 struct fsg_config *cfg)
@@ -2736,8 +2783,10 @@ static struct fsg_common *fsg_common_init(struct fsg_common *common,
2736 fsg_intf_desc.iInterface = rc; 2783 fsg_intf_desc.iInterface = rc;
2737 } 2784 }
2738 2785
2739 /* Create the LUNs, open their backing files, and register the 2786 /*
2740 * LUN devices in sysfs. */ 2787 * Create the LUNs, open their backing files, and register the
2788 * LUN devices in sysfs.
2789 */
2741 curlun = kzalloc(nluns * sizeof *curlun, GFP_KERNEL); 2790 curlun = kzalloc(nluns * sizeof *curlun, GFP_KERNEL);
2742 if (unlikely(!curlun)) { 2791 if (unlikely(!curlun)) {
2743 rc = -ENOMEM; 2792 rc = -ENOMEM;
@@ -2765,6 +2814,7 @@ static struct fsg_common *fsg_common_init(struct fsg_common *common,
2765 if (rc) { 2814 if (rc) {
2766 INFO(common, "failed to register LUN%d: %d\n", i, rc); 2815 INFO(common, "failed to register LUN%d: %d\n", i, rc);
2767 common->nluns = i; 2816 common->nluns = i;
2817 put_device(&curlun->dev);
2768 goto error_release; 2818 goto error_release;
2769 } 2819 }
2770 2820
@@ -2790,7 +2840,6 @@ static struct fsg_common *fsg_common_init(struct fsg_common *common,
2790 } 2840 }
2791 common->nluns = nluns; 2841 common->nluns = nluns;
2792 2842
2793
2794 /* Data buffers cyclic list */ 2843 /* Data buffers cyclic list */
2795 bh = common->buffhds; 2844 bh = common->buffhds;
2796 i = FSG_NUM_BUFFERS; 2845 i = FSG_NUM_BUFFERS;
@@ -2807,7 +2856,6 @@ buffhds_first_it:
2807 } while (--i); 2856 } while (--i);
2808 bh->next = common->buffhds; 2857 bh->next = common->buffhds;
2809 2858
2810
2811 /* Prepare inquiryString */ 2859 /* Prepare inquiryString */
2812 if (cfg->release != 0xffff) { 2860 if (cfg->release != 0xffff) {
2813 i = cfg->release; 2861 i = cfg->release;
@@ -2821,41 +2869,35 @@ buffhds_first_it:
2821 i = 0x0399; 2869 i = 0x0399;
2822 } 2870 }
2823 } 2871 }
2824#define OR(x, y) ((x) ? (x) : (y))
2825 snprintf(common->inquiry_string, sizeof common->inquiry_string, 2872 snprintf(common->inquiry_string, sizeof common->inquiry_string,
2826 "%-8s%-16s%04x", 2873 "%-8s%-16s%04x", cfg->vendor_name ?: "Linux",
2827 OR(cfg->vendor_name, "Linux "),
2828 /* Assume product name dependent on the first LUN */ 2874 /* Assume product name dependent on the first LUN */
2829 OR(cfg->product_name, common->luns->cdrom 2875 cfg->product_name ?: (common->luns->cdrom
2830 ? "File-Stor Gadget" 2876 ? "File-Stor Gadget"
2831 : "File-CD Gadget "), 2877 : "File-CD Gadget"),
2832 i); 2878 i);
2833 2879
2834 2880 /*
2835 /* Some peripheral controllers are known not to be able to 2881 * Some peripheral controllers are known not to be able to
2836 * halt bulk endpoints correctly. If one of them is present, 2882 * halt bulk endpoints correctly. If one of them is present,
2837 * disable stalls. 2883 * disable stalls.
2838 */ 2884 */
2839 common->can_stall = cfg->can_stall && 2885 common->can_stall = cfg->can_stall &&
2840 !(gadget_is_at91(common->gadget)); 2886 !(gadget_is_at91(common->gadget));
2841 2887
2842
2843 spin_lock_init(&common->lock); 2888 spin_lock_init(&common->lock);
2844 kref_init(&common->ref); 2889 kref_init(&common->ref);
2845 2890
2846
2847 /* Tell the thread to start working */ 2891 /* Tell the thread to start working */
2848 common->thread_task = 2892 common->thread_task =
2849 kthread_create(fsg_main_thread, common, 2893 kthread_create(fsg_main_thread, common,
2850 OR(cfg->thread_name, "file-storage")); 2894 cfg->thread_name ?: "file-storage");
2851 if (IS_ERR(common->thread_task)) { 2895 if (IS_ERR(common->thread_task)) {
2852 rc = PTR_ERR(common->thread_task); 2896 rc = PTR_ERR(common->thread_task);
2853 goto error_release; 2897 goto error_release;
2854 } 2898 }
2855 init_completion(&common->thread_notifier); 2899 init_completion(&common->thread_notifier);
2856 init_waitqueue_head(&common->fsg_wait); 2900 init_waitqueue_head(&common->fsg_wait);
2857#undef OR
2858
2859 2901
2860 /* Information */ 2902 /* Information */
2861 INFO(common, FSG_DRIVER_DESC ", version: " FSG_DRIVER_VERSION "\n"); 2903 INFO(common, FSG_DRIVER_DESC ", version: " FSG_DRIVER_VERSION "\n");
@@ -2889,18 +2931,15 @@ buffhds_first_it:
2889 2931
2890 return common; 2932 return common;
2891 2933
2892
2893error_luns: 2934error_luns:
2894 common->nluns = i + 1; 2935 common->nluns = i + 1;
2895error_release: 2936error_release:
2896 common->state = FSG_STATE_TERMINATED; /* The thread is dead */ 2937 common->state = FSG_STATE_TERMINATED; /* The thread is dead */
2897 /* Call fsg_common_release() directly, ref might be not 2938 /* Call fsg_common_release() directly, ref might be not initialised. */
2898 * initialised */
2899 fsg_common_release(&common->ref); 2939 fsg_common_release(&common->ref);
2900 return ERR_PTR(rc); 2940 return ERR_PTR(rc);
2901} 2941}
2902 2942
2903
2904static void fsg_common_release(struct kref *ref) 2943static void fsg_common_release(struct kref *ref)
2905{ 2944{
2906 struct fsg_common *common = container_of(ref, struct fsg_common, ref); 2945 struct fsg_common *common = container_of(ref, struct fsg_common, ref);
@@ -2909,9 +2948,6 @@ static void fsg_common_release(struct kref *ref)
2909 if (common->state != FSG_STATE_TERMINATED) { 2948 if (common->state != FSG_STATE_TERMINATED) {
2910 raise_exception(common, FSG_STATE_EXIT); 2949 raise_exception(common, FSG_STATE_EXIT);
2911 wait_for_completion(&common->thread_notifier); 2950 wait_for_completion(&common->thread_notifier);
2912
2913 /* The cleanup routine waits for this completion also */
2914 complete(&common->thread_notifier);
2915 } 2951 }
2916 2952
2917 if (likely(common->luns)) { 2953 if (likely(common->luns)) {
@@ -2945,7 +2981,6 @@ static void fsg_common_release(struct kref *ref)
2945 2981
2946/*-------------------------------------------------------------------------*/ 2982/*-------------------------------------------------------------------------*/
2947 2983
2948
2949static void fsg_unbind(struct usb_configuration *c, struct usb_function *f) 2984static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
2950{ 2985{
2951 struct fsg_dev *fsg = fsg_from_func(f); 2986 struct fsg_dev *fsg = fsg_from_func(f);
@@ -2965,7 +3000,6 @@ static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
2965 kfree(fsg); 3000 kfree(fsg);
2966} 3001}
2967 3002
2968
2969static int fsg_bind(struct usb_configuration *c, struct usb_function *f) 3003static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
2970{ 3004{
2971 struct fsg_dev *fsg = fsg_from_func(f); 3005 struct fsg_dev *fsg = fsg_from_func(f);
@@ -3048,11 +3082,13 @@ static int fsg_bind_config(struct usb_composite_dev *cdev,
3048 fsg->function.disable = fsg_disable; 3082 fsg->function.disable = fsg_disable;
3049 3083
3050 fsg->common = common; 3084 fsg->common = common;
3051 /* Our caller holds a reference to common structure so we 3085 /*
3086 * Our caller holds a reference to common structure so we
3052 * don't have to be worry about it being freed until we return 3087 * don't have to be worry about it being freed until we return
3053 * from this function. So instead of incrementing counter now 3088 * from this function. So instead of incrementing counter now
3054 * and decrement in error recovery we increment it only when 3089 * and decrement in error recovery we increment it only when
3055 * call to usb_add_function() was successful. */ 3090 * call to usb_add_function() was successful.
3091 */
3056 3092
3057 rc = usb_add_function(c, &fsg->function); 3093 rc = usb_add_function(c, &fsg->function);
3058 if (unlikely(rc)) 3094 if (unlikely(rc))
@@ -3063,8 +3099,7 @@ static int fsg_bind_config(struct usb_composite_dev *cdev,
3063} 3099}
3064 3100
3065static inline int __deprecated __maybe_unused 3101static inline int __deprecated __maybe_unused
3066fsg_add(struct usb_composite_dev *cdev, 3102fsg_add(struct usb_composite_dev *cdev, struct usb_configuration *c,
3067 struct usb_configuration *c,
3068 struct fsg_common *common) 3103 struct fsg_common *common)
3069{ 3104{
3070 return fsg_bind_config(cdev, c, common); 3105 return fsg_bind_config(cdev, c, common);
@@ -3073,7 +3108,6 @@ fsg_add(struct usb_composite_dev *cdev,
3073 3108
3074/************************* Module parameters *************************/ 3109/************************* Module parameters *************************/
3075 3110
3076
3077struct fsg_module_parameters { 3111struct fsg_module_parameters {
3078 char *file[FSG_MAX_LUNS]; 3112 char *file[FSG_MAX_LUNS];
3079 int ro[FSG_MAX_LUNS]; 3113 int ro[FSG_MAX_LUNS];
@@ -3087,7 +3121,6 @@ struct fsg_module_parameters {
3087 int stall; /* can_stall */ 3121 int stall; /* can_stall */
3088}; 3122};
3089 3123
3090
3091#define _FSG_MODULE_PARAM_ARRAY(prefix, params, name, type, desc) \ 3124#define _FSG_MODULE_PARAM_ARRAY(prefix, params, name, type, desc) \
3092 module_param_array_named(prefix ## name, params.name, type, \ 3125 module_param_array_named(prefix ## name, params.name, type, \
3093 &prefix ## params.name ## _count, \ 3126 &prefix ## params.name ## _count, \
@@ -3115,7 +3148,6 @@ struct fsg_module_parameters {
3115 _FSG_MODULE_PARAM(prefix, params, stall, bool, \ 3148 _FSG_MODULE_PARAM(prefix, params, stall, bool, \
3116 "false to prevent bulk stalls") 3149 "false to prevent bulk stalls")
3117 3150
3118
3119static void 3151static void
3120fsg_config_from_params(struct fsg_config *cfg, 3152fsg_config_from_params(struct fsg_config *cfg,
3121 const struct fsg_module_parameters *params) 3153 const struct fsg_module_parameters *params)
diff --git a/drivers/usb/gadget/f_ncm.c b/drivers/usb/gadget/f_ncm.c
new file mode 100644
index 000000000000..130eee678c8b
--- /dev/null
+++ b/drivers/usb/gadget/f_ncm.c
@@ -0,0 +1,1407 @@
1/*
2 * f_ncm.c -- USB CDC Network (NCM) link function driver
3 *
4 * Copyright (C) 2010 Nokia Corporation
5 * Contact: Yauheni Kaliuta <yauheni.kaliuta@nokia.com>
6 *
7 * The driver borrows from f_ecm.c which is:
8 *
9 * Copyright (C) 2003-2005,2008 David Brownell
10 * Copyright (C) 2008 Nokia Corporation
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
26
27#include <linux/kernel.h>
28#include <linux/device.h>
29#include <linux/etherdevice.h>
30#include <linux/crc32.h>
31
32#include <linux/usb/cdc.h>
33
34#include "u_ether.h"
35
36/*
37 * This function is a "CDC Network Control Model" (CDC NCM) Ethernet link.
38 * NCM is intended to be used with high-speed network attachments.
39 *
40 * Note that NCM requires the use of "alternate settings" for its data
41 * interface. This means that the set_alt() method has real work to do,
42 * and also means that a get_alt() method is required.
43 */
44
45/* to trigger crc/non-crc ndp signature */
46
47#define NCM_NDP_HDR_CRC_MASK 0x01000000
48#define NCM_NDP_HDR_CRC 0x01000000
49#define NCM_NDP_HDR_NOCRC 0x00000000
50
51struct ncm_ep_descs {
52 struct usb_endpoint_descriptor *in;
53 struct usb_endpoint_descriptor *out;
54 struct usb_endpoint_descriptor *notify;
55};
56
57enum ncm_notify_state {
58 NCM_NOTIFY_NONE, /* don't notify */
59 NCM_NOTIFY_CONNECT, /* issue CONNECT next */
60 NCM_NOTIFY_SPEED, /* issue SPEED_CHANGE next */
61};
62
63struct f_ncm {
64 struct gether port;
65 u8 ctrl_id, data_id;
66
67 char ethaddr[14];
68
69 struct ncm_ep_descs fs;
70 struct ncm_ep_descs hs;
71
72 struct usb_ep *notify;
73 struct usb_endpoint_descriptor *notify_desc;
74 struct usb_request *notify_req;
75 u8 notify_state;
76 bool is_open;
77
78 struct ndp_parser_opts *parser_opts;
79 bool is_crc;
80
81 /*
82 * for notification, it is accessed from both
83 * callback and ethernet open/close
84 */
85 spinlock_t lock;
86};
87
88static inline struct f_ncm *func_to_ncm(struct usb_function *f)
89{
90 return container_of(f, struct f_ncm, port.func);
91}
92
93/* peak (theoretical) bulk transfer rate in bits-per-second */
94static inline unsigned ncm_bitrate(struct usb_gadget *g)
95{
96 if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
97 return 13 * 512 * 8 * 1000 * 8;
98 else
99 return 19 * 64 * 1 * 1000 * 8;
100}
101
102/*-------------------------------------------------------------------------*/
103
104/*
105 * We cannot group frames so use just the minimal size which ok to put
106 * one max-size ethernet frame.
107 * If the host can group frames, allow it to do that, 16K is selected,
108 * because it's used by default by the current linux host driver
109 */
110#define NTB_DEFAULT_IN_SIZE USB_CDC_NCM_NTB_MIN_IN_SIZE
111#define NTB_OUT_SIZE 16384
112
113/*
114 * skbs of size less than that will not be alligned
115 * to NCM's dwNtbInMaxSize to save bus bandwidth
116 */
117
118#define MAX_TX_NONFIXED (512 * 3)
119
120#define FORMATS_SUPPORTED (USB_CDC_NCM_NTB16_SUPPORTED | \
121 USB_CDC_NCM_NTB32_SUPPORTED)
122
123static struct usb_cdc_ncm_ntb_parameters ntb_parameters = {
124 .wLength = sizeof ntb_parameters,
125 .bmNtbFormatsSupported = cpu_to_le16(FORMATS_SUPPORTED),
126 .dwNtbInMaxSize = cpu_to_le32(NTB_DEFAULT_IN_SIZE),
127 .wNdpInDivisor = cpu_to_le16(4),
128 .wNdpInPayloadRemainder = cpu_to_le16(0),
129 .wNdpInAlignment = cpu_to_le16(4),
130
131 .dwNtbOutMaxSize = cpu_to_le32(NTB_OUT_SIZE),
132 .wNdpOutDivisor = cpu_to_le16(4),
133 .wNdpOutPayloadRemainder = cpu_to_le16(0),
134 .wNdpOutAlignment = cpu_to_le16(4),
135};
136
137/*
138 * Use wMaxPacketSize big enough to fit CDC_NOTIFY_SPEED_CHANGE in one
139 * packet, to simplify cancellation; and a big transfer interval, to
140 * waste less bandwidth.
141 */
142
143#define LOG2_STATUS_INTERVAL_MSEC 5 /* 1 << 5 == 32 msec */
144#define NCM_STATUS_BYTECOUNT 16 /* 8 byte header + data */
145
146static struct usb_interface_assoc_descriptor ncm_iad_desc __initdata = {
147 .bLength = sizeof ncm_iad_desc,
148 .bDescriptorType = USB_DT_INTERFACE_ASSOCIATION,
149
150 /* .bFirstInterface = DYNAMIC, */
151 .bInterfaceCount = 2, /* control + data */
152 .bFunctionClass = USB_CLASS_COMM,
153 .bFunctionSubClass = USB_CDC_SUBCLASS_NCM,
154 .bFunctionProtocol = USB_CDC_PROTO_NONE,
155 /* .iFunction = DYNAMIC */
156};
157
158/* interface descriptor: */
159
160static struct usb_interface_descriptor ncm_control_intf __initdata = {
161 .bLength = sizeof ncm_control_intf,
162 .bDescriptorType = USB_DT_INTERFACE,
163
164 /* .bInterfaceNumber = DYNAMIC */
165 .bNumEndpoints = 1,
166 .bInterfaceClass = USB_CLASS_COMM,
167 .bInterfaceSubClass = USB_CDC_SUBCLASS_NCM,
168 .bInterfaceProtocol = USB_CDC_PROTO_NONE,
169 /* .iInterface = DYNAMIC */
170};
171
172static struct usb_cdc_header_desc ncm_header_desc __initdata = {
173 .bLength = sizeof ncm_header_desc,
174 .bDescriptorType = USB_DT_CS_INTERFACE,
175 .bDescriptorSubType = USB_CDC_HEADER_TYPE,
176
177 .bcdCDC = cpu_to_le16(0x0110),
178};
179
180static struct usb_cdc_union_desc ncm_union_desc __initdata = {
181 .bLength = sizeof(ncm_union_desc),
182 .bDescriptorType = USB_DT_CS_INTERFACE,
183 .bDescriptorSubType = USB_CDC_UNION_TYPE,
184 /* .bMasterInterface0 = DYNAMIC */
185 /* .bSlaveInterface0 = DYNAMIC */
186};
187
188static struct usb_cdc_ether_desc ecm_desc __initdata = {
189 .bLength = sizeof ecm_desc,
190 .bDescriptorType = USB_DT_CS_INTERFACE,
191 .bDescriptorSubType = USB_CDC_ETHERNET_TYPE,
192
193 /* this descriptor actually adds value, surprise! */
194 /* .iMACAddress = DYNAMIC */
195 .bmEthernetStatistics = cpu_to_le32(0), /* no statistics */
196 .wMaxSegmentSize = cpu_to_le16(ETH_FRAME_LEN),
197 .wNumberMCFilters = cpu_to_le16(0),
198 .bNumberPowerFilters = 0,
199};
200
201#define NCAPS (USB_CDC_NCM_NCAP_ETH_FILTER | USB_CDC_NCM_NCAP_CRC_MODE)
202
203static struct usb_cdc_ncm_desc ncm_desc __initdata = {
204 .bLength = sizeof ncm_desc,
205 .bDescriptorType = USB_DT_CS_INTERFACE,
206 .bDescriptorSubType = USB_CDC_NCM_TYPE,
207
208 .bcdNcmVersion = cpu_to_le16(0x0100),
209 /* can process SetEthernetPacketFilter */
210 .bmNetworkCapabilities = NCAPS,
211};
212
213/* the default data interface has no endpoints ... */
214
215static struct usb_interface_descriptor ncm_data_nop_intf __initdata = {
216 .bLength = sizeof ncm_data_nop_intf,
217 .bDescriptorType = USB_DT_INTERFACE,
218
219 .bInterfaceNumber = 1,
220 .bAlternateSetting = 0,
221 .bNumEndpoints = 0,
222 .bInterfaceClass = USB_CLASS_CDC_DATA,
223 .bInterfaceSubClass = 0,
224 .bInterfaceProtocol = USB_CDC_NCM_PROTO_NTB,
225 /* .iInterface = DYNAMIC */
226};
227
228/* ... but the "real" data interface has two bulk endpoints */
229
230static struct usb_interface_descriptor ncm_data_intf __initdata = {
231 .bLength = sizeof ncm_data_intf,
232 .bDescriptorType = USB_DT_INTERFACE,
233
234 .bInterfaceNumber = 1,
235 .bAlternateSetting = 1,
236 .bNumEndpoints = 2,
237 .bInterfaceClass = USB_CLASS_CDC_DATA,
238 .bInterfaceSubClass = 0,
239 .bInterfaceProtocol = USB_CDC_NCM_PROTO_NTB,
240 /* .iInterface = DYNAMIC */
241};
242
243/* full speed support: */
244
245static struct usb_endpoint_descriptor fs_ncm_notify_desc __initdata = {
246 .bLength = USB_DT_ENDPOINT_SIZE,
247 .bDescriptorType = USB_DT_ENDPOINT,
248
249 .bEndpointAddress = USB_DIR_IN,
250 .bmAttributes = USB_ENDPOINT_XFER_INT,
251 .wMaxPacketSize = cpu_to_le16(NCM_STATUS_BYTECOUNT),
252 .bInterval = 1 << LOG2_STATUS_INTERVAL_MSEC,
253};
254
255static struct usb_endpoint_descriptor fs_ncm_in_desc __initdata = {
256 .bLength = USB_DT_ENDPOINT_SIZE,
257 .bDescriptorType = USB_DT_ENDPOINT,
258
259 .bEndpointAddress = USB_DIR_IN,
260 .bmAttributes = USB_ENDPOINT_XFER_BULK,
261};
262
263static struct usb_endpoint_descriptor fs_ncm_out_desc __initdata = {
264 .bLength = USB_DT_ENDPOINT_SIZE,
265 .bDescriptorType = USB_DT_ENDPOINT,
266
267 .bEndpointAddress = USB_DIR_OUT,
268 .bmAttributes = USB_ENDPOINT_XFER_BULK,
269};
270
271static struct usb_descriptor_header *ncm_fs_function[] __initdata = {
272 (struct usb_descriptor_header *) &ncm_iad_desc,
273 /* CDC NCM control descriptors */
274 (struct usb_descriptor_header *) &ncm_control_intf,
275 (struct usb_descriptor_header *) &ncm_header_desc,
276 (struct usb_descriptor_header *) &ncm_union_desc,
277 (struct usb_descriptor_header *) &ecm_desc,
278 (struct usb_descriptor_header *) &ncm_desc,
279 (struct usb_descriptor_header *) &fs_ncm_notify_desc,
280 /* data interface, altsettings 0 and 1 */
281 (struct usb_descriptor_header *) &ncm_data_nop_intf,
282 (struct usb_descriptor_header *) &ncm_data_intf,
283 (struct usb_descriptor_header *) &fs_ncm_in_desc,
284 (struct usb_descriptor_header *) &fs_ncm_out_desc,
285 NULL,
286};
287
288/* high speed support: */
289
290static struct usb_endpoint_descriptor hs_ncm_notify_desc __initdata = {
291 .bLength = USB_DT_ENDPOINT_SIZE,
292 .bDescriptorType = USB_DT_ENDPOINT,
293
294 .bEndpointAddress = USB_DIR_IN,
295 .bmAttributes = USB_ENDPOINT_XFER_INT,
296 .wMaxPacketSize = cpu_to_le16(NCM_STATUS_BYTECOUNT),
297 .bInterval = LOG2_STATUS_INTERVAL_MSEC + 4,
298};
299static struct usb_endpoint_descriptor hs_ncm_in_desc __initdata = {
300 .bLength = USB_DT_ENDPOINT_SIZE,
301 .bDescriptorType = USB_DT_ENDPOINT,
302
303 .bEndpointAddress = USB_DIR_IN,
304 .bmAttributes = USB_ENDPOINT_XFER_BULK,
305 .wMaxPacketSize = cpu_to_le16(512),
306};
307
308static struct usb_endpoint_descriptor hs_ncm_out_desc __initdata = {
309 .bLength = USB_DT_ENDPOINT_SIZE,
310 .bDescriptorType = USB_DT_ENDPOINT,
311
312 .bEndpointAddress = USB_DIR_OUT,
313 .bmAttributes = USB_ENDPOINT_XFER_BULK,
314 .wMaxPacketSize = cpu_to_le16(512),
315};
316
317static struct usb_descriptor_header *ncm_hs_function[] __initdata = {
318 (struct usb_descriptor_header *) &ncm_iad_desc,
319 /* CDC NCM control descriptors */
320 (struct usb_descriptor_header *) &ncm_control_intf,
321 (struct usb_descriptor_header *) &ncm_header_desc,
322 (struct usb_descriptor_header *) &ncm_union_desc,
323 (struct usb_descriptor_header *) &ecm_desc,
324 (struct usb_descriptor_header *) &ncm_desc,
325 (struct usb_descriptor_header *) &hs_ncm_notify_desc,
326 /* data interface, altsettings 0 and 1 */
327 (struct usb_descriptor_header *) &ncm_data_nop_intf,
328 (struct usb_descriptor_header *) &ncm_data_intf,
329 (struct usb_descriptor_header *) &hs_ncm_in_desc,
330 (struct usb_descriptor_header *) &hs_ncm_out_desc,
331 NULL,
332};
333
334/* string descriptors: */
335
336#define STRING_CTRL_IDX 0
337#define STRING_MAC_IDX 1
338#define STRING_DATA_IDX 2
339#define STRING_IAD_IDX 3
340
341static struct usb_string ncm_string_defs[] = {
342 [STRING_CTRL_IDX].s = "CDC Network Control Model (NCM)",
343 [STRING_MAC_IDX].s = NULL /* DYNAMIC */,
344 [STRING_DATA_IDX].s = "CDC Network Data",
345 [STRING_IAD_IDX].s = "CDC NCM",
346 { } /* end of list */
347};
348
349static struct usb_gadget_strings ncm_string_table = {
350 .language = 0x0409, /* en-us */
351 .strings = ncm_string_defs,
352};
353
354static struct usb_gadget_strings *ncm_strings[] = {
355 &ncm_string_table,
356 NULL,
357};
358
359/*
360 * Here are options for NCM Datagram Pointer table (NDP) parser.
361 * There are 2 different formats: NDP16 and NDP32 in the spec (ch. 3),
362 * in NDP16 offsets and sizes fields are 1 16bit word wide,
363 * in NDP32 -- 2 16bit words wide. Also signatures are different.
364 * To make the parser code the same, put the differences in the structure,
365 * and switch pointers to the structures when the format is changed.
366 */
367
368struct ndp_parser_opts {
369 u32 nth_sign;
370 u32 ndp_sign;
371 unsigned nth_size;
372 unsigned ndp_size;
373 unsigned ndplen_align;
374 /* sizes in u16 units */
375 unsigned dgram_item_len; /* index or length */
376 unsigned block_length;
377 unsigned fp_index;
378 unsigned reserved1;
379 unsigned reserved2;
380 unsigned next_fp_index;
381};
382
383#define INIT_NDP16_OPTS { \
384 .nth_sign = USB_CDC_NCM_NTH16_SIGN, \
385 .ndp_sign = USB_CDC_NCM_NDP16_NOCRC_SIGN, \
386 .nth_size = sizeof(struct usb_cdc_ncm_nth16), \
387 .ndp_size = sizeof(struct usb_cdc_ncm_ndp16), \
388 .ndplen_align = 4, \
389 .dgram_item_len = 1, \
390 .block_length = 1, \
391 .fp_index = 1, \
392 .reserved1 = 0, \
393 .reserved2 = 0, \
394 .next_fp_index = 1, \
395 }
396
397
398#define INIT_NDP32_OPTS { \
399 .nth_sign = USB_CDC_NCM_NTH32_SIGN, \
400 .ndp_sign = USB_CDC_NCM_NDP32_NOCRC_SIGN, \
401 .nth_size = sizeof(struct usb_cdc_ncm_nth32), \
402 .ndp_size = sizeof(struct usb_cdc_ncm_ndp32), \
403 .ndplen_align = 8, \
404 .dgram_item_len = 2, \
405 .block_length = 2, \
406 .fp_index = 2, \
407 .reserved1 = 1, \
408 .reserved2 = 2, \
409 .next_fp_index = 2, \
410 }
411
412static struct ndp_parser_opts ndp16_opts = INIT_NDP16_OPTS;
413static struct ndp_parser_opts ndp32_opts = INIT_NDP32_OPTS;
414
415static inline void put_ncm(__le16 **p, unsigned size, unsigned val)
416{
417 switch (size) {
418 case 1:
419 put_unaligned_le16((u16)val, *p);
420 break;
421 case 2:
422 put_unaligned_le32((u32)val, *p);
423
424 break;
425 default:
426 BUG();
427 }
428
429 *p += size;
430}
431
432static inline unsigned get_ncm(__le16 **p, unsigned size)
433{
434 unsigned tmp;
435
436 switch (size) {
437 case 1:
438 tmp = get_unaligned_le16(*p);
439 break;
440 case 2:
441 tmp = get_unaligned_le32(*p);
442 break;
443 default:
444 BUG();
445 }
446
447 *p += size;
448 return tmp;
449}
450
451/*-------------------------------------------------------------------------*/
452
453static inline void ncm_reset_values(struct f_ncm *ncm)
454{
455 ncm->parser_opts = &ndp16_opts;
456 ncm->is_crc = false;
457 ncm->port.cdc_filter = DEFAULT_FILTER;
458
459 /* doesn't make sense for ncm, fixed size used */
460 ncm->port.header_len = 0;
461
462 ncm->port.fixed_out_len = le32_to_cpu(ntb_parameters.dwNtbOutMaxSize);
463 ncm->port.fixed_in_len = NTB_DEFAULT_IN_SIZE;
464}
465
466/*
467 * Context: ncm->lock held
468 */
469static void ncm_do_notify(struct f_ncm *ncm)
470{
471 struct usb_request *req = ncm->notify_req;
472 struct usb_cdc_notification *event;
473 struct usb_composite_dev *cdev = ncm->port.func.config->cdev;
474 __le32 *data;
475 int status;
476
477 /* notification already in flight? */
478 if (!req)
479 return;
480
481 event = req->buf;
482 switch (ncm->notify_state) {
483 case NCM_NOTIFY_NONE:
484 return;
485
486 case NCM_NOTIFY_CONNECT:
487 event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
488 if (ncm->is_open)
489 event->wValue = cpu_to_le16(1);
490 else
491 event->wValue = cpu_to_le16(0);
492 event->wLength = 0;
493 req->length = sizeof *event;
494
495 DBG(cdev, "notify connect %s\n",
496 ncm->is_open ? "true" : "false");
497 ncm->notify_state = NCM_NOTIFY_NONE;
498 break;
499
500 case NCM_NOTIFY_SPEED:
501 event->bNotificationType = USB_CDC_NOTIFY_SPEED_CHANGE;
502 event->wValue = cpu_to_le16(0);
503 event->wLength = cpu_to_le16(8);
504 req->length = NCM_STATUS_BYTECOUNT;
505
506 /* SPEED_CHANGE data is up/down speeds in bits/sec */
507 data = req->buf + sizeof *event;
508 data[0] = cpu_to_le32(ncm_bitrate(cdev->gadget));
509 data[1] = data[0];
510
511 DBG(cdev, "notify speed %d\n", ncm_bitrate(cdev->gadget));
512 ncm->notify_state = NCM_NOTIFY_CONNECT;
513 break;
514 }
515 event->bmRequestType = 0xA1;
516 event->wIndex = cpu_to_le16(ncm->ctrl_id);
517
518 ncm->notify_req = NULL;
519 /*
520 * In double buffering if there is a space in FIFO,
521 * completion callback can be called right after the call,
522 * so unlocking
523 */
524 spin_unlock(&ncm->lock);
525 status = usb_ep_queue(ncm->notify, req, GFP_ATOMIC);
526 spin_lock(&ncm->lock);
527 if (status < 0) {
528 ncm->notify_req = req;
529 DBG(cdev, "notify --> %d\n", status);
530 }
531}
532
/*
 * Start (or restart) the two-step SPEED -> CONNECT notification
 * sequence towards the host; ncm_do_notify() drives the state
 * machine from there (SPEED, then CONNECT, then idle).
 *
 * Context: ncm->lock held
 */
static void ncm_notify(struct f_ncm *ncm)
{
	/*
	 * NOTE on most versions of Linux, host side cdc-ethernet
	 * won't listen for notifications until its netdevice opens.
	 * The first notification then sits in the FIFO for a long
	 * time, and the second one is queued.
	 *
	 * If ncm_notify() is called before the second (CONNECT)
	 * notification is sent, then it will reset to send the SPEED
	 * notification again (and again, and again), but it's not a problem
	 */
	ncm->notify_state = NCM_NOTIFY_SPEED;
	ncm_do_notify(ncm);
}
551
552static void ncm_notify_complete(struct usb_ep *ep, struct usb_request *req)
553{
554 struct f_ncm *ncm = req->context;
555 struct usb_composite_dev *cdev = ncm->port.func.config->cdev;
556 struct usb_cdc_notification *event = req->buf;
557
558 spin_lock(&ncm->lock);
559 switch (req->status) {
560 case 0:
561 VDBG(cdev, "Notification %02x sent\n",
562 event->bNotificationType);
563 break;
564 case -ECONNRESET:
565 case -ESHUTDOWN:
566 ncm->notify_state = NCM_NOTIFY_NONE;
567 break;
568 default:
569 DBG(cdev, "event %02x --> %d\n",
570 event->bNotificationType, req->status);
571 break;
572 }
573 ncm->notify_req = req;
574 ncm_do_notify(ncm);
575 spin_unlock(&ncm->lock);
576}
577
578static void ncm_ep0out_complete(struct usb_ep *ep, struct usb_request *req)
579{
580 /* now for SET_NTB_INPUT_SIZE only */
581 unsigned in_size;
582 struct usb_function *f = req->context;
583 struct f_ncm *ncm = func_to_ncm(f);
584 struct usb_composite_dev *cdev = ep->driver_data;
585
586 req->context = NULL;
587 if (req->status || req->actual != req->length) {
588 DBG(cdev, "Bad control-OUT transfer\n");
589 goto invalid;
590 }
591
592 in_size = get_unaligned_le32(req->buf);
593 if (in_size < USB_CDC_NCM_NTB_MIN_IN_SIZE ||
594 in_size > le32_to_cpu(ntb_parameters.dwNtbInMaxSize)) {
595 DBG(cdev, "Got wrong INPUT SIZE (%d) from host\n", in_size);
596 goto invalid;
597 }
598
599 ncm->port.fixed_in_len = in_size;
600 VDBG(cdev, "Set NTB INPUT SIZE %d\n", in_size);
601 return;
602
603invalid:
604 usb_ep_set_halt(ep);
605 return;
606}
607
/*
 * Handle NCM-specific CDC class control requests on ep0.
 *
 * The composite layer has already handled standard and vendor requests;
 * this sees only class traffic directed at the NCM function.  For IN
 * requests the reply is staged in cdev->req and queued at the bottom;
 * the one OUT request with a data stage (SET_NTB_INPUT_SIZE) is
 * finished later in ncm_ep0out_complete().
 *
 * Returns the number of bytes queued for the data stage, 0 for a bare
 * status stage, or a negative errno (the device then stalls ep0).
 */
static int ncm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
	struct f_ncm *ncm = func_to_ncm(f);
	struct usb_composite_dev *cdev = f->config->cdev;
	struct usb_request *req = cdev->req;
	int value = -EOPNOTSUPP;
	u16 w_index = le16_to_cpu(ctrl->wIndex);
	u16 w_value = le16_to_cpu(ctrl->wValue);
	u16 w_length = le16_to_cpu(ctrl->wLength);

	/*
	 * composite driver infrastructure handles everything except
	 * CDC class messages; interface activation uses set_alt().
	 */
	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_SET_ETHERNET_PACKET_FILTER:
		/*
		 * see 6.2.30: no data, wIndex = interface,
		 * wValue = packet filter bitmap
		 */
		if (w_length != 0 || w_index != ncm->ctrl_id)
			goto invalid;
		DBG(cdev, "packet filter %02x\n", w_value);
		/*
		 * REVISIT locking of cdc_filter. This assumes the UDC
		 * driver won't have a concurrent packet TX irq running on
		 * another CPU; or that if it does, this write is atomic...
		 */
		ncm->port.cdc_filter = w_value;
		value = 0;
		break;
	/*
	 * and optionally:
	 * case USB_CDC_SEND_ENCAPSULATED_COMMAND:
	 * case USB_CDC_GET_ENCAPSULATED_RESPONSE:
	 * case USB_CDC_SET_ETHERNET_MULTICAST_FILTERS:
	 * case USB_CDC_SET_ETHERNET_PM_PATTERN_FILTER:
	 * case USB_CDC_GET_ETHERNET_PM_PATTERN_FILTER:
	 * case USB_CDC_GET_ETHERNET_STATISTIC:
	 */

	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_GET_NTB_PARAMETERS:

		if (w_length == 0 || w_value != 0 || w_index != ncm->ctrl_id)
			goto invalid;
		/* reply with (a possibly truncated copy of) ntb_parameters */
		value = w_length > sizeof ntb_parameters ?
			sizeof ntb_parameters : w_length;
		memcpy(req->buf, &ntb_parameters, value);
		VDBG(cdev, "Host asked NTB parameters\n");
		break;

	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_GET_NTB_INPUT_SIZE:

		if (w_length < 4 || w_value != 0 || w_index != ncm->ctrl_id)
			goto invalid;
		put_unaligned_le32(ncm->port.fixed_in_len, req->buf);
		value = 4;
		VDBG(cdev, "Host asked INPUT SIZE, sending %d\n",
		     ncm->port.fixed_in_len);
		break;

	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_SET_NTB_INPUT_SIZE:
	{
		/*
		 * This request carries a 4-byte OUT data stage; arm the
		 * ep0 request and let ncm_ep0out_complete() validate and
		 * apply the new size once the data arrives.
		 */
		if (w_length != 4 || w_value != 0 || w_index != ncm->ctrl_id)
			goto invalid;
		req->complete = ncm_ep0out_complete;
		req->length = w_length;
		req->context = f;

		value = req->length;
		break;
	}

	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_GET_NTB_FORMAT:
	{
		uint16_t format;

		if (w_length < 2 || w_value != 0 || w_index != ncm->ctrl_id)
			goto invalid;
		/* 0x0000 = NTB-16, 0x0001 = NTB-32 */
		format = (ncm->parser_opts == &ndp16_opts) ? 0x0000 : 0x0001;
		put_unaligned_le16(format, req->buf);
		value = 2;
		VDBG(cdev, "Host asked NTB FORMAT, sending %d\n", format);
		break;
	}

	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_SET_NTB_FORMAT:
	{
		if (w_length != 0 || w_index != ncm->ctrl_id)
			goto invalid;
		/* switch the datagram parser between 16- and 32-bit NTBs */
		switch (w_value) {
		case 0x0000:
			ncm->parser_opts = &ndp16_opts;
			DBG(cdev, "NCM16 selected\n");
			break;
		case 0x0001:
			ncm->parser_opts = &ndp32_opts;
			DBG(cdev, "NCM32 selected\n");
			break;
		default:
			goto invalid;
		}
		value = 0;
		break;
	}
	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_GET_CRC_MODE:
	{
		uint16_t is_crc;

		if (w_length < 2 || w_value != 0 || w_index != ncm->ctrl_id)
			goto invalid;
		is_crc = ncm->is_crc ? 0x0001 : 0x0000;
		put_unaligned_le16(is_crc, req->buf);
		value = 2;
		VDBG(cdev, "Host asked CRC MODE, sending %d\n", is_crc);
		break;
	}

	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_SET_CRC_MODE:
	{
		int ndp_hdr_crc = 0;

		if (w_length != 0 || w_index != ncm->ctrl_id)
			goto invalid;
		switch (w_value) {
		case 0x0000:
			ncm->is_crc = false;
			ndp_hdr_crc = NCM_NDP_HDR_NOCRC;
			DBG(cdev, "non-CRC mode selected\n");
			break;
		case 0x0001:
			ncm->is_crc = true;
			ndp_hdr_crc = NCM_NDP_HDR_CRC;
			DBG(cdev, "CRC mode selected\n");
			break;
		default:
			goto invalid;
		}
		/* the NDP signature encodes whether CRCs are in use */
		ncm->parser_opts->ndp_sign &= ~NCM_NDP_HDR_CRC_MASK;
		ncm->parser_opts->ndp_sign |= ndp_hdr_crc;
		value = 0;
		break;
	}

	/* and disabled in ncm descriptor: */
	/* case USB_CDC_GET_NET_ADDRESS: */
	/* case USB_CDC_SET_NET_ADDRESS: */
	/* case USB_CDC_GET_MAX_DATAGRAM_SIZE: */
	/* case USB_CDC_SET_MAX_DATAGRAM_SIZE: */

	default:
invalid:
		DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
	}

	/* respond with data transfer or status phase? */
	if (value >= 0) {
		DBG(cdev, "ncm req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
		req->zero = 0;
		req->length = value;
		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
		if (value < 0)
			ERROR(cdev, "ncm req %02x.%02x response err %d\n",
					ctrl->bRequestType, ctrl->bRequest,
					value);
	}

	/* device either stalls (value < 0) or reports success */
	return value;
}
790
791
/*
 * SET_INTERFACE handler.  The control interface has only altsetting 0
 * (selecting it (re)starts the notification endpoint); the data
 * interface uses altsetting 0 for "link down" and altsetting 1 to
 * bring the ethernet data plane up via gether_connect().
 *
 * Returns 0 on success or a negative errno for a bad intf/alt pair.
 */
static int ncm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
	struct f_ncm *ncm = func_to_ncm(f);
	struct usb_composite_dev *cdev = f->config->cdev;

	/* Control interface has only altsetting 0 */
	if (intf == ncm->ctrl_id) {
		if (alt != 0)
			goto fail;

		/* driver_data non-NULL means the endpoint is already live */
		if (ncm->notify->driver_data) {
			DBG(cdev, "reset ncm control %d\n", intf);
			usb_ep_disable(ncm->notify);
		} else {
			DBG(cdev, "init ncm ctrl %d\n", intf);
			/* pick the descriptor matching the connected speed */
			ncm->notify_desc = ep_choose(cdev->gadget,
						     ncm->hs.notify,
						     ncm->fs.notify);
		}
		usb_ep_enable(ncm->notify, ncm->notify_desc);
		ncm->notify->driver_data = ncm;

	/* Data interface has two altsettings, 0 and 1 */
	} else if (intf == ncm->data_id) {
		if (alt > 1)
			goto fail;

		/* tear down any existing link before (re)configuring */
		if (ncm->port.in_ep->driver_data) {
			DBG(cdev, "reset ncm\n");
			gether_disconnect(&ncm->port);
			ncm_reset_values(ncm);
		}

		/*
		 * CDC Network only sends data in non-default altsettings.
		 * Changing altsettings resets filters, statistics, etc.
		 */
		if (alt == 1) {
			struct net_device *net;

			if (!ncm->port.in) {
				DBG(cdev, "init ncm\n");
				ncm->port.in = ep_choose(cdev->gadget,
							 ncm->hs.in,
							 ncm->fs.in);
				ncm->port.out = ep_choose(cdev->gadget,
							  ncm->hs.out,
							  ncm->fs.out);
			}

			/* TODO */
			/* Enable zlps by default for NCM conformance;
			 * override for musb_hdrc (avoids txdma ovhead)
			 */
			ncm->port.is_zlp_ok = !(
				gadget_is_musbhdrc(cdev->gadget)
				);
			ncm->port.cdc_filter = DEFAULT_FILTER;
			DBG(cdev, "activate ncm\n");
			net = gether_connect(&ncm->port);
			if (IS_ERR(net))
				return PTR_ERR(net);
		}

		/* tell the host about the (possibly changed) link state */
		spin_lock(&ncm->lock);
		ncm_notify(ncm);
		spin_unlock(&ncm->lock);
	} else
		goto fail;

	return 0;
fail:
	return -EINVAL;
}
866
867/*
868 * Because the data interface supports multiple altsettings,
869 * this NCM function *MUST* implement a get_alt() method.
870 */
871static int ncm_get_alt(struct usb_function *f, unsigned intf)
872{
873 struct f_ncm *ncm = func_to_ncm(f);
874
875 if (intf == ncm->ctrl_id)
876 return 0;
877 return ncm->port.in_ep->driver_data ? 1 : 0;
878}
879
/*
 * Wrap one outgoing ethernet frame into an NTB (NCM Transfer Block):
 * NTH header, then an aligned NDP holding one datagram entry plus the
 * zero terminator entry, then the (optionally CRC-suffixed) frame.
 * Large NTBs are zero-padded out to the fixed IN-NTB size.
 *
 * Consumes @skb; returns a freshly allocated skb holding the NTB, or
 * NULL when the frame cannot fit (the frame is then dropped) or the
 * copy allocation fails.
 */
static struct sk_buff *ncm_wrap_ntb(struct gether *port,
				    struct sk_buff *skb)
{
	struct f_ncm *ncm = func_to_ncm(&port->func);
	struct sk_buff *skb2;
	int ncb_len = 0;
	__le16 *tmp;
	/*
	 * NOTE(review): these descriptor fields are little-endian on the
	 * wire; reading them without le16_to_cpu() looks correct only on
	 * little-endian CPUs — confirm against ntb_parameters' definition.
	 */
	int div = ntb_parameters.wNdpInDivisor;
	int rem = ntb_parameters.wNdpInPayloadRemainder;
	int pad;
	int ndp_align = ntb_parameters.wNdpInAlignment;
	int ndp_pad;
	unsigned max_size = ncm->port.fixed_in_len;
	struct ndp_parser_opts *opts = ncm->parser_opts;
	unsigned crc_len = ncm->is_crc ? sizeof(uint32_t) : 0;

	/* total header area: NTH + alignment + NDP + 2 entries + payload pad */
	ncb_len += opts->nth_size;
	ndp_pad = ALIGN(ncb_len, ndp_align) - ncb_len;
	ncb_len += ndp_pad;
	ncb_len += opts->ndp_size;
	ncb_len += 2 * 2 * opts->dgram_item_len; /* Datagram entry */
	ncb_len += 2 * 2 * opts->dgram_item_len; /* Zero datagram entry */
	pad = ALIGN(ncb_len, div) + rem - ncb_len;
	ncb_len += pad;

	if (ncb_len + skb->len + crc_len > max_size) {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	/* make room for the headers in front and the pad/CRC behind */
	skb2 = skb_copy_expand(skb, ncb_len,
			       max_size - skb->len - ncb_len - crc_len,
			       GFP_ATOMIC);
	dev_kfree_skb_any(skb);
	if (!skb2)
		return NULL;

	skb = skb2;

	tmp = (void *) skb_push(skb, ncb_len);
	memset(tmp, 0, ncb_len);

	/* NTH; tmp is __le16 *, so "tmp += 2" skips the 32-bit signature */
	put_unaligned_le32(opts->nth_sign, tmp); /* dwSignature */
	tmp += 2;
	/* wHeaderLength */
	put_unaligned_le16(opts->nth_size, tmp++);
	tmp++; /* skip wSequence */
	put_ncm(&tmp, opts->block_length, skb->len); /* (d)wBlockLength */
	/* (d)wFpIndex */
	/* the first pointer is right after the NTH + align */
	put_ncm(&tmp, opts->fp_index, opts->nth_size + ndp_pad);

	tmp = (void *)tmp + ndp_pad;

	/* NDP */
	put_unaligned_le32(opts->ndp_sign, tmp); /* dwSignature */
	tmp += 2;
	/* wLength */
	put_unaligned_le16(ncb_len - opts->nth_size - pad, tmp++);

	tmp += opts->reserved1;
	tmp += opts->next_fp_index; /* skip reserved (d)wNextFpIndex */
	tmp += opts->reserved2;

	/* append the CRC over the datagram before writing its entry */
	if (ncm->is_crc) {
		uint32_t crc;

		crc = ~crc32_le(~0,
				skb->data + ncb_len,
				skb->len - ncb_len);
		put_unaligned_le32(crc, skb->data + skb->len);
		skb_put(skb, crc_len);
	}

	/* (d)wDatagramIndex[0] */
	put_ncm(&tmp, opts->dgram_item_len, ncb_len);
	/* (d)wDatagramLength[0] */
	put_ncm(&tmp, opts->dgram_item_len, skb->len - ncb_len);
	/* (d)wDatagramIndex[1] and (d)wDatagramLength[1] already zeroed */

	/* pad big NTBs up to the full fixed size */
	if (skb->len > MAX_TX_NONFIXED)
		memset(skb_put(skb, max_size - skb->len),
		       0, max_size - skb->len);

	return skb;
}
966
967static int ncm_unwrap_ntb(struct gether *port,
968 struct sk_buff *skb,
969 struct sk_buff_head *list)
970{
971 struct f_ncm *ncm = func_to_ncm(&port->func);
972 __le16 *tmp = (void *) skb->data;
973 unsigned index, index2;
974 unsigned dg_len, dg_len2;
975 unsigned ndp_len;
976 struct sk_buff *skb2;
977 int ret = -EINVAL;
978 unsigned max_size = le32_to_cpu(ntb_parameters.dwNtbOutMaxSize);
979 struct ndp_parser_opts *opts = ncm->parser_opts;
980 unsigned crc_len = ncm->is_crc ? sizeof(uint32_t) : 0;
981 int dgram_counter;
982
983 /* dwSignature */
984 if (get_unaligned_le32(tmp) != opts->nth_sign) {
985 INFO(port->func.config->cdev, "Wrong NTH SIGN, skblen %d\n",
986 skb->len);
987 print_hex_dump(KERN_INFO, "HEAD:", DUMP_PREFIX_ADDRESS, 32, 1,
988 skb->data, 32, false);
989
990 goto err;
991 }
992 tmp += 2;
993 /* wHeaderLength */
994 if (get_unaligned_le16(tmp++) != opts->nth_size) {
995 INFO(port->func.config->cdev, "Wrong NTB headersize\n");
996 goto err;
997 }
998 tmp++; /* skip wSequence */
999
1000 /* (d)wBlockLength */
1001 if (get_ncm(&tmp, opts->block_length) > max_size) {
1002 INFO(port->func.config->cdev, "OUT size exceeded\n");
1003 goto err;
1004 }
1005
1006 index = get_ncm(&tmp, opts->fp_index);
1007 /* NCM 3.2 */
1008 if (((index % 4) != 0) && (index < opts->nth_size)) {
1009 INFO(port->func.config->cdev, "Bad index: %x\n",
1010 index);
1011 goto err;
1012 }
1013
1014 /* walk through NDP */
1015 tmp = ((void *)skb->data) + index;
1016 if (get_unaligned_le32(tmp) != opts->ndp_sign) {
1017 INFO(port->func.config->cdev, "Wrong NDP SIGN\n");
1018 goto err;
1019 }
1020 tmp += 2;
1021
1022 ndp_len = get_unaligned_le16(tmp++);
1023 /*
1024 * NCM 3.3.1
1025 * entry is 2 items
1026 * item size is 16/32 bits, opts->dgram_item_len * 2 bytes
1027 * minimal: struct usb_cdc_ncm_ndpX + normal entry + zero entry
1028 */
1029 if ((ndp_len < opts->ndp_size + 2 * 2 * (opts->dgram_item_len * 2))
1030 || (ndp_len % opts->ndplen_align != 0)) {
1031 INFO(port->func.config->cdev, "Bad NDP length: %x\n", ndp_len);
1032 goto err;
1033 }
1034 tmp += opts->reserved1;
1035 tmp += opts->next_fp_index; /* skip reserved (d)wNextFpIndex */
1036 tmp += opts->reserved2;
1037
1038 ndp_len -= opts->ndp_size;
1039 index2 = get_ncm(&tmp, opts->dgram_item_len);
1040 dg_len2 = get_ncm(&tmp, opts->dgram_item_len);
1041 dgram_counter = 0;
1042
1043 do {
1044 index = index2;
1045 dg_len = dg_len2;
1046 if (dg_len < 14 + crc_len) { /* ethernet header + crc */
1047 INFO(port->func.config->cdev, "Bad dgram length: %x\n",
1048 dg_len);
1049 goto err;
1050 }
1051 if (ncm->is_crc) {
1052 uint32_t crc, crc2;
1053
1054 crc = get_unaligned_le32(skb->data +
1055 index + dg_len - crc_len);
1056 crc2 = ~crc32_le(~0,
1057 skb->data + index,
1058 dg_len - crc_len);
1059 if (crc != crc2) {
1060 INFO(port->func.config->cdev, "Bad CRC\n");
1061 goto err;
1062 }
1063 }
1064
1065 index2 = get_ncm(&tmp, opts->dgram_item_len);
1066 dg_len2 = get_ncm(&tmp, opts->dgram_item_len);
1067
1068 if (index2 == 0 || dg_len2 == 0) {
1069 skb2 = skb;
1070 } else {
1071 skb2 = skb_clone(skb, GFP_ATOMIC);
1072 if (skb2 == NULL)
1073 goto err;
1074 }
1075
1076 if (!skb_pull(skb2, index)) {
1077 ret = -EOVERFLOW;
1078 goto err;
1079 }
1080
1081 skb_trim(skb2, dg_len - crc_len);
1082 skb_queue_tail(list, skb2);
1083
1084 ndp_len -= 2 * (opts->dgram_item_len * 2);
1085
1086 dgram_counter++;
1087
1088 if (index2 == 0 || dg_len2 == 0)
1089 break;
1090 } while (ndp_len > 2 * (opts->dgram_item_len * 2)); /* zero entry */
1091
1092 VDBG(port->func.config->cdev,
1093 "Parsed NTB with %d frames\n", dgram_counter);
1094 return 0;
1095err:
1096 skb_queue_purge(list);
1097 dev_kfree_skb_any(skb);
1098 return ret;
1099}
1100
1101static void ncm_disable(struct usb_function *f)
1102{
1103 struct f_ncm *ncm = func_to_ncm(f);
1104 struct usb_composite_dev *cdev = f->config->cdev;
1105
1106 DBG(cdev, "ncm deactivated\n");
1107
1108 if (ncm->port.in_ep->driver_data)
1109 gether_disconnect(&ncm->port);
1110
1111 if (ncm->notify->driver_data) {
1112 usb_ep_disable(ncm->notify);
1113 ncm->notify->driver_data = NULL;
1114 ncm->notify_desc = NULL;
1115 }
1116}
1117
1118/*-------------------------------------------------------------------------*/
1119
1120/*
1121 * Callbacks let us notify the host about connect/disconnect when the
1122 * net device is opened or closed.
1123 *
1124 * For testing, note that link states on this side include both opened
1125 * and closed variants of:
1126 *
1127 * - disconnected/unconfigured
1128 * - configured but inactive (data alt 0)
1129 * - configured and active (data alt 1)
1130 *
1131 * Each needs to be tested with unplug, rmmod, SET_CONFIGURATION, and
1132 * SET_INTERFACE (altsetting). Remember also that "configured" doesn't
1133 * imply the host is actually polling the notification endpoint, and
1134 * likewise that "active" doesn't imply it's actually using the data
1135 * endpoints for traffic.
1136 */
1137
/*
 * Called when the gadget-side network device is opened: mark the link
 * up and start the SPEED/CONNECT notification sequence to the host.
 */
static void ncm_open(struct gether *geth)
{
	struct f_ncm *ncm = func_to_ncm(&geth->func);

	DBG(ncm->port.func.config->cdev, "%s\n", __func__);

	/* ncm_notify() requires ncm->lock to be held */
	spin_lock(&ncm->lock);
	ncm->is_open = true;
	ncm_notify(ncm);
	spin_unlock(&ncm->lock);
}
1149
/*
 * Called when the gadget-side network device is closed: mark the link
 * down and notify the host of the disconnect.
 */
static void ncm_close(struct gether *geth)
{
	struct f_ncm *ncm = func_to_ncm(&geth->func);

	DBG(ncm->port.func.config->cdev, "%s\n", __func__);

	/* ncm_notify() requires ncm->lock to be held */
	spin_lock(&ncm->lock);
	ncm->is_open = false;
	ncm_notify(ncm);
	spin_unlock(&ncm->lock);
}
1161
1162/*-------------------------------------------------------------------------*/
1163
1164/* ethernet function driver setup/binding */
1165
1166static int __init
1167ncm_bind(struct usb_configuration *c, struct usb_function *f)
1168{
1169 struct usb_composite_dev *cdev = c->cdev;
1170 struct f_ncm *ncm = func_to_ncm(f);
1171 int status;
1172 struct usb_ep *ep;
1173
1174 /* allocate instance-specific interface IDs */
1175 status = usb_interface_id(c, f);
1176 if (status < 0)
1177 goto fail;
1178 ncm->ctrl_id = status;
1179 ncm_iad_desc.bFirstInterface = status;
1180
1181 ncm_control_intf.bInterfaceNumber = status;
1182 ncm_union_desc.bMasterInterface0 = status;
1183
1184 status = usb_interface_id(c, f);
1185 if (status < 0)
1186 goto fail;
1187 ncm->data_id = status;
1188
1189 ncm_data_nop_intf.bInterfaceNumber = status;
1190 ncm_data_intf.bInterfaceNumber = status;
1191 ncm_union_desc.bSlaveInterface0 = status;
1192
1193 status = -ENODEV;
1194
1195 /* allocate instance-specific endpoints */
1196 ep = usb_ep_autoconfig(cdev->gadget, &fs_ncm_in_desc);
1197 if (!ep)
1198 goto fail;
1199 ncm->port.in_ep = ep;
1200 ep->driver_data = cdev; /* claim */
1201
1202 ep = usb_ep_autoconfig(cdev->gadget, &fs_ncm_out_desc);
1203 if (!ep)
1204 goto fail;
1205 ncm->port.out_ep = ep;
1206 ep->driver_data = cdev; /* claim */
1207
1208 ep = usb_ep_autoconfig(cdev->gadget, &fs_ncm_notify_desc);
1209 if (!ep)
1210 goto fail;
1211 ncm->notify = ep;
1212 ep->driver_data = cdev; /* claim */
1213
1214 status = -ENOMEM;
1215
1216 /* allocate notification request and buffer */
1217 ncm->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL);
1218 if (!ncm->notify_req)
1219 goto fail;
1220 ncm->notify_req->buf = kmalloc(NCM_STATUS_BYTECOUNT, GFP_KERNEL);
1221 if (!ncm->notify_req->buf)
1222 goto fail;
1223 ncm->notify_req->context = ncm;
1224 ncm->notify_req->complete = ncm_notify_complete;
1225
1226 /* copy descriptors, and track endpoint copies */
1227 f->descriptors = usb_copy_descriptors(ncm_fs_function);
1228 if (!f->descriptors)
1229 goto fail;
1230
1231 ncm->fs.in = usb_find_endpoint(ncm_fs_function,
1232 f->descriptors, &fs_ncm_in_desc);
1233 ncm->fs.out = usb_find_endpoint(ncm_fs_function,
1234 f->descriptors, &fs_ncm_out_desc);
1235 ncm->fs.notify = usb_find_endpoint(ncm_fs_function,
1236 f->descriptors, &fs_ncm_notify_desc);
1237
1238 /*
1239 * support all relevant hardware speeds... we expect that when
1240 * hardware is dual speed, all bulk-capable endpoints work at
1241 * both speeds
1242 */
1243 if (gadget_is_dualspeed(c->cdev->gadget)) {
1244 hs_ncm_in_desc.bEndpointAddress =
1245 fs_ncm_in_desc.bEndpointAddress;
1246 hs_ncm_out_desc.bEndpointAddress =
1247 fs_ncm_out_desc.bEndpointAddress;
1248 hs_ncm_notify_desc.bEndpointAddress =
1249 fs_ncm_notify_desc.bEndpointAddress;
1250
1251 /* copy descriptors, and track endpoint copies */
1252 f->hs_descriptors = usb_copy_descriptors(ncm_hs_function);
1253 if (!f->hs_descriptors)
1254 goto fail;
1255
1256 ncm->hs.in = usb_find_endpoint(ncm_hs_function,
1257 f->hs_descriptors, &hs_ncm_in_desc);
1258 ncm->hs.out = usb_find_endpoint(ncm_hs_function,
1259 f->hs_descriptors, &hs_ncm_out_desc);
1260 ncm->hs.notify = usb_find_endpoint(ncm_hs_function,
1261 f->hs_descriptors, &hs_ncm_notify_desc);
1262 }
1263
1264 /*
1265 * NOTE: all that is done without knowing or caring about
1266 * the network link ... which is unavailable to this code
1267 * until we're activated via set_alt().
1268 */
1269
1270 ncm->port.open = ncm_open;
1271 ncm->port.close = ncm_close;
1272
1273 DBG(cdev, "CDC Network: %s speed IN/%s OUT/%s NOTIFY/%s\n",
1274 gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
1275 ncm->port.in_ep->name, ncm->port.out_ep->name,
1276 ncm->notify->name);
1277 return 0;
1278
1279fail:
1280 if (f->descriptors)
1281 usb_free_descriptors(f->descriptors);
1282
1283 if (ncm->notify_req) {
1284 kfree(ncm->notify_req->buf);
1285 usb_ep_free_request(ncm->notify, ncm->notify_req);
1286 }
1287
1288 /* we might as well release our claims on endpoints */
1289 if (ncm->notify)
1290 ncm->notify->driver_data = NULL;
1291 if (ncm->port.out)
1292 ncm->port.out_ep->driver_data = NULL;
1293 if (ncm->port.in)
1294 ncm->port.in_ep->driver_data = NULL;
1295
1296 ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
1297
1298 return status;
1299}
1300
/*
 * Reverse of ncm_bind(): free the per-speed descriptor copies, the
 * notification request and its buffer, then the function instance.
 */
static void
ncm_unbind(struct usb_configuration *c, struct usb_function *f)
{
	struct f_ncm *ncm = func_to_ncm(f);

	DBG(c->cdev, "ncm unbind\n");

	if (gadget_is_dualspeed(c->cdev->gadget))
		usb_free_descriptors(f->hs_descriptors);
	usb_free_descriptors(f->descriptors);

	kfree(ncm->notify_req->buf);
	usb_ep_free_request(ncm->notify, ncm->notify_req);

	/*
	 * Slot 1 points at ncm->ethaddr (set in ncm_bind_config); clear
	 * it before that storage is freed with the instance below.
	 */
	ncm_string_defs[1].s = NULL;
	kfree(ncm);
}
1318
1319/**
1320 * ncm_bind_config - add CDC Network link to a configuration
1321 * @c: the configuration to support the network link
1322 * @ethaddr: a buffer in which the ethernet address of the host side
1323 * side of the link was recorded
1324 * Context: single threaded during gadget setup
1325 *
1326 * Returns zero on success, else negative errno.
1327 *
1328 * Caller must have called @gether_setup(). Caller is also responsible
1329 * for calling @gether_cleanup() before module unload.
1330 */
1331int __init ncm_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN])
1332{
1333 struct f_ncm *ncm;
1334 int status;
1335
1336 if (!can_support_ecm(c->cdev->gadget) || !ethaddr)
1337 return -EINVAL;
1338
1339 /* maybe allocate device-global string IDs */
1340 if (ncm_string_defs[0].id == 0) {
1341
1342 /* control interface label */
1343 status = usb_string_id(c->cdev);
1344 if (status < 0)
1345 return status;
1346 ncm_string_defs[STRING_CTRL_IDX].id = status;
1347 ncm_control_intf.iInterface = status;
1348
1349 /* data interface label */
1350 status = usb_string_id(c->cdev);
1351 if (status < 0)
1352 return status;
1353 ncm_string_defs[STRING_DATA_IDX].id = status;
1354 ncm_data_nop_intf.iInterface = status;
1355 ncm_data_intf.iInterface = status;
1356
1357 /* MAC address */
1358 status = usb_string_id(c->cdev);
1359 if (status < 0)
1360 return status;
1361 ncm_string_defs[STRING_MAC_IDX].id = status;
1362 ecm_desc.iMACAddress = status;
1363
1364 /* IAD */
1365 status = usb_string_id(c->cdev);
1366 if (status < 0)
1367 return status;
1368 ncm_string_defs[STRING_IAD_IDX].id = status;
1369 ncm_iad_desc.iFunction = status;
1370 }
1371
1372 /* allocate and initialize one new instance */
1373 ncm = kzalloc(sizeof *ncm, GFP_KERNEL);
1374 if (!ncm)
1375 return -ENOMEM;
1376
1377 /* export host's Ethernet address in CDC format */
1378 snprintf(ncm->ethaddr, sizeof ncm->ethaddr,
1379 "%02X%02X%02X%02X%02X%02X",
1380 ethaddr[0], ethaddr[1], ethaddr[2],
1381 ethaddr[3], ethaddr[4], ethaddr[5]);
1382 ncm_string_defs[1].s = ncm->ethaddr;
1383
1384 spin_lock_init(&ncm->lock);
1385 ncm_reset_values(ncm);
1386 ncm->port.is_fixed = true;
1387
1388 ncm->port.func.name = "cdc_network";
1389 ncm->port.func.strings = ncm_strings;
1390 /* descriptors are per-instance copies */
1391 ncm->port.func.bind = ncm_bind;
1392 ncm->port.func.unbind = ncm_unbind;
1393 ncm->port.func.set_alt = ncm_set_alt;
1394 ncm->port.func.get_alt = ncm_get_alt;
1395 ncm->port.func.setup = ncm_setup;
1396 ncm->port.func.disable = ncm_disable;
1397
1398 ncm->port.wrap = ncm_wrap_ntb;
1399 ncm->port.unwrap = ncm_unwrap_ntb;
1400
1401 status = usb_add_function(c, &ncm->port.func);
1402 if (status) {
1403 ncm_string_defs[1].s = NULL;
1404 kfree(ncm);
1405 }
1406 return status;
1407}
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index d4fdf65fb925..a6eacb59571b 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -3392,25 +3392,28 @@ static int __init fsg_bind(struct usb_gadget *gadget)
3392 dev_set_name(&curlun->dev,"%s-lun%d", 3392 dev_set_name(&curlun->dev,"%s-lun%d",
3393 dev_name(&gadget->dev), i); 3393 dev_name(&gadget->dev), i);
3394 3394
3395 if ((rc = device_register(&curlun->dev)) != 0) { 3395 kref_get(&fsg->ref);
3396 rc = device_register(&curlun->dev);
3397 if (rc) {
3396 INFO(fsg, "failed to register LUN%d: %d\n", i, rc); 3398 INFO(fsg, "failed to register LUN%d: %d\n", i, rc);
3397 goto out; 3399 put_device(&curlun->dev);
3398 }
3399 if ((rc = device_create_file(&curlun->dev,
3400 &dev_attr_ro)) != 0 ||
3401 (rc = device_create_file(&curlun->dev,
3402 &dev_attr_nofua)) != 0 ||
3403 (rc = device_create_file(&curlun->dev,
3404 &dev_attr_file)) != 0) {
3405 device_unregister(&curlun->dev);
3406 goto out; 3400 goto out;
3407 } 3401 }
3408 curlun->registered = 1; 3402 curlun->registered = 1;
3409 kref_get(&fsg->ref); 3403
3404 rc = device_create_file(&curlun->dev, &dev_attr_ro);
3405 if (rc)
3406 goto out;
3407 rc = device_create_file(&curlun->dev, &dev_attr_nofua);
3408 if (rc)
3409 goto out;
3410 rc = device_create_file(&curlun->dev, &dev_attr_file);
3411 if (rc)
3412 goto out;
3410 3413
3411 if (mod_data.file[i] && *mod_data.file[i]) { 3414 if (mod_data.file[i] && *mod_data.file[i]) {
3412 if ((rc = fsg_lun_open(curlun, 3415 rc = fsg_lun_open(curlun, mod_data.file[i]);
3413 mod_data.file[i])) != 0) 3416 if (rc)
3414 goto out; 3417 goto out;
3415 } else if (!mod_data.removable) { 3418 } else if (!mod_data.removable) {
3416 ERROR(fsg, "no file given for LUN%d\n", i); 3419 ERROR(fsg, "no file given for LUN%d\n", i);
diff --git a/drivers/usb/gadget/g_ffs.c b/drivers/usb/gadget/g_ffs.c
index af75e3620849..ebf6970a10bf 100644
--- a/drivers/usb/gadget/g_ffs.c
+++ b/drivers/usb/gadget/g_ffs.c
@@ -1,7 +1,29 @@
1/*
2 * g_ffs.c -- user mode file system API for USB composite function controllers
3 *
4 * Copyright (C) 2010 Samsung Electronics
5 * Author: Michal Nazarewicz <m.nazarewicz@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#define pr_fmt(fmt) "g_ffs: " fmt
23
1#include <linux/module.h> 24#include <linux/module.h>
2#include <linux/utsname.h> 25#include <linux/utsname.h>
3 26
4
5/* 27/*
6 * kbuild is not very cooperative with respect to linking separately 28 * kbuild is not very cooperative with respect to linking separately
7 * compiled library objects into one module. So for now we won't use 29 * compiled library objects into one module. So for now we won't use
@@ -43,7 +65,6 @@ static int eth_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]);
43 65
44#include "f_fs.c" 66#include "f_fs.c"
45 67
46
47#define DRIVER_NAME "g_ffs" 68#define DRIVER_NAME "g_ffs"
48#define DRIVER_DESC "USB Function Filesystem" 69#define DRIVER_DESC "USB Function Filesystem"
49#define DRIVER_VERSION "24 Aug 2004" 70#define DRIVER_VERSION "24 Aug 2004"
@@ -73,8 +94,6 @@ MODULE_PARM_DESC(bDeviceSubClass, "USB Device subclass");
73module_param_named(bDeviceProtocol, gfs_dev_desc.bDeviceProtocol, byte, 0644); 94module_param_named(bDeviceProtocol, gfs_dev_desc.bDeviceProtocol, byte, 0644);
74MODULE_PARM_DESC(bDeviceProtocol, "USB Device protocol"); 95MODULE_PARM_DESC(bDeviceProtocol, "USB Device protocol");
75 96
76
77
78static const struct usb_descriptor_header *gfs_otg_desc[] = { 97static const struct usb_descriptor_header *gfs_otg_desc[] = {
79 (const struct usb_descriptor_header *) 98 (const struct usb_descriptor_header *)
80 &(const struct usb_otg_descriptor) { 99 &(const struct usb_otg_descriptor) {
@@ -91,8 +110,7 @@ static const struct usb_descriptor_header *gfs_otg_desc[] = {
91 NULL 110 NULL
92}; 111};
93 112
94/* string IDs are assigned dynamically */ 113/* String IDs are assigned dynamically */
95
96static struct usb_string gfs_strings[] = { 114static struct usb_string gfs_strings[] = {
97#ifdef CONFIG_USB_FUNCTIONFS_RNDIS 115#ifdef CONFIG_USB_FUNCTIONFS_RNDIS
98 { .s = "FunctionFS + RNDIS" }, 116 { .s = "FunctionFS + RNDIS" },
@@ -114,8 +132,6 @@ static struct usb_gadget_strings *gfs_dev_strings[] = {
114 NULL, 132 NULL,
115}; 133};
116 134
117
118
119struct gfs_configuration { 135struct gfs_configuration {
120 struct usb_configuration c; 136 struct usb_configuration c;
121 int (*eth)(struct usb_configuration *c, u8 *ethaddr); 137 int (*eth)(struct usb_configuration *c, u8 *ethaddr);
@@ -138,7 +154,6 @@ struct gfs_configuration {
138#endif 154#endif
139}; 155};
140 156
141
142static int gfs_bind(struct usb_composite_dev *cdev); 157static int gfs_bind(struct usb_composite_dev *cdev);
143static int gfs_unbind(struct usb_composite_dev *cdev); 158static int gfs_unbind(struct usb_composite_dev *cdev);
144static int gfs_do_config(struct usb_configuration *c); 159static int gfs_do_config(struct usb_configuration *c);
@@ -151,11 +166,9 @@ static struct usb_composite_driver gfs_driver = {
151 .iProduct = DRIVER_DESC, 166 .iProduct = DRIVER_DESC,
152}; 167};
153 168
154
155static struct ffs_data *gfs_ffs_data; 169static struct ffs_data *gfs_ffs_data;
156static unsigned long gfs_registered; 170static unsigned long gfs_registered;
157 171
158
159static int gfs_init(void) 172static int gfs_init(void)
160{ 173{
161 ENTER(); 174 ENTER();
@@ -175,7 +188,6 @@ static void gfs_exit(void)
175} 188}
176module_exit(gfs_exit); 189module_exit(gfs_exit);
177 190
178
179static int functionfs_ready_callback(struct ffs_data *ffs) 191static int functionfs_ready_callback(struct ffs_data *ffs)
180{ 192{
181 int ret; 193 int ret;
@@ -200,14 +212,11 @@ static void functionfs_closed_callback(struct ffs_data *ffs)
200 usb_composite_unregister(&gfs_driver); 212 usb_composite_unregister(&gfs_driver);
201} 213}
202 214
203
204static int functionfs_check_dev_callback(const char *dev_name) 215static int functionfs_check_dev_callback(const char *dev_name)
205{ 216{
206 return 0; 217 return 0;
207} 218}
208 219
209
210
211static int gfs_bind(struct usb_composite_dev *cdev) 220static int gfs_bind(struct usb_composite_dev *cdev)
212{ 221{
213 int ret, i; 222 int ret, i;
@@ -274,7 +283,6 @@ static int gfs_unbind(struct usb_composite_dev *cdev)
274 return 0; 283 return 0;
275} 284}
276 285
277
278static int gfs_do_config(struct usb_configuration *c) 286static int gfs_do_config(struct usb_configuration *c)
279{ 287{
280 struct gfs_configuration *gc = 288 struct gfs_configuration *gc =
@@ -315,7 +323,6 @@ static int gfs_do_config(struct usb_configuration *c)
315 return 0; 323 return 0;
316} 324}
317 325
318
319#ifdef CONFIG_USB_FUNCTIONFS_ETH 326#ifdef CONFIG_USB_FUNCTIONFS_ETH
320 327
321static int eth_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]) 328static int eth_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN])
diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h
index e511fec9f26d..5c2720d64ffa 100644
--- a/drivers/usb/gadget/gadget_chips.h
+++ b/drivers/usb/gadget/gadget_chips.h
@@ -96,7 +96,7 @@
96 96
97/* Mentor high speed "dual role" controller, in peripheral role */ 97/* Mentor high speed "dual role" controller, in peripheral role */
98#ifdef CONFIG_USB_GADGET_MUSB_HDRC 98#ifdef CONFIG_USB_GADGET_MUSB_HDRC
99#define gadget_is_musbhdrc(g) !strcmp("musb_hdrc", (g)->name) 99#define gadget_is_musbhdrc(g) !strcmp("musb-hdrc", (g)->name)
100#else 100#else
101#define gadget_is_musbhdrc(g) 0 101#define gadget_is_musbhdrc(g) 0
102#endif 102#endif
@@ -120,10 +120,10 @@
120#define gadget_is_fsl_qe(g) 0 120#define gadget_is_fsl_qe(g) 0
121#endif 121#endif
122 122
123#ifdef CONFIG_USB_GADGET_CI13XXX 123#ifdef CONFIG_USB_GADGET_CI13XXX_PCI
124#define gadget_is_ci13xxx(g) (!strcmp("ci13xxx_udc", (g)->name)) 124#define gadget_is_ci13xxx_pci(g) (!strcmp("ci13xxx_pci", (g)->name))
125#else 125#else
126#define gadget_is_ci13xxx(g) 0 126#define gadget_is_ci13xxx_pci(g) 0
127#endif 127#endif
128 128
129// CONFIG_USB_GADGET_SX2 129// CONFIG_USB_GADGET_SX2
@@ -142,6 +142,17 @@
142#define gadget_is_s3c_hsotg(g) 0 142#define gadget_is_s3c_hsotg(g) 0
143#endif 143#endif
144 144
145#ifdef CONFIG_USB_GADGET_EG20T
146#define gadget_is_pch(g) (!strcmp("pch_udc", (g)->name))
147#else
148#define gadget_is_pch(g) 0
149#endif
150
151#ifdef CONFIG_USB_GADGET_CI13XXX_MSM
152#define gadget_is_ci13xxx_msm(g) (!strcmp("ci13xxx_msm", (g)->name))
153#else
154#define gadget_is_ci13xxx_msm(g) 0
155#endif
145 156
146/** 157/**
147 * usb_gadget_controller_number - support bcdDevice id convention 158 * usb_gadget_controller_number - support bcdDevice id convention
@@ -192,7 +203,7 @@ static inline int usb_gadget_controller_number(struct usb_gadget *gadget)
192 return 0x21; 203 return 0x21;
193 else if (gadget_is_fsl_qe(gadget)) 204 else if (gadget_is_fsl_qe(gadget))
194 return 0x22; 205 return 0x22;
195 else if (gadget_is_ci13xxx(gadget)) 206 else if (gadget_is_ci13xxx_pci(gadget))
196 return 0x23; 207 return 0x23;
197 else if (gadget_is_langwell(gadget)) 208 else if (gadget_is_langwell(gadget))
198 return 0x24; 209 return 0x24;
@@ -200,6 +211,10 @@ static inline int usb_gadget_controller_number(struct usb_gadget *gadget)
200 return 0x25; 211 return 0x25;
201 else if (gadget_is_s3c_hsotg(gadget)) 212 else if (gadget_is_s3c_hsotg(gadget))
202 return 0x26; 213 return 0x26;
214 else if (gadget_is_pch(gadget))
215 return 0x27;
216 else if (gadget_is_ci13xxx_msm(gadget))
217 return 0x28;
203 return -ENOENT; 218 return -ENOENT;
204} 219}
205 220
diff --git a/drivers/usb/gadget/imx_udc.c b/drivers/usb/gadget/imx_udc.c
index ed0266462c57..1210534822d6 100644
--- a/drivers/usb/gadget/imx_udc.c
+++ b/drivers/usb/gadget/imx_udc.c
@@ -1191,13 +1191,17 @@ static irqreturn_t imx_udc_ctrl_irq(int irq, void *dev)
1191 return IRQ_HANDLED; 1191 return IRQ_HANDLED;
1192} 1192}
1193 1193
1194#ifndef MX1_INT_USBD0
1195#define MX1_INT_USBD0 MX1_USBD_INT0
1196#endif
1197
1194static irqreturn_t imx_udc_bulk_irq(int irq, void *dev) 1198static irqreturn_t imx_udc_bulk_irq(int irq, void *dev)
1195{ 1199{
1196 struct imx_udc_struct *imx_usb = dev; 1200 struct imx_udc_struct *imx_usb = dev;
1197 struct imx_ep_struct *imx_ep = &imx_usb->imx_ep[irq - USBD_INT0]; 1201 struct imx_ep_struct *imx_ep = &imx_usb->imx_ep[irq - MX1_INT_USBD0];
1198 int intr = __raw_readl(imx_usb->base + USB_EP_INTR(EP_NO(imx_ep))); 1202 int intr = __raw_readl(imx_usb->base + USB_EP_INTR(EP_NO(imx_ep)));
1199 1203
1200 dump_ep_intr(__func__, irq - USBD_INT0, intr, imx_usb->dev); 1204 dump_ep_intr(__func__, irq - MX1_INT_USBD0, intr, imx_usb->dev);
1201 1205
1202 if (!imx_usb->driver) { 1206 if (!imx_usb->driver) {
1203 __raw_writel(intr, imx_usb->base + USB_EP_INTR(EP_NO(imx_ep))); 1207 __raw_writel(intr, imx_usb->base + USB_EP_INTR(EP_NO(imx_ep)));
diff --git a/drivers/usb/gadget/imx_udc.h b/drivers/usb/gadget/imx_udc.h
index b48ad59603d1..7136c242b4ec 100644
--- a/drivers/usb/gadget/imx_udc.h
+++ b/drivers/usb/gadget/imx_udc.h
@@ -23,9 +23,6 @@
23/* Helper macros */ 23/* Helper macros */
24#define EP_NO(ep) ((ep->bEndpointAddress) & ~USB_DIR_IN) /* IN:1, OUT:0 */ 24#define EP_NO(ep) ((ep->bEndpointAddress) & ~USB_DIR_IN) /* IN:1, OUT:0 */
25#define EP_DIR(ep) ((ep->bEndpointAddress) & USB_DIR_IN ? 1 : 0) 25#define EP_DIR(ep) ((ep->bEndpointAddress) & USB_DIR_IN ? 1 : 0)
26#define irq_to_ep(irq) (((irq) >= USBD_INT0) || ((irq) <= USBD_INT6) \
27 ? ((irq) - USBD_INT0) : (USBD_INT6)) /*should not happen*/
28#define ep_to_irq(ep) (EP_NO((ep)) + USBD_INT0)
29#define IMX_USB_NB_EP 6 26#define IMX_USB_NB_EP 6
30 27
31/* Driver structures */ 28/* Driver structures */
diff --git a/drivers/usb/gadget/langwell_udc.c b/drivers/usb/gadget/langwell_udc.c
index b8ec954c0692..777972454e3e 100644
--- a/drivers/usb/gadget/langwell_udc.c
+++ b/drivers/usb/gadget/langwell_udc.c
@@ -2225,6 +2225,7 @@ static void handle_setup_packet(struct langwell_udc *dev,
2225 u16 wValue = le16_to_cpu(setup->wValue); 2225 u16 wValue = le16_to_cpu(setup->wValue);
2226 u16 wIndex = le16_to_cpu(setup->wIndex); 2226 u16 wIndex = le16_to_cpu(setup->wIndex);
2227 u16 wLength = le16_to_cpu(setup->wLength); 2227 u16 wLength = le16_to_cpu(setup->wLength);
2228 u32 portsc1;
2228 2229
2229 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__); 2230 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2230 2231
@@ -2313,6 +2314,28 @@ static void handle_setup_packet(struct langwell_udc *dev,
2313 dev->dev_status &= ~(1 << wValue); 2314 dev->dev_status &= ~(1 << wValue);
2314 } 2315 }
2315 break; 2316 break;
2317 case USB_DEVICE_TEST_MODE:
2318 dev_dbg(&dev->pdev->dev, "SETUP: TEST MODE\n");
2319 if ((wIndex & 0xff) ||
2320 (dev->gadget.speed != USB_SPEED_HIGH))
2321 ep0_stall(dev);
2322
2323 switch (wIndex >> 8) {
2324 case TEST_J:
2325 case TEST_K:
2326 case TEST_SE0_NAK:
2327 case TEST_PACKET:
2328 case TEST_FORCE_EN:
2329 if (prime_status_phase(dev, EP_DIR_IN))
2330 ep0_stall(dev);
2331 portsc1 = readl(&dev->op_regs->portsc1);
2332 portsc1 |= (wIndex & 0xf00) << 8;
2333 writel(portsc1, &dev->op_regs->portsc1);
2334 goto end;
2335 default:
2336 rc = -EOPNOTSUPP;
2337 }
2338 break;
2316 default: 2339 default:
2317 rc = -EOPNOTSUPP; 2340 rc = -EOPNOTSUPP;
2318 break; 2341 break;
diff --git a/drivers/usb/gadget/mass_storage.c b/drivers/usb/gadget/mass_storage.c
index 0769179dbdb0..01822422c3e8 100644
--- a/drivers/usb/gadget/mass_storage.c
+++ b/drivers/usb/gadget/mass_storage.c
@@ -102,7 +102,7 @@ static struct fsg_module_parameters mod_data = {
102}; 102};
103FSG_MODULE_PARAMETERS(/* no prefix */, mod_data); 103FSG_MODULE_PARAMETERS(/* no prefix */, mod_data);
104 104
105static unsigned long msg_registered = 0; 105static unsigned long msg_registered;
106static void msg_cleanup(void); 106static void msg_cleanup(void);
107 107
108static int msg_thread_exits(struct fsg_common *common) 108static int msg_thread_exits(struct fsg_common *common)
diff --git a/drivers/usb/gadget/mv_udc.h b/drivers/usb/gadget/mv_udc.h
new file mode 100644
index 000000000000..65f1f7c3bd4e
--- /dev/null
+++ b/drivers/usb/gadget/mv_udc.h
@@ -0,0 +1,294 @@
1
2#ifndef __MV_UDC_H
3#define __MV_UDC_H
4
5#define VUSBHS_MAX_PORTS 8
6
7#define DQH_ALIGNMENT 2048
8#define DTD_ALIGNMENT 64
9#define DMA_BOUNDARY 4096
10
11#define EP_DIR_IN 1
12#define EP_DIR_OUT 0
13
14#define DMA_ADDR_INVALID (~(dma_addr_t)0)
15
16#define EP0_MAX_PKT_SIZE 64
17/* ep0 transfer state */
18#define WAIT_FOR_SETUP 0
19#define DATA_STATE_XMIT 1
20#define DATA_STATE_NEED_ZLP 2
21#define WAIT_FOR_OUT_STATUS 3
22#define DATA_STATE_RECV 4
23
24#define CAPLENGTH_MASK (0xff)
25#define DCCPARAMS_DEN_MASK (0x1f)
26
27#define HCSPARAMS_PPC (0x10)
28
29/* Frame Index Register Bit Masks */
30#define USB_FRINDEX_MASKS 0x3fff
31
32/* Command Register Bit Masks */
33#define USBCMD_RUN_STOP (0x00000001)
34#define USBCMD_CTRL_RESET (0x00000002)
35#define USBCMD_SETUP_TRIPWIRE_SET (0x00002000)
36#define USBCMD_SETUP_TRIPWIRE_CLEAR (~USBCMD_SETUP_TRIPWIRE_SET)
37
38#define USBCMD_ATDTW_TRIPWIRE_SET (0x00004000)
39#define USBCMD_ATDTW_TRIPWIRE_CLEAR (~USBCMD_ATDTW_TRIPWIRE_SET)
40
41/* bit 15,3,2 are for frame list size */
42#define USBCMD_FRAME_SIZE_1024 (0x00000000) /* 000 */
43#define USBCMD_FRAME_SIZE_512 (0x00000004) /* 001 */
44#define USBCMD_FRAME_SIZE_256 (0x00000008) /* 010 */
45#define USBCMD_FRAME_SIZE_128 (0x0000000C) /* 011 */
46#define USBCMD_FRAME_SIZE_64 (0x00008000) /* 100 */
47#define USBCMD_FRAME_SIZE_32 (0x00008004) /* 101 */
48#define USBCMD_FRAME_SIZE_16 (0x00008008) /* 110 */
49#define USBCMD_FRAME_SIZE_8 (0x0000800C) /* 111 */
50
51#define EPCTRL_TX_ALL_MASK (0xFFFF0000)
52#define EPCTRL_RX_ALL_MASK (0x0000FFFF)
53
54#define EPCTRL_TX_DATA_TOGGLE_RST (0x00400000)
55#define EPCTRL_TX_EP_STALL (0x00010000)
56#define EPCTRL_RX_EP_STALL (0x00000001)
57#define EPCTRL_RX_DATA_TOGGLE_RST (0x00000040)
58#define EPCTRL_RX_ENABLE (0x00000080)
59#define EPCTRL_TX_ENABLE (0x00800000)
60#define EPCTRL_CONTROL (0x00000000)
61#define EPCTRL_ISOCHRONOUS (0x00040000)
62#define EPCTRL_BULK (0x00080000)
63#define EPCTRL_INT (0x000C0000)
64#define EPCTRL_TX_TYPE (0x000C0000)
65#define EPCTRL_RX_TYPE (0x0000000C)
66#define EPCTRL_DATA_TOGGLE_INHIBIT (0x00000020)
67#define EPCTRL_TX_EP_TYPE_SHIFT (18)
68#define EPCTRL_RX_EP_TYPE_SHIFT (2)
69
70#define EPCOMPLETE_MAX_ENDPOINTS (16)
71
72/* endpoint list address bit masks */
73#define USB_EP_LIST_ADDRESS_MASK 0xfffff800
74
75#define PORTSCX_W1C_BITS 0x2a
76#define PORTSCX_PORT_RESET 0x00000100
77#define PORTSCX_PORT_POWER 0x00001000
78#define PORTSCX_FORCE_FULL_SPEED_CONNECT 0x01000000
79#define PORTSCX_PAR_XCVR_SELECT 0xC0000000
80#define PORTSCX_PORT_FORCE_RESUME 0x00000040
81#define PORTSCX_PORT_SUSPEND 0x00000080
82#define PORTSCX_PORT_SPEED_FULL 0x00000000
83#define PORTSCX_PORT_SPEED_LOW 0x04000000
84#define PORTSCX_PORT_SPEED_HIGH 0x08000000
85#define PORTSCX_PORT_SPEED_MASK 0x0C000000
86
87/* USB MODE Register Bit Masks */
88#define USBMODE_CTRL_MODE_IDLE 0x00000000
89#define USBMODE_CTRL_MODE_DEVICE 0x00000002
90#define USBMODE_CTRL_MODE_HOST 0x00000003
91#define USBMODE_CTRL_MODE_RSV 0x00000001
92#define USBMODE_SETUP_LOCK_OFF 0x00000008
93#define USBMODE_STREAM_DISABLE 0x00000010
94
95/* USB STS Register Bit Masks */
96#define USBSTS_INT 0x00000001
97#define USBSTS_ERR 0x00000002
98#define USBSTS_PORT_CHANGE 0x00000004
99#define USBSTS_FRM_LST_ROLL 0x00000008
100#define USBSTS_SYS_ERR 0x00000010
101#define USBSTS_IAA 0x00000020
102#define USBSTS_RESET 0x00000040
103#define USBSTS_SOF 0x00000080
104#define USBSTS_SUSPEND 0x00000100
105#define USBSTS_HC_HALTED 0x00001000
106#define USBSTS_RCL 0x00002000
107#define USBSTS_PERIODIC_SCHEDULE 0x00004000
108#define USBSTS_ASYNC_SCHEDULE 0x00008000
109
110
111/* Interrupt Enable Register Bit Masks */
112#define USBINTR_INT_EN (0x00000001)
113#define USBINTR_ERR_INT_EN (0x00000002)
114#define USBINTR_PORT_CHANGE_DETECT_EN (0x00000004)
115
116#define USBINTR_ASYNC_ADV_AAE (0x00000020)
117#define USBINTR_ASYNC_ADV_AAE_ENABLE (0x00000020)
118#define USBINTR_ASYNC_ADV_AAE_DISABLE (0xFFFFFFDF)
119
120#define USBINTR_RESET_EN (0x00000040)
121#define USBINTR_SOF_UFRAME_EN (0x00000080)
122#define USBINTR_DEVICE_SUSPEND (0x00000100)
123
124#define USB_DEVICE_ADDRESS_MASK (0xfe000000)
125#define USB_DEVICE_ADDRESS_BIT_SHIFT (25)
126
127struct mv_cap_regs {
128 u32 caplength_hciversion;
129 u32 hcsparams; /* HC structural parameters */
130 u32 hccparams; /* HC Capability Parameters*/
131 u32 reserved[5];
132 u32 dciversion; /* DC version number and reserved 16 bits */
133 u32 dccparams; /* DC Capability Parameters */
134};
135
136struct mv_op_regs {
137 u32 usbcmd; /* Command register */
138 u32 usbsts; /* Status register */
139 u32 usbintr; /* Interrupt enable */
140 u32 frindex; /* Frame index */
141 u32 reserved1[1];
142 u32 deviceaddr; /* Device Address */
143 u32 eplistaddr; /* Endpoint List Address */
144 u32 ttctrl; /* HOST TT status and control */
145 u32 burstsize; /* Programmable Burst Size */
146 u32 txfilltuning; /* Host Transmit Pre-Buffer Packet Tuning */
147 u32 reserved[4];
148 u32 epnak; /* Endpoint NAK */
149 u32 epnaken; /* Endpoint NAK Enable */
150 u32 configflag; /* Configured Flag register */
151 u32 portsc[VUSBHS_MAX_PORTS]; /* Port Status/Control x, x = 1..8 */
152 u32 otgsc;
153 u32 usbmode; /* USB Host/Device mode */
154 u32 epsetupstat; /* Endpoint Setup Status */
155 u32 epprime; /* Endpoint Initialize */
156 u32 epflush; /* Endpoint De-initialize */
157 u32 epstatus; /* Endpoint Status */
158 u32 epcomplete; /* Endpoint Interrupt On Complete */
159 u32 epctrlx[16]; /* Endpoint Control, where x = 0.. 15 */
160 u32 mcr; /* Mux Control */
161 u32 isr; /* Interrupt Status */
162 u32 ier; /* Interrupt Enable */
163};
164
165struct mv_udc {
166 struct usb_gadget gadget;
167 struct usb_gadget_driver *driver;
168 spinlock_t lock;
169 struct completion *done;
170 struct platform_device *dev;
171 int irq;
172
173 struct mv_cap_regs __iomem *cap_regs;
174 struct mv_op_regs __iomem *op_regs;
175 unsigned int phy_regs;
176 unsigned int max_eps;
177 struct mv_dqh *ep_dqh;
178 size_t ep_dqh_size;
179 dma_addr_t ep_dqh_dma;
180
181 struct dma_pool *dtd_pool;
182 struct mv_ep *eps;
183
184 struct mv_dtd *dtd_head;
185 struct mv_dtd *dtd_tail;
186 unsigned int dtd_entries;
187
188 struct mv_req *status_req;
189 struct usb_ctrlrequest local_setup_buff;
190
191 unsigned int resume_state; /* USB state to resume */
192 unsigned int usb_state; /* USB current state */
193 unsigned int ep0_state; /* Endpoint zero state */
194 unsigned int ep0_dir;
195
196 unsigned int dev_addr;
197
198 int errors;
199 unsigned softconnect:1,
200 vbus_active:1,
201 remote_wakeup:1,
202 softconnected:1,
203 force_fs:1;
204 struct clk *clk;
205};
206
207/* endpoint data structure */
208struct mv_ep {
209 struct usb_ep ep;
210 struct mv_udc *udc;
211 struct list_head queue;
212 struct mv_dqh *dqh;
213 const struct usb_endpoint_descriptor *desc;
214 u32 direction;
215 char name[14];
216 unsigned stopped:1,
217 wedge:1,
218 ep_type:2,
219 ep_num:8;
220};
221
222/* request data structure */
223struct mv_req {
224 struct usb_request req;
225 struct mv_dtd *dtd, *head, *tail;
226 struct mv_ep *ep;
227 struct list_head queue;
228 unsigned dtd_count;
229 unsigned mapped:1;
230};
231
232#define EP_QUEUE_HEAD_MULT_POS 30
233#define EP_QUEUE_HEAD_ZLT_SEL 0x20000000
234#define EP_QUEUE_HEAD_MAX_PKT_LEN_POS 16
235#define EP_QUEUE_HEAD_MAX_PKT_LEN(ep_info) (((ep_info)>>16)&0x07ff)
236#define EP_QUEUE_HEAD_IOS 0x00008000
237#define EP_QUEUE_HEAD_NEXT_TERMINATE 0x00000001
238#define EP_QUEUE_HEAD_IOC 0x00008000
239#define EP_QUEUE_HEAD_MULTO 0x00000C00
240#define EP_QUEUE_HEAD_STATUS_HALT 0x00000040
241#define EP_QUEUE_HEAD_STATUS_ACTIVE 0x00000080
242#define EP_QUEUE_CURRENT_OFFSET_MASK 0x00000FFF
243#define EP_QUEUE_HEAD_NEXT_POINTER_MASK 0xFFFFFFE0
244#define EP_QUEUE_FRINDEX_MASK 0x000007FF
245#define EP_MAX_LENGTH_TRANSFER 0x4000
246
247struct mv_dqh {
248 /* Bits 16..26 Bit 15 is Interrupt On Setup */
249 u32 max_packet_length;
250 u32 curr_dtd_ptr; /* Current dTD Pointer */
251 u32 next_dtd_ptr; /* Next dTD Pointer */
252 /* Total bytes (16..30), IOC (15), INT (8), STS (0-7) */
253 u32 size_ioc_int_sts;
254 u32 buff_ptr0; /* Buffer pointer Page 0 (12-31) */
255 u32 buff_ptr1; /* Buffer pointer Page 1 (12-31) */
256 u32 buff_ptr2; /* Buffer pointer Page 2 (12-31) */
257 u32 buff_ptr3; /* Buffer pointer Page 3 (12-31) */
258 u32 buff_ptr4; /* Buffer pointer Page 4 (12-31) */
259 u32 reserved1;
260 /* 8 bytes of setup data that follows the Setup PID */
261 u8 setup_buffer[8];
262 u32 reserved2[4];
263};
264
265
266#define DTD_NEXT_TERMINATE (0x00000001)
267#define DTD_IOC (0x00008000)
268#define DTD_STATUS_ACTIVE (0x00000080)
269#define DTD_STATUS_HALTED (0x00000040)
270#define DTD_STATUS_DATA_BUFF_ERR (0x00000020)
271#define DTD_STATUS_TRANSACTION_ERR (0x00000008)
272#define DTD_RESERVED_FIELDS (0x00007F00)
273#define DTD_ERROR_MASK (0x68)
274#define DTD_ADDR_MASK (0xFFFFFFE0)
275#define DTD_PACKET_SIZE 0x7FFF0000
276#define DTD_LENGTH_BIT_POS (16)
277
278struct mv_dtd {
279 u32 dtd_next;
280 u32 size_ioc_sts;
281 u32 buff_ptr0; /* Buffer pointer Page 0 */
282 u32 buff_ptr1; /* Buffer pointer Page 1 */
283 u32 buff_ptr2; /* Buffer pointer Page 2 */
284 u32 buff_ptr3; /* Buffer pointer Page 3 */
285 u32 buff_ptr4; /* Buffer pointer Page 4 */
286 u32 scratch_ptr;
287 /* 32 bytes */
288 dma_addr_t td_dma; /* dma address for this td */
289 struct mv_dtd *next_dtd_virt;
290};
291
292extern int mv_udc_phy_init(unsigned int base);
293
294#endif
diff --git a/drivers/usb/gadget/mv_udc_core.c b/drivers/usb/gadget/mv_udc_core.c
new file mode 100644
index 000000000000..d5468a7f38e0
--- /dev/null
+++ b/drivers/usb/gadget/mv_udc_core.c
@@ -0,0 +1,2149 @@
1#include <linux/module.h>
2#include <linux/pci.h>
3#include <linux/dma-mapping.h>
4#include <linux/dmapool.h>
5#include <linux/kernel.h>
6#include <linux/delay.h>
7#include <linux/ioport.h>
8#include <linux/sched.h>
9#include <linux/slab.h>
10#include <linux/errno.h>
11#include <linux/init.h>
12#include <linux/timer.h>
13#include <linux/list.h>
14#include <linux/interrupt.h>
15#include <linux/moduleparam.h>
16#include <linux/device.h>
17#include <linux/usb/ch9.h>
18#include <linux/usb/gadget.h>
19#include <linux/usb/otg.h>
20#include <linux/pm.h>
21#include <linux/io.h>
22#include <linux/irq.h>
23#include <linux/platform_device.h>
24#include <linux/clk.h>
25#include <asm/system.h>
26#include <asm/unaligned.h>
27
28#include "mv_udc.h"
29
30#define DRIVER_DESC "Marvell PXA USB Device Controller driver"
31#define DRIVER_VERSION "8 Nov 2010"
32
33#define ep_dir(ep) (((ep)->ep_num == 0) ? \
34 ((ep)->udc->ep0_dir) : ((ep)->direction))
35
36/* timeout value -- usec */
37#define RESET_TIMEOUT 10000
38#define FLUSH_TIMEOUT 10000
39#define EPSTATUS_TIMEOUT 10000
40#define PRIME_TIMEOUT 10000
41#define READSAFE_TIMEOUT 1000
42#define DTD_TIMEOUT 1000
43
44#define LOOPS_USEC_SHIFT 4
45#define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
46#define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
47
48static const char driver_name[] = "mv_udc";
49static const char driver_desc[] = DRIVER_DESC;
50
51/* controller device global variable */
52static struct mv_udc *the_controller;
53int mv_usb_otgsc;
54
55static void nuke(struct mv_ep *ep, int status);
56
57/* for endpoint 0 operations */
58static const struct usb_endpoint_descriptor mv_ep0_desc = {
59 .bLength = USB_DT_ENDPOINT_SIZE,
60 .bDescriptorType = USB_DT_ENDPOINT,
61 .bEndpointAddress = 0,
62 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
63 .wMaxPacketSize = EP0_MAX_PKT_SIZE,
64};
65
66static void ep0_reset(struct mv_udc *udc)
67{
68 struct mv_ep *ep;
69 u32 epctrlx;
70 int i = 0;
71
72 /* ep0 in and out */
73 for (i = 0; i < 2; i++) {
74 ep = &udc->eps[i];
75 ep->udc = udc;
76
77 /* ep0 dQH */
78 ep->dqh = &udc->ep_dqh[i];
79
80 /* configure ep0 endpoint capabilities in dQH */
81 ep->dqh->max_packet_length =
82 (EP0_MAX_PKT_SIZE << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
83 | EP_QUEUE_HEAD_IOS;
84
85 epctrlx = readl(&udc->op_regs->epctrlx[0]);
86 if (i) { /* TX */
87 epctrlx |= EPCTRL_TX_ENABLE | EPCTRL_TX_DATA_TOGGLE_RST
88 | (USB_ENDPOINT_XFER_CONTROL
89 << EPCTRL_TX_EP_TYPE_SHIFT);
90
91 } else { /* RX */
92 epctrlx |= EPCTRL_RX_ENABLE | EPCTRL_RX_DATA_TOGGLE_RST
93 | (USB_ENDPOINT_XFER_CONTROL
94 << EPCTRL_RX_EP_TYPE_SHIFT);
95 }
96
97 writel(epctrlx, &udc->op_regs->epctrlx[0]);
98 }
99}
100
101/* protocol ep0 stall, will automatically be cleared on new transaction */
102static void ep0_stall(struct mv_udc *udc)
103{
104 u32 epctrlx;
105
106 /* set TX and RX to stall */
107 epctrlx = readl(&udc->op_regs->epctrlx[0]);
108 epctrlx |= EPCTRL_RX_EP_STALL | EPCTRL_TX_EP_STALL;
109 writel(epctrlx, &udc->op_regs->epctrlx[0]);
110
111 /* update ep0 state */
112 udc->ep0_state = WAIT_FOR_SETUP;
113 udc->ep0_dir = EP_DIR_OUT;
114}
115
116static int process_ep_req(struct mv_udc *udc, int index,
117 struct mv_req *curr_req)
118{
119 struct mv_dtd *curr_dtd;
120 struct mv_dqh *curr_dqh;
121 int td_complete, actual, remaining_length;
122 int i, direction;
123 int retval = 0;
124 u32 errors;
125
126 curr_dqh = &udc->ep_dqh[index];
127 direction = index % 2;
128
129 curr_dtd = curr_req->head;
130 td_complete = 0;
131 actual = curr_req->req.length;
132
133 for (i = 0; i < curr_req->dtd_count; i++) {
134 if (curr_dtd->size_ioc_sts & DTD_STATUS_ACTIVE) {
135 dev_dbg(&udc->dev->dev, "%s, dTD not completed\n",
136 udc->eps[index].name);
137 return 1;
138 }
139
140 errors = curr_dtd->size_ioc_sts & DTD_ERROR_MASK;
141 if (!errors) {
142 remaining_length +=
143 (curr_dtd->size_ioc_sts & DTD_PACKET_SIZE)
144 >> DTD_LENGTH_BIT_POS;
145 actual -= remaining_length;
146 } else {
147 dev_info(&udc->dev->dev,
148 "complete_tr error: ep=%d %s: error = 0x%x\n",
149 index >> 1, direction ? "SEND" : "RECV",
150 errors);
151 if (errors & DTD_STATUS_HALTED) {
152 /* Clear the errors and Halt condition */
153 curr_dqh->size_ioc_int_sts &= ~errors;
154 retval = -EPIPE;
155 } else if (errors & DTD_STATUS_DATA_BUFF_ERR) {
156 retval = -EPROTO;
157 } else if (errors & DTD_STATUS_TRANSACTION_ERR) {
158 retval = -EILSEQ;
159 }
160 }
161 if (i != curr_req->dtd_count - 1)
162 curr_dtd = (struct mv_dtd *)curr_dtd->next_dtd_virt;
163 }
164 if (retval)
165 return retval;
166
167 curr_req->req.actual = actual;
168
169 return 0;
170}
171
172/*
173 * done() - retire a request; caller blocked irqs
174 * @status : request status to be set, only works when
175 * request is still in progress.
176 */
177static void done(struct mv_ep *ep, struct mv_req *req, int status)
178{
179 struct mv_udc *udc = NULL;
180 unsigned char stopped = ep->stopped;
181 struct mv_dtd *curr_td, *next_td;
182 int j;
183
184 udc = (struct mv_udc *)ep->udc;
185 /* Removed the req from fsl_ep->queue */
186 list_del_init(&req->queue);
187
188 /* req.status should be set as -EINPROGRESS in ep_queue() */
189 if (req->req.status == -EINPROGRESS)
190 req->req.status = status;
191 else
192 status = req->req.status;
193
194 /* Free dtd for the request */
195 next_td = req->head;
196 for (j = 0; j < req->dtd_count; j++) {
197 curr_td = next_td;
198 if (j != req->dtd_count - 1)
199 next_td = curr_td->next_dtd_virt;
200 dma_pool_free(udc->dtd_pool, curr_td, curr_td->td_dma);
201 }
202
203 if (req->mapped) {
204 dma_unmap_single(ep->udc->gadget.dev.parent,
205 req->req.dma, req->req.length,
206 ((ep_dir(ep) == EP_DIR_IN) ?
207 DMA_TO_DEVICE : DMA_FROM_DEVICE));
208 req->req.dma = DMA_ADDR_INVALID;
209 req->mapped = 0;
210 } else
211 dma_sync_single_for_cpu(ep->udc->gadget.dev.parent,
212 req->req.dma, req->req.length,
213 ((ep_dir(ep) == EP_DIR_IN) ?
214 DMA_TO_DEVICE : DMA_FROM_DEVICE));
215
216 if (status && (status != -ESHUTDOWN))
217 dev_info(&udc->dev->dev, "complete %s req %p stat %d len %u/%u",
218 ep->ep.name, &req->req, status,
219 req->req.actual, req->req.length);
220
221 ep->stopped = 1;
222
223 spin_unlock(&ep->udc->lock);
224 /*
225 * complete() is from gadget layer,
226 * eg fsg->bulk_in_complete()
227 */
228 if (req->req.complete)
229 req->req.complete(&ep->ep, &req->req);
230
231 spin_lock(&ep->udc->lock);
232 ep->stopped = stopped;
233}
234
235static int queue_dtd(struct mv_ep *ep, struct mv_req *req)
236{
237 u32 tmp, epstatus, bit_pos, direction;
238 struct mv_udc *udc;
239 struct mv_dqh *dqh;
240 unsigned int loops;
241 int readsafe, retval = 0;
242
243 udc = ep->udc;
244 direction = ep_dir(ep);
245 dqh = &(udc->ep_dqh[ep->ep_num * 2 + direction]);
246 bit_pos = 1 << (((direction == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);
247
248 /* check if the pipe is empty */
249 if (!(list_empty(&ep->queue))) {
250 struct mv_req *lastreq;
251 lastreq = list_entry(ep->queue.prev, struct mv_req, queue);
252 lastreq->tail->dtd_next =
253 req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
254 if (readl(&udc->op_regs->epprime) & bit_pos) {
255 loops = LOOPS(PRIME_TIMEOUT);
256 while (readl(&udc->op_regs->epprime) & bit_pos) {
257 if (loops == 0) {
258 retval = -ETIME;
259 goto done;
260 }
261 udelay(LOOPS_USEC);
262 loops--;
263 }
264 if (readl(&udc->op_regs->epstatus) & bit_pos)
265 goto done;
266 }
267 readsafe = 0;
268 loops = LOOPS(READSAFE_TIMEOUT);
269 while (readsafe == 0) {
270 if (loops == 0) {
271 retval = -ETIME;
272 goto done;
273 }
274 /* start with setting the semaphores */
275 tmp = readl(&udc->op_regs->usbcmd);
276 tmp |= USBCMD_ATDTW_TRIPWIRE_SET;
277 writel(tmp, &udc->op_regs->usbcmd);
278
279 /* read the endpoint status */
280 epstatus = readl(&udc->op_regs->epstatus) & bit_pos;
281
282 /*
283 * Reread the ATDTW semaphore bit to check if it is
284 * cleared. When hardware see a hazard, it will clear
285 * the bit or else we remain set to 1 and we can
286 * proceed with priming of endpoint if not already
287 * primed.
288 */
289 if (readl(&udc->op_regs->usbcmd)
290 & USBCMD_ATDTW_TRIPWIRE_SET) {
291 readsafe = 1;
292 }
293 loops--;
294 udelay(LOOPS_USEC);
295 }
296
297 /* Clear the semaphore */
298 tmp = readl(&udc->op_regs->usbcmd);
299 tmp &= USBCMD_ATDTW_TRIPWIRE_CLEAR;
300 writel(tmp, &udc->op_regs->usbcmd);
301
302 /* If endpoint is not active, we activate it now. */
303 if (!epstatus) {
304 if (direction == EP_DIR_IN) {
305 struct mv_dtd *curr_dtd = dma_to_virt(
306 &udc->dev->dev, dqh->curr_dtd_ptr);
307
308 loops = LOOPS(DTD_TIMEOUT);
309 while (curr_dtd->size_ioc_sts
310 & DTD_STATUS_ACTIVE) {
311 if (loops == 0) {
312 retval = -ETIME;
313 goto done;
314 }
315 loops--;
316 udelay(LOOPS_USEC);
317 }
318 }
319 /* No other transfers on the queue */
320
321 /* Write dQH next pointer and terminate bit to 0 */
322 dqh->next_dtd_ptr = req->head->td_dma
323 & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
324 dqh->size_ioc_int_sts = 0;
325
326 /*
327 * Ensure that updates to the QH will
328 * occure before priming.
329 */
330 wmb();
331
332 /* Prime the Endpoint */
333 writel(bit_pos, &udc->op_regs->epprime);
334 }
335 } else {
336 /* Write dQH next pointer and terminate bit to 0 */
337 dqh->next_dtd_ptr = req->head->td_dma
338 & EP_QUEUE_HEAD_NEXT_POINTER_MASK;;
339 dqh->size_ioc_int_sts = 0;
340
341 /* Ensure that updates to the QH will occure before priming. */
342 wmb();
343
344 /* Prime the Endpoint */
345 writel(bit_pos, &udc->op_regs->epprime);
346
347 if (direction == EP_DIR_IN) {
348 /* FIXME add status check after prime the IN ep */
349 int prime_again;
350 u32 curr_dtd_ptr = dqh->curr_dtd_ptr;
351
352 loops = LOOPS(DTD_TIMEOUT);
353 prime_again = 0;
354 while ((curr_dtd_ptr != req->head->td_dma)) {
355 curr_dtd_ptr = dqh->curr_dtd_ptr;
356 if (loops == 0) {
357 dev_err(&udc->dev->dev,
358 "failed to prime %s\n",
359 ep->name);
360 retval = -ETIME;
361 goto done;
362 }
363 loops--;
364 udelay(LOOPS_USEC);
365
366 if (loops == (LOOPS(DTD_TIMEOUT) >> 2)) {
367 if (prime_again)
368 goto done;
369 dev_info(&udc->dev->dev,
370 "prime again\n");
371 writel(bit_pos,
372 &udc->op_regs->epprime);
373 prime_again = 1;
374 }
375 }
376 }
377 }
378done:
379 return retval;;
380}
381
382static struct mv_dtd *build_dtd(struct mv_req *req, unsigned *length,
383 dma_addr_t *dma, int *is_last)
384{
385 u32 temp;
386 struct mv_dtd *dtd;
387 struct mv_udc *udc;
388
389 /* how big will this transfer be? */
390 *length = min(req->req.length - req->req.actual,
391 (unsigned)EP_MAX_LENGTH_TRANSFER);
392
393 udc = req->ep->udc;
394
395 /*
396 * Be careful that no _GFP_HIGHMEM is set,
397 * or we can not use dma_to_virt
398 */
399 dtd = dma_pool_alloc(udc->dtd_pool, GFP_KERNEL, dma);
400 if (dtd == NULL)
401 return dtd;
402
403 dtd->td_dma = *dma;
404 /* initialize buffer page pointers */
405 temp = (u32)(req->req.dma + req->req.actual);
406 dtd->buff_ptr0 = cpu_to_le32(temp);
407 temp &= ~0xFFF;
408 dtd->buff_ptr1 = cpu_to_le32(temp + 0x1000);
409 dtd->buff_ptr2 = cpu_to_le32(temp + 0x2000);
410 dtd->buff_ptr3 = cpu_to_le32(temp + 0x3000);
411 dtd->buff_ptr4 = cpu_to_le32(temp + 0x4000);
412
413 req->req.actual += *length;
414
415 /* zlp is needed if req->req.zero is set */
416 if (req->req.zero) {
417 if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
418 *is_last = 1;
419 else
420 *is_last = 0;
421 } else if (req->req.length == req->req.actual)
422 *is_last = 1;
423 else
424 *is_last = 0;
425
426 /* Fill in the transfer size; set active bit */
427 temp = ((*length << DTD_LENGTH_BIT_POS) | DTD_STATUS_ACTIVE);
428
429 /* Enable interrupt for the last dtd of a request */
430 if (*is_last && !req->req.no_interrupt)
431 temp |= DTD_IOC;
432
433 dtd->size_ioc_sts = temp;
434
435 mb();
436
437 return dtd;
438}
439
440/* generate dTD linked list for a request */
441static int req_to_dtd(struct mv_req *req)
442{
443 unsigned count;
444 int is_last, is_first = 1;
445 struct mv_dtd *dtd, *last_dtd = NULL;
446 struct mv_udc *udc;
447 dma_addr_t dma;
448
449 udc = req->ep->udc;
450
451 do {
452 dtd = build_dtd(req, &count, &dma, &is_last);
453 if (dtd == NULL)
454 return -ENOMEM;
455
456 if (is_first) {
457 is_first = 0;
458 req->head = dtd;
459 } else {
460 last_dtd->dtd_next = dma;
461 last_dtd->next_dtd_virt = dtd;
462 }
463 last_dtd = dtd;
464 req->dtd_count++;
465 } while (!is_last);
466
467 /* set terminate bit to 1 for the last dTD */
468 dtd->dtd_next = DTD_NEXT_TERMINATE;
469
470 req->tail = dtd;
471
472 return 0;
473}
474
475static int mv_ep_enable(struct usb_ep *_ep,
476 const struct usb_endpoint_descriptor *desc)
477{
478 struct mv_udc *udc;
479 struct mv_ep *ep;
480 struct mv_dqh *dqh;
481 u16 max = 0;
482 u32 bit_pos, epctrlx, direction;
483 unsigned char zlt = 0, ios = 0, mult = 0;
484
485 ep = container_of(_ep, struct mv_ep, ep);
486 udc = ep->udc;
487
488 if (!_ep || !desc || ep->desc
489 || desc->bDescriptorType != USB_DT_ENDPOINT)
490 return -EINVAL;
491
492 if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
493 return -ESHUTDOWN;
494
495 direction = ep_dir(ep);
496 max = le16_to_cpu(desc->wMaxPacketSize);
497
498 /*
499 * disable HW zero length termination select
500 * driver handles zero length packet through req->req.zero
501 */
502 zlt = 1;
503
504 /* Get the endpoint queue head address */
505 dqh = (struct mv_dqh *)ep->dqh;
506
507 bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
508
509 /* Check if the Endpoint is Primed */
510 if ((readl(&udc->op_regs->epprime) & bit_pos)
511 || (readl(&udc->op_regs->epstatus) & bit_pos)) {
512 dev_info(&udc->dev->dev,
513 "ep=%d %s: Init ERROR: ENDPTPRIME=0x%x,"
514 " ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
515 (unsigned)ep->ep_num, direction ? "SEND" : "RECV",
516 (unsigned)readl(&udc->op_regs->epprime),
517 (unsigned)readl(&udc->op_regs->epstatus),
518 (unsigned)bit_pos);
519 goto en_done;
520 }
521 /* Set the max packet length, interrupt on Setup and Mult fields */
522 switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
523 case USB_ENDPOINT_XFER_BULK:
524 zlt = 1;
525 mult = 0;
526 break;
527 case USB_ENDPOINT_XFER_CONTROL:
528 ios = 1;
529 case USB_ENDPOINT_XFER_INT:
530 mult = 0;
531 break;
532 case USB_ENDPOINT_XFER_ISOC:
533 /* Calculate transactions needed for high bandwidth iso */
534 mult = (unsigned char)(1 + ((max >> 11) & 0x03));
535 max = max & 0x8ff; /* bit 0~10 */
536 /* 3 transactions at most */
537 if (mult > 3)
538 goto en_done;
539 break;
540 default:
541 goto en_done;
542 }
543 dqh->max_packet_length = (max << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
544 | (mult << EP_QUEUE_HEAD_MULT_POS)
545 | (zlt ? EP_QUEUE_HEAD_ZLT_SEL : 0)
546 | (ios ? EP_QUEUE_HEAD_IOS : 0);
547 dqh->next_dtd_ptr = 1;
548 dqh->size_ioc_int_sts = 0;
549
550 ep->ep.maxpacket = max;
551 ep->desc = desc;
552 ep->stopped = 0;
553
554 /* Enable the endpoint for Rx or Tx and set the endpoint type */
555 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
556 if (direction == EP_DIR_IN) {
557 epctrlx &= ~EPCTRL_TX_ALL_MASK;
558 epctrlx |= EPCTRL_TX_ENABLE | EPCTRL_TX_DATA_TOGGLE_RST
559 | ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
560 << EPCTRL_TX_EP_TYPE_SHIFT);
561 } else {
562 epctrlx &= ~EPCTRL_RX_ALL_MASK;
563 epctrlx |= EPCTRL_RX_ENABLE | EPCTRL_RX_DATA_TOGGLE_RST
564 | ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
565 << EPCTRL_RX_EP_TYPE_SHIFT);
566 }
567 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
568
569 /*
570 * Implement Guideline (GL# USB-7) The unused endpoint type must
571 * be programmed to bulk.
572 */
573 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
574 if ((epctrlx & EPCTRL_RX_ENABLE) == 0) {
575 epctrlx |= ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
576 << EPCTRL_RX_EP_TYPE_SHIFT);
577 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
578 }
579
580 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
581 if ((epctrlx & EPCTRL_TX_ENABLE) == 0) {
582 epctrlx |= ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
583 << EPCTRL_TX_EP_TYPE_SHIFT);
584 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
585 }
586
587 return 0;
588en_done:
589 return -EINVAL;
590}
591
592static int mv_ep_disable(struct usb_ep *_ep)
593{
594 struct mv_udc *udc;
595 struct mv_ep *ep;
596 struct mv_dqh *dqh;
597 u32 bit_pos, epctrlx, direction;
598
599 ep = container_of(_ep, struct mv_ep, ep);
600 if ((_ep == NULL) || !ep->desc)
601 return -EINVAL;
602
603 udc = ep->udc;
604
605 /* Get the endpoint queue head address */
606 dqh = ep->dqh;
607
608 direction = ep_dir(ep);
609 bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
610
611 /* Reset the max packet length and the interrupt on Setup */
612 dqh->max_packet_length = 0;
613
614 /* Disable the endpoint for Rx or Tx and reset the endpoint type */
615 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
616 epctrlx &= ~((direction == EP_DIR_IN)
617 ? (EPCTRL_TX_ENABLE | EPCTRL_TX_TYPE)
618 : (EPCTRL_RX_ENABLE | EPCTRL_RX_TYPE));
619 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
620
621 /* nuke all pending requests (does flush) */
622 nuke(ep, -ESHUTDOWN);
623
624 ep->desc = NULL;
625 ep->stopped = 1;
626 return 0;
627}
628
629static struct usb_request *
630mv_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
631{
632 struct mv_req *req = NULL;
633
634 req = kzalloc(sizeof *req, gfp_flags);
635 if (!req)
636 return NULL;
637
638 req->req.dma = DMA_ADDR_INVALID;
639 INIT_LIST_HEAD(&req->queue);
640
641 return &req->req;
642}
643
644static void mv_free_request(struct usb_ep *_ep, struct usb_request *_req)
645{
646 struct mv_req *req = NULL;
647
648 req = container_of(_req, struct mv_req, req);
649
650 if (_req)
651 kfree(req);
652}
653
/*
 * mv_ep_fifo_flush() - flush an endpoint's primed transfer from the
 * hardware.
 *
 * Writes the endpoint's bit to ENDPTFLUSH and polls until the flush
 * bit clears, then re-flushes for as long as ENDPTSTATUS still shows
 * the endpoint active (the controller may re-prime in between).  Both
 * loops are bounded; on expiry a dev_err() is logged and the function
 * gives up silently (void return).
 *
 * NOTE(review): _ep is dereferenced without a NULL/desc check, unlike
 * the other ops in this file -- confirm callers never pass NULL.
 */
static void mv_ep_fifo_flush(struct usb_ep *_ep)
{
	struct mv_udc *udc;
	u32 bit_pos, direction;
	struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
	unsigned int loops;

	udc = ep->udc;
	direction = ep_dir(ep);
	/* OUT endpoints live in bits 0-15, IN endpoints in bits 16-31 */
	bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
	/*
	 * Flushing will halt the pipe
	 * Write 1 to the Flush register
	 */
	writel(bit_pos, &udc->op_regs->epflush);

	/* Wait until flushing completed */
	loops = LOOPS(FLUSH_TIMEOUT);
	while (readl(&udc->op_regs->epflush) & bit_pos) {
		/*
		 * ENDPTFLUSH bit should be cleared to indicate this
		 * operation is complete
		 */
		if (loops == 0) {
			dev_err(&udc->dev->dev,
				"TIMEOUT for ENDPTFLUSH=0x%x, bit_pos=0x%x\n",
				(unsigned)readl(&udc->op_regs->epflush),
				(unsigned)bit_pos);
			return;
		}
		loops--;
		udelay(LOOPS_USEC);
	}
	/* endpoint may still be active; keep flushing until it is not */
	loops = LOOPS(EPSTATUS_TIMEOUT);
	while (readl(&udc->op_regs->epstatus) & bit_pos) {
		unsigned int inter_loops;

		if (loops == 0) {
			dev_err(&udc->dev->dev,
				"TIMEOUT for ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
				(unsigned)readl(&udc->op_regs->epstatus),
				(unsigned)bit_pos);
			return;
		}
		/* Write 1 to the Flush register */
		writel(bit_pos, &udc->op_regs->epflush);

		/* Wait until flushing completed */
		inter_loops = LOOPS(FLUSH_TIMEOUT);
		while (readl(&udc->op_regs->epflush) & bit_pos) {
			/*
			 * ENDPTFLUSH bit should be cleared to indicate this
			 * operation is complete
			 */
			if (inter_loops == 0) {
				dev_err(&udc->dev->dev,
					"TIMEOUT for ENDPTFLUSH=0x%x,"
					"bit_pos=0x%x\n",
					(unsigned)readl(&udc->op_regs->epflush),
					(unsigned)bit_pos);
				return;
			}
			inter_loops--;
			udelay(LOOPS_USEC);
		}
		loops--;
	}
}
722
723/* queues (submits) an I/O request to an endpoint */
724static int
725mv_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
726{
727 struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
728 struct mv_req *req = container_of(_req, struct mv_req, req);
729 struct mv_udc *udc = ep->udc;
730 unsigned long flags;
731
732 /* catch various bogus parameters */
733 if (!_req || !req->req.complete || !req->req.buf
734 || !list_empty(&req->queue)) {
735 dev_err(&udc->dev->dev, "%s, bad params", __func__);
736 return -EINVAL;
737 }
738 if (unlikely(!_ep || !ep->desc)) {
739 dev_err(&udc->dev->dev, "%s, bad ep", __func__);
740 return -EINVAL;
741 }
742 if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
743 if (req->req.length > ep->ep.maxpacket)
744 return -EMSGSIZE;
745 }
746
747 udc = ep->udc;
748 if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
749 return -ESHUTDOWN;
750
751 req->ep = ep;
752
753 /* map virtual address to hardware */
754 if (req->req.dma == DMA_ADDR_INVALID) {
755 req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
756 req->req.buf,
757 req->req.length, ep_dir(ep)
758 ? DMA_TO_DEVICE
759 : DMA_FROM_DEVICE);
760 req->mapped = 1;
761 } else {
762 dma_sync_single_for_device(ep->udc->gadget.dev.parent,
763 req->req.dma, req->req.length,
764 ep_dir(ep)
765 ? DMA_TO_DEVICE
766 : DMA_FROM_DEVICE);
767 req->mapped = 0;
768 }
769
770 req->req.status = -EINPROGRESS;
771 req->req.actual = 0;
772 req->dtd_count = 0;
773
774 spin_lock_irqsave(&udc->lock, flags);
775
776 /* build dtds and push them to device queue */
777 if (!req_to_dtd(req)) {
778 int retval;
779 retval = queue_dtd(ep, req);
780 if (retval) {
781 spin_unlock_irqrestore(&udc->lock, flags);
782 return retval;
783 }
784 } else {
785 spin_unlock_irqrestore(&udc->lock, flags);
786 return -ENOMEM;
787 }
788
789 /* Update ep0 state */
790 if (ep->ep_num == 0)
791 udc->ep0_state = DATA_STATE_XMIT;
792
793 /* irq handler advances the queue */
794 if (req != NULL)
795 list_add_tail(&req->queue, &ep->queue);
796 spin_unlock_irqrestore(&udc->lock, flags);
797
798 return 0;
799}
800
/*
 * mv_ep_dequeue() - cancel (unlink) a queued request.
 *
 * Temporarily disables the endpoint, unlinks the request either from
 * the hardware dQH (when it is at the head of the queue) or from its
 * predecessor's dTD chain, completes it with -ECONNRESET, then
 * re-enables the endpoint and restores the previous stopped state.
 *
 * Returns 0 on success, -EINVAL for bad arguments or when the request
 * is not on this endpoint's queue.
 */
static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
	struct mv_req *req;
	struct mv_udc *udc = ep->udc;
	unsigned long flags;
	int stopped, ret = 0;
	u32 epctrlx;

	if (!_ep || !_req)
		return -EINVAL;

	spin_lock_irqsave(&ep->udc->lock, flags);
	stopped = ep->stopped;

	/* Stop the ep before we deal with the queue */
	ep->stopped = 1;
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if (ep_dir(ep) == EP_DIR_IN)
		epctrlx &= ~EPCTRL_TX_ENABLE;
	else
		epctrlx &= ~EPCTRL_RX_ENABLE;
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);

	/* make sure it's actually queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	/* if the loop fell off the end, req points at the list head */
	if (&req->req != _req) {
		ret = -EINVAL;
		goto out;
	}

	/* The request is in progress, or completed but not dequeued */
	if (ep->queue.next == &req->queue) {
		_req->status = -ECONNRESET;
		mv_ep_fifo_flush(_ep);	/* flush current transfer */

		/* The request isn't the last request in this ep queue */
		if (req->queue.next != &ep->queue) {
			struct mv_dqh *qh;
			struct mv_req *next_req;

			qh = ep->dqh;
			next_req = list_entry(req->queue.next, struct mv_req,
					queue);

			/* Point the QH to the first TD of next request */
			writel((u32) next_req->head, &qh->curr_dtd_ptr);
		} else {
			/* queue becomes empty: terminate the dQH */
			struct mv_dqh *qh;

			qh = ep->dqh;
			qh->next_dtd_ptr = 1;
			qh->size_ioc_int_sts = 0;
		}

		/* The request hasn't been processed, patch up the TD chain */
	} else {
		struct mv_req *prev_req;

		/* predecessor's tail dTD now skips this request's chain */
		prev_req = list_entry(req->queue.prev, struct mv_req, queue);
		writel(readl(&req->tail->dtd_next),
				&prev_req->tail->dtd_next);

	}

	done(ep, req, -ECONNRESET);

	/* Enable EP */
out:
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if (ep_dir(ep) == EP_DIR_IN)
		epctrlx |= EPCTRL_TX_ENABLE;
	else
		epctrlx |= EPCTRL_RX_ENABLE;
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
	ep->stopped = stopped;

	spin_unlock_irqrestore(&ep->udc->lock, flags);
	return ret;
}
885
886static void ep_set_stall(struct mv_udc *udc, u8 ep_num, u8 direction, int stall)
887{
888 u32 epctrlx;
889
890 epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);
891
892 if (stall) {
893 if (direction == EP_DIR_IN)
894 epctrlx |= EPCTRL_TX_EP_STALL;
895 else
896 epctrlx |= EPCTRL_RX_EP_STALL;
897 } else {
898 if (direction == EP_DIR_IN) {
899 epctrlx &= ~EPCTRL_TX_EP_STALL;
900 epctrlx |= EPCTRL_TX_DATA_TOGGLE_RST;
901 } else {
902 epctrlx &= ~EPCTRL_RX_EP_STALL;
903 epctrlx |= EPCTRL_RX_DATA_TOGGLE_RST;
904 }
905 }
906 writel(epctrlx, &udc->op_regs->epctrlx[ep_num]);
907}
908
909static int ep_is_stall(struct mv_udc *udc, u8 ep_num, u8 direction)
910{
911 u32 epctrlx;
912
913 epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);
914
915 if (direction == EP_DIR_OUT)
916 return (epctrlx & EPCTRL_RX_EP_STALL) ? 1 : 0;
917 else
918 return (epctrlx & EPCTRL_TX_EP_STALL) ? 1 : 0;
919}
920
921static int mv_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
922{
923 struct mv_ep *ep;
924 unsigned long flags = 0;
925 int status = 0;
926 struct mv_udc *udc;
927
928 ep = container_of(_ep, struct mv_ep, ep);
929 udc = ep->udc;
930 if (!_ep || !ep->desc) {
931 status = -EINVAL;
932 goto out;
933 }
934
935 if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
936 status = -EOPNOTSUPP;
937 goto out;
938 }
939
940 /*
941 * Attempt to halt IN ep will fail if any transfer requests
942 * are still queue
943 */
944 if (halt && (ep_dir(ep) == EP_DIR_IN) && !list_empty(&ep->queue)) {
945 status = -EAGAIN;
946 goto out;
947 }
948
949 spin_lock_irqsave(&ep->udc->lock, flags);
950 ep_set_stall(udc, ep->ep_num, ep_dir(ep), halt);
951 if (halt && wedge)
952 ep->wedge = 1;
953 else if (!halt)
954 ep->wedge = 0;
955 spin_unlock_irqrestore(&ep->udc->lock, flags);
956
957 if (ep->ep_num == 0) {
958 udc->ep0_state = WAIT_FOR_SETUP;
959 udc->ep0_dir = EP_DIR_OUT;
960 }
961out:
962 return status;
963}
964
/* Halt (or clear the halt on) an endpoint without wedging it. */
static int mv_ep_set_halt(struct usb_ep *_ep, int halt)
{
	return mv_ep_set_halt_wedge(_ep, halt, 0);
}
969
/* Halt an endpoint and wedge it (stall survives CLEAR_FEATURE). */
static int mv_ep_set_wedge(struct usb_ep *_ep)
{
	return mv_ep_set_halt_wedge(_ep, 1, 1);
}
974
975static struct usb_ep_ops mv_ep_ops = {
976 .enable = mv_ep_enable,
977 .disable = mv_ep_disable,
978
979 .alloc_request = mv_alloc_request,
980 .free_request = mv_free_request,
981
982 .queue = mv_ep_queue,
983 .dequeue = mv_ep_dequeue,
984
985 .set_wedge = mv_ep_set_wedge,
986 .set_halt = mv_ep_set_halt,
987 .fifo_flush = mv_ep_fifo_flush, /* flush fifo */
988};
989
990static void udc_stop(struct mv_udc *udc)
991{
992 u32 tmp;
993
994 /* Disable interrupts */
995 tmp = readl(&udc->op_regs->usbintr);
996 tmp &= ~(USBINTR_INT_EN | USBINTR_ERR_INT_EN |
997 USBINTR_PORT_CHANGE_DETECT_EN | USBINTR_RESET_EN);
998 writel(tmp, &udc->op_regs->usbintr);
999
1000 /* Reset the Run the bit in the command register to stop VUSB */
1001 tmp = readl(&udc->op_regs->usbcmd);
1002 tmp &= ~USBCMD_RUN_STOP;
1003 writel(tmp, &udc->op_regs->usbcmd);
1004}
1005
1006static void udc_start(struct mv_udc *udc)
1007{
1008 u32 usbintr;
1009
1010 usbintr = USBINTR_INT_EN | USBINTR_ERR_INT_EN
1011 | USBINTR_PORT_CHANGE_DETECT_EN
1012 | USBINTR_RESET_EN | USBINTR_DEVICE_SUSPEND;
1013 /* Enable interrupts */
1014 writel(usbintr, &udc->op_regs->usbintr);
1015
1016 /* Set the Run bit in the command register */
1017 writel(USBCMD_RUN_STOP, &udc->op_regs->usbcmd);
1018}
1019
/*
 * udc_reset() - soft-reset the controller and configure it for device
 * mode (setup lockout off, streaming disabled), then program the
 * endpoint list address and clear any ep0 stall bits.
 *
 * Returns 0 on success, -ETIMEDOUT if the controller reset bit never
 * clears.
 */
static int udc_reset(struct mv_udc *udc)
{
	unsigned int loops;
	u32 tmp, portsc;

	/* Stop the controller */
	tmp = readl(&udc->op_regs->usbcmd);
	tmp &= ~USBCMD_RUN_STOP;
	writel(tmp, &udc->op_regs->usbcmd);

	/* Reset the controller to get default values */
	writel(USBCMD_CTRL_RESET, &udc->op_regs->usbcmd);

	/* wait for reset to complete */
	loops = LOOPS(RESET_TIMEOUT);
	while (readl(&udc->op_regs->usbcmd) & USBCMD_CTRL_RESET) {
		if (loops == 0) {
			dev_err(&udc->dev->dev,
				"Wait for RESET completed TIMEOUT\n");
			return -ETIMEDOUT;
		}
		loops--;
		udelay(LOOPS_USEC);
	}

	/* set controller to device mode */
	tmp = readl(&udc->op_regs->usbmode);
	tmp |= USBMODE_CTRL_MODE_DEVICE;

	/* turn setup lockout off, require setup tripwire in usbcmd */
	tmp |= USBMODE_SETUP_LOCK_OFF | USBMODE_STREAM_DISABLE;

	writel(tmp, &udc->op_regs->usbmode);

	writel(0x0, &udc->op_regs->epsetupstat);

	/* Configure the Endpoint List Address */
	writel(udc->ep_dqh_dma & USB_EP_LIST_ADDRESS_MASK,
		&udc->op_regs->eplistaddr);

	portsc = readl(&udc->op_regs->portsc[0]);
	/*
	 * NOTE(review): (~PORTSCX_W1C_BITS | ~PORTSCX_PORT_POWER) equals
	 * ~(PORTSCX_W1C_BITS & PORTSCX_PORT_POWER); if the two masks are
	 * disjoint that is all-ones, making this "&=" a no-op.  The
	 * intent was presumably ~(PORTSCX_W1C_BITS | PORTSCX_PORT_POWER)
	 * -- confirm against the controller manual before changing.
	 */
	if (readl(&udc->cap_regs->hcsparams) & HCSPARAMS_PPC)
		portsc &= (~PORTSCX_W1C_BITS | ~PORTSCX_PORT_POWER);

	if (udc->force_fs)
		portsc |= PORTSCX_FORCE_FULL_SPEED_CONNECT;
	else
		portsc &= (~PORTSCX_FORCE_FULL_SPEED_CONNECT);

	writel(portsc, &udc->op_regs->portsc[0]);

	/* clear any stall left on ep0 */
	tmp = readl(&udc->op_regs->epctrlx[0]);
	tmp &= ~(EPCTRL_TX_EP_STALL | EPCTRL_RX_EP_STALL);
	writel(tmp, &udc->op_regs->epctrlx[0]);

	return 0;
}
1077
1078static int mv_udc_get_frame(struct usb_gadget *gadget)
1079{
1080 struct mv_udc *udc;
1081 u16 retval;
1082
1083 if (!gadget)
1084 return -ENODEV;
1085
1086 udc = container_of(gadget, struct mv_udc, gadget);
1087
1088 retval = readl(udc->op_regs->frindex) & USB_FRINDEX_MASKS;
1089
1090 return retval;
1091}
1092
1093/* Tries to wake up the host connected to this gadget */
1094static int mv_udc_wakeup(struct usb_gadget *gadget)
1095{
1096 struct mv_udc *udc = container_of(gadget, struct mv_udc, gadget);
1097 u32 portsc;
1098
1099 /* Remote wakeup feature not enabled by host */
1100 if (!udc->remote_wakeup)
1101 return -ENOTSUPP;
1102
1103 portsc = readl(&udc->op_regs->portsc);
1104 /* not suspended? */
1105 if (!(portsc & PORTSCX_PORT_SUSPEND))
1106 return 0;
1107 /* trigger force resume */
1108 portsc |= PORTSCX_PORT_FORCE_RESUME;
1109 writel(portsc, &udc->op_regs->portsc[0]);
1110 return 0;
1111}
1112
1113static int mv_udc_pullup(struct usb_gadget *gadget, int is_on)
1114{
1115 struct mv_udc *udc;
1116 unsigned long flags;
1117
1118 udc = container_of(gadget, struct mv_udc, gadget);
1119 spin_lock_irqsave(&udc->lock, flags);
1120
1121 udc->softconnect = (is_on != 0);
1122 if (udc->driver && udc->softconnect)
1123 udc_start(udc);
1124 else
1125 udc_stop(udc);
1126
1127 spin_unlock_irqrestore(&udc->lock, flags);
1128 return 0;
1129}
1130
1131/* device controller usb_gadget_ops structure */
1132static const struct usb_gadget_ops mv_ops = {
1133
1134 /* returns the current frame number */
1135 .get_frame = mv_udc_get_frame,
1136
1137 /* tries to wake up the host connected to this gadget */
1138 .wakeup = mv_udc_wakeup,
1139
1140 /* D+ pullup, software-controlled connect/disconnect to USB host */
1141 .pullup = mv_udc_pullup,
1142};
1143
1144static void mv_udc_testmode(struct mv_udc *udc, u16 index, bool enter)
1145{
1146 dev_info(&udc->dev->dev, "Test Mode is not support yet\n");
1147}
1148
1149static int eps_init(struct mv_udc *udc)
1150{
1151 struct mv_ep *ep;
1152 char name[14];
1153 int i;
1154
1155 /* initialize ep0 */
1156 ep = &udc->eps[0];
1157 ep->udc = udc;
1158 strncpy(ep->name, "ep0", sizeof(ep->name));
1159 ep->ep.name = ep->name;
1160 ep->ep.ops = &mv_ep_ops;
1161 ep->wedge = 0;
1162 ep->stopped = 0;
1163 ep->ep.maxpacket = EP0_MAX_PKT_SIZE;
1164 ep->ep_num = 0;
1165 ep->desc = &mv_ep0_desc;
1166 INIT_LIST_HEAD(&ep->queue);
1167
1168 ep->ep_type = USB_ENDPOINT_XFER_CONTROL;
1169
1170 /* initialize other endpoints */
1171 for (i = 2; i < udc->max_eps * 2; i++) {
1172 ep = &udc->eps[i];
1173 if (i % 2) {
1174 snprintf(name, sizeof(name), "ep%din", i / 2);
1175 ep->direction = EP_DIR_IN;
1176 } else {
1177 snprintf(name, sizeof(name), "ep%dout", i / 2);
1178 ep->direction = EP_DIR_OUT;
1179 }
1180 ep->udc = udc;
1181 strncpy(ep->name, name, sizeof(ep->name));
1182 ep->ep.name = ep->name;
1183
1184 ep->ep.ops = &mv_ep_ops;
1185 ep->stopped = 0;
1186 ep->ep.maxpacket = (unsigned short) ~0;
1187 ep->ep_num = i / 2;
1188
1189 INIT_LIST_HEAD(&ep->queue);
1190 list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
1191
1192 ep->dqh = &udc->ep_dqh[i];
1193 }
1194
1195 return 0;
1196}
1197
1198/* delete all endpoint requests, called with spinlock held */
1199static void nuke(struct mv_ep *ep, int status)
1200{
1201 /* called with spinlock held */
1202 ep->stopped = 1;
1203
1204 /* endpoint fifo flush */
1205 mv_ep_fifo_flush(&ep->ep);
1206
1207 while (!list_empty(&ep->queue)) {
1208 struct mv_req *req = NULL;
1209 req = list_entry(ep->queue.next, struct mv_req, queue);
1210 done(ep, req, status);
1211 }
1212}
1213
1214/* stop all USB activities */
1215static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver)
1216{
1217 struct mv_ep *ep;
1218
1219 nuke(&udc->eps[0], -ESHUTDOWN);
1220
1221 list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
1222 nuke(ep, -ESHUTDOWN);
1223 }
1224
1225 /* report disconnect; the driver is already quiesced */
1226 if (driver) {
1227 spin_unlock(&udc->lock);
1228 driver->disconnect(&udc->gadget);
1229 spin_lock(&udc->lock);
1230 }
1231}
1232
/*
 * usb_gadget_probe_driver() - bind a gadget driver to this UDC.
 *
 * Hooks the driver up under udc->lock, then calls bind() (which may
 * sleep, so the lock is dropped first), and finally resets and starts
 * the controller.  Returns 0 on success, -ENODEV when no controller
 * has been probed, -EBUSY when a driver is already bound, or the
 * error returned by bind().
 */
int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
		int (*bind)(struct usb_gadget *))
{
	struct mv_udc *udc = the_controller;
	int retval = 0;
	unsigned long flags;

	if (!udc)
		return -ENODEV;

	if (udc->driver)
		return -EBUSY;

	spin_lock_irqsave(&udc->lock, flags);

	/* hook up the driver ... */
	driver->driver.bus = NULL;
	udc->driver = driver;
	udc->gadget.dev.driver = &driver->driver;

	udc->usb_state = USB_STATE_ATTACHED;
	udc->ep0_state = WAIT_FOR_SETUP;
	udc->ep0_dir = USB_DIR_OUT;

	spin_unlock_irqrestore(&udc->lock, flags);

	retval = bind(&udc->gadget);
	if (retval) {
		dev_err(&udc->dev->dev, "bind to driver %s --> %d\n",
				driver->driver.name, retval);
		/* NOTE(review): udc->driver is cleared without the lock
		 * here -- confirm no IRQ can observe the half-bound state */
		udc->driver = NULL;
		udc->gadget.dev.driver = NULL;
		return retval;
	}
	/* NOTE(review): udc_reset() can fail with -ETIMEDOUT but its
	 * return value is ignored -- consider propagating it */
	udc_reset(udc);
	ep0_reset(udc);
	udc_start(udc);

	return 0;
}
1273EXPORT_SYMBOL(usb_gadget_probe_driver);
1274
/*
 * usb_gadget_unregister_driver() - unbind a gadget driver.
 *
 * Stops the controller, nukes all endpoint activity (reporting a
 * disconnect to the driver), then calls its unbind() callback.
 * Returns 0 on success, -ENODEV when no controller has been probed.
 *
 * NOTE(review): the passed driver is not checked against udc->driver,
 * and driver->unbind is dereferenced unconditionally -- confirm the
 * gadget core guarantees a valid, currently-bound driver here.
 */
int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
{
	struct mv_udc *udc = the_controller;
	unsigned long flags;

	if (!udc)
		return -ENODEV;

	udc_stop(udc);

	spin_lock_irqsave(&udc->lock, flags);

	/* stop all usb activities */
	udc->gadget.speed = USB_SPEED_UNKNOWN;
	stop_activity(udc, driver);
	spin_unlock_irqrestore(&udc->lock, flags);

	/* unbind gadget driver */
	driver->unbind(&udc->gadget);
	udc->gadget.dev.driver = NULL;
	udc->driver = NULL;

	return 0;
}
1299EXPORT_SYMBOL(usb_gadget_unregister_driver);
1300
1301static int
1302udc_prime_status(struct mv_udc *udc, u8 direction, u16 status, bool empty)
1303{
1304 int retval = 0;
1305 struct mv_req *req;
1306 struct mv_ep *ep;
1307
1308 ep = &udc->eps[0];
1309 udc->ep0_dir = direction;
1310
1311 req = udc->status_req;
1312
1313 /* fill in the reqest structure */
1314 if (empty == false) {
1315 *((u16 *) req->req.buf) = cpu_to_le16(status);
1316 req->req.length = 2;
1317 } else
1318 req->req.length = 0;
1319
1320 req->ep = ep;
1321 req->req.status = -EINPROGRESS;
1322 req->req.actual = 0;
1323 req->req.complete = NULL;
1324 req->dtd_count = 0;
1325
1326 /* prime the data phase */
1327 if (!req_to_dtd(req))
1328 retval = queue_dtd(ep, req);
1329 else{ /* no mem */
1330 retval = -ENOMEM;
1331 goto out;
1332 }
1333
1334 if (retval) {
1335 dev_err(&udc->dev->dev, "response error on GET_STATUS request\n");
1336 goto out;
1337 }
1338
1339 list_add_tail(&req->queue, &ep->queue);
1340
1341 return 0;
1342out:
1343 return retval;
1344}
1345
1346static void ch9setaddress(struct mv_udc *udc, struct usb_ctrlrequest *setup)
1347{
1348 udc->dev_addr = (u8)setup->wValue;
1349
1350 /* update usb state */
1351 udc->usb_state = USB_STATE_ADDRESS;
1352
1353 if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1354 ep0_stall(udc);
1355}
1356
1357static void ch9getstatus(struct mv_udc *udc, u8 ep_num,
1358 struct usb_ctrlrequest *setup)
1359{
1360 u16 status;
1361 int retval;
1362
1363 if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
1364 != (USB_DIR_IN | USB_TYPE_STANDARD))
1365 return;
1366
1367 if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
1368 status = 1 << USB_DEVICE_SELF_POWERED;
1369 status |= udc->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
1370 } else if ((setup->bRequestType & USB_RECIP_MASK)
1371 == USB_RECIP_INTERFACE) {
1372 /* get interface status */
1373 status = 0;
1374 } else if ((setup->bRequestType & USB_RECIP_MASK)
1375 == USB_RECIP_ENDPOINT) {
1376 u8 ep_num, direction;
1377
1378 ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
1379 direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
1380 ? EP_DIR_IN : EP_DIR_OUT;
1381 status = ep_is_stall(udc, ep_num, direction)
1382 << USB_ENDPOINT_HALT;
1383 }
1384
1385 retval = udc_prime_status(udc, EP_DIR_IN, status, false);
1386 if (retval)
1387 ep0_stall(udc);
1388}
1389
1390static void ch9clearfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
1391{
1392 u8 ep_num;
1393 u8 direction;
1394 struct mv_ep *ep;
1395
1396 if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
1397 == ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
1398 switch (setup->wValue) {
1399 case USB_DEVICE_REMOTE_WAKEUP:
1400 udc->remote_wakeup = 0;
1401 break;
1402 case USB_DEVICE_TEST_MODE:
1403 mv_udc_testmode(udc, 0, false);
1404 break;
1405 default:
1406 goto out;
1407 }
1408 } else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
1409 == ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
1410 switch (setup->wValue) {
1411 case USB_ENDPOINT_HALT:
1412 ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
1413 direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
1414 ? EP_DIR_IN : EP_DIR_OUT;
1415 if (setup->wValue != 0 || setup->wLength != 0
1416 || ep_num > udc->max_eps)
1417 goto out;
1418 ep = &udc->eps[ep_num * 2 + direction];
1419 if (ep->wedge == 1)
1420 break;
1421 spin_unlock(&udc->lock);
1422 ep_set_stall(udc, ep_num, direction, 0);
1423 spin_lock(&udc->lock);
1424 break;
1425 default:
1426 goto out;
1427 }
1428 } else
1429 goto out;
1430
1431 if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1432 ep0_stall(udc);
1433 else
1434 udc->ep0_state = DATA_STATE_XMIT;
1435out:
1436 return;
1437}
1438
1439static void ch9setfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
1440{
1441 u8 ep_num;
1442 u8 direction;
1443
1444 if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
1445 == ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
1446 switch (setup->wValue) {
1447 case USB_DEVICE_REMOTE_WAKEUP:
1448 udc->remote_wakeup = 1;
1449 break;
1450 case USB_DEVICE_TEST_MODE:
1451 if (setup->wIndex & 0xFF
1452 && udc->gadget.speed != USB_SPEED_HIGH)
1453 goto out;
1454 if (udc->usb_state == USB_STATE_CONFIGURED
1455 || udc->usb_state == USB_STATE_ADDRESS
1456 || udc->usb_state == USB_STATE_DEFAULT)
1457 mv_udc_testmode(udc,
1458 setup->wIndex & 0xFF00, true);
1459 else
1460 goto out;
1461 break;
1462 default:
1463 goto out;
1464 }
1465 } else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
1466 == ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
1467 switch (setup->wValue) {
1468 case USB_ENDPOINT_HALT:
1469 ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
1470 direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
1471 ? EP_DIR_IN : EP_DIR_OUT;
1472 if (setup->wValue != 0 || setup->wLength != 0
1473 || ep_num > udc->max_eps)
1474 goto out;
1475 spin_unlock(&udc->lock);
1476 ep_set_stall(udc, ep_num, direction, 1);
1477 spin_lock(&udc->lock);
1478 break;
1479 default:
1480 goto out;
1481 }
1482 } else
1483 goto out;
1484
1485 if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1486 ep0_stall(udc);
1487out:
1488 return;
1489}
1490
/*
 * handle_setup_packet() - dispatch a SETUP packet received on ep0.
 *
 * Standard device requests the UDC must answer itself (GET_STATUS,
 * SET_ADDRESS, CLEAR/SET_FEATURE) are handled here; everything else
 * is delegated to the gadget driver's setup() callback with udc->lock
 * dropped around the call.  Called with udc->lock held.
 */
static void handle_setup_packet(struct mv_udc *udc, u8 ep_num,
	struct usb_ctrlrequest *setup)
{
	bool delegate = false;

	/* a new SETUP cancels whatever was pending on this control ep */
	nuke(&udc->eps[ep_num * 2 + EP_DIR_OUT], -ESHUTDOWN);

	dev_dbg(&udc->dev->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
			setup->bRequestType, setup->bRequest,
			setup->wValue, setup->wIndex, setup->wLength);
	/* We process some standard setup requests here */
	if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (setup->bRequest) {
		case USB_REQ_GET_STATUS:
			ch9getstatus(udc, ep_num, setup);
			break;

		case USB_REQ_SET_ADDRESS:
			ch9setaddress(udc, setup);
			break;

		case USB_REQ_CLEAR_FEATURE:
			ch9clearfeature(udc, setup);
			break;

		case USB_REQ_SET_FEATURE:
			ch9setfeature(udc, setup);
			break;

		default:
			delegate = true;
		}
	} else
		delegate = true;

	/* delegate USB standard requests to the gadget driver */
	if (delegate == true) {
		/* USB requests handled by gadget */
		if (setup->wLength) {
			/* DATA phase from gadget, STATUS phase from udc */
			udc->ep0_dir = (setup->bRequestType & USB_DIR_IN)
					? EP_DIR_IN : EP_DIR_OUT;
			/* setup() may sleep; drop the lock around it */
			spin_unlock(&udc->lock);
			if (udc->driver->setup(&udc->gadget,
				&udc->local_setup_buff) < 0)
				ep0_stall(udc);
			spin_lock(&udc->lock);
			udc->ep0_state = (setup->bRequestType & USB_DIR_IN)
					? DATA_STATE_XMIT : DATA_STATE_RECV;
		} else {
			/* no DATA phase, IN STATUS phase from gadget */
			udc->ep0_dir = EP_DIR_IN;
			spin_unlock(&udc->lock);
			if (udc->driver->setup(&udc->gadget,
				&udc->local_setup_buff) < 0)
				ep0_stall(udc);
			spin_lock(&udc->lock);
			udc->ep0_state = WAIT_FOR_OUT_STATUS;
		}
	}
}
1552
1553/* complete DATA or STATUS phase of ep0 prime status phase if needed */
1554static void ep0_req_complete(struct mv_udc *udc,
1555 struct mv_ep *ep0, struct mv_req *req)
1556{
1557 u32 new_addr;
1558
1559 if (udc->usb_state == USB_STATE_ADDRESS) {
1560 /* set the new address */
1561 new_addr = (u32)udc->dev_addr;
1562 writel(new_addr << USB_DEVICE_ADDRESS_BIT_SHIFT,
1563 &udc->op_regs->deviceaddr);
1564 }
1565
1566 done(ep0, req, 0);
1567
1568 switch (udc->ep0_state) {
1569 case DATA_STATE_XMIT:
1570 /* receive status phase */
1571 if (udc_prime_status(udc, EP_DIR_OUT, 0, true))
1572 ep0_stall(udc);
1573 break;
1574 case DATA_STATE_RECV:
1575 /* send status phase */
1576 if (udc_prime_status(udc, EP_DIR_IN, 0 , true))
1577 ep0_stall(udc);
1578 break;
1579 case WAIT_FOR_OUT_STATUS:
1580 udc->ep0_state = WAIT_FOR_SETUP;
1581 break;
1582 case WAIT_FOR_SETUP:
1583 dev_err(&udc->dev->dev, "unexpect ep0 packets\n");
1584 break;
1585 default:
1586 ep0_stall(udc);
1587 break;
1588 }
1589}
1590
/*
 * get_setup_data() - copy a SETUP packet out of an endpoint's queue
 * head into a local buffer.
 *
 * Uses the controller's setup-tripwire protocol: the tripwire bit is
 * set before the copy and re-checked afterwards; if the hardware
 * cleared it, a new SETUP packet arrived mid-copy and the copy is
 * retried.
 */
static void get_setup_data(struct mv_udc *udc, u8 ep_num, u8 *buffer_ptr)
{
	u32 temp;
	struct mv_dqh *dqh;

	dqh = &udc->ep_dqh[ep_num * 2 + EP_DIR_OUT];

	/* Clear bit in ENDPTSETUPSTAT */
	temp = readl(&udc->op_regs->epsetupstat);
	writel(temp | (1 << ep_num), &udc->op_regs->epsetupstat);

	/* while a hazard exists when setup package arrives */
	do {
		/* Set Setup Tripwire */
		temp = readl(&udc->op_regs->usbcmd);
		writel(temp | USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);

		/* Copy the setup packet to local buffer */
		memcpy(buffer_ptr, (u8 *) dqh->setup_buffer, 8);
	} while (!(readl(&udc->op_regs->usbcmd) & USBCMD_SETUP_TRIPWIRE_SET));

	/* Clear Setup Tripwire */
	temp = readl(&udc->op_regs->usbcmd);
	writel(temp & ~USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);
}
1616
/*
 * irq_process_tr_complete() - handle USB transaction completions.
 *
 * First drains any pending SETUP packets (they must be read ASAP),
 * then walks ENDPTCOMPLETE and retires every finished request on each
 * completed endpoint.  Called from the ISR with udc->lock held.
 */
static void irq_process_tr_complete(struct mv_udc *udc)
{
	u32 tmp, bit_pos;
	int i, ep_num = 0, direction = 0;
	struct mv_ep *curr_ep;
	struct mv_req *curr_req, *temp_req;
	int status;

	/*
	 * We use separate loops for ENDPTSETUPSTAT and ENDPTCOMPLETE
	 * because the setup packets are to be read ASAP
	 */

	/* Process all Setup packet received interrupts */
	tmp = readl(&udc->op_regs->epsetupstat);

	if (tmp) {
		for (i = 0; i < udc->max_eps; i++) {
			if (tmp & (1 << i)) {
				get_setup_data(udc, i,
					(u8 *)(&udc->local_setup_buff));
				handle_setup_packet(udc, i,
					&udc->local_setup_buff);
			}
		}
	}

	/* Don't clear the endpoint setup status register here.
	 * It is cleared as a setup packet is read out of the buffer
	 */

	/* Process non-setup transaction complete interrupts */
	tmp = readl(&udc->op_regs->epcomplete);

	if (!tmp)
		return;

	/* acknowledge (W1C) all completion bits we are about to handle */
	writel(tmp, &udc->op_regs->epcomplete);

	for (i = 0; i < udc->max_eps * 2; i++) {
		/* even i = OUT half, odd i = IN half of each endpoint */
		ep_num = i >> 1;
		direction = i % 2;

		bit_pos = 1 << (ep_num + 16 * direction);

		if (!(bit_pos & tmp))
			continue;

		/* i == 1 is ep0 IN; ep0 is a single mv_ep at index 0 */
		if (i == 1)
			curr_ep = &udc->eps[0];
		else
			curr_ep = &udc->eps[i];
		/* process the req queue until an uncomplete request */
		list_for_each_entry_safe(curr_req, temp_req,
			&curr_ep->queue, queue) {
			status = process_ep_req(udc, i, curr_req);
			if (status)
				break;

			/* write back status to req */
			curr_req->req.status = status;

			/* ep0 request completion */
			if (ep_num == 0) {
				ep0_req_complete(udc, curr_ep, curr_req);
				break;
			} else {
				done(curr_ep, curr_req, status);
			}
		}
	}
}
1689
/*
 * irq_process_reset() - handle a USB bus reset interrupt.
 *
 * Resets the device address and ep0 state, clears setup/complete
 * status, flushes all endpoints, and — depending on whether the port
 * is still in reset — either just stops activity or fully
 * re-initializes the controller.  Called from the ISR with udc->lock
 * held.
 */
void irq_process_reset(struct mv_udc *udc)
{
	u32 tmp;
	unsigned int loops;

	udc->ep0_dir = EP_DIR_OUT;
	udc->ep0_state = WAIT_FOR_SETUP;
	udc->remote_wakeup = 0;		/* default to 0 on reset */

	/* The address bits are past bit 25-31. Set the address */
	tmp = readl(&udc->op_regs->deviceaddr);
	tmp &= ~(USB_DEVICE_ADDRESS_MASK);
	writel(tmp, &udc->op_regs->deviceaddr);

	/* Clear all the setup token semaphores */
	tmp = readl(&udc->op_regs->epsetupstat);
	writel(tmp, &udc->op_regs->epsetupstat);

	/* Clear all the endpoint complete status bits */
	tmp = readl(&udc->op_regs->epcomplete);
	writel(tmp, &udc->op_regs->epcomplete);

	/* wait until all endptprime bits cleared */
	loops = LOOPS(PRIME_TIMEOUT);
	/* the "& 0xFFFFFFFF" mask is redundant but harmless */
	while (readl(&udc->op_regs->epprime) & 0xFFFFFFFF) {
		if (loops == 0) {
			dev_err(&udc->dev->dev,
				"Timeout for ENDPTPRIME = 0x%x\n",
				readl(&udc->op_regs->epprime));
			break;
		}
		loops--;
		udelay(LOOPS_USEC);
	}

	/* Write 1s to the Flush register */
	writel((u32)~0, &udc->op_regs->epflush);

	if (readl(&udc->op_regs->portsc[0]) & PORTSCX_PORT_RESET) {
		/* port still in reset: just quiesce and wait */
		dev_info(&udc->dev->dev, "usb bus reset\n");
		udc->usb_state = USB_STATE_DEFAULT;
		/* reset all the queues, stop all USB activities */
		stop_activity(udc, udc->driver);
	} else {
		/* (&portsc is the same address as &portsc[0]) */
		dev_info(&udc->dev->dev, "USB reset portsc 0x%x\n",
			readl(&udc->op_regs->portsc));

		/*
		 * re-initialize
		 * controller reset
		 */
		udc_reset(udc);

		/* reset all the queues, stop all USB activities */
		stop_activity(udc, udc->driver);

		/* reset ep0 dQH and endptctrl */
		ep0_reset(udc);

		/* enable interrupt and set controller to run state */
		udc_start(udc);

		udc->usb_state = USB_STATE_ATTACHED;
	}
}
1755
1756static void handle_bus_resume(struct mv_udc *udc)
1757{
1758 udc->usb_state = udc->resume_state;
1759 udc->resume_state = 0;
1760
1761 /* report resume to the driver */
1762 if (udc->driver) {
1763 if (udc->driver->resume) {
1764 spin_unlock(&udc->lock);
1765 udc->driver->resume(&udc->gadget);
1766 spin_lock(&udc->lock);
1767 }
1768 }
1769}
1770
1771static void irq_process_suspend(struct mv_udc *udc)
1772{
1773 udc->resume_state = udc->usb_state;
1774 udc->usb_state = USB_STATE_SUSPENDED;
1775
1776 if (udc->driver->suspend) {
1777 spin_unlock(&udc->lock);
1778 udc->driver->suspend(&udc->gadget);
1779 spin_lock(&udc->lock);
1780 }
1781}
1782
1783static void irq_process_port_change(struct mv_udc *udc)
1784{
1785 u32 portsc;
1786
1787 portsc = readl(&udc->op_regs->portsc[0]);
1788 if (!(portsc & PORTSCX_PORT_RESET)) {
1789 /* Get the speed */
1790 u32 speed = portsc & PORTSCX_PORT_SPEED_MASK;
1791 switch (speed) {
1792 case PORTSCX_PORT_SPEED_HIGH:
1793 udc->gadget.speed = USB_SPEED_HIGH;
1794 break;
1795 case PORTSCX_PORT_SPEED_FULL:
1796 udc->gadget.speed = USB_SPEED_FULL;
1797 break;
1798 case PORTSCX_PORT_SPEED_LOW:
1799 udc->gadget.speed = USB_SPEED_LOW;
1800 break;
1801 default:
1802 udc->gadget.speed = USB_SPEED_UNKNOWN;
1803 break;
1804 }
1805 }
1806
1807 if (portsc & PORTSCX_PORT_SUSPEND) {
1808 udc->resume_state = udc->usb_state;
1809 udc->usb_state = USB_STATE_SUSPENDED;
1810 if (udc->driver->suspend) {
1811 spin_unlock(&udc->lock);
1812 udc->driver->suspend(&udc->gadget);
1813 spin_lock(&udc->lock);
1814 }
1815 }
1816
1817 if (!(portsc & PORTSCX_PORT_SUSPEND)
1818 && udc->usb_state == USB_STATE_SUSPENDED) {
1819 handle_bus_resume(udc);
1820 }
1821
1822 if (!udc->resume_state)
1823 udc->usb_state = USB_STATE_DEFAULT;
1824}
1825
/*
 * Handle an error interrupt (USBSTS_ERR): just account for it; no
 * recovery action is taken here.
 */
static void irq_process_error(struct mv_udc *udc)
{
	/* Increment the error count */
	udc->errors++;
}
1831
1832static irqreturn_t mv_udc_irq(int irq, void *dev)
1833{
1834 struct mv_udc *udc = (struct mv_udc *)dev;
1835 u32 status, intr;
1836
1837 spin_lock(&udc->lock);
1838
1839 status = readl(&udc->op_regs->usbsts);
1840 intr = readl(&udc->op_regs->usbintr);
1841 status &= intr;
1842
1843 if (status == 0) {
1844 spin_unlock(&udc->lock);
1845 return IRQ_NONE;
1846 }
1847
1848 /* Clear all the interrupts occured */
1849 writel(status, &udc->op_regs->usbsts);
1850
1851 if (status & USBSTS_ERR)
1852 irq_process_error(udc);
1853
1854 if (status & USBSTS_RESET)
1855 irq_process_reset(udc);
1856
1857 if (status & USBSTS_PORT_CHANGE)
1858 irq_process_port_change(udc);
1859
1860 if (status & USBSTS_INT)
1861 irq_process_tr_complete(udc);
1862
1863 if (status & USBSTS_SUSPEND)
1864 irq_process_suspend(udc);
1865
1866 spin_unlock(&udc->lock);
1867
1868 return IRQ_HANDLED;
1869}
1870
/* release device structure */
static void gadget_release(struct device *_dev)
{
	/* the_controller is the driver singleton set up in mv_udc_probe() */
	struct mv_udc *udc = the_controller;

	/* unblock mv_udc_remove(), then free the controller structure */
	complete(udc->done);
	kfree(udc);
}
1879
1880static int mv_udc_remove(struct platform_device *dev)
1881{
1882 struct mv_udc *udc = the_controller;
1883
1884 DECLARE_COMPLETION(done);
1885
1886 udc->done = &done;
1887
1888 /* free memory allocated in probe */
1889 if (udc->dtd_pool)
1890 dma_pool_destroy(udc->dtd_pool);
1891
1892 if (udc->ep_dqh)
1893 dma_free_coherent(&dev->dev, udc->ep_dqh_size,
1894 udc->ep_dqh, udc->ep_dqh_dma);
1895
1896 kfree(udc->eps);
1897
1898 if (udc->irq)
1899 free_irq(udc->irq, &dev->dev);
1900
1901 if (udc->cap_regs)
1902 iounmap(udc->cap_regs);
1903 udc->cap_regs = NULL;
1904
1905 if (udc->phy_regs)
1906 iounmap((void *)udc->phy_regs);
1907 udc->phy_regs = 0;
1908
1909 if (udc->status_req) {
1910 kfree(udc->status_req->req.buf);
1911 kfree(udc->status_req);
1912 }
1913
1914 device_unregister(&udc->gadget.dev);
1915
1916 /* free dev, wait for the release() finished */
1917 wait_for_completion(&done);
1918
1919 the_controller = NULL;
1920
1921 return 0;
1922}
1923
1924int mv_udc_probe(struct platform_device *dev)
1925{
1926 struct mv_udc *udc;
1927 int retval = 0;
1928 struct resource *r;
1929 size_t size;
1930
1931 udc = kzalloc(sizeof *udc, GFP_KERNEL);
1932 if (udc == NULL) {
1933 dev_err(&dev->dev, "failed to allocate memory for udc\n");
1934 retval = -ENOMEM;
1935 goto error;
1936 }
1937
1938 spin_lock_init(&udc->lock);
1939
1940 udc->dev = dev;
1941
1942 udc->clk = clk_get(&dev->dev, "U2OCLK");
1943 if (IS_ERR(udc->clk)) {
1944 retval = PTR_ERR(udc->clk);
1945 goto error;
1946 }
1947
1948 r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "u2o");
1949 if (r == NULL) {
1950 dev_err(&dev->dev, "no I/O memory resource defined\n");
1951 retval = -ENODEV;
1952 goto error;
1953 }
1954
1955 udc->cap_regs = (struct mv_cap_regs __iomem *)
1956 ioremap(r->start, resource_size(r));
1957 if (udc->cap_regs == NULL) {
1958 dev_err(&dev->dev, "failed to map I/O memory\n");
1959 retval = -EBUSY;
1960 goto error;
1961 }
1962
1963 r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "u2ophy");
1964 if (r == NULL) {
1965 dev_err(&dev->dev, "no phy I/O memory resource defined\n");
1966 retval = -ENODEV;
1967 goto error;
1968 }
1969
1970 udc->phy_regs = (unsigned int)ioremap(r->start, resource_size(r));
1971 if (udc->phy_regs == 0) {
1972 dev_err(&dev->dev, "failed to map phy I/O memory\n");
1973 retval = -EBUSY;
1974 goto error;
1975 }
1976
1977 /* we will acces controller register, so enable the clk */
1978 clk_enable(udc->clk);
1979 retval = mv_udc_phy_init(udc->phy_regs);
1980 if (retval) {
1981 dev_err(&dev->dev, "phy initialization error %d\n", retval);
1982 goto error;
1983 }
1984
1985 udc->op_regs = (struct mv_op_regs __iomem *)((u32)udc->cap_regs
1986 + (readl(&udc->cap_regs->caplength_hciversion)
1987 & CAPLENGTH_MASK));
1988 udc->max_eps = readl(&udc->cap_regs->dccparams) & DCCPARAMS_DEN_MASK;
1989
1990 size = udc->max_eps * sizeof(struct mv_dqh) *2;
1991 size = (size + DQH_ALIGNMENT - 1) & ~(DQH_ALIGNMENT - 1);
1992 udc->ep_dqh = dma_alloc_coherent(&dev->dev, size,
1993 &udc->ep_dqh_dma, GFP_KERNEL);
1994
1995 if (udc->ep_dqh == NULL) {
1996 dev_err(&dev->dev, "allocate dQH memory failed\n");
1997 retval = -ENOMEM;
1998 goto error;
1999 }
2000 udc->ep_dqh_size = size;
2001
2002 /* create dTD dma_pool resource */
2003 udc->dtd_pool = dma_pool_create("mv_dtd",
2004 &dev->dev,
2005 sizeof(struct mv_dtd),
2006 DTD_ALIGNMENT,
2007 DMA_BOUNDARY);
2008
2009 if (!udc->dtd_pool) {
2010 retval = -ENOMEM;
2011 goto error;
2012 }
2013
2014 size = udc->max_eps * sizeof(struct mv_ep) *2;
2015 udc->eps = kzalloc(size, GFP_KERNEL);
2016 if (udc->eps == NULL) {
2017 dev_err(&dev->dev, "allocate ep memory failed\n");
2018 retval = -ENOMEM;
2019 goto error;
2020 }
2021
2022 /* initialize ep0 status request structure */
2023 udc->status_req = kzalloc(sizeof(struct mv_req), GFP_KERNEL);
2024 if (!udc->status_req) {
2025 dev_err(&dev->dev, "allocate status_req memory failed\n");
2026 retval = -ENOMEM;
2027 goto error;
2028 }
2029 INIT_LIST_HEAD(&udc->status_req->queue);
2030
2031 /* allocate a small amount of memory to get valid address */
2032 udc->status_req->req.buf = kzalloc(8, GFP_KERNEL);
2033 udc->status_req->req.dma = virt_to_phys(udc->status_req->req.buf);
2034
2035 udc->resume_state = USB_STATE_NOTATTACHED;
2036 udc->usb_state = USB_STATE_POWERED;
2037 udc->ep0_dir = EP_DIR_OUT;
2038 udc->remote_wakeup = 0;
2039
2040 r = platform_get_resource(udc->dev, IORESOURCE_IRQ, 0);
2041 if (r == NULL) {
2042 dev_err(&dev->dev, "no IRQ resource defined\n");
2043 retval = -ENODEV;
2044 goto error;
2045 }
2046 udc->irq = r->start;
2047 if (request_irq(udc->irq, mv_udc_irq,
2048 IRQF_DISABLED | IRQF_SHARED, driver_name, udc)) {
2049 dev_err(&dev->dev, "Request irq %d for UDC failed\n",
2050 udc->irq);
2051 retval = -ENODEV;
2052 goto error;
2053 }
2054
2055 /* initialize gadget structure */
2056 udc->gadget.ops = &mv_ops; /* usb_gadget_ops */
2057 udc->gadget.ep0 = &udc->eps[0].ep; /* gadget ep0 */
2058 INIT_LIST_HEAD(&udc->gadget.ep_list); /* ep_list */
2059 udc->gadget.speed = USB_SPEED_UNKNOWN; /* speed */
2060 udc->gadget.is_dualspeed = 1; /* support dual speed */
2061
2062 /* the "gadget" abstracts/virtualizes the controller */
2063 dev_set_name(&udc->gadget.dev, "gadget");
2064 udc->gadget.dev.parent = &dev->dev;
2065 udc->gadget.dev.dma_mask = dev->dev.dma_mask;
2066 udc->gadget.dev.release = gadget_release;
2067 udc->gadget.name = driver_name; /* gadget name */
2068
2069 retval = device_register(&udc->gadget.dev);
2070 if (retval)
2071 goto error;
2072
2073 eps_init(udc);
2074
2075 the_controller = udc;
2076
2077 goto out;
2078error:
2079 if (udc)
2080 mv_udc_remove(udc->dev);
2081out:
2082 return retval;
2083}
2084
2085#ifdef CONFIG_PM
2086static int mv_udc_suspend(struct platform_device *_dev, pm_message_t state)
2087{
2088 struct mv_udc *udc = the_controller;
2089
2090 udc_stop(udc);
2091
2092 return 0;
2093}
2094
2095static int mv_udc_resume(struct platform_device *_dev)
2096{
2097 struct mv_udc *udc = the_controller;
2098 int retval;
2099
2100 retval = mv_udc_phy_init(udc->phy_regs);
2101 if (retval) {
2102 dev_err(_dev, "phy initialization error %d\n", retval);
2103 goto error;
2104 }
2105 udc_reset(udc);
2106 ep0_reset(udc);
2107 udc_start(udc);
2108
2109 return 0;
2110}
2111
/* PM callbacks, wired into udc_driver.driver.pm under CONFIG_PM */
static const struct dev_pm_ops mv_udc_pm_ops = {
	.suspend	= mv_udc_suspend,
	.resume		= mv_udc_resume,
};
#endif
2117
2118static struct platform_driver udc_driver = {
2119 .probe = mv_udc_probe,
2120 .remove = __exit_p(mv_udc_remove),
2121 .driver = {
2122 .owner = THIS_MODULE,
2123 .name = "pxa-u2o",
2124#ifdef CONFIG_PM
2125 .pm = mv_udc_pm_ops,
2126#endif
2127 },
2128};
2129
2130
2131MODULE_DESCRIPTION(DRIVER_DESC);
2132MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>");
2133MODULE_VERSION(DRIVER_VERSION);
2134MODULE_LICENSE("GPL");
2135
2136
/* module entry point: register the pxa-u2o platform driver */
static int __init init(void)
{
	return platform_driver_register(&udc_driver);
}
module_init(init);
2142
2143
/* module exit point: unregister the pxa-u2o platform driver */
static void __exit cleanup(void)
{
	platform_driver_unregister(&udc_driver);
}
module_exit(cleanup);
2149
diff --git a/drivers/usb/gadget/mv_udc_phy.c b/drivers/usb/gadget/mv_udc_phy.c
new file mode 100644
index 000000000000..d4dea97e38a5
--- /dev/null
+++ b/drivers/usb/gadget/mv_udc_phy.c
@@ -0,0 +1,214 @@
1#include <linux/delay.h>
2#include <linux/timer.h>
3#include <linux/io.h>
4#include <linux/errno.h>
5
6#include <mach/cputype.h>
7
8#ifdef CONFIG_ARCH_MMP
9
10#define UTMI_REVISION 0x0
11#define UTMI_CTRL 0x4
12#define UTMI_PLL 0x8
13#define UTMI_TX 0xc
14#define UTMI_RX 0x10
15#define UTMI_IVREF 0x14
16#define UTMI_T0 0x18
17#define UTMI_T1 0x1c
18#define UTMI_T2 0x20
19#define UTMI_T3 0x24
20#define UTMI_T4 0x28
21#define UTMI_T5 0x2c
22#define UTMI_RESERVE 0x30
23#define UTMI_USB_INT 0x34
24#define UTMI_DBG_CTL 0x38
25#define UTMI_OTG_ADDON 0x3c
26
27/* For UTMICTRL Register */
28#define UTMI_CTRL_USB_CLK_EN (1 << 31)
29/* pxa168 */
30#define UTMI_CTRL_SUSPEND_SET1 (1 << 30)
31#define UTMI_CTRL_SUSPEND_SET2 (1 << 29)
32#define UTMI_CTRL_RXBUF_PDWN (1 << 24)
33#define UTMI_CTRL_TXBUF_PDWN (1 << 11)
34
35#define UTMI_CTRL_INPKT_DELAY_SHIFT 30
36#define UTMI_CTRL_INPKT_DELAY_SOF_SHIFT 28
37#define UTMI_CTRL_PU_REF_SHIFT 20
38#define UTMI_CTRL_ARC_PULLDN_SHIFT 12
39#define UTMI_CTRL_PLL_PWR_UP_SHIFT 1
40#define UTMI_CTRL_PWR_UP_SHIFT 0
41/* For UTMI_PLL Register */
42#define UTMI_PLL_CLK_BLK_EN_SHIFT 24
43#define UTMI_PLL_FBDIV_SHIFT 4
44#define UTMI_PLL_REFDIV_SHIFT 0
45#define UTMI_PLL_FBDIV_MASK 0x00000FF0
46#define UTMI_PLL_REFDIV_MASK 0x0000000F
47#define UTMI_PLL_ICP_MASK 0x00007000
48#define UTMI_PLL_KVCO_MASK 0x00031000
49#define UTMI_PLL_PLLCALI12_SHIFT 29
50#define UTMI_PLL_PLLCALI12_MASK (0x3 << 29)
51#define UTMI_PLL_PLLVDD18_SHIFT 27
52#define UTMI_PLL_PLLVDD18_MASK (0x3 << 27)
53#define UTMI_PLL_PLLVDD12_SHIFT 25
54#define UTMI_PLL_PLLVDD12_MASK (0x3 << 25)
55#define UTMI_PLL_KVCO_SHIFT 15
56#define UTMI_PLL_ICP_SHIFT 12
57/* For UTMI_TX Register */
58#define UTMI_TX_REG_EXT_FS_RCAL_SHIFT 27
59#define UTMI_TX_REG_EXT_FS_RCAL_MASK (0xf << 27)
60#define UTMI_TX_REG_EXT_FS_RCAL_EN_MASK 26
61#define UTMI_TX_REG_EXT_FS_RCAL_EN (0x1 << 26)
62#define UTMI_TX_LOW_VDD_EN_SHIFT 11
63#define UTMI_TX_IMPCAL_VTH_SHIFT 14
64#define UTMI_TX_IMPCAL_VTH_MASK (0x7 << 14)
65#define UTMI_TX_CK60_PHSEL_SHIFT 17
66#define UTMI_TX_CK60_PHSEL_MASK (0xf << 17)
67#define UTMI_TX_TXVDD12_SHIFT 22
68#define UTMI_TX_TXVDD12_MASK (0x3 << 22)
69#define UTMI_TX_AMP_SHIFT 0
70#define UTMI_TX_AMP_MASK (0x7 << 0)
71/* For UTMI_RX Register */
72#define UTMI_RX_SQ_THRESH_SHIFT 4
73#define UTMI_RX_SQ_THRESH_MASK (0xf << 4)
74#define UTMI_REG_SQ_LENGTH_SHIFT 15
75#define UTMI_REG_SQ_LENGTH_MASK (0x3 << 15)
76
77#define REG_RCAL_START 0x00001000
78#define VCOCAL_START 0x00200000
79#define KVCO_EXT 0x00400000
80#define PLL_READY 0x00800000
81#define CLK_BLK_EN 0x01000000
82#endif
83
/* Read a 32-bit PHY register at base + offset.
 * NOTE(review): base is an integer holding an ioremap()ed address;
 * readl() on an integer relies on implicit int-to-pointer conversion —
 * a void __iomem * would be cleaner. */
static unsigned int u2o_read(unsigned int base, unsigned int offset)
{
	return readl(base + offset);
}
88
/* Set the bits in @value in the PHY register at base + offset
 * (read-modify-write, then read back to post the write). */
static void u2o_set(unsigned int base, unsigned int offset, unsigned int value)
{
	writel(u2o_read(base, offset) | value, base + offset);
	readl(base + offset);
}
98
/* Clear the bits in @value in the PHY register at base + offset
 * (read-modify-write, then read back to post the write). */
static void u2o_clear(unsigned int base, unsigned int offset,
	unsigned int value)
{
	writel(u2o_read(base, offset) & ~value, base + offset);
	readl(base + offset);
}
109
/* Write @value to the PHY register at base + offset; the trailing read
 * back posts the write before returning. */
static void u2o_write(unsigned int base, unsigned int offset,
	unsigned int value)
{
	writel(value, base + offset);
	readl(base + offset);
}
116
117#ifdef CONFIG_ARCH_MMP
118int mv_udc_phy_init(unsigned int base)
119{
120 unsigned long timeout;
121
122 /* Initialize the USB PHY power */
123 if (cpu_is_pxa910()) {
124 u2o_set(base, UTMI_CTRL, (1 << UTMI_CTRL_INPKT_DELAY_SOF_SHIFT)
125 | (1 << UTMI_CTRL_PU_REF_SHIFT));
126 }
127
128 u2o_set(base, UTMI_CTRL, 1 << UTMI_CTRL_PLL_PWR_UP_SHIFT);
129 u2o_set(base, UTMI_CTRL, 1 << UTMI_CTRL_PWR_UP_SHIFT);
130
131 /* UTMI_PLL settings */
132 u2o_clear(base, UTMI_PLL, UTMI_PLL_PLLVDD18_MASK
133 | UTMI_PLL_PLLVDD12_MASK | UTMI_PLL_PLLCALI12_MASK
134 | UTMI_PLL_FBDIV_MASK | UTMI_PLL_REFDIV_MASK
135 | UTMI_PLL_ICP_MASK | UTMI_PLL_KVCO_MASK);
136
137 u2o_set(base, UTMI_PLL, (0xee << UTMI_PLL_FBDIV_SHIFT)
138 | (0xb << UTMI_PLL_REFDIV_SHIFT)
139 | (3 << UTMI_PLL_PLLVDD18_SHIFT)
140 | (3 << UTMI_PLL_PLLVDD12_SHIFT)
141 | (3 << UTMI_PLL_PLLCALI12_SHIFT)
142 | (1 << UTMI_PLL_ICP_SHIFT) | (3 << UTMI_PLL_KVCO_SHIFT));
143
144 /* UTMI_TX */
145 u2o_clear(base, UTMI_TX, UTMI_TX_REG_EXT_FS_RCAL_EN_MASK
146 | UTMI_TX_TXVDD12_MASK
147 | UTMI_TX_CK60_PHSEL_MASK | UTMI_TX_IMPCAL_VTH_MASK
148 | UTMI_TX_REG_EXT_FS_RCAL_MASK | UTMI_TX_AMP_MASK);
149 u2o_set(base, UTMI_TX, (3 << UTMI_TX_TXVDD12_SHIFT)
150 | (4 << UTMI_TX_CK60_PHSEL_SHIFT)
151 | (4 << UTMI_TX_IMPCAL_VTH_SHIFT)
152 | (8 << UTMI_TX_REG_EXT_FS_RCAL_SHIFT)
153 | (3 << UTMI_TX_AMP_SHIFT));
154
155 /* UTMI_RX */
156 u2o_clear(base, UTMI_RX, UTMI_RX_SQ_THRESH_MASK
157 | UTMI_REG_SQ_LENGTH_MASK);
158 if (cpu_is_pxa168())
159 u2o_set(base, UTMI_RX, (7 << UTMI_RX_SQ_THRESH_SHIFT)
160 | (2 << UTMI_REG_SQ_LENGTH_SHIFT));
161 else
162 u2o_set(base, UTMI_RX, (0x7 << UTMI_RX_SQ_THRESH_SHIFT)
163 | (2 << UTMI_REG_SQ_LENGTH_SHIFT));
164
165 /* UTMI_IVREF */
166 if (cpu_is_pxa168())
167 /*
168 * fixing Microsoft Altair board interface with NEC hub issue -
169 * Set UTMI_IVREF from 0x4a3 to 0x4bf
170 */
171 u2o_write(base, UTMI_IVREF, 0x4bf);
172
173 /* calibrate */
174 timeout = jiffies + 100;
175 while ((u2o_read(base, UTMI_PLL) & PLL_READY) == 0) {
176 if (time_after(jiffies, timeout))
177 return -ETIME;
178 cpu_relax();
179 }
180
181 /* toggle VCOCAL_START bit of UTMI_PLL */
182 udelay(200);
183 u2o_set(base, UTMI_PLL, VCOCAL_START);
184 udelay(40);
185 u2o_clear(base, UTMI_PLL, VCOCAL_START);
186
187 /* toggle REG_RCAL_START bit of UTMI_TX */
188 udelay(200);
189 u2o_set(base, UTMI_TX, REG_RCAL_START);
190 udelay(40);
191 u2o_clear(base, UTMI_TX, REG_RCAL_START);
192 udelay(200);
193
194 /* make sure phy is ready */
195 timeout = jiffies + 100;
196 while ((u2o_read(base, UTMI_PLL) & PLL_READY) == 0) {
197 if (time_after(jiffies, timeout))
198 return -ETIME;
199 cpu_relax();
200 }
201
202 if (cpu_is_pxa168()) {
203 u2o_set(base, UTMI_RESERVE, 1 << 5);
204 /* Turn on UTMI PHY OTG extension */
205 u2o_write(base, UTMI_OTG_ADDON, 1);
206 }
207 return 0;
208}
209#else
/* non-MMP build: no PHY programming needed, report success */
int mv_udc_phy_init(unsigned int base)
{
	return 0;
}
214#endif
diff --git a/drivers/usb/gadget/ncm.c b/drivers/usb/gadget/ncm.c
new file mode 100644
index 000000000000..99c179ad729d
--- /dev/null
+++ b/drivers/usb/gadget/ncm.c
@@ -0,0 +1,248 @@
1/*
2 * ncm.c -- NCM gadget driver
3 *
4 * Copyright (C) 2010 Nokia Corporation
5 * Contact: Yauheni Kaliuta <yauheni.kaliuta@nokia.com>
6 *
7 * The driver borrows from ether.c which is:
8 *
9 * Copyright (C) 2003-2005,2008 David Brownell
10 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
11 * Copyright (C) 2008 Nokia Corporation
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 */
27
28/* #define DEBUG */
29/* #define VERBOSE_DEBUG */
30
31#include <linux/kernel.h>
32#include <linux/utsname.h>
33
34
35#include "u_ether.h"
36
37#define DRIVER_DESC "NCM Gadget"
38
39/*-------------------------------------------------------------------------*/
40
41/*
42 * Kbuild is not very cooperative with respect to linking separately
43 * compiled library objects into one module. So for now we won't use
44 * separate compilation ... ensuring init/exit sections work to shrink
45 * the runtime footprint, and giving us at least some parts of what
46 * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
47 */
48#include "composite.c"
49#include "usbstring.c"
50#include "config.c"
51#include "epautoconf.c"
52
53#include "f_ncm.c"
54#include "u_ether.c"
55
56/*-------------------------------------------------------------------------*/
57
58/* DO NOT REUSE THESE IDs with a protocol-incompatible driver!! Ever!!
59 * Instead: allocate your own, using normal USB-IF procedures.
60 */
61
62/* Thanks to NetChip Technologies for donating this product ID.
63 * It's for devices with only CDC Ethernet configurations.
64 */
65#define CDC_VENDOR_NUM 0x0525 /* NetChip */
66#define CDC_PRODUCT_NUM 0xa4a1 /* Linux-USB Ethernet Gadget */
67
68/*-------------------------------------------------------------------------*/
69
70static struct usb_device_descriptor device_desc = {
71 .bLength = sizeof device_desc,
72 .bDescriptorType = USB_DT_DEVICE,
73
74 .bcdUSB = cpu_to_le16 (0x0200),
75
76 .bDeviceClass = USB_CLASS_COMM,
77 .bDeviceSubClass = 0,
78 .bDeviceProtocol = 0,
79 /* .bMaxPacketSize0 = f(hardware) */
80
81 /* Vendor and product id defaults change according to what configs
82 * we support. (As does bNumConfigurations.) These values can
83 * also be overridden by module parameters.
84 */
85 .idVendor = cpu_to_le16 (CDC_VENDOR_NUM),
86 .idProduct = cpu_to_le16 (CDC_PRODUCT_NUM),
87 /* .bcdDevice = f(hardware) */
88 /* .iManufacturer = DYNAMIC */
89 /* .iProduct = DYNAMIC */
90 /* NO SERIAL NUMBER */
91 .bNumConfigurations = 1,
92};
93
94static struct usb_otg_descriptor otg_descriptor = {
95 .bLength = sizeof otg_descriptor,
96 .bDescriptorType = USB_DT_OTG,
97
98 /* REVISIT SRP-only hardware is possible, although
99 * it would not be called "OTG" ...
100 */
101 .bmAttributes = USB_OTG_SRP | USB_OTG_HNP,
102};
103
104static const struct usb_descriptor_header *otg_desc[] = {
105 (struct usb_descriptor_header *) &otg_descriptor,
106 NULL,
107};
108
109
110/* string IDs are assigned dynamically */
111
112#define STRING_MANUFACTURER_IDX 0
113#define STRING_PRODUCT_IDX 1
114
115static char manufacturer[50];
116
117static struct usb_string strings_dev[] = {
118 [STRING_MANUFACTURER_IDX].s = manufacturer,
119 [STRING_PRODUCT_IDX].s = DRIVER_DESC,
120 { } /* end of list */
121};
122
123static struct usb_gadget_strings stringtab_dev = {
124 .language = 0x0409, /* en-us */
125 .strings = strings_dev,
126};
127
128static struct usb_gadget_strings *dev_strings[] = {
129 &stringtab_dev,
130 NULL,
131};
132
133static u8 hostaddr[ETH_ALEN];
134
135/*-------------------------------------------------------------------------*/
136
/*
 * Set up the single configuration: attach the OTG descriptor (and flag
 * remote-wakeup support) on OTG-capable controllers, then bind the NCM
 * function with the link-layer host address from gncm_bind().
 */
static int __init ncm_do_config(struct usb_configuration *c)
{
	/* FIXME alloc iConfiguration string, set it in c->strings */

	if (gadget_is_otg(c->cdev->gadget)) {
		c->descriptors = otg_desc;
		c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
	}

	return ncm_bind_config(c, hostaddr);
}
148
149static struct usb_configuration ncm_config_driver = {
150 /* .label = f(hardware) */
151 .label = "CDC Ethernet (NCM)",
152 .bConfigurationValue = 1,
153 /* .iConfiguration = DYNAMIC */
154 .bmAttributes = USB_CONFIG_ATT_SELFPOWER,
155};
156
157/*-------------------------------------------------------------------------*/
158
/*
 * Composite bind callback: set up the Ethernet link layer, derive
 * bcdDevice from the controller number, allocate the device string IDs,
 * and register the NCM configuration.  On any failure after
 * gether_setup() succeeds, the link layer is torn down again.
 */
static int __init gncm_bind(struct usb_composite_dev *cdev)
{
	int gcnum;
	struct usb_gadget *gadget = cdev->gadget;
	int status;

	/* set up network link layer */
	status = gether_setup(cdev->gadget, hostaddr);
	if (status < 0)
		return status;

	/* encode the controller in bcdDevice, 0x0099 when unrecognized */
	gcnum = usb_gadget_controller_number(gadget);
	if (gcnum >= 0)
		device_desc.bcdDevice = cpu_to_le16(0x0300 | gcnum);
	else {
		/* We assume that can_support_ecm() tells the truth;
		 * but if the controller isn't recognized at all then
		 * that assumption is a bit more likely to be wrong.
		 */
		dev_warn(&gadget->dev,
			 "controller '%s' not recognized; trying %s\n",
			 gadget->name,
			 ncm_config_driver.label);
		device_desc.bcdDevice =
			cpu_to_le16(0x0300 | 0x0099);
	}


	/* Allocate string descriptor numbers ... note that string
	 * contents can be overridden by the composite_dev glue.
	 */

	/* device descriptor strings: manufacturer, product */
	snprintf(manufacturer, sizeof manufacturer, "%s %s with %s",
		init_utsname()->sysname, init_utsname()->release,
		gadget->name);
	status = usb_string_id(cdev);
	if (status < 0)
		goto fail;
	strings_dev[STRING_MANUFACTURER_IDX].id = status;
	device_desc.iManufacturer = status;

	status = usb_string_id(cdev);
	if (status < 0)
		goto fail;
	strings_dev[STRING_PRODUCT_IDX].id = status;
	device_desc.iProduct = status;

	status = usb_add_config(cdev, &ncm_config_driver,
				ncm_do_config);
	if (status < 0)
		goto fail;

	dev_info(&gadget->dev, "%s\n", DRIVER_DESC);

	return 0;

fail:
	/* undo gether_setup() */
	gether_cleanup();
	return status;
}
220
/* Composite unbind callback: release the Ethernet link layer. */
static int __exit gncm_unbind(struct usb_composite_dev *cdev)
{
	gether_cleanup();
	return 0;
}
226
227static struct usb_composite_driver ncm_driver = {
228 .name = "g_ncm",
229 .dev = &device_desc,
230 .strings = dev_strings,
231 .unbind = __exit_p(gncm_unbind),
232};
233
234MODULE_DESCRIPTION(DRIVER_DESC);
235MODULE_AUTHOR("Yauheni Kaliuta");
236MODULE_LICENSE("GPL");
237
/* module entry point: register the g_ncm composite driver */
static int __init init(void)
{
	return usb_composite_probe(&ncm_driver, gncm_bind);
}
module_init(init);
243
/* module exit point: unregister the g_ncm composite driver */
static void __exit cleanup(void)
{
	usb_composite_unregister(&ncm_driver);
}
module_exit(cleanup);
diff --git a/drivers/usb/gadget/pch_udc.c b/drivers/usb/gadget/pch_udc.c
new file mode 100644
index 000000000000..0c8dd81dddca
--- /dev/null
+++ b/drivers/usb/gadget/pch_udc.c
@@ -0,0 +1,2947 @@
1/*
2 * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
16 */
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/pci.h>
21#include <linux/delay.h>
22#include <linux/errno.h>
23#include <linux/list.h>
24#include <linux/interrupt.h>
25#include <linux/usb/ch9.h>
26#include <linux/usb/gadget.h>
27
28/* Address offset of Registers */
29#define UDC_EP_REG_SHIFT 0x20 /* Offset to next EP */
30
31#define UDC_EPCTL_ADDR 0x00 /* Endpoint control */
32#define UDC_EPSTS_ADDR 0x04 /* Endpoint status */
33#define UDC_BUFIN_FRAMENUM_ADDR 0x08 /* buffer size in / frame number out */
34#define UDC_BUFOUT_MAXPKT_ADDR 0x0C /* buffer size out / maxpkt in */
35#define UDC_SUBPTR_ADDR 0x10 /* setup buffer pointer */
36#define UDC_DESPTR_ADDR 0x14 /* Data descriptor pointer */
37#define UDC_CONFIRM_ADDR 0x18 /* Write/Read confirmation */
38
39#define UDC_DEVCFG_ADDR 0x400 /* Device configuration */
40#define UDC_DEVCTL_ADDR 0x404 /* Device control */
41#define UDC_DEVSTS_ADDR 0x408 /* Device status */
42#define UDC_DEVIRQSTS_ADDR 0x40C /* Device irq status */
43#define UDC_DEVIRQMSK_ADDR 0x410 /* Device irq mask */
44#define UDC_EPIRQSTS_ADDR 0x414 /* Endpoint irq status */
45#define UDC_EPIRQMSK_ADDR 0x418 /* Endpoint irq mask */
46#define UDC_DEVLPM_ADDR 0x41C /* LPM control / status */
47#define UDC_CSR_BUSY_ADDR 0x4f0 /* UDC_CSR_BUSY Status register */
48#define UDC_SRST_ADDR 0x4fc /* SOFT RESET register */
49#define UDC_CSR_ADDR 0x500 /* USB_DEVICE endpoint register */
50
51/* Endpoint control register */
52/* Bit position */
53#define UDC_EPCTL_MRXFLUSH (1 << 12)
54#define UDC_EPCTL_RRDY (1 << 9)
55#define UDC_EPCTL_CNAK (1 << 8)
56#define UDC_EPCTL_SNAK (1 << 7)
57#define UDC_EPCTL_NAK (1 << 6)
58#define UDC_EPCTL_P (1 << 3)
59#define UDC_EPCTL_F (1 << 1)
60#define UDC_EPCTL_S (1 << 0)
61#define UDC_EPCTL_ET_SHIFT 4
62/* Mask patern */
63#define UDC_EPCTL_ET_MASK 0x00000030
64/* Value for ET field */
65#define UDC_EPCTL_ET_CONTROL 0
66#define UDC_EPCTL_ET_ISO 1
67#define UDC_EPCTL_ET_BULK 2
68#define UDC_EPCTL_ET_INTERRUPT 3
69
70/* Endpoint status register */
71/* Bit position */
72#define UDC_EPSTS_XFERDONE (1 << 27)
73#define UDC_EPSTS_RSS (1 << 26)
74#define UDC_EPSTS_RCS (1 << 25)
75#define UDC_EPSTS_TXEMPTY (1 << 24)
76#define UDC_EPSTS_TDC (1 << 10)
77#define UDC_EPSTS_HE (1 << 9)
78#define UDC_EPSTS_MRXFIFO_EMP (1 << 8)
79#define UDC_EPSTS_BNA (1 << 7)
80#define UDC_EPSTS_IN (1 << 6)
81#define UDC_EPSTS_OUT_SHIFT 4
82/* Mask patern */
83#define UDC_EPSTS_OUT_MASK 0x00000030
84#define UDC_EPSTS_ALL_CLR_MASK 0x1F0006F0
85/* Value for OUT field */
86#define UDC_EPSTS_OUT_SETUP 2
87#define UDC_EPSTS_OUT_DATA 1
88
89/* Device configuration register */
90/* Bit position */
91#define UDC_DEVCFG_CSR_PRG (1 << 17)
92#define UDC_DEVCFG_SP (1 << 3)
93/* SPD Valee */
94#define UDC_DEVCFG_SPD_HS 0x0
95#define UDC_DEVCFG_SPD_FS 0x1
96#define UDC_DEVCFG_SPD_LS 0x2
97
98/* Device control register */
99/* Bit position */
100#define UDC_DEVCTL_THLEN_SHIFT 24
101#define UDC_DEVCTL_BRLEN_SHIFT 16
102#define UDC_DEVCTL_CSR_DONE (1 << 13)
103#define UDC_DEVCTL_SD (1 << 10)
104#define UDC_DEVCTL_MODE (1 << 9)
105#define UDC_DEVCTL_BREN (1 << 8)
106#define UDC_DEVCTL_THE (1 << 7)
107#define UDC_DEVCTL_DU (1 << 4)
108#define UDC_DEVCTL_TDE (1 << 3)
109#define UDC_DEVCTL_RDE (1 << 2)
110#define UDC_DEVCTL_RES (1 << 0)
111
112/* Device status register */
113/* Bit position */
114#define UDC_DEVSTS_TS_SHIFT 18
115#define UDC_DEVSTS_ENUM_SPEED_SHIFT 13
116#define UDC_DEVSTS_ALT_SHIFT 8
117#define UDC_DEVSTS_INTF_SHIFT 4
118#define UDC_DEVSTS_CFG_SHIFT 0
119/* Mask patern */
120#define UDC_DEVSTS_TS_MASK 0xfffc0000
121#define UDC_DEVSTS_ENUM_SPEED_MASK 0x00006000
122#define UDC_DEVSTS_ALT_MASK 0x00000f00
123#define UDC_DEVSTS_INTF_MASK 0x000000f0
124#define UDC_DEVSTS_CFG_MASK 0x0000000f
125/* value for maximum speed for SPEED field */
126#define UDC_DEVSTS_ENUM_SPEED_FULL 1
127#define UDC_DEVSTS_ENUM_SPEED_HIGH 0
128#define UDC_DEVSTS_ENUM_SPEED_LOW 2
129#define UDC_DEVSTS_ENUM_SPEED_FULLX 3
130
131/* Device irq register */
132/* Bit position */
133#define UDC_DEVINT_RWKP (1 << 7)
/* Device irq bits (used for both UDC_DEVIRQSTS_ADDR and UDC_DEVIRQMSK_ADDR) */
#define UDC_DEVINT_ENUM (1 << 6)
#define UDC_DEVINT_SOF (1 << 5)
#define UDC_DEVINT_US (1 << 4)
#define UDC_DEVINT_UR (1 << 3)
#define UDC_DEVINT_ES (1 << 2)
#define UDC_DEVINT_SI (1 << 1)
#define UDC_DEVINT_SC (1 << 0)
/* Mask pattern (all seven device irq bits above) */
#define UDC_DEVINT_MSK 0x7f

/* Endpoint irq register */
/* Bit position: IN endpoints occupy the low 16 bits, OUT the high 16 */
#define UDC_EPINT_IN_SHIFT 0
#define UDC_EPINT_OUT_SHIFT 16
#define UDC_EPINT_IN_EP0 (1 << 0)
#define UDC_EPINT_OUT_EP0 (1 << 16)
/* Mask pattern */
#define UDC_EPINT_MSK_DISABLE_ALL 0xffffffff

/* UDC_CSR_BUSY Status register */
/* Bit position */
#define UDC_CSR_BUSY (1 << 0)

/* SOFT RESET register */
/* Bit position */
#define UDC_PSRST (1 << 1)	/* PHY soft reset (see pch_udc_init()) */
#define UDC_SRST (1 << 0)	/* controller soft reset */

/* USB_DEVICE endpoint register */
/* Bit position */
#define UDC_CSR_NE_NUM_SHIFT 0
#define UDC_CSR_NE_DIR_SHIFT 4
#define UDC_CSR_NE_TYPE_SHIFT 5
#define UDC_CSR_NE_CFG_SHIFT 7
#define UDC_CSR_NE_INTF_SHIFT 11
#define UDC_CSR_NE_ALT_SHIFT 15
#define UDC_CSR_NE_MAX_PKT_SHIFT 19
/* Mask pattern */
#define UDC_CSR_NE_NUM_MASK 0x0000000f
#define UDC_CSR_NE_DIR_MASK 0x00000010
#define UDC_CSR_NE_TYPE_MASK 0x00000060
#define UDC_CSR_NE_CFG_MASK 0x00000780
#define UDC_CSR_NE_INTF_MASK 0x00007800
#define UDC_CSR_NE_ALT_MASK 0x00078000
#define UDC_CSR_NE_MAX_PKT_MASK 0x3ff80000

/* Address of the config/status register for endpoint index "ep" */
#define PCH_UDC_CSR(ep) (UDC_CSR_ADDR + ep*4)
/* Irq bit for endpoint "num" in direction "in" (IN low half, OUT high half) */
#define PCH_UDC_EPINT(in, num)\
	(1 << (num + (in ? UDC_EPINT_IN_SHIFT : UDC_EPINT_OUT_SHIFT)))

/* Index of endpoint: even indices are IN, odd are OUT */
#define UDC_EP0IN_IDX 0
#define UDC_EP0OUT_IDX 1
#define UDC_EPIN_IDX(ep) (ep * 2)
#define UDC_EPOUT_IDX(ep) (ep * 2 + 1)
#define PCH_UDC_EP0 0
#define PCH_UDC_EP1 1
#define PCH_UDC_EP2 2
#define PCH_UDC_EP3 3

/* Number of endpoint */
#define PCH_UDC_EP_NUM 32 /* Total number of EPs (16 IN,16 OUT) */
#define PCH_UDC_USED_EP_NUM 4 /* EP number of EP's really used */
/* Length Value */
#define PCH_UDC_BRLEN 0x0F /* Burst length */
#define PCH_UDC_THLEN 0x1F /* Threshold length */
/* Value of EP Buffer Size */
#define UDC_EP0IN_BUFF_SIZE 64
#define UDC_EPIN_BUFF_SIZE 512
#define UDC_EP0OUT_BUFF_SIZE 64
#define UDC_EPOUT_BUFF_SIZE 512
/* Value of EP maximum packet size */
#define UDC_EP0IN_MAX_PKT_SIZE 64
#define UDC_EP0OUT_MAX_PKT_SIZE 64
#define UDC_BULK_MAX_PKT_SIZE 512

/* DMA */
#define DMA_DIR_RX 1 /* DMA for data receive */
#define DMA_DIR_TX 2 /* DMA for data transmit */
#define DMA_ADDR_INVALID (~(dma_addr_t)0)
#define UDC_DMA_MAXPACKET 65536 /* maximum packet size for DMA */
215
/**
 * struct pch_udc_data_dma_desc - Structure to hold DMA descriptor information
 *				  for data
 * @status:	Status quadlet (PCH_UDC_BUFF_STS / PCH_UDC_RXTX_STS /
 *		PCH_UDC_RXTX_BYTES fields)
 * @reserved:	Reserved
 * @dataptr:	Buffer descriptor (DMA address of the data buffer)
 * @next:	Next descriptor (DMA address; see pch_udc_free_dma_chain())
 */
struct pch_udc_data_dma_desc {
	u32 status;
	u32 reserved;
	u32 dataptr;
	u32 next;
};
230
231/**
232 * struct pch_udc_stp_dma_desc - Structure to hold DMA descriptor information
233 * for control data
234 * @status: Status
235 * @reserved: Reserved
236 * @data12: First setup word
237 * @data34: Second setup word
238 */
239struct pch_udc_stp_dma_desc {
240 u32 status;
241 u32 reserved;
242 struct usb_ctrlrequest request;
243} __attribute((packed));
244
/* DMA status definitions */
/* Buffer status (bits 31:30 of the descriptor status quadlet) */
#define PCH_UDC_BUFF_STS 0xC0000000
#define PCH_UDC_BS_HST_RDY 0x00000000
#define PCH_UDC_BS_DMA_BSY 0x40000000
#define PCH_UDC_BS_DMA_DONE 0x80000000
#define PCH_UDC_BS_HST_BSY 0xC0000000
/* Rx/Tx Status (bits 29:28) */
#define PCH_UDC_RXTX_STS 0x30000000
#define PCH_UDC_RTS_SUCC 0x00000000	/* success */
#define PCH_UDC_RTS_DESERR 0x10000000	/* descriptor error */
#define PCH_UDC_RTS_BUFERR 0x30000000	/* buffer error */
/* Last Descriptor Indication */
#define PCH_UDC_DMA_LAST 0x08000000
/* Number of Rx/Tx Bytes Mask (low 16 bits) */
#define PCH_UDC_RXTX_BYTES 0x0000ffff
261
/**
 * struct pch_udc_cfg_data - Structure to hold current configuration
 *			     and interface information
 * @cur_cfg:	current configuration in use
 * @cur_intf:	current interface in use
 * @cur_alt:	current alt interface in use
 *
 * These values are programmed into the UDC_CSR_NE_CFG/INTF/ALT fields by
 * pch_udc_ep_enable().
 */
struct pch_udc_cfg_data {
	u16 cur_cfg;
	u16 cur_intf;
	u16 cur_alt;
};
274
/**
 * struct pch_udc_ep - Structure holding a PCH USB device Endpoint information
 * @ep:			embedded ep request
 * @td_stp_phys:	DMA address of the setup (SETUP packet) descriptor
 * @td_data_phys:	DMA address of the data descriptor
 * @td_stp:		for setup request
 * @td_data:		for data request
 * @dev:		reference to device struct
 * @offset_addr:	offset address of ep register; added to dev->base_addr
 *			by the pch_udc_ep_readl()/pch_udc_ep_writel() accessors
 * @desc:		endpoint descriptor for this ep
 * @queue:		queue for requests (of struct pch_udc_request)
 * @num:		endpoint number
 * @in:			endpoint is IN
 * @halted:		endpoint halted?
 * @epsts:		Endpoint status
 */
struct pch_udc_ep {
	struct usb_ep ep;
	dma_addr_t td_stp_phys;
	dma_addr_t td_data_phys;
	struct pch_udc_stp_dma_desc *td_stp;
	struct pch_udc_data_dma_desc *td_data;
	struct pch_udc_dev *dev;
	unsigned long offset_addr;
	const struct usb_endpoint_descriptor *desc;
	struct list_head queue;
	unsigned num:5,
		in:1,
		halted:1;
	unsigned long epsts;
};
306
/**
 * struct pch_udc_dev - Structure holding complete information
 *			of the PCH USB device
 * @gadget:		gadget driver data
 * @driver:		reference to gadget driver bound
 * @pdev:		reference to the PCI device
 * @ep:			array of endpoints
 * @lock:		protects all state
 * @active:		enabled the PCI device
 * @stall:		stall requested
 * @prot_stall:		protocol stall requested
 * @irq_registered:	irq registered with system
 * @mem_region:		device memory mapped
 * @registered:		driver registered with system
 * @suspended:		driver in suspended state
 * @connected:		gadget driver associated
 * @set_cfg_not_acked:	pending acknowledgement for setup
 * @waiting_zlp_ack:	pending acknowledgement for ZLP
 * @data_requests:	DMA pool for data requests
 * @stp_requests:	DMA pool for setup requests
 * @dma_addr:		DMA pool for received
 * @ep0out_buf:		Buffer for DMA
 * @setup_data:		Received setup data
 * @phys_addr:		of device memory
 * @base_addr:		for mapped device memory
 * @irq:		IRQ line for the device
 * @cfg_data:		current cfg, intf, and alt in use
 */
struct pch_udc_dev {
	struct usb_gadget gadget;
	struct usb_gadget_driver *driver;
	struct pci_dev *pdev;
	struct pch_udc_ep ep[PCH_UDC_EP_NUM];
	spinlock_t lock; /* protects all state */
	unsigned active:1,
		stall:1,
		prot_stall:1,
		irq_registered:1,
		mem_region:1,
		registered:1,
		suspended:1,
		connected:1,
		set_cfg_not_acked:1,
		waiting_zlp_ack:1;
	struct pci_pool *data_requests;
	struct pci_pool *stp_requests;
	dma_addr_t dma_addr;
	unsigned long ep0out_buf[64];
	struct usb_ctrlrequest setup_data;
	unsigned long phys_addr;
	void __iomem *base_addr;
	unsigned irq;
	struct pch_udc_cfg_data cfg_data;
};
361
#define PCH_UDC_PCI_BAR 1	/* PCI BAR holding the UDC register window */
#define PCI_DEVICE_ID_INTEL_EG20T_UDC 0x8808

static const char ep0_string[] = "ep0in";
static DEFINE_SPINLOCK(udc_stall_spinlock); /* stall spin lock */
/* NOTE(review): not static — looks file-local; confirm no other translation
 * unit references this symbol before narrowing its linkage. */
struct pch_udc_dev *pch_udc; /* pointer to device object */
368
369static int speed_fs;
370module_param_named(speed_fs, speed_fs, bool, S_IRUGO);
371MODULE_PARM_DESC(speed_fs, "true for Full speed operation");
372
/**
 * struct pch_udc_request - Structure holding a PCH USB device request packet
 * @req:		embedded ep request
 * @td_data_phys:	phys. (DMA) address of the first descriptor
 * @td_data:		first dma desc. of chain
 * @td_data_last:	last dma desc. of chain
 * @queue:		associated queue (linked on pch_udc_ep.queue)
 * @dma_going:		DMA in progress for request
 * @dma_mapped:		DMA memory mapped for request
 * @dma_done:		DMA completed for request
 * @chain_len:		chain length (number of descriptors, including the
 *			first; see pch_udc_free_dma_chain())
 */
struct pch_udc_request {
	struct usb_request req;
	dma_addr_t td_data_phys;
	struct pch_udc_data_dma_desc *td_data;
	struct pch_udc_data_dma_desc *td_data_last;
	struct list_head queue;
	unsigned dma_going:1,
		dma_mapped:1,
		dma_done:1;
	unsigned chain_len;
};
396
/* Read a 32-bit device register at byte offset "reg" from the mapped base */
static inline u32 pch_udc_readl(struct pch_udc_dev *dev, unsigned long reg)
{
	return ioread32(dev->base_addr + reg);
}

/* Write a 32-bit device register at byte offset "reg" from the mapped base */
static inline void pch_udc_writel(struct pch_udc_dev *dev,
				  unsigned long val, unsigned long reg)
{
	iowrite32(val, dev->base_addr + reg);
}

/* Read-modify-write: set the bits of "bitmask" in device register "reg" */
static inline void pch_udc_bit_set(struct pch_udc_dev *dev,
				   unsigned long reg,
				   unsigned long bitmask)
{
	pch_udc_writel(dev, pch_udc_readl(dev, reg) | bitmask, reg);
}

/* Read-modify-write: clear the bits of "bitmask" in device register "reg" */
static inline void pch_udc_bit_clr(struct pch_udc_dev *dev,
				   unsigned long reg,
				   unsigned long bitmask)
{
	pch_udc_writel(dev, pch_udc_readl(dev, reg) & ~(bitmask), reg);
}
421
/* Read a 32-bit endpoint register: ep->offset_addr selects the ep's bank */
static inline u32 pch_udc_ep_readl(struct pch_udc_ep *ep, unsigned long reg)
{
	return ioread32(ep->dev->base_addr + ep->offset_addr + reg);
}

/* Write a 32-bit endpoint register in this ep's register bank */
static inline void pch_udc_ep_writel(struct pch_udc_ep *ep,
				     unsigned long val, unsigned long reg)
{
	iowrite32(val, ep->dev->base_addr + ep->offset_addr + reg);
}

/* Read-modify-write: set "bitmask" bits in endpoint register "reg" */
static inline void pch_udc_ep_bit_set(struct pch_udc_ep *ep,
				      unsigned long reg,
				      unsigned long bitmask)
{
	pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) | bitmask, reg);
}

/* Read-modify-write: clear "bitmask" bits in endpoint register "reg" */
static inline void pch_udc_ep_bit_clr(struct pch_udc_ep *ep,
				      unsigned long reg,
				      unsigned long bitmask)
{
	pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) & ~(bitmask), reg);
}
446
/**
 * pch_udc_csr_busy() - Wait till idle.
 * @dev:	Reference to pch_udc_dev structure
 *
 * Polls the UDC_CSR_BUSY bit up to 200 times (cpu_relax between polls —
 * busy-wait, never sleeps) and logs an error if it never clears.
 */
static void pch_udc_csr_busy(struct pch_udc_dev *dev)
{
	unsigned int count = 200;

	/* Wait till idle */
	while ((pch_udc_readl(dev, UDC_CSR_BUSY_ADDR) & UDC_CSR_BUSY)
		&& --count)
		cpu_relax();
	if (!count)
		dev_err(&dev->pdev->dev, "%s: wait error\n", __func__);
}
462
/**
 * pch_udc_write_csr() - Write the command and status registers.
 * @dev:	Reference to pch_udc_dev structure
 * @val:	value to be written to CSR register
 * @ep:		index of the endpoint CSR to write (see PCH_UDC_CSR());
 *		the old kernel-doc wrongly documented this as "@addr"
 */
static void pch_udc_write_csr(struct pch_udc_dev *dev, unsigned long val,
			       unsigned int ep)
{
	unsigned long reg = PCH_UDC_CSR(ep);

	pch_udc_csr_busy(dev);		/* Wait till idle */
	pch_udc_writel(dev, val, reg);
	pch_udc_csr_busy(dev);		/* Wait till idle */
}
478
/**
 * pch_udc_read_csr() - Read the command and status registers.
 * @dev:	Reference to pch_udc_dev structure
 * @ep:		index of the endpoint CSR to read (see PCH_UDC_CSR());
 *		the old kernel-doc wrongly documented this as "@addr"
 *
 * Return codes: content of CSR register
 */
static u32 pch_udc_read_csr(struct pch_udc_dev *dev, unsigned int ep)
{
	unsigned long reg = PCH_UDC_CSR(ep);

	pch_udc_csr_busy(dev);		/* Wait till idle */
	pch_udc_readl(dev, reg);	/* Dummy read */
	pch_udc_csr_busy(dev);		/* Wait till idle */
	return pch_udc_readl(dev, reg);
}
495
/**
 * pch_udc_rmt_wakeup() - Initiate for remote wakeup
 * @dev:	Reference to pch_udc_dev structure
 *
 * Pulses the RES bit in the device control register for ~1 ms
 * (uses mdelay — caller may be in atomic context).
 */
static inline void pch_udc_rmt_wakeup(struct pch_udc_dev *dev)
{
	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
	mdelay(1);
	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
}
506
/**
 * pch_udc_get_frame() - Get the current frame from device status register
 * @dev:	Reference to pch_udc_dev structure
 *
 * Return: current frame number (TS field of the device status register)
 */
static inline int pch_udc_get_frame(struct pch_udc_dev *dev)
{
	u32 frame = pch_udc_readl(dev, UDC_DEVSTS_ADDR);
	return (frame & UDC_DEVSTS_TS_MASK) >> UDC_DEVSTS_TS_SHIFT;
}
517
/**
 * pch_udc_clear_selfpowered() - Clear the self power control
 * @dev:	Reference to pch_udc_regs structure
 *
 * Clears the SP bit in the device configuration register.
 */
static inline void pch_udc_clear_selfpowered(struct pch_udc_dev *dev)
{
	pch_udc_bit_clr(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
}

/**
 * pch_udc_set_selfpowered() - Set the self power control
 * @dev:	Reference to pch_udc_regs structure
 *
 * Sets the SP bit in the device configuration register.
 */
static inline void pch_udc_set_selfpowered(struct pch_udc_dev *dev)
{
	pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
}
535
/**
 * pch_udc_set_disconnect() - Set the disconnect status.
 * @dev:	Reference to pch_udc_regs structure
 */
static inline void pch_udc_set_disconnect(struct pch_udc_dev *dev)
{
	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
}

/**
 * pch_udc_clear_disconnect() - Clear the disconnect status.
 * @dev:	Reference to pch_udc_regs structure
 *
 * Order matters: RES is asserted before SD is dropped, held for 1 ms, and
 * only then released to resume USB signalling.
 */
static void pch_udc_clear_disconnect(struct pch_udc_dev *dev)
{
	/* Clear the disconnect */
	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
	mdelay(1);
	/* Resume USB signalling */
	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
}
558
/**
 * pch_udc_vbus_session() - Set or clear the disconnect status.
 * @dev:	Reference to pch_udc_regs structure
 * @is_active:	Parameter specifying the action
 *		  0: indicating VBUS power is ending
 *		  !0: indicating VBUS power is starting
 */
static inline void pch_udc_vbus_session(struct pch_udc_dev *dev,
					int is_active)
{
	if (!is_active) {
		/* VBUS going away: put the device in the disconnected state */
		pch_udc_set_disconnect(dev);
		return;
	}
	/* VBUS present: reconnect to the host */
	pch_udc_clear_disconnect(dev);
}
574
/**
 * pch_udc_ep_set_stall() - Set the stall of endpoint
 * @ep:	Reference to structure of type pch_udc_ep_regs
 *
 * For an IN endpoint the FIFO is flushed (F bit) before the stall (S bit)
 * is set; an OUT endpoint only gets the stall bit.
 */
static void pch_udc_ep_set_stall(struct pch_udc_ep *ep)
{
	if (ep->in) {
		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
	} else {
		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
	}
}
588
/**
 * pch_udc_ep_clear_stall() - Clear the stall of endpoint
 * @ep:	Reference to structure of type pch_udc_ep_regs
 */
static inline void pch_udc_ep_clear_stall(struct pch_udc_ep *ep)
{
	/* Clear the stall */
	pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
	/* Clear NAK by writing CNAK */
	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
}
600
/**
 * pch_udc_ep_set_trfr_type() - Set the transfer type of endpoint
 * @ep:	Reference to structure of type pch_udc_ep_regs
 * @type:	Type of endpoint (usb descriptor bmAttributes value)
 *
 * Plain write, not read-modify-write: only the ET field is programmed and
 * every other EPCTL bit is cleared.
 */
static inline void pch_udc_ep_set_trfr_type(struct pch_udc_ep *ep,
					u8 type)
{
	pch_udc_ep_writel(ep, ((type << UDC_EPCTL_ET_SHIFT) &
				UDC_EPCTL_ET_MASK), UDC_EPCTL_ADDR);
}
612
613/**
614 * pch_udc_ep_set_bufsz() - Set the maximum packet size for the endpoint
615 * @ep: Reference to structure of type pch_udc_ep_regs
616 * @buf_size: The buffer size
617 */
618static void pch_udc_ep_set_bufsz(struct pch_udc_ep *ep,
619 u32 buf_size, u32 ep_in)
620{
621 u32 data;
622 if (ep_in) {
623 data = pch_udc_ep_readl(ep, UDC_BUFIN_FRAMENUM_ADDR);
624 data = (data & 0xffff0000) | (buf_size & 0xffff);
625 pch_udc_ep_writel(ep, data, UDC_BUFIN_FRAMENUM_ADDR);
626 } else {
627 data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
628 data = (buf_size << 16) | (data & 0xffff);
629 pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
630 }
631}
632
633/**
634 * pch_udc_ep_set_maxpkt() - Set the Max packet size for the endpoint
635 * @ep: Reference to structure of type pch_udc_ep_regs
636 * @pkt_size: The packet size
637 */
638static void pch_udc_ep_set_maxpkt(struct pch_udc_ep *ep, u32 pkt_size)
639{
640 u32 data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
641 data = (data & 0xffff0000) | (pkt_size & 0xffff);
642 pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
643}
644
/**
 * pch_udc_ep_set_subptr() - Set the Setup buffer pointer for the endpoint
 * @ep:		Reference to structure of type pch_udc_ep_regs
 * @addr:	DMA address of the setup descriptor to program
 */
static inline void pch_udc_ep_set_subptr(struct pch_udc_ep *ep, u32 addr)
{
	pch_udc_ep_writel(ep, addr, UDC_SUBPTR_ADDR);
}

/**
 * pch_udc_ep_set_ddptr() - Set the Data descriptor pointer for the endpoint
 * @ep:		Reference to structure of type pch_udc_ep_regs
 * @addr:	DMA address of the data descriptor to program
 */
static inline void pch_udc_ep_set_ddptr(struct pch_udc_ep *ep, u32 addr)
{
	pch_udc_ep_writel(ep, addr, UDC_DESPTR_ADDR);
}
664
/**
 * pch_udc_ep_set_pd() - Set the poll demand bit for the endpoint
 * @ep:	Reference to structure of type pch_udc_ep_regs
 */
static inline void pch_udc_ep_set_pd(struct pch_udc_ep *ep)
{
	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_P);
}

/**
 * pch_udc_ep_set_rrdy() - Set the receive ready bit for the endpoint
 * @ep:	Reference to structure of type pch_udc_ep_regs
 */
static inline void pch_udc_ep_set_rrdy(struct pch_udc_ep *ep)
{
	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
}

/**
 * pch_udc_ep_clear_rrdy() - Clear the receive ready bit for the endpoint
 * @ep:	Reference to structure of type pch_udc_ep_regs
 */
static inline void pch_udc_ep_clear_rrdy(struct pch_udc_ep *ep)
{
	pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
}
691
692/**
693 * pch_udc_set_dma() - Set the 'TDE' or RDE bit of device control
694 * register depending on the direction specified
695 * @dev: Reference to structure of type pch_udc_regs
696 * @dir: whether Tx or Rx
697 * DMA_DIR_RX: Receive
698 * DMA_DIR_TX: Transmit
699 */
700static inline void pch_udc_set_dma(struct pch_udc_dev *dev, int dir)
701{
702 if (dir == DMA_DIR_RX)
703 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
704 else if (dir == DMA_DIR_TX)
705 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
706}
707
708/**
709 * pch_udc_clear_dma() - Clear the 'TDE' or RDE bit of device control
710 * register depending on the direction specified
711 * @dev: Reference to structure of type pch_udc_regs
712 * @dir: Whether Tx or Rx
713 * DMA_DIR_RX: Receive
714 * DMA_DIR_TX: Transmit
715 */
716static inline void pch_udc_clear_dma(struct pch_udc_dev *dev, int dir)
717{
718 if (dir == DMA_DIR_RX)
719 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
720 else if (dir == DMA_DIR_TX)
721 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
722}
723
/**
 * pch_udc_set_csr_done() - Set the device control register
 *			    CSR done field (bit 13)
 * @dev:	reference to structure of type pch_udc_regs
 */
static inline void pch_udc_set_csr_done(struct pch_udc_dev *dev)
{
	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_CSR_DONE);
}
733
/**
 * pch_udc_disable_interrupts() - Disables the specified interrupts
 * @dev:	Reference to structure of type pch_udc_regs
 * @mask:	Mask to disable interrupts
 *
 * Setting a bit in the irq-mask register masks (disables) that interrupt.
 */
static inline void pch_udc_disable_interrupts(struct pch_udc_dev *dev,
					u32 mask)
{
	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, mask);
}

/**
 * pch_udc_enable_interrupts() - Enable the specified interrupts
 * @dev:	Reference to structure of type pch_udc_regs
 * @mask:	Mask to enable interrupts
 *
 * Clearing a bit in the irq-mask register unmasks (enables) that interrupt.
 */
static inline void pch_udc_enable_interrupts(struct pch_udc_dev *dev,
					u32 mask)
{
	pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR, mask);
}

/**
 * pch_udc_disable_ep_interrupts() - Disable endpoint interrupts
 * @dev:	Reference to structure of type pch_udc_regs
 * @mask:	Mask to disable interrupts (see PCH_UDC_EPINT())
 */
static inline void pch_udc_disable_ep_interrupts(struct pch_udc_dev *dev,
						u32 mask)
{
	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, mask);
}

/**
 * pch_udc_enable_ep_interrupts() - Enable endpoint interrupts
 * @dev:	Reference to structure of type pch_udc_regs
 * @mask:	Mask to enable interrupts (see PCH_UDC_EPINT())
 */
static inline void pch_udc_enable_ep_interrupts(struct pch_udc_dev *dev,
						u32 mask)
{
	pch_udc_bit_clr(dev, UDC_EPIRQMSK_ADDR, mask);
}
777
/**
 * pch_udc_read_device_interrupts() - Read the device interrupts
 * @dev:	Reference to structure of type pch_udc_regs
 *
 * Return: The device interrupt status register
 */
static inline u32 pch_udc_read_device_interrupts(struct pch_udc_dev *dev)
{
	return pch_udc_readl(dev, UDC_DEVIRQSTS_ADDR);
}

/**
 * pch_udc_write_device_interrupts() - Write device interrupts
 * @dev:	Reference to structure of type pch_udc_regs
 * @val:	The value to be written to interrupt register
 *		(writing a status bit acknowledges/clears it)
 */
static inline void pch_udc_write_device_interrupts(struct pch_udc_dev *dev,
					u32 val)
{
	pch_udc_writel(dev, val, UDC_DEVIRQSTS_ADDR);
}

/**
 * pch_udc_read_ep_interrupts() - Read the endpoint interrupts
 * @dev:	Reference to structure of type pch_udc_regs
 *
 * Return: The endpoint interrupt status register
 */
static inline u32 pch_udc_read_ep_interrupts(struct pch_udc_dev *dev)
{
	return pch_udc_readl(dev, UDC_EPIRQSTS_ADDR);
}

/**
 * pch_udc_write_ep_interrupts() - Clear endpoint interrupts
 * @dev:	Reference to structure of type pch_udc_regs
 * @val:	The value to be written to interrupt register
 */
static inline void pch_udc_write_ep_interrupts(struct pch_udc_dev *dev,
					u32 val)
{
	pch_udc_writel(dev, val, UDC_EPIRQSTS_ADDR);
}
819
/**
 * pch_udc_read_device_status() - Read the device status
 * @dev:	Reference to structure of type pch_udc_regs
 *
 * Return: The device status register
 */
static inline u32 pch_udc_read_device_status(struct pch_udc_dev *dev)
{
	return pch_udc_readl(dev, UDC_DEVSTS_ADDR);
}
829
/**
 * pch_udc_read_ep_control() - Read the endpoint control
 * @ep:	Reference to structure of type pch_udc_ep_regs
 *
 * Return: The endpoint control register value
 */
static inline u32 pch_udc_read_ep_control(struct pch_udc_ep *ep)
{
	return pch_udc_ep_readl(ep, UDC_EPCTL_ADDR);
}
839
840/**
841 * pch_udc_clear_ep_control() - Clear the endpoint control register
842 * @ep: Reference to structure of type pch_udc_ep_regs
843 * Retern The endpoint control register value
844 */
845static inline void pch_udc_clear_ep_control(struct pch_udc_ep *ep)
846{
847 return pch_udc_ep_writel(ep, 0, UDC_EPCTL_ADDR);
848}
849
/**
 * pch_udc_read_ep_status() - Read the endpoint status
 * @ep:	Reference to structure of type pch_udc_ep_regs
 *
 * Return: The endpoint status register value
 */
static inline u32 pch_udc_read_ep_status(struct pch_udc_ep *ep)
{
	return pch_udc_ep_readl(ep, UDC_EPSTS_ADDR);
}
859
860/**
861 * pch_udc_clear_ep_status() - Clear the endpoint status
862 * @ep: Reference to structure of type pch_udc_ep_regs
863 * @stat: Endpoint status
864 */
865static inline void pch_udc_clear_ep_status(struct pch_udc_ep *ep,
866 u32 stat)
867{
868 return pch_udc_ep_writel(ep, stat, UDC_EPSTS_ADDR);
869}
870
/**
 * pch_udc_ep_set_nak() - Set the bit 7 (SNAK field)
 *			  of the endpoint control register
 * @ep:	Reference to structure of type pch_udc_ep_regs
 */
static inline void pch_udc_ep_set_nak(struct pch_udc_ep *ep)
{
	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_SNAK);
}
880
/**
 * pch_udc_ep_clear_nak() - Set the bit 8 (CNAK field)
 *			    of the endpoint control register
 * @ep:	reference to structure of type pch_udc_ep_regs
 *
 * No-op if NAK is not currently set.  For an OUT endpoint, first waits
 * (up to 10000 x 5us polls) for the Rx FIFO to drain, then repeatedly
 * writes CNAK until the hardware reports NAK cleared; errors are logged
 * but not returned.  Busy-waits with udelay — callable from atomic context.
 */
static void pch_udc_ep_clear_nak(struct pch_udc_ep *ep)
{
	unsigned int loopcnt = 0;
	struct pch_udc_dev *dev = ep->dev;

	if (!(pch_udc_ep_readl(ep, UDC_EPCTL_ADDR) & UDC_EPCTL_NAK))
		return;
	if (!ep->in) {
		loopcnt = 10000;
		while (!(pch_udc_read_ep_status(ep) & UDC_EPSTS_MRXFIFO_EMP) &&
			--loopcnt)
			udelay(5);
		if (!loopcnt)
			dev_err(&dev->pdev->dev, "%s: RxFIFO not Empty\n",
				__func__);
	}
	loopcnt = 10000;
	while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_NAK) && --loopcnt) {
		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
		udelay(5);
	}
	if (!loopcnt)
		dev_err(&dev->pdev->dev, "%s: Clear NAK not set for ep%d%s\n",
			__func__, ep->num, (ep->in ? "in" : "out"));
}
911
/**
 * pch_udc_ep_fifo_flush() - Flush the endpoint fifo
 * @ep:	reference to structure of type pch_udc_ep_regs
 * @dir:	direction of endpoint
 *		  0: endpoint is OUT
 *		  !0: endpoint is IN
 *
 * IN: just sets the flush (F) bit and returns.  OUT: if the Rx FIFO is not
 * already empty, asserts MRXFLUSH, busy-waits (up to 10000 x 5us) for it to
 * drain, then deasserts MRXFLUSH; a timeout is logged but not returned.
 */
static void pch_udc_ep_fifo_flush(struct pch_udc_ep *ep, int dir)
{
	unsigned int loopcnt = 0;
	struct pch_udc_dev *dev = ep->dev;

	if (dir) {	/* IN ep */
		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
		return;
	}

	if (pch_udc_read_ep_status(ep) & UDC_EPSTS_MRXFIFO_EMP)
		return;
	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_MRXFLUSH);
	/* Wait for RxFIFO Empty */
	loopcnt = 10000;
	while (!(pch_udc_read_ep_status(ep) & UDC_EPSTS_MRXFIFO_EMP) &&
		--loopcnt)
		udelay(5);
	if (!loopcnt)
		dev_err(&dev->pdev->dev, "RxFIFO not Empty\n");
	pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_MRXFLUSH);
}
941
/**
 * pch_udc_ep_enable() - This api enables endpoint
 * @ep:		reference to structure of type pch_udc_ep (the old kernel-doc
 *		wrongly documented "@regs")
 * @cfg:	current configuration/interface/alt-setting to program into
 *		the endpoint's CSR
 * @desc:	endpoint descriptor
 *
 * Programs transfer type, buffer size, max packet size, NAKs the endpoint,
 * flushes its FIFO, then writes the composed UDC_CSR_NE_* word into the
 * per-endpoint CSR (IN and OUT endpoints use different CSR indices).
 */
static void pch_udc_ep_enable(struct pch_udc_ep *ep,
			       struct pch_udc_cfg_data *cfg,
			       const struct usb_endpoint_descriptor *desc)
{
	u32 val = 0;
	u32 buff_size = 0;

	pch_udc_ep_set_trfr_type(ep, desc->bmAttributes);
	if (ep->in)
		buff_size = UDC_EPIN_BUFF_SIZE;
	else
		buff_size = UDC_EPOUT_BUFF_SIZE;
	pch_udc_ep_set_bufsz(ep, buff_size, ep->in);
	pch_udc_ep_set_maxpkt(ep, le16_to_cpu(desc->wMaxPacketSize));
	pch_udc_ep_set_nak(ep);
	pch_udc_ep_fifo_flush(ep, ep->in);
	/* Configure the endpoint */
	val = ep->num << UDC_CSR_NE_NUM_SHIFT | ep->in << UDC_CSR_NE_DIR_SHIFT |
	      ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) <<
		UDC_CSR_NE_TYPE_SHIFT) |
	      (cfg->cur_cfg << UDC_CSR_NE_CFG_SHIFT) |
	      (cfg->cur_intf << UDC_CSR_NE_INTF_SHIFT) |
	      (cfg->cur_alt << UDC_CSR_NE_ALT_SHIFT) |
	      le16_to_cpu(desc->wMaxPacketSize) << UDC_CSR_NE_MAX_PKT_SHIFT;

	if (ep->in)
		pch_udc_write_csr(ep->dev, val, UDC_EPIN_IDX(ep->num));
	else
		pch_udc_write_csr(ep->dev, val, UDC_EPOUT_IDX(ep->num));
}
977
/**
 * pch_udc_ep_disable() - This api disables endpoint
 * @ep:	reference to structure of type pch_udc_ep (the old kernel-doc
 *	wrongly documented "@regs")
 */
static void pch_udc_ep_disable(struct pch_udc_ep *ep)
{
	if (ep->in) {
		/* flush the fifo */
		pch_udc_ep_writel(ep, UDC_EPCTL_F, UDC_EPCTL_ADDR);
		/* set NAK */
		pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
		/* acknowledge any pending IN status */
		pch_udc_ep_bit_set(ep, UDC_EPSTS_ADDR, UDC_EPSTS_IN);
	} else {
		/* set NAK */
		pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
	}
	/* reset desc pointer */
	pch_udc_ep_writel(ep, 0, UDC_DESPTR_ADDR);
}
997
/**
 * pch_udc_wait_ep_stall() - Wait EP stall.
 * @ep:	reference to the endpoint (the old kernel-doc wrongly documented
 *	"@dev")
 *
 * Busy-waits (up to 10000 x 5us) for the stall (S) bit to clear; logs an
 * error on timeout.
 */
static void pch_udc_wait_ep_stall(struct pch_udc_ep *ep)
{
	unsigned int count = 10000;

	/* Wait till idle */
	while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_S) && --count)
		udelay(5);
	if (!count)
		dev_err(&ep->dev->pdev->dev, "%s: wait error\n", __func__);
}
1012
/**
 * pch_udc_init() - This API initializes usb device controller
 * @dev:	Reference to pch_udc_regs structure
 *
 * Sequence: soft-reset controller and PHY, mask+clear all device and
 * endpoint interrupts, program device config (CSR programming, self
 * powered, speed per the speed_fs module parameter), then program device
 * control (threshold/burst lengths, mode, burst/threshold enable).
 */
static void pch_udc_init(struct pch_udc_dev *dev)
{
	if (NULL == dev) {
		pr_err("%s: Invalid address\n", __func__);
		return;
	}
	/* Soft Reset and Reset PHY */
	pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
	pch_udc_writel(dev, UDC_SRST | UDC_PSRST, UDC_SRST_ADDR);
	mdelay(1);
	pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
	pch_udc_writel(dev, 0x00, UDC_SRST_ADDR);
	mdelay(1);
	/* mask and clear all device interrupts */
	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
	pch_udc_bit_set(dev, UDC_DEVIRQSTS_ADDR, UDC_DEVINT_MSK);

	/* mask and clear all ep interrupts */
	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
	pch_udc_bit_set(dev, UDC_EPIRQSTS_ADDR, UDC_EPINT_MSK_DISABLE_ALL);

	/* enable dynamic CSR programming, self powered and device speed */
	if (speed_fs)
		pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
				UDC_DEVCFG_SP | UDC_DEVCFG_SPD_FS);
	else /* default high speed */
		pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
				UDC_DEVCFG_SP | UDC_DEVCFG_SPD_HS);
	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR,
			(PCH_UDC_THLEN << UDC_DEVCTL_THLEN_SHIFT) |
			(PCH_UDC_BRLEN << UDC_DEVCTL_BRLEN_SHIFT) |
			UDC_DEVCTL_MODE | UDC_DEVCTL_BREN |
			UDC_DEVCTL_THE);
}
1051
/**
 * pch_udc_exit() - This API exit usb device controller
 * @dev:	Reference to pch_udc_regs structure
 *
 * Masks all interrupts and forces the disconnected state.
 */
static void pch_udc_exit(struct pch_udc_dev *dev)
{
	/* mask all device interrupts */
	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
	/* mask all ep interrupts */
	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
	/* put device in disconnected state */
	pch_udc_set_disconnect(dev);
}
1065
1066/**
1067 * pch_udc_pcd_get_frame() - This API is invoked to get the current frame number
1068 * @gadget: Reference to the gadget driver
1069 *
1070 * Return codes:
1071 * 0: Success
1072 * -EINVAL: If the gadget passed is NULL
1073 */
1074static int pch_udc_pcd_get_frame(struct usb_gadget *gadget)
1075{
1076 struct pch_udc_dev *dev;
1077
1078 if (!gadget)
1079 return -EINVAL;
1080 dev = container_of(gadget, struct pch_udc_dev, gadget);
1081 return pch_udc_get_frame(dev);
1082}
1083
1084/**
1085 * pch_udc_pcd_wakeup() - This API is invoked to initiate a remote wakeup
1086 * @gadget: Reference to the gadget driver
1087 *
1088 * Return codes:
1089 * 0: Success
1090 * -EINVAL: If the gadget passed is NULL
1091 */
1092static int pch_udc_pcd_wakeup(struct usb_gadget *gadget)
1093{
1094 struct pch_udc_dev *dev;
1095 unsigned long flags;
1096
1097 if (!gadget)
1098 return -EINVAL;
1099 dev = container_of(gadget, struct pch_udc_dev, gadget);
1100 spin_lock_irqsave(&dev->lock, flags);
1101 pch_udc_rmt_wakeup(dev);
1102 spin_unlock_irqrestore(&dev->lock, flags);
1103 return 0;
1104}
1105
1106/**
1107 * pch_udc_pcd_selfpowered() - This API is invoked to specify whether the device
1108 * is self powered or not
1109 * @gadget: Reference to the gadget driver
1110 * @value: Specifies self powered or not
1111 *
1112 * Return codes:
1113 * 0: Success
1114 * -EINVAL: If the gadget passed is NULL
1115 */
1116static int pch_udc_pcd_selfpowered(struct usb_gadget *gadget, int value)
1117{
1118 struct pch_udc_dev *dev;
1119
1120 if (!gadget)
1121 return -EINVAL;
1122 dev = container_of(gadget, struct pch_udc_dev, gadget);
1123 if (value)
1124 pch_udc_set_selfpowered(dev);
1125 else
1126 pch_udc_clear_selfpowered(dev);
1127 return 0;
1128}
1129
1130/**
1131 * pch_udc_pcd_pullup() - This API is invoked to make the device
1132 * visible/invisible to the host
1133 * @gadget: Reference to the gadget driver
1134 * @is_on: Specifies whether the pull up is made active or inactive
1135 *
1136 * Return codes:
1137 * 0: Success
1138 * -EINVAL: If the gadget passed is NULL
1139 */
1140static int pch_udc_pcd_pullup(struct usb_gadget *gadget, int is_on)
1141{
1142 struct pch_udc_dev *dev;
1143
1144 if (!gadget)
1145 return -EINVAL;
1146 dev = container_of(gadget, struct pch_udc_dev, gadget);
1147 pch_udc_vbus_session(dev, is_on);
1148 return 0;
1149}
1150
1151/**
1152 * pch_udc_pcd_vbus_session() - This API is used by a driver for an external
1153 * transceiver (or GPIO) that
1154 * detects a VBUS power session starting/ending
1155 * @gadget: Reference to the gadget driver
1156 * @is_active: specifies whether the session is starting or ending
1157 *
1158 * Return codes:
1159 * 0: Success
1160 * -EINVAL: If the gadget passed is NULL
1161 */
1162static int pch_udc_pcd_vbus_session(struct usb_gadget *gadget, int is_active)
1163{
1164 struct pch_udc_dev *dev;
1165
1166 if (!gadget)
1167 return -EINVAL;
1168 dev = container_of(gadget, struct pch_udc_dev, gadget);
1169 pch_udc_vbus_session(dev, is_active);
1170 return 0;
1171}
1172
/**
 * pch_udc_pcd_vbus_draw() - This API is used by gadget drivers during
 *			     SET_CONFIGURATION calls to
 *			     specify how much power the device can consume
 * @gadget:	Reference to the gadget driver
 * @mA:		specifies the current limit in 2mA unit
 *
 * Return codes:
 *	-EOPNOTSUPP: always; VBUS current control is not implemented here
 *	(the old kernel-doc also listed -EINVAL, which is never returned)
 */
static int pch_udc_pcd_vbus_draw(struct usb_gadget *gadget, unsigned int mA)
{
	return -EOPNOTSUPP;
}
1188
/* Entry points used by the USB gadget core to drive this controller */
static const struct usb_gadget_ops pch_udc_ops = {
	.get_frame = pch_udc_pcd_get_frame,
	.wakeup = pch_udc_pcd_wakeup,
	.set_selfpowered = pch_udc_pcd_selfpowered,
	.pullup = pch_udc_pcd_pullup,
	.vbus_session = pch_udc_pcd_vbus_session,
	.vbus_draw = pch_udc_pcd_vbus_draw,
};
1197
/**
 * complete_req() - This API is invoked from the driver when processing
 *		    of a request is complete
 * @ep:		Reference to the endpoint structure
 * @req:	Reference to the request structure
 * @status:	Indicates the success/failure of completion
 *
 * Caller must hold dev->lock: the lock is dropped around the gadget
 * completion callback and re-taken afterwards.  The endpoint is marked
 * halted for the duration of the callback (previous value restored), and
 * for OUT endpoints the receive-ready bit is cleared before the callback.
 */
static void complete_req(struct pch_udc_ep *ep, struct pch_udc_request *req,
								 int status)
{
	struct pch_udc_dev *dev;
	unsigned halted = ep->halted;

	list_del_init(&req->queue);

	/* set new status if pending */
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;

	dev = ep->dev;
	if (req->dma_mapped) {
		/* unmap in the same direction the buffer was mapped */
		if (ep->in)
			pci_unmap_single(dev->pdev, req->req.dma,
					 req->req.length, PCI_DMA_TODEVICE);
		else
			pci_unmap_single(dev->pdev, req->req.dma,
					 req->req.length, PCI_DMA_FROMDEVICE);
		req->dma_mapped = 0;
		req->req.dma = DMA_ADDR_INVALID;
	}
	ep->halted = 1;
	spin_unlock(&dev->lock);
	if (!ep->in)
		pch_udc_ep_clear_rrdy(ep);
	req->req.complete(&ep->ep, &req->req);
	spin_lock(&dev->lock);
	ep->halted = halted;
}
1238
1239/**
1240 * empty_req_queue() - This API empties the request queue of an endpoint
1241 * @ep: Reference to the endpoint structure
1242 */
1243static void empty_req_queue(struct pch_udc_ep *ep)
1244{
1245 struct pch_udc_request *req;
1246
1247 ep->halted = 1;
1248 while (!list_empty(&ep->queue)) {
1249 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
1250 complete_req(ep, req, -ESHUTDOWN); /* Remove from list */
1251 }
1252}
1253
/**
 * pch_udc_free_dma_chain() - This function frees the DMA chain created
 *				for the request
 * @dev:	Reference to the driver structure
 * @req:	Reference to the request whose chain is to be freed
 *
 * Frees every descriptor in the chain except the first one, which is
 * owned by the request itself and released with it.
 */
static void pch_udc_free_dma_chain(struct pch_udc_dev *dev,
				   struct pch_udc_request *req)
{
	struct pch_udc_data_dma_desc *td = req->td_data;
	unsigned i = req->chain_len;

	for (; i > 1; --i) {
		dma_addr_t addr = (dma_addr_t)td->next;
		/* do not free first desc., will be done by free for request */
		td = phys_to_virt(addr);
		pci_pool_free(dev->data_requests, td, addr);
	}
}
1276
/**
 * pch_udc_create_dma_chain() - This function creates or reinitializes
 *				a DMA chain
 * @ep:		Reference to the endpoint structure
 * @buf_len:	Number of bytes covered by one descriptor
 * @req:	Reference to the request
 * @gfp_flags:	Flags to be used while mapping the data buffer
 *
 * Return codes:
 *	0: success,
 *	-ENOMEM: pci_pool_alloc invocation fails
 */
static int pch_udc_create_dma_chain(struct pch_udc_ep *ep,
				    struct pch_udc_request *req,
				    unsigned long buf_len,
				    gfp_t gfp_flags)
{
	struct pch_udc_data_dma_desc *td = req->td_data, *last;
	unsigned long bytes = req->req.length, i = 0;
	dma_addr_t dma_addr;
	unsigned len = 1;

	/* tear down any previously built chain before rebuilding */
	if (req->chain_len > 1)
		pch_udc_free_dma_chain(ep->dev, req);

	for (; ; bytes -= buf_len, ++len) {
		/* for IN eps the per-buffer byte count rides in status */
		if (ep->in)
			td->status = PCH_UDC_BS_HST_BSY | min(buf_len, bytes);
		else
			td->status = PCH_UDC_BS_HST_BSY;

		if (bytes <= buf_len)
			break;

		last = td;
		td = pci_pool_alloc(ep->dev->data_requests, gfp_flags,
				    &dma_addr);
		if (!td)
			goto nomem;

		i += buf_len;
		td->dataptr = req->req.dma + i;
		last->next = dma_addr;
	}

	req->td_data_last = td;
	td->status |= PCH_UDC_DMA_LAST;
	/* close the ring: last descriptor points back at the first */
	td->next = req->td_data_phys;
	req->chain_len = len;
	return 0;

nomem:
	/* free what was built so far; the first desc stays with the req */
	if (len > 1) {
		req->chain_len = len;
		pch_udc_free_dma_chain(ep->dev, req);
	}
	req->chain_len = 1;
	return -ENOMEM;
}
1336
/**
 * prepare_dma() - This function creates and initializes the DMA chain
 *			for the request
 * @ep:		Reference to the endpoint structure
 * @req:	Reference to the request
 * @gfp:	Flag to be used while mapping the data buffer
 *
 * Return codes:
 *	0:			Success
 *	linux error number:	Failure
 */
static int prepare_dma(struct pch_udc_ep *ep, struct pch_udc_request *req,
		       gfp_t gfp)
{
	int retval;

	req->td_data->dataptr = req->req.dma;
	req->td_data->status |= PCH_UDC_DMA_LAST;
	/* Allocate and create a DMA chain */
	retval = pch_udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
	if (retval) {
		pr_err("%s: could not create DMA chain: %d\n",
		       __func__, retval);
		return retval;
	}
	/* the fixups below only apply to IN transfers */
	if (!ep->in)
		return 0;
	if (req->req.length <= ep->ep.maxpacket)
		req->td_data->status = PCH_UDC_DMA_LAST | PCH_UDC_BS_HST_BSY |
				       req->req.length;
	/* if bytes < max packet then tx bytes must
	 * be written in packet per buffer mode
	 */
	if ((req->req.length < ep->ep.maxpacket) || !ep->num)
		req->td_data->status = (req->td_data->status &
					~PCH_UDC_RXTX_BYTES) | req->req.length;
	req->td_data->status = (req->td_data->status &
				~PCH_UDC_BUFF_STS) | PCH_UDC_BS_HST_BSY;
	return 0;
}
1377
/**
 * process_zlp() - This function processes zero-length packets
 *			from the gadget driver
 * @ep:		Reference to the endpoint structure
 * @req:	Reference to the request
 *
 * Caller must hold dev->lock (complete_req() drops and re-takes it).
 */
static void process_zlp(struct pch_udc_ep *ep, struct pch_udc_request *req)
{
	struct pch_udc_dev *dev = ep->dev;

	/* IN zlp's are handled by hardware */
	complete_req(ep, req, 0);

	/* if set_config or set_intf is waiting for ack by zlp
	 * then set CSR_DONE
	 */
	if (dev->set_cfg_not_acked) {
		pch_udc_set_csr_done(dev);
		dev->set_cfg_not_acked = 0;
	}
	/* setup command is ACK'ed now by zlp */
	if (!dev->stall && dev->waiting_zlp_ack) {
		pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
		dev->waiting_zlp_ack = 0;
	}
}
1404
/**
 * pch_udc_start_rxrequest() - This function starts a receive request.
 * @ep:		Reference to the endpoint structure
 * @req:	Reference to the request structure
 */
static void pch_udc_start_rxrequest(struct pch_udc_ep *ep,
				    struct pch_udc_request *req)
{
	struct pch_udc_data_dma_desc *td_data;

	pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
	td_data = req->td_data;
	ep->td_data = req->td_data;
	/* Set the status bits for all descriptors */
	while (1) {
		/* hand each descriptor in the chain over to the host ctrl */
		td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
				  PCH_UDC_BS_HST_RDY;
		if ((td_data->status & PCH_UDC_DMA_LAST) == PCH_UDC_DMA_LAST)
			break;
		td_data = phys_to_virt(td_data->next);
	}
	/* Write the descriptor pointer */
	pch_udc_ep_set_ddptr(ep, req->td_data_phys);
	req->dma_going = 1;
	pch_udc_enable_ep_interrupts(ep->dev, UDC_EPINT_OUT_EP0 << ep->num);
	pch_udc_set_dma(ep->dev, DMA_DIR_RX);
	pch_udc_ep_clear_nak(ep);
	pch_udc_ep_set_rrdy(ep);
}
1434
/**
 * pch_udc_pcd_ep_enable() - This API enables the endpoint. It is called
 *				from gadget driver
 * @usbep:	Reference to the USB endpoint structure
 * @desc:	Reference to the USB endpoint descriptor structure
 *
 * Return codes:
 *	0:		Success
 *	-EINVAL:	NULL/ep0 endpoint, bad descriptor type, or zero
 *			wMaxPacketSize
 *	-ESHUTDOWN:	no gadget driver bound or speed not enumerated yet
 */
static int pch_udc_pcd_ep_enable(struct usb_ep *usbep,
				 const struct usb_endpoint_descriptor *desc)
{
	struct pch_udc_ep *ep;
	struct pch_udc_dev *dev;
	unsigned long iflags;

	if (!usbep || (usbep->name == ep0_string) || !desc ||
	    (desc->bDescriptorType != USB_DT_ENDPOINT) || !desc->wMaxPacketSize)
		return -EINVAL;

	ep = container_of(usbep, struct pch_udc_ep, ep);
	dev = ep->dev;
	if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
		return -ESHUTDOWN;
	spin_lock_irqsave(&dev->lock, iflags);
	ep->desc = desc;
	ep->halted = 0;
	pch_udc_ep_enable(ep, &ep->dev->cfg_data, desc);
	/* descriptor stores wMaxPacketSize little-endian on the wire */
	ep->ep.maxpacket = le16_to_cpu(desc->wMaxPacketSize);
	pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
	spin_unlock_irqrestore(&dev->lock, iflags);
	return 0;
}
1470
/**
 * pch_udc_pcd_ep_disable() - This API disables endpoint and is called
 *				from gadget driver
 * @usbep:	Reference to the USB endpoint structure
 *
 * Return codes:
 *	0:		Success
 *	-EINVAL:	NULL endpoint, ep0, or endpoint not enabled
 */
static int pch_udc_pcd_ep_disable(struct usb_ep *usbep)
{
	struct pch_udc_ep *ep;
	struct pch_udc_dev *dev;
	unsigned long iflags;

	if (!usbep)
		return -EINVAL;

	ep = container_of(usbep, struct pch_udc_ep, ep);
	dev = ep->dev;
	if ((usbep->name == ep0_string) || !ep->desc)
		return -EINVAL;

	spin_lock_irqsave(&ep->dev->lock, iflags);
	/* complete all outstanding requests with -ESHUTDOWN */
	empty_req_queue(ep);
	ep->halted = 1;
	pch_udc_ep_disable(ep);
	pch_udc_disable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
	ep->desc = NULL;
	INIT_LIST_HEAD(&ep->queue);
	spin_unlock_irqrestore(&ep->dev->lock, iflags);
	return 0;
}
1504
1505/**
1506 * pch_udc_alloc_request() - This function allocates request structure.
1507 * It is called by gadget driver
1508 * @usbep: Reference to the USB endpoint structure
1509 * @gfp: Flag to be used while allocating memory
1510 *
1511 * Return codes:
1512 * NULL: Failure
1513 * Allocated address: Success
1514 */
1515static struct usb_request *pch_udc_alloc_request(struct usb_ep *usbep,
1516 gfp_t gfp)
1517{
1518 struct pch_udc_request *req;
1519 struct pch_udc_ep *ep;
1520 struct pch_udc_data_dma_desc *dma_desc;
1521 struct pch_udc_dev *dev;
1522
1523 if (!usbep)
1524 return NULL;
1525 ep = container_of(usbep, struct pch_udc_ep, ep);
1526 dev = ep->dev;
1527 req = kzalloc(sizeof *req, gfp);
1528 if (!req)
1529 return NULL;
1530 req->req.dma = DMA_ADDR_INVALID;
1531 INIT_LIST_HEAD(&req->queue);
1532 if (!ep->dev->dma_addr)
1533 return &req->req;
1534 /* ep0 in requests are allocated from data pool here */
1535 dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp,
1536 &req->td_data_phys);
1537 if (NULL == dma_desc) {
1538 kfree(req);
1539 return NULL;
1540 }
1541 /* prevent from using desc. - set HOST BUSY */
1542 dma_desc->status |= PCH_UDC_BS_HST_BSY;
1543 dma_desc->dataptr = __constant_cpu_to_le32(DMA_ADDR_INVALID);
1544 req->td_data = dma_desc;
1545 req->td_data_last = dma_desc;
1546 req->chain_len = 1;
1547 return &req->req;
1548}
1549
/**
 * pch_udc_free_request() - This function frees request structure.
 *				It is called by gadget driver
 * @usbep:	Reference to the USB endpoint structure
 * @usbreq:	Reference to the USB request
 *
 * Releases the request's descriptor chain, its first descriptor, and the
 * request itself.  The request must no longer be queued.
 */
static void pch_udc_free_request(struct usb_ep *usbep,
				 struct usb_request *usbreq)
{
	struct pch_udc_ep *ep;
	struct pch_udc_request *req;
	struct pch_udc_dev *dev;

	if (!usbep || !usbreq)
		return;
	ep = container_of(usbep, struct pch_udc_ep, ep);
	req = container_of(usbreq, struct pch_udc_request, req);
	dev = ep->dev;
	if (!list_empty(&req->queue))
		dev_err(&dev->pdev->dev, "%s: %s req=0x%p queue not empty\n",
			__func__, usbep->name, req);
	if (req->td_data != NULL) {
		/* chained descriptors first, then the embedded first desc */
		if (req->chain_len > 1)
			pch_udc_free_dma_chain(ep->dev, req);
		pci_pool_free(ep->dev->data_requests, req->td_data,
			      req->td_data_phys);
	}
	kfree(req);
}
1579
1580/**
1581 * pch_udc_pcd_queue() - This function queues a request packet. It is called
1582 * by gadget driver
1583 * @usbep: Reference to the USB endpoint structure
1584 * @usbreq: Reference to the USB request
1585 * @gfp: Flag to be used while mapping the data buffer
1586 *
1587 * Return codes:
1588 * 0: Success
1589 * linux error number: Failure
1590 */
1591static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
1592 gfp_t gfp)
1593{
1594 int retval = 0;
1595 struct pch_udc_ep *ep;
1596 struct pch_udc_dev *dev;
1597 struct pch_udc_request *req;
1598 unsigned long iflags;
1599
1600 if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf)
1601 return -EINVAL;
1602 ep = container_of(usbep, struct pch_udc_ep, ep);
1603 dev = ep->dev;
1604 if (!ep->desc && ep->num)
1605 return -EINVAL;
1606 req = container_of(usbreq, struct pch_udc_request, req);
1607 if (!list_empty(&req->queue))
1608 return -EINVAL;
1609 if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
1610 return -ESHUTDOWN;
1611 spin_lock_irqsave(&ep->dev->lock, iflags);
1612 /* map the buffer for dma */
1613 if (usbreq->length &&
1614 ((usbreq->dma == DMA_ADDR_INVALID) || !usbreq->dma)) {
1615 if (ep->in)
1616 usbreq->dma = pci_map_single(dev->pdev, usbreq->buf,
1617 usbreq->length, PCI_DMA_TODEVICE);
1618 else
1619 usbreq->dma = pci_map_single(dev->pdev, usbreq->buf,
1620 usbreq->length, PCI_DMA_FROMDEVICE);
1621 req->dma_mapped = 1;
1622 }
1623 if (usbreq->length > 0) {
1624 retval = prepare_dma(ep, req, gfp);
1625 if (retval)
1626 goto probe_end;
1627 }
1628 usbreq->actual = 0;
1629 usbreq->status = -EINPROGRESS;
1630 req->dma_done = 0;
1631 if (list_empty(&ep->queue) && !ep->halted) {
1632 /* no pending transfer, so start this req */
1633 if (!usbreq->length) {
1634 process_zlp(ep, req);
1635 retval = 0;
1636 goto probe_end;
1637 }
1638 if (!ep->in) {
1639 pch_udc_start_rxrequest(ep, req);
1640 } else {
1641 /*
1642 * For IN trfr the descriptors will be programmed and
1643 * P bit will be set when
1644 * we get an IN token
1645 */
1646 pch_udc_wait_ep_stall(ep);
1647 pch_udc_ep_clear_nak(ep);
1648 pch_udc_enable_ep_interrupts(ep->dev, (1 << ep->num));
1649 pch_udc_set_dma(dev, DMA_DIR_TX);
1650 }
1651 }
1652 /* Now add this request to the ep's pending requests */
1653 if (req != NULL)
1654 list_add_tail(&req->queue, &ep->queue);
1655
1656probe_end:
1657 spin_unlock_irqrestore(&dev->lock, iflags);
1658 return retval;
1659}
1660
1661/**
1662 * pch_udc_pcd_dequeue() - This function de-queues a request packet.
1663 * It is called by gadget driver
1664 * @usbep: Reference to the USB endpoint structure
1665 * @usbreq: Reference to the USB request
1666 *
1667 * Return codes:
1668 * 0: Success
1669 * linux error number: Failure
1670 */
1671static int pch_udc_pcd_dequeue(struct usb_ep *usbep,
1672 struct usb_request *usbreq)
1673{
1674 struct pch_udc_ep *ep;
1675 struct pch_udc_request *req;
1676 struct pch_udc_dev *dev;
1677 unsigned long flags;
1678 int ret = -EINVAL;
1679
1680 ep = container_of(usbep, struct pch_udc_ep, ep);
1681 dev = ep->dev;
1682 if (!usbep || !usbreq || (!ep->desc && ep->num))
1683 return ret;
1684 req = container_of(usbreq, struct pch_udc_request, req);
1685 spin_lock_irqsave(&ep->dev->lock, flags);
1686 /* make sure it's still queued on this endpoint */
1687 list_for_each_entry(req, &ep->queue, queue) {
1688 if (&req->req == usbreq) {
1689 pch_udc_ep_set_nak(ep);
1690 if (!list_empty(&req->queue))
1691 complete_req(ep, req, -ECONNRESET);
1692 ret = 0;
1693 break;
1694 }
1695 }
1696 spin_unlock_irqrestore(&ep->dev->lock, flags);
1697 return ret;
1698}
1699
/**
 * pch_udc_pcd_set_halt() - This function Sets or clear the endpoint halt
 *				feature
 * @usbep:	Reference to the USB endpoint structure
 * @halt:	Specifies whether to set or clear the feature
 *
 * Return codes:
 *	0:		Success
 *	-EINVAL:	bad endpoint
 *	-ESHUTDOWN:	no gadget driver bound / speed unknown
 *	-EAGAIN:	requests still pending on the endpoint
 */
static int pch_udc_pcd_set_halt(struct usb_ep *usbep, int halt)
{
	struct pch_udc_ep *ep;
	struct pch_udc_dev *dev;
	unsigned long iflags;
	int ret;

	if (!usbep)
		return -EINVAL;
	ep = container_of(usbep, struct pch_udc_ep, ep);
	dev = ep->dev;
	/* NOTE(review): unlike pch_udc_pcd_queue(), this rejects only ep0
	 * without a descriptor (!ep->num) — confirm this is intentional */
	if (!ep->desc && !ep->num)
		return -EINVAL;
	if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
		return -ESHUTDOWN;
	/* stall state is protected by its own global spinlock */
	spin_lock_irqsave(&udc_stall_spinlock, iflags);
	if (list_empty(&ep->queue)) {
		if (halt) {
			if (ep->num == PCH_UDC_EP0)
				ep->dev->stall = 1;
			pch_udc_ep_set_stall(ep);
			pch_udc_enable_ep_interrupts(ep->dev,
						     PCH_UDC_EPINT(ep->in,
								   ep->num));
		} else {
			pch_udc_ep_clear_stall(ep);
		}
		ret = 0;
	} else {
		ret = -EAGAIN;
	}
	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
	return ret;
}
1744
/**
 * pch_udc_pcd_set_wedge() - This function wedges the endpoint: the endpoint
 *				is stalled and the protocol-stall flag is set
 * @usbep:	Reference to the USB endpoint structure
 *
 * Return codes:
 *	0:		Success
 *	-EINVAL:	bad endpoint
 *	-ESHUTDOWN:	no gadget driver bound / speed unknown
 *	-EAGAIN:	requests still pending on the endpoint
 */
static int pch_udc_pcd_set_wedge(struct usb_ep *usbep)
{
	struct pch_udc_ep *ep;
	struct pch_udc_dev *dev;
	unsigned long iflags;
	int ret;

	if (!usbep)
		return -EINVAL;
	ep = container_of(usbep, struct pch_udc_ep, ep);
	dev = ep->dev;
	if (!ep->desc && !ep->num)
		return -EINVAL;
	if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
		return -ESHUTDOWN;
	spin_lock_irqsave(&udc_stall_spinlock, iflags);
	if (!list_empty(&ep->queue)) {
		ret = -EAGAIN;
	} else {
		if (ep->num == PCH_UDC_EP0)
			ep->dev->stall = 1;
		pch_udc_ep_set_stall(ep);
		pch_udc_enable_ep_interrupts(ep->dev,
					     PCH_UDC_EPINT(ep->in, ep->num));
		/* wedged: stall persists beyond a host CLEAR_FEATURE */
		ep->dev->prot_stall = 1;
		ret = 0;
	}
	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
	return ret;
}
1785
1786/**
1787 * pch_udc_pcd_fifo_flush() - This function Flush the FIFO of specified endpoint
1788 * @usbep: Reference to the USB endpoint structure
1789 */
1790static void pch_udc_pcd_fifo_flush(struct usb_ep *usbep)
1791{
1792 struct pch_udc_ep *ep;
1793
1794 if (!usbep)
1795 return;
1796
1797 ep = container_of(usbep, struct pch_udc_ep, ep);
1798 if (ep->desc || !ep->num)
1799 pch_udc_ep_fifo_flush(ep, ep->in);
1800}
1801
/* Per-endpoint operations table exported to the gadget layer */
static const struct usb_ep_ops pch_udc_ep_ops = {
	.enable = pch_udc_pcd_ep_enable,
	.disable = pch_udc_pcd_ep_disable,
	.alloc_request = pch_udc_alloc_request,
	.free_request = pch_udc_free_request,
	.queue = pch_udc_pcd_queue,
	.dequeue = pch_udc_pcd_dequeue,
	.set_halt = pch_udc_pcd_set_halt,
	.set_wedge = pch_udc_pcd_set_wedge,
	.fifo_status = NULL,	/* FIFO fill-level reporting not implemented */
	.fifo_flush = pch_udc_pcd_fifo_flush,
};
1814
/**
 * pch_udc_init_setup_buff() - This function initializes the SETUP buffer
 * @td_stp:	Reference to the SETUP buffer structure
 */
static void pch_udc_init_setup_buff(struct pch_udc_stp_dma_desc *td_stp)
{
	/* monotonically increasing tag written into the reserved field —
	 * presumably for tracing reuse of the descriptor; confirm */
	static u32 pky_marker;

	if (!td_stp)
		return;
	td_stp->reserved = ++pky_marker;
	/* poison the previous SETUP packet contents */
	memset(&td_stp->request, 0xFF, sizeof td_stp->request);
	td_stp->status = PCH_UDC_BS_HST_RDY;
}
1829
/**
 * pch_udc_start_next_txrequest() - This function starts
 *					the next transmission request
 * @ep:	Reference to the endpoint structure
 */
static void pch_udc_start_next_txrequest(struct pch_udc_ep *ep)
{
	struct pch_udc_request *req;
	struct pch_udc_data_dma_desc *td_data;

	/* a transfer is already pending (P bit set) — nothing to start */
	if (pch_udc_read_ep_control(ep) & UDC_EPCTL_P)
		return;

	if (list_empty(&ep->queue))
		return;

	/* next request */
	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
	if (req->dma_going)
		return;
	if (!req->td_data)
		return;
	pch_udc_wait_ep_stall(ep);
	req->dma_going = 1;
	pch_udc_ep_set_ddptr(ep, 0);
	td_data = req->td_data;
	while (1) {
		/* hand every descriptor in the chain to the controller */
		td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
				  PCH_UDC_BS_HST_RDY;
		if ((td_data->status & PCH_UDC_DMA_LAST) == PCH_UDC_DMA_LAST)
			break;
		td_data = phys_to_virt(td_data->next);
	}
	pch_udc_ep_set_ddptr(ep, req->td_data_phys);
	pch_udc_set_dma(ep->dev, DMA_DIR_TX);
	pch_udc_ep_set_pd(ep);
	pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
	pch_udc_ep_clear_nak(ep);
}
1869
/**
 * pch_udc_complete_transfer() - This function completes a transfer
 * @ep:	Reference to the endpoint structure
 *
 * Completes the request at the head of the queue once its last descriptor
 * reports DMA done with a successful RX/TX status.
 */
static void pch_udc_complete_transfer(struct pch_udc_ep *ep)
{
	struct pch_udc_request *req;
	struct pch_udc_dev *dev = ep->dev;

	if (list_empty(&ep->queue))
		return;
	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
	if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
	    PCH_UDC_BS_DMA_DONE)
		return;
	if ((req->td_data_last->status & PCH_UDC_RXTX_STS) !=
	     PCH_UDC_RTS_SUCC) {
		dev_err(&dev->pdev->dev, "Invalid RXTX status (0x%08x) "
			"epstatus=0x%08x\n",
			(req->td_data_last->status & PCH_UDC_RXTX_STS),
			(int)(ep->epsts));
		return;
	}

	req->req.actual = req->req.length;
	/* reclaim the descriptors (HOST BUSY) before completing */
	req->td_data_last->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
	req->td_data->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
	complete_req(ep, req, 0);
	req->dma_going = 0;
	if (!list_empty(&ep->queue)) {
		pch_udc_wait_ep_stall(ep);
		pch_udc_ep_clear_nak(ep);
		pch_udc_enable_ep_interrupts(ep->dev,
					     PCH_UDC_EPINT(ep->in, ep->num));
	} else {
		pch_udc_disable_ep_interrupts(ep->dev,
					      PCH_UDC_EPINT(ep->in, ep->num));
	}
}
1909
/**
 * pch_udc_complete_receiver() - This function completes a receiver
 * @ep:	Reference to the endpoint structure
 *
 * Completes the OUT request at the head of the queue once its last
 * descriptor reports DMA done, then restarts reception for the next
 * queued request, if any.
 */
static void pch_udc_complete_receiver(struct pch_udc_ep *ep)
{
	struct pch_udc_request *req;
	struct pch_udc_dev *dev = ep->dev;
	unsigned int count;

	if (list_empty(&ep->queue))
		return;

	/* next request */
	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
	if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
	    PCH_UDC_BS_DMA_DONE)
		return;
	pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
	if ((req->td_data_last->status & PCH_UDC_RXTX_STS) !=
	    PCH_UDC_RTS_SUCC) {
		dev_err(&dev->pdev->dev, "Invalid RXTX status (0x%08x) "
			"epstatus=0x%08x\n",
			(req->td_data_last->status & PCH_UDC_RXTX_STS),
			(int)(ep->epsts));
		return;
	}
	count = req->td_data_last->status & PCH_UDC_RXTX_BYTES;

	/* on 64k packets the RXBYTES field is zero */
	if (!count && (req->req.length == UDC_DMA_MAXPACKET))
		count = UDC_DMA_MAXPACKET;
	req->td_data->status |= PCH_UDC_DMA_LAST;
	req->td_data_last->status |= PCH_UDC_BS_HST_BSY;

	req->dma_going = 0;
	req->req.actual = count;
	complete_req(ep, req, 0);
	/* If there is a new/failed requests try that now */
	if (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next, struct pch_udc_request, queue);
		pch_udc_start_rxrequest(ep, req);
	}
}
1954
/**
 * pch_udc_svc_data_in() - This function processes endpoint interrupts
 *				for IN endpoints
 * @dev:	Reference to the device structure
 * @ep_num:	Endpoint that generated the interrupt
 */
static void pch_udc_svc_data_in(struct pch_udc_dev *dev, int ep_num)
{
	u32 epsts;
	struct pch_udc_ep *ep;

	/* even ep[] slots hold the IN endpoints */
	ep = &dev->ep[2*ep_num];
	epsts = ep->epsts;
	ep->epsts = 0;

	if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA | UDC_EPSTS_HE |
		       UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
		       UDC_EPSTS_RSS | UDC_EPSTS_XFERDONE)))
		return;
	if ((epsts & UDC_EPSTS_BNA))
		return;
	if (epsts & UDC_EPSTS_HE)
		return;
	if (epsts & UDC_EPSTS_RSS) {
		pch_udc_ep_set_stall(ep);
		pch_udc_enable_ep_interrupts(ep->dev,
					     PCH_UDC_EPINT(ep->in, ep->num));
	}
	if (epsts & UDC_EPSTS_RCS) {
		/* host cleared the stall; honor it unless protocol-stalled */
		if (!dev->prot_stall) {
			pch_udc_ep_clear_stall(ep);
		} else {
			pch_udc_ep_set_stall(ep);
			pch_udc_enable_ep_interrupts(ep->dev,
						     PCH_UDC_EPINT(ep->in,
								   ep->num));
		}
	}
	if (epsts & UDC_EPSTS_TDC)
		pch_udc_complete_transfer(ep);
	/* On IN interrupt, provide data if we have any */
	if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_RSS) &&
	    !(epsts & UDC_EPSTS_TDC) && !(epsts & UDC_EPSTS_TXEMPTY))
		pch_udc_start_next_txrequest(ep);
}
1999
2000/**
2001 * pch_udc_svc_data_out() - Handles interrupts from OUT endpoint
2002 * @dev: Reference to the device structure
2003 * @ep_num: Endpoint that generated the interrupt
2004 */
2005static void pch_udc_svc_data_out(struct pch_udc_dev *dev, int ep_num)
2006{
2007 u32 epsts;
2008 struct pch_udc_ep *ep;
2009 struct pch_udc_request *req = NULL;
2010
2011 ep = &dev->ep[2*ep_num + 1];
2012 epsts = ep->epsts;
2013 ep->epsts = 0;
2014
2015 if ((epsts & UDC_EPSTS_BNA) && (!list_empty(&ep->queue))) {
2016 /* next request */
2017 req = list_entry(ep->queue.next, struct pch_udc_request,
2018 queue);
2019 if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
2020 PCH_UDC_BS_DMA_DONE) {
2021 if (!req->dma_going)
2022 pch_udc_start_rxrequest(ep, req);
2023 return;
2024 }
2025 }
2026 if (epsts & UDC_EPSTS_HE)
2027 return;
2028 if (epsts & UDC_EPSTS_RSS)
2029 pch_udc_ep_set_stall(ep);
2030 pch_udc_enable_ep_interrupts(ep->dev,
2031 PCH_UDC_EPINT(ep->in, ep->num));
2032 if (epsts & UDC_EPSTS_RCS) {
2033 if (!dev->prot_stall) {
2034 pch_udc_ep_clear_stall(ep);
2035 } else {
2036 pch_udc_ep_set_stall(ep);
2037 pch_udc_enable_ep_interrupts(ep->dev,
2038 PCH_UDC_EPINT(ep->in, ep->num));
2039 }
2040 }
2041 if (((epsts & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2042 UDC_EPSTS_OUT_DATA) {
2043 if (ep->dev->prot_stall == 1) {
2044 pch_udc_ep_set_stall(ep);
2045 pch_udc_enable_ep_interrupts(ep->dev,
2046 PCH_UDC_EPINT(ep->in, ep->num));
2047 } else {
2048 pch_udc_complete_receiver(ep);
2049 }
2050 }
2051 if (list_empty(&ep->queue))
2052 pch_udc_set_dma(dev, DMA_DIR_RX);
2053}
2054
/**
 * pch_udc_svc_control_in() - Handle Control IN endpoint interrupts
 * @dev:	Reference to the device structure
 */
static void pch_udc_svc_control_in(struct pch_udc_dev *dev)
{
	u32 epsts;
	struct pch_udc_ep *ep;

	ep = &dev->ep[UDC_EP0IN_IDX];
	epsts = ep->epsts;
	ep->epsts = 0;

	if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA | UDC_EPSTS_HE |
		       UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
		       UDC_EPSTS_XFERDONE)))
		return;
	if ((epsts & UDC_EPSTS_BNA))
		return;
	if (epsts & UDC_EPSTS_HE)
		return;
	/* transfer-done completes the head request unless ep0 is stalled */
	if ((epsts & UDC_EPSTS_TDC) && (!dev->stall))
		pch_udc_complete_transfer(ep);
	/* On IN interrupt, provide data if we have any */
	if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_TDC) &&
	    !(epsts & UDC_EPSTS_TXEMPTY))
		pch_udc_start_next_txrequest(ep);
}
2083
/**
 * pch_udc_svc_control_out() - Routine that handles Control
 *				OUT endpoint interrupts
 * @dev:	Reference to the device structure
 */
static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
{
	u32 stat;
	int setup_supported;
	struct pch_udc_ep *ep;

	ep = &dev->ep[UDC_EP0OUT_IDX];
	stat = ep->epsts;
	ep->epsts = 0;

	/* If setup data */
	if (((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
	    UDC_EPSTS_OUT_SETUP) {
		dev->stall = 0;
		dev->ep[UDC_EP0IN_IDX].halted = 0;
		dev->ep[UDC_EP0OUT_IDX].halted = 0;
		/* In data not ready */
		pch_udc_ep_set_nak(&(dev->ep[UDC_EP0IN_IDX]));
		/* snapshot the SETUP packet, then re-arm the SETUP buffer */
		dev->setup_data = ep->td_stp->request;
		pch_udc_init_setup_buff(ep->td_stp);
		pch_udc_clear_dma(dev, DMA_DIR_TX);
		pch_udc_ep_fifo_flush(&(dev->ep[UDC_EP0IN_IDX]),
				      dev->ep[UDC_EP0IN_IDX].in);
		if ((dev->setup_data.bRequestType & USB_DIR_IN))
			dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
		else /* OUT */
			dev->gadget.ep0 = &ep->ep;
		/* lock dropped around the gadget setup callback */
		spin_unlock(&dev->lock);
		/* If Mass storage Reset */
		if ((dev->setup_data.bRequestType == 0x21) &&
		    (dev->setup_data.bRequest == 0xFF))
			dev->prot_stall = 0;
		/* call gadget with setup data received */
		setup_supported = dev->driver->setup(&dev->gadget,
						     &dev->setup_data);
		spin_lock(&dev->lock);
		/* ep0 in returns data on IN phase */
		if (setup_supported >= 0 && setup_supported <
		    UDC_EP0IN_MAX_PKT_SIZE) {
			pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
			/* Gadget would have queued a request when
			 * we called the setup */
			pch_udc_set_dma(dev, DMA_DIR_RX);
			pch_udc_ep_clear_nak(ep);
		} else if (setup_supported < 0) {
			/* if unsupported request, then stall */
			pch_udc_ep_set_stall(&(dev->ep[UDC_EP0IN_IDX]));
			pch_udc_enable_ep_interrupts(ep->dev,
						     PCH_UDC_EPINT(ep->in,
								   ep->num));
			dev->stall = 0;
			pch_udc_set_dma(dev, DMA_DIR_RX);
		} else {
			dev->waiting_zlp_ack = 1;
		}
	} else if ((((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
		    UDC_EPSTS_OUT_DATA) && !dev->stall) {
		if (list_empty(&ep->queue)) {
			dev_err(&dev->pdev->dev, "%s: No request\n", __func__);
			ep->td_data->status = (ep->td_data->status &
					       ~PCH_UDC_BUFF_STS) |
					      PCH_UDC_BS_HST_RDY;
			pch_udc_set_dma(dev, DMA_DIR_RX);
		} else {
			/* control write */
			/* next function will pick up and clear the status */
			ep->epsts = stat;

			pch_udc_svc_data_out(dev, 0);
			/* re-program desc. pointer for possible ZLPs */
			pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
			pch_udc_set_dma(dev, DMA_DIR_RX);
		}
	}
	pch_udc_ep_set_rrdy(ep);
}
2164
2165
2166/**
2167 * pch_udc_postsvc_epinters() - This function enables end point interrupts
2168 * and clears NAK status
2169 * @dev: Reference to the device structure
2170 * @ep_num: End point number
2171 */
2172static void pch_udc_postsvc_epinters(struct pch_udc_dev *dev, int ep_num)
2173{
2174 struct pch_udc_ep *ep;
2175 struct pch_udc_request *req;
2176
2177 ep = &dev->ep[2*ep_num];
2178 if (!list_empty(&ep->queue)) {
2179 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2180 pch_udc_enable_ep_interrupts(ep->dev,
2181 PCH_UDC_EPINT(ep->in, ep->num));
2182 pch_udc_ep_clear_nak(ep);
2183 }
2184}
2185
/**
 * pch_udc_read_all_epstatus() - This function reads all endpoint status
 * @dev:	Reference to the device structure
 * @ep_intr:	Status of endpoint interrupt
 */
static void pch_udc_read_all_epstatus(struct pch_udc_dev *dev, u32 ep_intr)
{
	int i;
	struct pch_udc_ep *ep;

	/* presumably IN ep interrupt bits occupy bits 0..15 and OUT bits
	 * 16..31 (matches UDC_EPINT_OUT_EP0 << num used elsewhere) */
	for (i = 0; i < PCH_UDC_USED_EP_NUM; i++) {
		/* IN */
		if (ep_intr & (0x1 << i)) {
			ep = &dev->ep[2*i];
			ep->epsts = pch_udc_read_ep_status(ep);
			pch_udc_clear_ep_status(ep, ep->epsts);
		}
		/* OUT */
		if (ep_intr & (0x10000 << i)) {
			ep = &dev->ep[2*i+1];
			ep->epsts = pch_udc_read_ep_status(ep);
			pch_udc_clear_ep_status(ep, ep->epsts);
		}
	}
}
2211
/**
 * pch_udc_activate_control_ep() - This function enables the control endpoints
 *					for traffic after a reset
 * @dev:	Reference to the device structure
 */
static void pch_udc_activate_control_ep(struct pch_udc_dev *dev)
{
	struct pch_udc_ep *ep;
	u32 val;

	/* Setup the IN endpoint */
	ep = &dev->ep[UDC_EP0IN_IDX];
	pch_udc_clear_ep_control(ep);
	pch_udc_ep_fifo_flush(ep, ep->in);
	pch_udc_ep_set_bufsz(ep, UDC_EP0IN_BUFF_SIZE, ep->in);
	pch_udc_ep_set_maxpkt(ep, UDC_EP0IN_MAX_PKT_SIZE);
	/* Initialize the IN EP Descriptor: ep0-IN uses no descriptors here */
	ep->td_data = NULL;
	ep->td_stp = NULL;
	ep->td_data_phys = 0;
	ep->td_stp_phys = 0;

	/* Setup the OUT endpoint */
	ep = &dev->ep[UDC_EP0OUT_IDX];
	pch_udc_clear_ep_control(ep);
	pch_udc_ep_fifo_flush(ep, ep->in);
	pch_udc_ep_set_bufsz(ep, UDC_EP0OUT_BUFF_SIZE, ep->in);
	pch_udc_ep_set_maxpkt(ep, UDC_EP0OUT_MAX_PKT_SIZE);
	val = UDC_EP0OUT_MAX_PKT_SIZE << UDC_CSR_NE_MAX_PKT_SHIFT;
	pch_udc_write_csr(ep->dev, val, UDC_EP0OUT_IDX);

	/* Initialize the SETUP buffer */
	pch_udc_init_setup_buff(ep->td_stp);
	/* Write the pointer address of dma descriptor */
	pch_udc_ep_set_subptr(ep, ep->td_stp_phys);
	/* Write the pointer address of Setup descriptor */
	pch_udc_ep_set_ddptr(ep, ep->td_data_phys);

	/* Initialize the dma descriptor: single self-linked descriptor */
	ep->td_data->status = PCH_UDC_DMA_LAST;
	ep->td_data->dataptr = dev->dma_addr;
	ep->td_data->next = ep->td_data_phys;

	pch_udc_ep_clear_nak(ep);
}
2257
2258
/**
 * pch_udc_svc_ur_interrupt() - This function handles a USB reset interrupt
 * @dev:	Reference to driver structure
 */
static void pch_udc_svc_ur_interrupt(struct pch_udc_dev *dev)
{
	struct pch_udc_ep *ep;
	int i;

	pch_udc_clear_dma(dev, DMA_DIR_TX);
	pch_udc_clear_dma(dev, DMA_DIR_RX);
	/* Mask all endpoint interrupts */
	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
	/* clear all endpoint interrupts */
	pch_udc_write_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);

	/* reset every endpoint's hardware state */
	for (i = 0; i < PCH_UDC_EP_NUM; i++) {
		ep = &dev->ep[i];
		pch_udc_clear_ep_status(ep, UDC_EPSTS_ALL_CLR_MASK);
		pch_udc_clear_ep_control(ep);
		pch_udc_ep_set_ddptr(ep, 0);
		pch_udc_write_csr(ep->dev, 0x00, i);
	}
	dev->stall = 0;
	dev->prot_stall = 0;
	dev->waiting_zlp_ack = 0;
	dev->set_cfg_not_acked = 0;

	/* disable ep to empty req queue. Skip the control EP's */
	for (i = 0; i < (PCH_UDC_USED_EP_NUM*2); i++) {
		ep = &dev->ep[i];
		pch_udc_ep_set_nak(ep);
		pch_udc_ep_fifo_flush(ep, ep->in);
		/* Complete request queue */
		empty_req_queue(ep);
	}
	/* tell the gadget driver the host went away */
	if (dev->driver && dev->driver->disconnect)
		dev->driver->disconnect(&dev->gadget);
}
2298
2299/**
2300 * pch_udc_svc_enum_interrupt() - This function handles a USB speed enumeration
2301 * done interrupt
2302 * @dev: Reference to driver structure
2303 */
2304static void pch_udc_svc_enum_interrupt(struct pch_udc_dev *dev)
2305{
2306 u32 dev_stat, dev_speed;
2307 u32 speed = USB_SPEED_FULL;
2308
2309 dev_stat = pch_udc_read_device_status(dev);
2310 dev_speed = (dev_stat & UDC_DEVSTS_ENUM_SPEED_MASK) >>
2311 UDC_DEVSTS_ENUM_SPEED_SHIFT;
2312 switch (dev_speed) {
2313 case UDC_DEVSTS_ENUM_SPEED_HIGH:
2314 speed = USB_SPEED_HIGH;
2315 break;
2316 case UDC_DEVSTS_ENUM_SPEED_FULL:
2317 speed = USB_SPEED_FULL;
2318 break;
2319 case UDC_DEVSTS_ENUM_SPEED_LOW:
2320 speed = USB_SPEED_LOW;
2321 break;
2322 default:
2323 BUG();
2324 }
2325 dev->gadget.speed = speed;
2326 pch_udc_activate_control_ep(dev);
2327 pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 | UDC_EPINT_OUT_EP0);
2328 pch_udc_set_dma(dev, DMA_DIR_TX);
2329 pch_udc_set_dma(dev, DMA_DIR_RX);
2330 pch_udc_ep_set_rrdy(&(dev->ep[UDC_EP0OUT_IDX]));
2331}
2332
2333/**
2334 * pch_udc_svc_intf_interrupt() - This function handles a set interface
2335 * interrupt
2336 * @dev: Reference to driver structure
2337 */
2338static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev)
2339{
2340 u32 reg, dev_stat = 0;
2341 int i, ret;
2342
2343 dev_stat = pch_udc_read_device_status(dev);
2344 dev->cfg_data.cur_intf = (dev_stat & UDC_DEVSTS_INTF_MASK) >>
2345 UDC_DEVSTS_INTF_SHIFT;
2346 dev->cfg_data.cur_alt = (dev_stat & UDC_DEVSTS_ALT_MASK) >>
2347 UDC_DEVSTS_ALT_SHIFT;
2348 dev->set_cfg_not_acked = 1;
2349 /* Construct the usb request for gadget driver and inform it */
2350 memset(&dev->setup_data, 0 , sizeof dev->setup_data);
2351 dev->setup_data.bRequest = USB_REQ_SET_INTERFACE;
2352 dev->setup_data.bRequestType = USB_RECIP_INTERFACE;
2353 dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_alt);
2354 dev->setup_data.wIndex = cpu_to_le16(dev->cfg_data.cur_intf);
2355 /* programm the Endpoint Cfg registers */
2356 /* Only one end point cfg register */
2357 reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
2358 reg = (reg & ~UDC_CSR_NE_INTF_MASK) |
2359 (dev->cfg_data.cur_intf << UDC_CSR_NE_INTF_SHIFT);
2360 reg = (reg & ~UDC_CSR_NE_ALT_MASK) |
2361 (dev->cfg_data.cur_alt << UDC_CSR_NE_ALT_SHIFT);
2362 pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
2363 for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
2364 /* clear stall bits */
2365 pch_udc_ep_clear_stall(&(dev->ep[i]));
2366 dev->ep[i].halted = 0;
2367 }
2368 dev->stall = 0;
2369 spin_unlock(&dev->lock);
2370 ret = dev->driver->setup(&dev->gadget, &dev->setup_data);
2371 spin_lock(&dev->lock);
2372}
2373
2374/**
2375 * pch_udc_svc_cfg_interrupt() - This function handles a set configuration
2376 * interrupt
2377 * @dev: Reference to driver structure
2378 */
2379static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev)
2380{
2381 int i, ret;
2382 u32 reg, dev_stat = 0;
2383
2384 dev_stat = pch_udc_read_device_status(dev);
2385 dev->set_cfg_not_acked = 1;
2386 dev->cfg_data.cur_cfg = (dev_stat & UDC_DEVSTS_CFG_MASK) >>
2387 UDC_DEVSTS_CFG_SHIFT;
2388 /* make usb request for gadget driver */
2389 memset(&dev->setup_data, 0 , sizeof dev->setup_data);
2390 dev->setup_data.bRequest = USB_REQ_SET_CONFIGURATION;
2391 dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_cfg);
2392 /* program the NE registers */
2393 /* Only one end point cfg register */
2394 reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
2395 reg = (reg & ~UDC_CSR_NE_CFG_MASK) |
2396 (dev->cfg_data.cur_cfg << UDC_CSR_NE_CFG_SHIFT);
2397 pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
2398 for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
2399 /* clear stall bits */
2400 pch_udc_ep_clear_stall(&(dev->ep[i]));
2401 dev->ep[i].halted = 0;
2402 }
2403 dev->stall = 0;
2404
2405 /* call gadget zero with setup data received */
2406 spin_unlock(&dev->lock);
2407 ret = dev->driver->setup(&dev->gadget, &dev->setup_data);
2408 spin_lock(&dev->lock);
2409}
2410
2411/**
2412 * pch_udc_dev_isr() - This function services device interrupts
2413 * by invoking appropriate routines.
2414 * @dev: Reference to the device structure
2415 * @dev_intr: The Device interrupt status.
2416 */
2417static void pch_udc_dev_isr(struct pch_udc_dev *dev, u32 dev_intr)
2418{
2419 /* USB Reset Interrupt */
2420 if (dev_intr & UDC_DEVINT_UR)
2421 pch_udc_svc_ur_interrupt(dev);
2422 /* Enumeration Done Interrupt */
2423 if (dev_intr & UDC_DEVINT_ENUM)
2424 pch_udc_svc_enum_interrupt(dev);
2425 /* Set Interface Interrupt */
2426 if (dev_intr & UDC_DEVINT_SI)
2427 pch_udc_svc_intf_interrupt(dev);
2428 /* Set Config Interrupt */
2429 if (dev_intr & UDC_DEVINT_SC)
2430 pch_udc_svc_cfg_interrupt(dev);
2431 /* USB Suspend interrupt */
2432 if (dev_intr & UDC_DEVINT_US)
2433 dev_dbg(&dev->pdev->dev, "USB_SUSPEND\n");
2434 /* Clear the SOF interrupt, if enabled */
2435 if (dev_intr & UDC_DEVINT_SOF)
2436 dev_dbg(&dev->pdev->dev, "SOF\n");
2437 /* ES interrupt, IDLE > 3ms on the USB */
2438 if (dev_intr & UDC_DEVINT_ES)
2439 dev_dbg(&dev->pdev->dev, "ES\n");
2440 /* RWKP interrupt */
2441 if (dev_intr & UDC_DEVINT_RWKP)
2442 dev_dbg(&dev->pdev->dev, "RWKP\n");
2443}
2444
2445/**
2446 * pch_udc_isr() - This function handles interrupts from the PCH USB Device
2447 * @irq: Interrupt request number
2448 * @dev: Reference to the device structure
2449 */
2450static irqreturn_t pch_udc_isr(int irq, void *pdev)
2451{
2452 struct pch_udc_dev *dev = (struct pch_udc_dev *) pdev;
2453 u32 dev_intr, ep_intr;
2454 int i;
2455
2456 dev_intr = pch_udc_read_device_interrupts(dev);
2457 ep_intr = pch_udc_read_ep_interrupts(dev);
2458
2459 if (dev_intr)
2460 /* Clear device interrupts */
2461 pch_udc_write_device_interrupts(dev, dev_intr);
2462 if (ep_intr)
2463 /* Clear ep interrupts */
2464 pch_udc_write_ep_interrupts(dev, ep_intr);
2465 if (!dev_intr && !ep_intr)
2466 return IRQ_NONE;
2467 spin_lock(&dev->lock);
2468 if (dev_intr)
2469 pch_udc_dev_isr(dev, dev_intr);
2470 if (ep_intr) {
2471 pch_udc_read_all_epstatus(dev, ep_intr);
2472 /* Process Control In interrupts, if present */
2473 if (ep_intr & UDC_EPINT_IN_EP0) {
2474 pch_udc_svc_control_in(dev);
2475 pch_udc_postsvc_epinters(dev, 0);
2476 }
2477 /* Process Control Out interrupts, if present */
2478 if (ep_intr & UDC_EPINT_OUT_EP0)
2479 pch_udc_svc_control_out(dev);
2480 /* Process data in end point interrupts */
2481 for (i = 1; i < PCH_UDC_USED_EP_NUM; i++) {
2482 if (ep_intr & (1 << i)) {
2483 pch_udc_svc_data_in(dev, i);
2484 pch_udc_postsvc_epinters(dev, i);
2485 }
2486 }
2487 /* Process data out end point interrupts */
2488 for (i = UDC_EPINT_OUT_SHIFT + 1; i < (UDC_EPINT_OUT_SHIFT +
2489 PCH_UDC_USED_EP_NUM); i++)
2490 if (ep_intr & (1 << i))
2491 pch_udc_svc_data_out(dev, i -
2492 UDC_EPINT_OUT_SHIFT);
2493 }
2494 spin_unlock(&dev->lock);
2495 return IRQ_HANDLED;
2496}
2497
2498/**
2499 * pch_udc_setup_ep0() - This function enables control endpoint for traffic
2500 * @dev: Reference to the device structure
2501 */
2502static void pch_udc_setup_ep0(struct pch_udc_dev *dev)
2503{
2504 /* enable ep0 interrupts */
2505 pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 |
2506 UDC_EPINT_OUT_EP0);
2507 /* enable device interrupts */
2508 pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
2509 UDC_DEVINT_ES | UDC_DEVINT_ENUM |
2510 UDC_DEVINT_SI | UDC_DEVINT_SC);
2511}
2512
2513/**
2514 * gadget_release() - Free the gadget driver private data
2515 * @pdev reference to struct pci_dev
2516 */
2517static void gadget_release(struct device *pdev)
2518{
2519 struct pch_udc_dev *dev = dev_get_drvdata(pdev);
2520
2521 kfree(dev);
2522}
2523
2524/**
2525 * pch_udc_pcd_reinit() - This API initializes the endpoint structures
2526 * @dev: Reference to the driver structure
2527 */
2528static void pch_udc_pcd_reinit(struct pch_udc_dev *dev)
2529{
2530 const char *const ep_string[] = {
2531 ep0_string, "ep0out", "ep1in", "ep1out", "ep2in", "ep2out",
2532 "ep3in", "ep3out", "ep4in", "ep4out", "ep5in", "ep5out",
2533 "ep6in", "ep6out", "ep7in", "ep7out", "ep8in", "ep8out",
2534 "ep9in", "ep9out", "ep10in", "ep10out", "ep11in", "ep11out",
2535 "ep12in", "ep12out", "ep13in", "ep13out", "ep14in", "ep14out",
2536 "ep15in", "ep15out",
2537 };
2538 int i;
2539
2540 dev->gadget.speed = USB_SPEED_UNKNOWN;
2541 INIT_LIST_HEAD(&dev->gadget.ep_list);
2542
2543 /* Initialize the endpoints structures */
2544 memset(dev->ep, 0, sizeof dev->ep);
2545 for (i = 0; i < PCH_UDC_EP_NUM; i++) {
2546 struct pch_udc_ep *ep = &dev->ep[i];
2547 ep->dev = dev;
2548 ep->halted = 1;
2549 ep->num = i / 2;
2550 ep->in = ~i & 1;
2551 ep->ep.name = ep_string[i];
2552 ep->ep.ops = &pch_udc_ep_ops;
2553 if (ep->in)
2554 ep->offset_addr = ep->num * UDC_EP_REG_SHIFT;
2555 else
2556 ep->offset_addr = (UDC_EPINT_OUT_SHIFT + ep->num) *
2557 UDC_EP_REG_SHIFT;
2558 /* need to set ep->ep.maxpacket and set Default Configuration?*/
2559 ep->ep.maxpacket = UDC_BULK_MAX_PKT_SIZE;
2560 list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
2561 INIT_LIST_HEAD(&ep->queue);
2562 }
2563 dev->ep[UDC_EP0IN_IDX].ep.maxpacket = UDC_EP0IN_MAX_PKT_SIZE;
2564 dev->ep[UDC_EP0OUT_IDX].ep.maxpacket = UDC_EP0OUT_MAX_PKT_SIZE;
2565
2566 dev->dma_addr = pci_map_single(dev->pdev, dev->ep0out_buf, 256,
2567 PCI_DMA_FROMDEVICE);
2568
2569 /* remove ep0 in and out from the list. They have own pointer */
2570 list_del_init(&dev->ep[UDC_EP0IN_IDX].ep.ep_list);
2571 list_del_init(&dev->ep[UDC_EP0OUT_IDX].ep.ep_list);
2572
2573 dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
2574 INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
2575}
2576
2577/**
2578 * pch_udc_pcd_init() - This API initializes the driver structure
2579 * @dev: Reference to the driver structure
2580 *
2581 * Return codes:
2582 * 0: Success
2583 */
2584static int pch_udc_pcd_init(struct pch_udc_dev *dev)
2585{
2586 pch_udc_init(dev);
2587 pch_udc_pcd_reinit(dev);
2588 return 0;
2589}
2590
2591/**
2592 * init_dma_pools() - create dma pools during initialization
2593 * @pdev: reference to struct pci_dev
2594 */
2595static int init_dma_pools(struct pch_udc_dev *dev)
2596{
2597 struct pch_udc_stp_dma_desc *td_stp;
2598 struct pch_udc_data_dma_desc *td_data;
2599
2600 /* DMA setup */
2601 dev->data_requests = pci_pool_create("data_requests", dev->pdev,
2602 sizeof(struct pch_udc_data_dma_desc), 0, 0);
2603 if (!dev->data_requests) {
2604 dev_err(&dev->pdev->dev, "%s: can't get request data pool\n",
2605 __func__);
2606 return -ENOMEM;
2607 }
2608
2609 /* dma desc for setup data */
2610 dev->stp_requests = pci_pool_create("setup requests", dev->pdev,
2611 sizeof(struct pch_udc_stp_dma_desc), 0, 0);
2612 if (!dev->stp_requests) {
2613 dev_err(&dev->pdev->dev, "%s: can't get setup request pool\n",
2614 __func__);
2615 return -ENOMEM;
2616 }
2617 /* setup */
2618 td_stp = pci_pool_alloc(dev->stp_requests, GFP_KERNEL,
2619 &dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
2620 if (!td_stp) {
2621 dev_err(&dev->pdev->dev,
2622 "%s: can't allocate setup dma descriptor\n", __func__);
2623 return -ENOMEM;
2624 }
2625 dev->ep[UDC_EP0OUT_IDX].td_stp = td_stp;
2626
2627 /* data: 0 packets !? */
2628 td_data = pci_pool_alloc(dev->data_requests, GFP_KERNEL,
2629 &dev->ep[UDC_EP0OUT_IDX].td_data_phys);
2630 if (!td_data) {
2631 dev_err(&dev->pdev->dev,
2632 "%s: can't allocate data dma descriptor\n", __func__);
2633 return -ENOMEM;
2634 }
2635 dev->ep[UDC_EP0OUT_IDX].td_data = td_data;
2636 dev->ep[UDC_EP0IN_IDX].td_stp = NULL;
2637 dev->ep[UDC_EP0IN_IDX].td_stp_phys = 0;
2638 dev->ep[UDC_EP0IN_IDX].td_data = NULL;
2639 dev->ep[UDC_EP0IN_IDX].td_data_phys = 0;
2640 return 0;
2641}
2642
2643int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
2644 int (*bind)(struct usb_gadget *))
2645{
2646 struct pch_udc_dev *dev = pch_udc;
2647 int retval;
2648
2649 if (!driver || (driver->speed == USB_SPEED_UNKNOWN) || !bind ||
2650 !driver->setup || !driver->unbind || !driver->disconnect) {
2651 dev_err(&dev->pdev->dev,
2652 "%s: invalid driver parameter\n", __func__);
2653 return -EINVAL;
2654 }
2655
2656 if (!dev)
2657 return -ENODEV;
2658
2659 if (dev->driver) {
2660 dev_err(&dev->pdev->dev, "%s: already bound\n", __func__);
2661 return -EBUSY;
2662 }
2663 driver->driver.bus = NULL;
2664 dev->driver = driver;
2665 dev->gadget.dev.driver = &driver->driver;
2666
2667 /* Invoke the bind routine of the gadget driver */
2668 retval = bind(&dev->gadget);
2669
2670 if (retval) {
2671 dev_err(&dev->pdev->dev, "%s: binding to %s returning %d\n",
2672 __func__, driver->driver.name, retval);
2673 dev->driver = NULL;
2674 dev->gadget.dev.driver = NULL;
2675 return retval;
2676 }
2677 /* get ready for ep0 traffic */
2678 pch_udc_setup_ep0(dev);
2679
2680 /* clear SD */
2681 pch_udc_clear_disconnect(dev);
2682
2683 dev->connected = 1;
2684 return 0;
2685}
2686EXPORT_SYMBOL(usb_gadget_probe_driver);
2687
/**
 * usb_gadget_unregister_driver() - Unbind the gadget driver from this UDC
 * @driver:	the driver previously bound with usb_gadget_probe_driver()
 *
 * Masks device interrupts, unbinds the driver and asserts soft-disconnect
 * so the host sees the device drop off the bus.
 *
 * Return: 0 on success; -ENODEV if no UDC has been probed; -EINVAL if
 * @driver is not the currently bound driver.
 */
int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
{
	struct pch_udc_dev *dev = pch_udc;

	if (!dev)
		return -ENODEV;

	if (!driver || (driver != dev->driver)) {
		dev_err(&dev->pdev->dev,
			"%s: invalid driver parameter\n", __func__);
		return -EINVAL;
	}

	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);

	/* Assumes that there are no pending requests with this driver */
	driver->unbind(&dev->gadget);
	dev->gadget.dev.driver = NULL;
	dev->driver = NULL;
	dev->connected = 0;

	/* set SD (soft disconnect) */
	pch_udc_set_disconnect(dev);
	return 0;
}
EXPORT_SYMBOL(usb_gadget_unregister_driver);
2714
/* PCI ->shutdown callback: quiesce the controller and drop off the bus. */
static void pch_udc_shutdown(struct pci_dev *pdev)
{
	struct pch_udc_dev *dev = pci_get_drvdata(pdev);

	/* Mask everything so no further interrupts are raised */
	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);

	/* disable the pullup so the host will think we're gone */
	pch_udc_set_disconnect(dev);
}
2725
2726static void pch_udc_remove(struct pci_dev *pdev)
2727{
2728 struct pch_udc_dev *dev = pci_get_drvdata(pdev);
2729
2730 /* gadget driver must not be registered */
2731 if (dev->driver)
2732 dev_err(&pdev->dev,
2733 "%s: gadget driver still bound!!!\n", __func__);
2734 /* dma pool cleanup */
2735 if (dev->data_requests)
2736 pci_pool_destroy(dev->data_requests);
2737
2738 if (dev->stp_requests) {
2739 /* cleanup DMA desc's for ep0in */
2740 if (dev->ep[UDC_EP0OUT_IDX].td_stp) {
2741 pci_pool_free(dev->stp_requests,
2742 dev->ep[UDC_EP0OUT_IDX].td_stp,
2743 dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
2744 }
2745 if (dev->ep[UDC_EP0OUT_IDX].td_data) {
2746 pci_pool_free(dev->stp_requests,
2747 dev->ep[UDC_EP0OUT_IDX].td_data,
2748 dev->ep[UDC_EP0OUT_IDX].td_data_phys);
2749 }
2750 pci_pool_destroy(dev->stp_requests);
2751 }
2752
2753 pch_udc_exit(dev);
2754
2755 if (dev->irq_registered)
2756 free_irq(pdev->irq, dev);
2757 if (dev->base_addr)
2758 iounmap(dev->base_addr);
2759 if (dev->mem_region)
2760 release_mem_region(dev->phys_addr,
2761 pci_resource_len(pdev, PCH_UDC_PCI_BAR));
2762 if (dev->active)
2763 pci_disable_device(pdev);
2764 if (dev->registered)
2765 device_unregister(&dev->gadget.dev);
2766 kfree(dev);
2767 pci_set_drvdata(pdev, NULL);
2768}
2769
2770#ifdef CONFIG_PM
/*
 * PCI ->suspend: mask all interrupts, disable the device, save config space
 * and enter the requested low-power state.  Wake from D3hot is not armed.
 */
static int pch_udc_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct pch_udc_dev *dev = pci_get_drvdata(pdev);

	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);

	pci_disable_device(pdev);
	pci_enable_wake(pdev, PCI_D3hot, 0);

	if (pci_save_state(pdev)) {
		dev_err(&pdev->dev,
			"%s: could not save PCI config state\n", __func__);
		/* NOTE(review): -ENOMEM is an odd errno for this - confirm */
		return -ENOMEM;
	}
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
2789
/*
 * PCI ->resume: return to D0, restore config space and re-enable the
 * device.  Controller/endpoint state is rebuilt by subsequent USB reset
 * handling, not here.
 */
static int pch_udc_resume(struct pci_dev *pdev)
{
	int ret;

	pci_set_power_state(pdev, PCI_D0);
	ret = pci_restore_state(pdev);
	if (ret) {
		dev_err(&pdev->dev, "%s: pci_restore_state failed\n", __func__);
		return ret;
	}
	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "%s: pci_enable_device failed\n", __func__);
		return ret;
	}
	pci_enable_wake(pdev, PCI_D3hot, 0);
	return 0;
}
2808#else
2809#define pch_udc_suspend NULL
2810#define pch_udc_resume NULL
2811#endif /* CONFIG_PM */
2812
2813static int pch_udc_probe(struct pci_dev *pdev,
2814 const struct pci_device_id *id)
2815{
2816 unsigned long resource;
2817 unsigned long len;
2818 int retval;
2819 struct pch_udc_dev *dev;
2820
2821 /* one udc only */
2822 if (pch_udc) {
2823 pr_err("%s: already probed\n", __func__);
2824 return -EBUSY;
2825 }
2826 /* init */
2827 dev = kzalloc(sizeof *dev, GFP_KERNEL);
2828 if (!dev) {
2829 pr_err("%s: no memory for device structure\n", __func__);
2830 return -ENOMEM;
2831 }
2832 /* pci setup */
2833 if (pci_enable_device(pdev) < 0) {
2834 kfree(dev);
2835 pr_err("%s: pci_enable_device failed\n", __func__);
2836 return -ENODEV;
2837 }
2838 dev->active = 1;
2839 pci_set_drvdata(pdev, dev);
2840
2841 /* PCI resource allocation */
2842 resource = pci_resource_start(pdev, 1);
2843 len = pci_resource_len(pdev, 1);
2844
2845 if (!request_mem_region(resource, len, KBUILD_MODNAME)) {
2846 dev_err(&pdev->dev, "%s: pci device used already\n", __func__);
2847 retval = -EBUSY;
2848 goto finished;
2849 }
2850 dev->phys_addr = resource;
2851 dev->mem_region = 1;
2852
2853 dev->base_addr = ioremap_nocache(resource, len);
2854 if (!dev->base_addr) {
2855 pr_err("%s: device memory cannot be mapped\n", __func__);
2856 retval = -ENOMEM;
2857 goto finished;
2858 }
2859 if (!pdev->irq) {
2860 dev_err(&pdev->dev, "%s: irq not set\n", __func__);
2861 retval = -ENODEV;
2862 goto finished;
2863 }
2864 pch_udc = dev;
2865 /* initialize the hardware */
2866 if (pch_udc_pcd_init(dev))
2867 goto finished;
2868 if (request_irq(pdev->irq, pch_udc_isr, IRQF_SHARED, KBUILD_MODNAME,
2869 dev)) {
2870 dev_err(&pdev->dev, "%s: request_irq(%d) fail\n", __func__,
2871 pdev->irq);
2872 retval = -ENODEV;
2873 goto finished;
2874 }
2875 dev->irq = pdev->irq;
2876 dev->irq_registered = 1;
2877
2878 pci_set_master(pdev);
2879 pci_try_set_mwi(pdev);
2880
2881 /* device struct setup */
2882 spin_lock_init(&dev->lock);
2883 dev->pdev = pdev;
2884 dev->gadget.ops = &pch_udc_ops;
2885
2886 retval = init_dma_pools(dev);
2887 if (retval)
2888 goto finished;
2889
2890 dev_set_name(&dev->gadget.dev, "gadget");
2891 dev->gadget.dev.parent = &pdev->dev;
2892 dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
2893 dev->gadget.dev.release = gadget_release;
2894 dev->gadget.name = KBUILD_MODNAME;
2895 dev->gadget.is_dualspeed = 1;
2896
2897 retval = device_register(&dev->gadget.dev);
2898 if (retval)
2899 goto finished;
2900 dev->registered = 1;
2901
2902 /* Put the device in disconnected state till a driver is bound */
2903 pch_udc_set_disconnect(dev);
2904 return 0;
2905
2906finished:
2907 pch_udc_remove(pdev);
2908 return retval;
2909}
2910
/* PCI match table: Intel EG20T (Topcliff) USB device controller. */
static DEFINE_PCI_DEVICE_TABLE(pch_udc_pcidev_id) = {
	{
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EG20T_UDC),
		.class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
		.class_mask = 0xffffffff,
	},
	{ 0 },	/* terminating entry */
};

MODULE_DEVICE_TABLE(pci, pch_udc_pcidev_id);
2921
2922
/* PCI driver glue for the pch_udc UDC. */
static struct pci_driver pch_udc_driver = {
	.name =	KBUILD_MODNAME,
	.id_table =	pch_udc_pcidev_id,
	.probe =	pch_udc_probe,
	.remove =	pch_udc_remove,
	.suspend =	pch_udc_suspend,
	.resume =	pch_udc_resume,
	.shutdown =	pch_udc_shutdown,
};
2932
/* Module entry point: register the PCI driver. */
static int __init pch_udc_pci_init(void)
{
	return pci_register_driver(&pch_udc_driver);
}
module_init(pch_udc_pci_init);
2938
/* Module exit point: unregister the PCI driver. */
static void __exit pch_udc_pci_exit(void)
{
	pci_unregister_driver(&pch_udc_driver);
}
module_exit(pch_udc_pci_exit);
2944
2945MODULE_DESCRIPTION("Intel EG20T USB Device Controller");
2946MODULE_AUTHOR("OKI SEMICONDUCTOR, <toshiharu-linux@dsn.okisemi.com>");
2947MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/u_audio.c b/drivers/usb/gadget/u_audio.c
index 7a86d2c9109c..59ffe1ecf1c9 100644
--- a/drivers/usb/gadget/u_audio.c
+++ b/drivers/usb/gadget/u_audio.c
@@ -255,6 +255,7 @@ static int gaudio_open_snd_dev(struct gaudio *card)
255 ERROR(card, "No such PCM capture device: %s\n", fn_cap); 255 ERROR(card, "No such PCM capture device: %s\n", fn_cap);
256 snd->substream = NULL; 256 snd->substream = NULL;
257 snd->card = NULL; 257 snd->card = NULL;
258 snd->filp = NULL;
258 } else { 259 } else {
259 pcm_file = snd->filp->private_data; 260 pcm_file = snd->filp->private_data;
260 snd->substream = pcm_file->substream; 261 snd->substream = pcm_file->substream;
@@ -273,17 +274,17 @@ static int gaudio_close_snd_dev(struct gaudio *gau)
273 274
274 /* Close control device */ 275 /* Close control device */
275 snd = &gau->control; 276 snd = &gau->control;
276 if (!IS_ERR(snd->filp)) 277 if (snd->filp)
277 filp_close(snd->filp, current->files); 278 filp_close(snd->filp, current->files);
278 279
279 /* Close PCM playback device and setup substream */ 280 /* Close PCM playback device and setup substream */
280 snd = &gau->playback; 281 snd = &gau->playback;
281 if (!IS_ERR(snd->filp)) 282 if (snd->filp)
282 filp_close(snd->filp, current->files); 283 filp_close(snd->filp, current->files);
283 284
284 /* Close PCM capture device and setup substream */ 285 /* Close PCM capture device and setup substream */
285 snd = &gau->capture; 286 snd = &gau->capture;
286 if (!IS_ERR(snd->filp)) 287 if (snd->filp)
287 filp_close(snd->filp, current->files); 288 filp_close(snd->filp, current->files);
288 289
289 return 0; 290 return 0;
@@ -304,8 +305,7 @@ int __init gaudio_setup(struct gaudio *card)
304 ret = gaudio_open_snd_dev(card); 305 ret = gaudio_open_snd_dev(card);
305 if (ret) 306 if (ret)
306 ERROR(card, "we need at least one control device\n"); 307 ERROR(card, "we need at least one control device\n");
307 308 else if (!the_card)
308 if (!the_card)
309 the_card = card; 309 the_card = card;
310 310
311 return ret; 311 return ret;
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index fbe86ca95802..e3454fe46b47 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -240,6 +240,9 @@ rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
240 size += out->maxpacket - 1; 240 size += out->maxpacket - 1;
241 size -= size % out->maxpacket; 241 size -= size % out->maxpacket;
242 242
243 if (dev->port_usb->is_fixed)
244 size = max(size, dev->port_usb->fixed_out_len);
245
243 skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags); 246 skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
244 if (skb == NULL) { 247 if (skb == NULL) {
245 DBG(dev, "no rx skb\n"); 248 DBG(dev, "no rx skb\n");
@@ -578,12 +581,19 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
578 req->context = skb; 581 req->context = skb;
579 req->complete = tx_complete; 582 req->complete = tx_complete;
580 583
584 /* NCM requires no zlp if transfer is dwNtbInMaxSize */
585 if (dev->port_usb->is_fixed &&
586 length == dev->port_usb->fixed_in_len &&
587 (length % in->maxpacket) == 0)
588 req->zero = 0;
589 else
590 req->zero = 1;
591
581 /* use zlp framing on tx for strict CDC-Ether conformance, 592 /* use zlp framing on tx for strict CDC-Ether conformance,
582 * though any robust network rx path ignores extra padding. 593 * though any robust network rx path ignores extra padding.
583 * and some hardware doesn't like to write zlps. 594 * and some hardware doesn't like to write zlps.
584 */ 595 */
585 req->zero = 1; 596 if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
586 if (!dev->zlp && (length % in->maxpacket) == 0)
587 length++; 597 length++;
588 598
589 req->length = length; 599 req->length = length;
diff --git a/drivers/usb/gadget/u_ether.h b/drivers/usb/gadget/u_ether.h
index 3c8c0c9f9d72..b56e1e7d423c 100644
--- a/drivers/usb/gadget/u_ether.h
+++ b/drivers/usb/gadget/u_ether.h
@@ -62,6 +62,10 @@ struct gether {
62 62
63 /* hooks for added framing, as needed for RNDIS and EEM. */ 63 /* hooks for added framing, as needed for RNDIS and EEM. */
64 u32 header_len; 64 u32 header_len;
65 /* NCM requires fixed size bundles */
66 bool is_fixed;
67 u32 fixed_out_len;
68 u32 fixed_in_len;
65 struct sk_buff *(*wrap)(struct gether *port, 69 struct sk_buff *(*wrap)(struct gether *port,
66 struct sk_buff *skb); 70 struct sk_buff *skb);
67 int (*unwrap)(struct gether *port, 71 int (*unwrap)(struct gether *port,
@@ -103,6 +107,7 @@ static inline bool can_support_ecm(struct usb_gadget *gadget)
103/* each configuration may bind one instance of an ethernet link */ 107/* each configuration may bind one instance of an ethernet link */
104int geth_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]); 108int geth_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]);
105int ecm_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]); 109int ecm_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]);
110int ncm_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]);
106int eem_bind_config(struct usb_configuration *c); 111int eem_bind_config(struct usb_configuration *c);
107 112
108#ifdef USB_ETH_RNDIS 113#ifdef USB_ETH_RNDIS
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index f8970d151d2a..24046c0f5878 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -133,6 +133,25 @@ config USB_EHCI_MXC
133 ---help--- 133 ---help---
134 Variation of ARC USB block used in some Freescale chips. 134 Variation of ARC USB block used in some Freescale chips.
135 135
136config USB_EHCI_HCD_OMAP
137 bool "EHCI support for OMAP3 and later chips"
138 depends on USB_EHCI_HCD && ARCH_OMAP
139 default y
140 --- help ---
141 Enables support for the on-chip EHCI controller on
142 OMAP3 and later chips.
143
144config USB_EHCI_MSM
145 bool "Support for MSM on-chip EHCI USB controller"
146 depends on USB_EHCI_HCD && ARCH_MSM
147 select USB_EHCI_ROOT_HUB_TT
148 select USB_MSM_OTG_72K
149 ---help---
150 Enables support for the USB Host controller present on the
151 Qualcomm chipsets. Root Hub has inbuilt TT.
152 This driver depends on OTG driver for PHY initialization,
153 clock management, powering up VBUS, and power management.
154
136config USB_EHCI_HCD_PPC_OF 155config USB_EHCI_HCD_PPC_OF
137 bool "EHCI support for PPC USB controller on OF platform bus" 156 bool "EHCI support for PPC USB controller on OF platform bus"
138 depends on USB_EHCI_HCD && PPC_OF 157 depends on USB_EHCI_HCD && PPC_OF
diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c
index 51bd0edf544f..d6a69d514a84 100644
--- a/drivers/usb/host/ehci-atmel.c
+++ b/drivers/usb/host/ehci-atmel.c
@@ -99,6 +99,7 @@ static const struct hc_driver ehci_atmel_hc_driver = {
99 .urb_enqueue = ehci_urb_enqueue, 99 .urb_enqueue = ehci_urb_enqueue,
100 .urb_dequeue = ehci_urb_dequeue, 100 .urb_dequeue = ehci_urb_dequeue,
101 .endpoint_disable = ehci_endpoint_disable, 101 .endpoint_disable = ehci_endpoint_disable,
102 .endpoint_reset = ehci_endpoint_reset,
102 103
103 /* scheduling support */ 104 /* scheduling support */
104 .get_frame_number = ehci_get_frame, 105 .get_frame_number = ehci_get_frame,
@@ -110,6 +111,8 @@ static const struct hc_driver ehci_atmel_hc_driver = {
110 .bus_resume = ehci_bus_resume, 111 .bus_resume = ehci_bus_resume,
111 .relinquish_port = ehci_relinquish_port, 112 .relinquish_port = ehci_relinquish_port,
112 .port_handed_over = ehci_port_handed_over, 113 .port_handed_over = ehci_port_handed_over,
114
115 .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
113}; 116};
114 117
115static int __init ehci_atmel_drv_probe(struct platform_device *pdev) 118static int __init ehci_atmel_drv_probe(struct platform_device *pdev)
diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
index 6e2599661b5b..3be238a24cc5 100644
--- a/drivers/usb/host/ehci-dbg.c
+++ b/drivers/usb/host/ehci-dbg.c
@@ -879,7 +879,7 @@ static int fill_buffer(struct debug_buffer *buf)
879 int ret = 0; 879 int ret = 0;
880 880
881 if (!buf->output_buf) 881 if (!buf->output_buf)
882 buf->output_buf = (char *)vmalloc(buf->alloc_size); 882 buf->output_buf = vmalloc(buf->alloc_size);
883 883
884 if (!buf->output_buf) { 884 if (!buf->output_buf) {
885 ret = -ENOMEM; 885 ret = -ENOMEM;
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index d0c8f7c03e05..6fee3cd58efe 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -114,6 +114,9 @@ MODULE_PARM_DESC(hird, "host initiated resume duration, +1 for each 75us\n");
114 114
115#define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT) 115#define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
116 116
117/* for ASPM quirk of ISOC on AMD SB800 */
118static struct pci_dev *amd_nb_dev;
119
117/*-------------------------------------------------------------------------*/ 120/*-------------------------------------------------------------------------*/
118 121
119#include "ehci.h" 122#include "ehci.h"
@@ -529,6 +532,11 @@ static void ehci_stop (struct usb_hcd *hcd)
529 spin_unlock_irq (&ehci->lock); 532 spin_unlock_irq (&ehci->lock);
530 ehci_mem_cleanup (ehci); 533 ehci_mem_cleanup (ehci);
531 534
535 if (amd_nb_dev) {
536 pci_dev_put(amd_nb_dev);
537 amd_nb_dev = NULL;
538 }
539
532#ifdef EHCI_STATS 540#ifdef EHCI_STATS
533 ehci_dbg (ehci, "irq normal %ld err %ld reclaim %ld (lost %ld)\n", 541 ehci_dbg (ehci, "irq normal %ld err %ld reclaim %ld (lost %ld)\n",
534 ehci->stats.normal, ehci->stats.error, ehci->stats.reclaim, 542 ehci->stats.normal, ehci->stats.error, ehci->stats.reclaim,
@@ -1166,12 +1174,17 @@ MODULE_LICENSE ("GPL");
1166#define PLATFORM_DRIVER ehci_mxc_driver 1174#define PLATFORM_DRIVER ehci_mxc_driver
1167#endif 1175#endif
1168 1176
1177#ifdef CONFIG_CPU_SUBTYPE_SH7786
1178#include "ehci-sh.c"
1179#define PLATFORM_DRIVER ehci_hcd_sh_driver
1180#endif
1181
1169#ifdef CONFIG_SOC_AU1200 1182#ifdef CONFIG_SOC_AU1200
1170#include "ehci-au1xxx.c" 1183#include "ehci-au1xxx.c"
1171#define PLATFORM_DRIVER ehci_hcd_au1xxx_driver 1184#define PLATFORM_DRIVER ehci_hcd_au1xxx_driver
1172#endif 1185#endif
1173 1186
1174#ifdef CONFIG_ARCH_OMAP3 1187#ifdef CONFIG_USB_EHCI_HCD_OMAP
1175#include "ehci-omap.c" 1188#include "ehci-omap.c"
1176#define PLATFORM_DRIVER ehci_hcd_omap_driver 1189#define PLATFORM_DRIVER ehci_hcd_omap_driver
1177#endif 1190#endif
@@ -1221,6 +1234,21 @@ MODULE_LICENSE ("GPL");
1221#define PLATFORM_DRIVER cns3xxx_ehci_driver 1234#define PLATFORM_DRIVER cns3xxx_ehci_driver
1222#endif 1235#endif
1223 1236
1237#ifdef CONFIG_ARCH_VT8500
1238#include "ehci-vt8500.c"
1239#define PLATFORM_DRIVER vt8500_ehci_driver
1240#endif
1241
1242#ifdef CONFIG_PLAT_SPEAR
1243#include "ehci-spear.c"
1244#define PLATFORM_DRIVER spear_ehci_hcd_driver
1245#endif
1246
1247#ifdef CONFIG_USB_EHCI_MSM
1248#include "ehci-msm.c"
1249#define PLATFORM_DRIVER ehci_msm_driver
1250#endif
1251
1224#if !defined(PCI_DRIVER) && !defined(PLATFORM_DRIVER) && \ 1252#if !defined(PCI_DRIVER) && !defined(PLATFORM_DRIVER) && \
1225 !defined(PS3_SYSTEM_BUS_DRIVER) && !defined(OF_PLATFORM_DRIVER) && \ 1253 !defined(PS3_SYSTEM_BUS_DRIVER) && !defined(OF_PLATFORM_DRIVER) && \
1226 !defined(XILINX_OF_PLATFORM_DRIVER) 1254 !defined(XILINX_OF_PLATFORM_DRIVER)
diff --git a/drivers/usb/host/ehci-msm.c b/drivers/usb/host/ehci-msm.c
new file mode 100644
index 000000000000..413f4deca532
--- /dev/null
+++ b/drivers/usb/host/ehci-msm.c
@@ -0,0 +1,345 @@
1/* ehci-msm.c - HSUSB Host Controller Driver Implementation
2 *
3 * Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
4 *
5 * Partly derived from ehci-fsl.c and ehci-hcd.c
6 * Copyright (c) 2000-2004 by David Brownell
7 * Copyright (c) 2005 MontaVista Software
8 *
9 * All source code in this file is licensed under the following license except
10 * where indicated.
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License version 2 as published
14 * by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
19 *
20 * See the GNU General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, you can find it at http://www.fsf.org
23 */
24
25#include <linux/platform_device.h>
26#include <linux/clk.h>
27#include <linux/err.h>
28#include <linux/pm_runtime.h>
29
30#include <linux/usb/otg.h>
31#include <linux/usb/msm_hsusb_hw.h>
32
33#define MSM_USB_BASE (hcd->regs)
34
35static struct otg_transceiver *otg;
36
37/*
38 * ehci_run defined in drivers/usb/host/ehci-hcd.c reset the controller and
39 * the configuration settings in ehci_msm_reset vanish after controller is
40 * reset. Resetting the controler in ehci_run seems to be un-necessary
41 * provided HCD reset the controller before calling ehci_run. Most of the HCD
42 * do but some are not. So this function is same as ehci_run but we don't
43 * reset the controller here.
44 */
45static int ehci_msm_run(struct usb_hcd *hcd)
46{
47 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
48 u32 temp;
49 u32 hcc_params;
50
51 hcd->uses_new_polling = 1;
52
53 ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list);
54 ehci_writel(ehci, (u32)ehci->async->qh_dma, &ehci->regs->async_next);
55
56 /*
57 * hcc_params controls whether ehci->regs->segment must (!!!)
58 * be used; it constrains QH/ITD/SITD and QTD locations.
59 * pci_pool consistent memory always uses segment zero.
60 * streaming mappings for I/O buffers, like pci_map_single(),
61 * can return segments above 4GB, if the device allows.
62 *
63 * NOTE: the dma mask is visible through dma_supported(), so
64 * drivers can pass this info along ... like NETIF_F_HIGHDMA,
65 * Scsi_Host.highmem_io, and so forth. It's readonly to all
66 * host side drivers though.
67 */
68 hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
69 if (HCC_64BIT_ADDR(hcc_params))
70 ehci_writel(ehci, 0, &ehci->regs->segment);
71
72 /*
73 * Philips, Intel, and maybe others need CMD_RUN before the
74 * root hub will detect new devices (why?); NEC doesn't
75 */
76 ehci->command &= ~(CMD_LRESET|CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET);
77 ehci->command |= CMD_RUN;
78 ehci_writel(ehci, ehci->command, &ehci->regs->command);
79 dbg_cmd(ehci, "init", ehci->command);
80
81 /*
82 * Start, enabling full USB 2.0 functionality ... usb 1.1 devices
83 * are explicitly handed to companion controller(s), so no TT is
84 * involved with the root hub. (Except where one is integrated,
85 * and there's no companion controller unless maybe for USB OTG.)
86 *
87 * Turning on the CF flag will transfer ownership of all ports
88 * from the companions to the EHCI controller. If any of the
89 * companions are in the middle of a port reset at the time, it
90 * could cause trouble. Write-locking ehci_cf_port_reset_rwsem
91 * guarantees that no resets are in progress. After we set CF,
92 * a short delay lets the hardware catch up; new resets shouldn't
93 * be started before the port switching actions could complete.
94 */
95 down_write(&ehci_cf_port_reset_rwsem);
96 hcd->state = HC_STATE_RUNNING;
97 ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
98 ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
99 usleep_range(5000, 5500);
100 up_write(&ehci_cf_port_reset_rwsem);
101 ehci->last_periodic_enable = ktime_get_real();
102
103 temp = HC_VERSION(ehci_readl(ehci, &ehci->caps->hc_capbase));
104 ehci_info(ehci,
105 "USB %x.%x started, EHCI %x.%02x%s\n",
106 ((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f),
107 temp >> 8, temp & 0xff,
108 ignore_oc ? ", overcurrent ignored" : "");
109
110 ehci_writel(ehci, INTR_MASK,
111 &ehci->regs->intr_enable); /* Turn On Interrupts */
112
113 /* GRR this is run-once init(), being done every time the HC starts.
114 * So long as they're part of class devices, we can't do it init()
115 * since the class device isn't created that early.
116 */
117 create_debug_files(ehci);
118 create_companion_file(ehci);
119
120 return 0;
121}
122
123static int ehci_msm_reset(struct usb_hcd *hcd)
124{
125 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
126 int retval;
127
128 ehci->caps = USB_CAPLENGTH;
129 ehci->regs = USB_CAPLENGTH +
130 HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
131
132 /* cache the data to minimize the chip reads*/
133 ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
134
135 hcd->has_tt = 1;
136 ehci->sbrn = HCD_USB2;
137
138 /* data structure init */
139 retval = ehci_init(hcd);
140 if (retval)
141 return retval;
142
143 retval = ehci_reset(ehci);
144 if (retval)
145 return retval;
146
147 /* bursts of unspecified length. */
148 writel(0, USB_AHBBURST);
149 /* Use the AHB transactor */
150 writel(0, USB_AHBMODE);
151 /* Disable streaming mode and select host mode */
152 writel(0x13, USB_USBMODE);
153
154 ehci_port_power(ehci, 1);
155 return 0;
156}
157
158static struct hc_driver msm_hc_driver = {
159 .description = hcd_name,
160 .product_desc = "Qualcomm On-Chip EHCI Host Controller",
161 .hcd_priv_size = sizeof(struct ehci_hcd),
162
163 /*
164 * generic hardware linkage
165 */
166 .irq = ehci_irq,
167 .flags = HCD_USB2 | HCD_MEMORY,
168
169 .reset = ehci_msm_reset,
170 .start = ehci_msm_run,
171
172 .stop = ehci_stop,
173 .shutdown = ehci_shutdown,
174
175 /*
176 * managing i/o requests and associated device resources
177 */
178 .urb_enqueue = ehci_urb_enqueue,
179 .urb_dequeue = ehci_urb_dequeue,
180 .endpoint_disable = ehci_endpoint_disable,
181 .endpoint_reset = ehci_endpoint_reset,
182 .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
183
184 /*
185 * scheduling support
186 */
187 .get_frame_number = ehci_get_frame,
188
189 /*
190 * root hub support
191 */
192 .hub_status_data = ehci_hub_status_data,
193 .hub_control = ehci_hub_control,
194 .relinquish_port = ehci_relinquish_port,
195 .port_handed_over = ehci_port_handed_over,
196
197 /*
198 * PM support
199 */
200 .bus_suspend = ehci_bus_suspend,
201 .bus_resume = ehci_bus_resume,
202};
203
204static int ehci_msm_probe(struct platform_device *pdev)
205{
206 struct usb_hcd *hcd;
207 struct resource *res;
208 int ret;
209
210 dev_dbg(&pdev->dev, "ehci_msm proble\n");
211
212 hcd = usb_create_hcd(&msm_hc_driver, &pdev->dev, dev_name(&pdev->dev));
213 if (!hcd) {
214 dev_err(&pdev->dev, "Unable to create HCD\n");
215 return -ENOMEM;
216 }
217
218 hcd->irq = platform_get_irq(pdev, 0);
219 if (hcd->irq < 0) {
220 dev_err(&pdev->dev, "Unable to get IRQ resource\n");
221 ret = hcd->irq;
222 goto put_hcd;
223 }
224
225 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
226 if (!res) {
227 dev_err(&pdev->dev, "Unable to get memory resource\n");
228 ret = -ENODEV;
229 goto put_hcd;
230 }
231
232 hcd->rsrc_start = res->start;
233 hcd->rsrc_len = resource_size(res);
234 hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
235 if (!hcd->regs) {
236 dev_err(&pdev->dev, "ioremap failed\n");
237 ret = -ENOMEM;
238 goto put_hcd;
239 }
240
241 /*
242 * OTG driver takes care of PHY initialization, clock management,
243 * powering up VBUS, mapping of registers address space and power
244 * management.
245 */
246 otg = otg_get_transceiver();
247 if (!otg) {
248 dev_err(&pdev->dev, "unable to find transceiver\n");
249 ret = -ENODEV;
250 goto unmap;
251 }
252
253 ret = otg_set_host(otg, &hcd->self);
254 if (ret < 0) {
255 dev_err(&pdev->dev, "unable to register with transceiver\n");
256 goto put_transceiver;
257 }
258
259 device_init_wakeup(&pdev->dev, 1);
260 /*
261 * OTG device parent of HCD takes care of putting
262 * hardware into low power mode.
263 */
264 pm_runtime_no_callbacks(&pdev->dev);
265 pm_runtime_enable(&pdev->dev);
266
267 return 0;
268
269put_transceiver:
270 otg_put_transceiver(otg);
271unmap:
272 iounmap(hcd->regs);
273put_hcd:
274 usb_put_hcd(hcd);
275
276 return ret;
277}
278
279static int __devexit ehci_msm_remove(struct platform_device *pdev)
280{
281 struct usb_hcd *hcd = platform_get_drvdata(pdev);
282
283 device_init_wakeup(&pdev->dev, 0);
284 pm_runtime_disable(&pdev->dev);
285 pm_runtime_set_suspended(&pdev->dev);
286
287 otg_set_host(otg, NULL);
288 otg_put_transceiver(otg);
289
290 usb_put_hcd(hcd);
291
292 return 0;
293}
294
295#ifdef CONFIG_PM
296static int ehci_msm_pm_suspend(struct device *dev)
297{
298 struct usb_hcd *hcd = dev_get_drvdata(dev);
299 bool wakeup = device_may_wakeup(dev);
300
301 dev_dbg(dev, "ehci-msm PM suspend\n");
302
303 /*
304 * EHCI helper function has also the same check before manipulating
305 * port wakeup flags. We do check here the same condition before
306 * calling the same helper function to avoid bringing hardware
307 * from Low power mode when there is no need for adjusting port
308 * wakeup flags.
309 */
310 if (hcd->self.root_hub->do_remote_wakeup && !wakeup) {
311 pm_runtime_resume(dev);
312 ehci_prepare_ports_for_controller_suspend(hcd_to_ehci(hcd),
313 wakeup);
314 }
315
316 return 0;
317}
318
319static int ehci_msm_pm_resume(struct device *dev)
320{
321 struct usb_hcd *hcd = dev_get_drvdata(dev);
322
323 dev_dbg(dev, "ehci-msm PM resume\n");
324 ehci_prepare_ports_for_controller_resume(hcd_to_ehci(hcd));
325
326 return 0;
327}
328#else
329#define ehci_msm_pm_suspend NULL
330#define ehci_msm_pm_resume NULL
331#endif
332
333static const struct dev_pm_ops ehci_msm_dev_pm_ops = {
334 .suspend = ehci_msm_pm_suspend,
335 .resume = ehci_msm_pm_resume,
336};
337
338static struct platform_driver ehci_msm_driver = {
339 .probe = ehci_msm_probe,
340 .remove = __devexit_p(ehci_msm_remove),
341 .driver = {
342 .name = "msm_hsusb_host",
343 .pm = &ehci_msm_dev_pm_ops,
344 },
345};
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
index a22d2df769a9..fa59b26fc5bc 100644
--- a/drivers/usb/host/ehci-mxc.c
+++ b/drivers/usb/host/ehci-mxc.c
@@ -36,14 +36,8 @@ struct ehci_mxc_priv {
36static int ehci_mxc_setup(struct usb_hcd *hcd) 36static int ehci_mxc_setup(struct usb_hcd *hcd)
37{ 37{
38 struct ehci_hcd *ehci = hcd_to_ehci(hcd); 38 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
39 struct device *dev = hcd->self.controller;
40 struct mxc_usbh_platform_data *pdata = dev_get_platdata(dev);
41 int retval; 39 int retval;
42 40
43 /* EHCI registers start at offset 0x100 */
44 ehci->caps = hcd->regs + 0x100;
45 ehci->regs = hcd->regs + 0x100 +
46 HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
47 dbg_hcs_params(ehci, "reset"); 41 dbg_hcs_params(ehci, "reset");
48 dbg_hcc_params(ehci, "reset"); 42 dbg_hcc_params(ehci, "reset");
49 43
@@ -65,12 +59,6 @@ static int ehci_mxc_setup(struct usb_hcd *hcd)
65 59
66 ehci_reset(ehci); 60 ehci_reset(ehci);
67 61
68 /* set up the PORTSCx register */
69 ehci_writel(ehci, pdata->portsc, &ehci->regs->port_status[0]);
70
71 /* is this really needed? */
72 msleep(10);
73
74 ehci_port_power(ehci, 0); 62 ehci_port_power(ehci, 0);
75 return 0; 63 return 0;
76} 64}
@@ -100,6 +88,7 @@ static const struct hc_driver ehci_mxc_hc_driver = {
100 .urb_enqueue = ehci_urb_enqueue, 88 .urb_enqueue = ehci_urb_enqueue,
101 .urb_dequeue = ehci_urb_dequeue, 89 .urb_dequeue = ehci_urb_dequeue,
102 .endpoint_disable = ehci_endpoint_disable, 90 .endpoint_disable = ehci_endpoint_disable,
91 .endpoint_reset = ehci_endpoint_reset,
103 92
104 /* 93 /*
105 * scheduling support 94 * scheduling support
@@ -115,6 +104,8 @@ static const struct hc_driver ehci_mxc_hc_driver = {
115 .bus_resume = ehci_bus_resume, 104 .bus_resume = ehci_bus_resume,
116 .relinquish_port = ehci_relinquish_port, 105 .relinquish_port = ehci_relinquish_port,
117 .port_handed_over = ehci_port_handed_over, 106 .port_handed_over = ehci_port_handed_over,
107
108 .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
118}; 109};
119 110
120static int ehci_mxc_drv_probe(struct platform_device *pdev) 111static int ehci_mxc_drv_probe(struct platform_device *pdev)
@@ -125,6 +116,7 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
125 int irq, ret; 116 int irq, ret;
126 struct ehci_mxc_priv *priv; 117 struct ehci_mxc_priv *priv;
127 struct device *dev = &pdev->dev; 118 struct device *dev = &pdev->dev;
119 struct ehci_hcd *ehci;
128 120
129 dev_info(&pdev->dev, "initializing i.MX USB Controller\n"); 121 dev_info(&pdev->dev, "initializing i.MX USB Controller\n");
130 122
@@ -212,6 +204,19 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
212 if (ret < 0) 204 if (ret < 0)
213 goto err_init; 205 goto err_init;
214 206
207 ehci = hcd_to_ehci(hcd);
208
209 /* EHCI registers start at offset 0x100 */
210 ehci->caps = hcd->regs + 0x100;
211 ehci->regs = hcd->regs + 0x100 +
212 HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
213
214 /* set up the PORTSCx register */
215 ehci_writel(ehci, pdata->portsc, &ehci->regs->port_status[0]);
216
217 /* is this really needed? */
218 msleep(10);
219
215 /* Initialize the transceiver */ 220 /* Initialize the transceiver */
216 if (pdata->otg) { 221 if (pdata->otg) {
217 pdata->otg->io_priv = hcd->regs + ULPI_VIEWPORT_OFFSET; 222 pdata->otg->io_priv = hcd->regs + ULPI_VIEWPORT_OFFSET;
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 116ae280053a..680f2ef4e59f 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -1,11 +1,12 @@
1/* 1/*
2 * ehci-omap.c - driver for USBHOST on OMAP 34xx processor 2 * ehci-omap.c - driver for USBHOST on OMAP3/4 processors
3 * 3 *
4 * Bus Glue for OMAP34xx USBHOST 3 port EHCI controller 4 * Bus Glue for the EHCI controllers in OMAP3/4
5 * Tested on OMAP3430 ES2.0 SDP 5 * Tested on several OMAP3 boards, and OMAP4 Pandaboard
6 * 6 *
7 * Copyright (C) 2007-2008 Texas Instruments, Inc. 7 * Copyright (C) 2007-2010 Texas Instruments, Inc.
8 * Author: Vikram Pandita <vikram.pandita@ti.com> 8 * Author: Vikram Pandita <vikram.pandita@ti.com>
9 * Author: Anand Gadiyar <gadiyar@ti.com>
9 * 10 *
10 * Copyright (C) 2009 Nokia Corporation 11 * Copyright (C) 2009 Nokia Corporation
11 * Contact: Felipe Balbi <felipe.balbi@nokia.com> 12 * Contact: Felipe Balbi <felipe.balbi@nokia.com>
@@ -26,11 +27,14 @@
26 * along with this program; if not, write to the Free Software 27 * along with this program; if not, write to the Free Software
27 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 28 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
28 * 29 *
29 * TODO (last updated Feb 12, 2010): 30 * TODO (last updated Nov 21, 2010):
30 * - add kernel-doc 31 * - add kernel-doc
31 * - enable AUTOIDLE 32 * - enable AUTOIDLE
32 * - add suspend/resume 33 * - add suspend/resume
33 * - move workarounds to board-files 34 * - move workarounds to board-files
35 * - factor out code common to OHCI
36 * - add HSIC and TLL support
37 * - convert to use hwmod and runtime PM
34 */ 38 */
35 39
36#include <linux/platform_device.h> 40#include <linux/platform_device.h>
@@ -86,9 +90,9 @@
86#define OMAP_TLL_ULPI_SCRATCH_REGISTER(num) (0x816 + 0x100 * num) 90#define OMAP_TLL_ULPI_SCRATCH_REGISTER(num) (0x816 + 0x100 * num)
87 91
88#define OMAP_TLL_CHANNEL_COUNT 3 92#define OMAP_TLL_CHANNEL_COUNT 3
89#define OMAP_TLL_CHANNEL_1_EN_MASK (1 << 1) 93#define OMAP_TLL_CHANNEL_1_EN_MASK (1 << 0)
90#define OMAP_TLL_CHANNEL_2_EN_MASK (1 << 2) 94#define OMAP_TLL_CHANNEL_2_EN_MASK (1 << 1)
91#define OMAP_TLL_CHANNEL_3_EN_MASK (1 << 4) 95#define OMAP_TLL_CHANNEL_3_EN_MASK (1 << 2)
92 96
93/* UHH Register Set */ 97/* UHH Register Set */
94#define OMAP_UHH_REVISION (0x00) 98#define OMAP_UHH_REVISION (0x00)
@@ -114,6 +118,23 @@
114#define OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS (1 << 9) 118#define OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS (1 << 9)
115#define OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS (1 << 10) 119#define OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS (1 << 10)
116 120
121/* OMAP4-specific defines */
122#define OMAP4_UHH_SYSCONFIG_IDLEMODE_CLEAR (3 << 2)
123#define OMAP4_UHH_SYSCONFIG_NOIDLE (1 << 2)
124
125#define OMAP4_UHH_SYSCONFIG_STDBYMODE_CLEAR (3 << 4)
126#define OMAP4_UHH_SYSCONFIG_NOSTDBY (1 << 4)
127#define OMAP4_UHH_SYSCONFIG_SOFTRESET (1 << 0)
128
129#define OMAP4_P1_MODE_CLEAR (3 << 16)
130#define OMAP4_P1_MODE_TLL (1 << 16)
131#define OMAP4_P1_MODE_HSIC (3 << 16)
132#define OMAP4_P2_MODE_CLEAR (3 << 18)
133#define OMAP4_P2_MODE_TLL (1 << 18)
134#define OMAP4_P2_MODE_HSIC (3 << 18)
135
136#define OMAP_REV2_TLL_CHANNEL_COUNT 2
137
117#define OMAP_UHH_DEBUG_CSR (0x44) 138#define OMAP_UHH_DEBUG_CSR (0x44)
118 139
119/* EHCI Register Set */ 140/* EHCI Register Set */
@@ -127,6 +148,17 @@
127#define EHCI_INSNREG05_ULPI_EXTREGADD_SHIFT 8 148#define EHCI_INSNREG05_ULPI_EXTREGADD_SHIFT 8
128#define EHCI_INSNREG05_ULPI_WRDATA_SHIFT 0 149#define EHCI_INSNREG05_ULPI_WRDATA_SHIFT 0
129 150
151/* Values of UHH_REVISION - Note: these are not given in the TRM */
152#define OMAP_EHCI_REV1 0x00000010 /* OMAP3 */
153#define OMAP_EHCI_REV2 0x50700100 /* OMAP4 */
154
155#define is_omap_ehci_rev1(x) (x->omap_ehci_rev == OMAP_EHCI_REV1)
156#define is_omap_ehci_rev2(x) (x->omap_ehci_rev == OMAP_EHCI_REV2)
157
158#define is_ehci_phy_mode(x) (x == EHCI_HCD_OMAP_MODE_PHY)
159#define is_ehci_tll_mode(x) (x == EHCI_HCD_OMAP_MODE_TLL)
160#define is_ehci_hsic_mode(x) (x == EHCI_HCD_OMAP_MODE_HSIC)
161
130/*-------------------------------------------------------------------------*/ 162/*-------------------------------------------------------------------------*/
131 163
132static inline void ehci_omap_writel(void __iomem *base, u32 reg, u32 val) 164static inline void ehci_omap_writel(void __iomem *base, u32 reg, u32 val)
@@ -156,10 +188,14 @@ struct ehci_hcd_omap {
156 struct device *dev; 188 struct device *dev;
157 189
158 struct clk *usbhost_ick; 190 struct clk *usbhost_ick;
159 struct clk *usbhost2_120m_fck; 191 struct clk *usbhost_hs_fck;
160 struct clk *usbhost1_48m_fck; 192 struct clk *usbhost_fs_fck;
161 struct clk *usbtll_fck; 193 struct clk *usbtll_fck;
162 struct clk *usbtll_ick; 194 struct clk *usbtll_ick;
195 struct clk *xclk60mhsp1_ck;
196 struct clk *xclk60mhsp2_ck;
197 struct clk *utmi_p1_fck;
198 struct clk *utmi_p2_fck;
163 199
164 /* FIXME the following two workarounds are 200 /* FIXME the following two workarounds are
165 * board specific not silicon-specific so these 201 * board specific not silicon-specific so these
@@ -176,6 +212,9 @@ struct ehci_hcd_omap {
176 /* phy reset workaround */ 212 /* phy reset workaround */
177 int phy_reset; 213 int phy_reset;
178 214
215 /* IP revision */
216 u32 omap_ehci_rev;
217
179 /* desired phy_mode: TLL, PHY */ 218 /* desired phy_mode: TLL, PHY */
180 enum ehci_hcd_omap_mode port_mode[OMAP3_HS_USB_PORTS]; 219 enum ehci_hcd_omap_mode port_mode[OMAP3_HS_USB_PORTS];
181 220
@@ -191,13 +230,14 @@ struct ehci_hcd_omap {
191 230
192/*-------------------------------------------------------------------------*/ 231/*-------------------------------------------------------------------------*/
193 232
194static void omap_usb_utmi_init(struct ehci_hcd_omap *omap, u8 tll_channel_mask) 233static void omap_usb_utmi_init(struct ehci_hcd_omap *omap, u8 tll_channel_mask,
234 u8 tll_channel_count)
195{ 235{
196 unsigned reg; 236 unsigned reg;
197 int i; 237 int i;
198 238
199 /* Program the 3 TLL channels upfront */ 239 /* Program the 3 TLL channels upfront */
200 for (i = 0; i < OMAP_TLL_CHANNEL_COUNT; i++) { 240 for (i = 0; i < tll_channel_count; i++) {
201 reg = ehci_omap_readl(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i)); 241 reg = ehci_omap_readl(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i));
202 242
203 /* Disable AutoIdle, BitStuffing and use SDR Mode */ 243 /* Disable AutoIdle, BitStuffing and use SDR Mode */
@@ -217,7 +257,7 @@ static void omap_usb_utmi_init(struct ehci_hcd_omap *omap, u8 tll_channel_mask)
217 ehci_omap_writel(omap->tll_base, OMAP_TLL_SHARED_CONF, reg); 257 ehci_omap_writel(omap->tll_base, OMAP_TLL_SHARED_CONF, reg);
218 258
219 /* Enable channels now */ 259 /* Enable channels now */
220 for (i = 0; i < OMAP_TLL_CHANNEL_COUNT; i++) { 260 for (i = 0; i < tll_channel_count; i++) {
221 reg = ehci_omap_readl(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i)); 261 reg = ehci_omap_readl(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i));
222 262
223 /* Enable only the reg that is needed */ 263 /* Enable only the reg that is needed */
@@ -286,19 +326,19 @@ static int omap_start_ehc(struct ehci_hcd_omap *omap, struct usb_hcd *hcd)
286 } 326 }
287 clk_enable(omap->usbhost_ick); 327 clk_enable(omap->usbhost_ick);
288 328
289 omap->usbhost2_120m_fck = clk_get(omap->dev, "usbhost_120m_fck"); 329 omap->usbhost_hs_fck = clk_get(omap->dev, "hs_fck");
290 if (IS_ERR(omap->usbhost2_120m_fck)) { 330 if (IS_ERR(omap->usbhost_hs_fck)) {
291 ret = PTR_ERR(omap->usbhost2_120m_fck); 331 ret = PTR_ERR(omap->usbhost_hs_fck);
292 goto err_host_120m_fck; 332 goto err_host_120m_fck;
293 } 333 }
294 clk_enable(omap->usbhost2_120m_fck); 334 clk_enable(omap->usbhost_hs_fck);
295 335
296 omap->usbhost1_48m_fck = clk_get(omap->dev, "usbhost_48m_fck"); 336 omap->usbhost_fs_fck = clk_get(omap->dev, "fs_fck");
297 if (IS_ERR(omap->usbhost1_48m_fck)) { 337 if (IS_ERR(omap->usbhost_fs_fck)) {
298 ret = PTR_ERR(omap->usbhost1_48m_fck); 338 ret = PTR_ERR(omap->usbhost_fs_fck);
299 goto err_host_48m_fck; 339 goto err_host_48m_fck;
300 } 340 }
301 clk_enable(omap->usbhost1_48m_fck); 341 clk_enable(omap->usbhost_fs_fck);
302 342
303 if (omap->phy_reset) { 343 if (omap->phy_reset) {
304 /* Refer: ISSUE1 */ 344 /* Refer: ISSUE1 */
@@ -333,6 +373,80 @@ static int omap_start_ehc(struct ehci_hcd_omap *omap, struct usb_hcd *hcd)
333 } 373 }
334 clk_enable(omap->usbtll_ick); 374 clk_enable(omap->usbtll_ick);
335 375
376 omap->omap_ehci_rev = ehci_omap_readl(omap->uhh_base,
377 OMAP_UHH_REVISION);
378 dev_dbg(omap->dev, "OMAP UHH_REVISION 0x%x\n",
379 omap->omap_ehci_rev);
380
381 /*
382 * Enable per-port clocks as needed (newer controllers only).
383 * - External ULPI clock for PHY mode
384 * - Internal clocks for TLL and HSIC modes (TODO)
385 */
386 if (is_omap_ehci_rev2(omap)) {
387 switch (omap->port_mode[0]) {
388 case EHCI_HCD_OMAP_MODE_PHY:
389 omap->xclk60mhsp1_ck = clk_get(omap->dev,
390 "xclk60mhsp1_ck");
391 if (IS_ERR(omap->xclk60mhsp1_ck)) {
392 ret = PTR_ERR(omap->xclk60mhsp1_ck);
393 dev_err(omap->dev,
394 "Unable to get Port1 ULPI clock\n");
395 }
396
397 omap->utmi_p1_fck = clk_get(omap->dev,
398 "utmi_p1_gfclk");
399 if (IS_ERR(omap->utmi_p1_fck)) {
400 ret = PTR_ERR(omap->utmi_p1_fck);
401 dev_err(omap->dev,
402 "Unable to get utmi_p1_fck\n");
403 }
404
405 ret = clk_set_parent(omap->utmi_p1_fck,
406 omap->xclk60mhsp1_ck);
407 if (ret != 0) {
408 dev_err(omap->dev,
409 "Unable to set P1 f-clock\n");
410 }
411 break;
412 case EHCI_HCD_OMAP_MODE_TLL:
413 /* TODO */
414 default:
415 break;
416 }
417 switch (omap->port_mode[1]) {
418 case EHCI_HCD_OMAP_MODE_PHY:
419 omap->xclk60mhsp2_ck = clk_get(omap->dev,
420 "xclk60mhsp2_ck");
421 if (IS_ERR(omap->xclk60mhsp2_ck)) {
422 ret = PTR_ERR(omap->xclk60mhsp2_ck);
423 dev_err(omap->dev,
424 "Unable to get Port2 ULPI clock\n");
425 }
426
427 omap->utmi_p2_fck = clk_get(omap->dev,
428 "utmi_p2_gfclk");
429 if (IS_ERR(omap->utmi_p2_fck)) {
430 ret = PTR_ERR(omap->utmi_p2_fck);
431 dev_err(omap->dev,
432 "Unable to get utmi_p2_fck\n");
433 }
434
435 ret = clk_set_parent(omap->utmi_p2_fck,
436 omap->xclk60mhsp2_ck);
437 if (ret != 0) {
438 dev_err(omap->dev,
439 "Unable to set P2 f-clock\n");
440 }
441 break;
442 case EHCI_HCD_OMAP_MODE_TLL:
443 /* TODO */
444 default:
445 break;
446 }
447 }
448
449
336 /* perform TLL soft reset, and wait until reset is complete */ 450 /* perform TLL soft reset, and wait until reset is complete */
337 ehci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG, 451 ehci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
338 OMAP_USBTLL_SYSCONFIG_SOFTRESET); 452 OMAP_USBTLL_SYSCONFIG_SOFTRESET);
@@ -360,12 +474,20 @@ static int omap_start_ehc(struct ehci_hcd_omap *omap, struct usb_hcd *hcd)
360 474
361 /* Put UHH in NoIdle/NoStandby mode */ 475 /* Put UHH in NoIdle/NoStandby mode */
362 reg = ehci_omap_readl(omap->uhh_base, OMAP_UHH_SYSCONFIG); 476 reg = ehci_omap_readl(omap->uhh_base, OMAP_UHH_SYSCONFIG);
363 reg |= (OMAP_UHH_SYSCONFIG_ENAWAKEUP 477 if (is_omap_ehci_rev1(omap)) {
364 | OMAP_UHH_SYSCONFIG_SIDLEMODE 478 reg |= (OMAP_UHH_SYSCONFIG_ENAWAKEUP
365 | OMAP_UHH_SYSCONFIG_CACTIVITY 479 | OMAP_UHH_SYSCONFIG_SIDLEMODE
366 | OMAP_UHH_SYSCONFIG_MIDLEMODE); 480 | OMAP_UHH_SYSCONFIG_CACTIVITY
367 reg &= ~OMAP_UHH_SYSCONFIG_AUTOIDLE; 481 | OMAP_UHH_SYSCONFIG_MIDLEMODE);
482 reg &= ~OMAP_UHH_SYSCONFIG_AUTOIDLE;
483
368 484
485 } else if (is_omap_ehci_rev2(omap)) {
486 reg &= ~OMAP4_UHH_SYSCONFIG_IDLEMODE_CLEAR;
487 reg |= OMAP4_UHH_SYSCONFIG_NOIDLE;
488 reg &= ~OMAP4_UHH_SYSCONFIG_STDBYMODE_CLEAR;
489 reg |= OMAP4_UHH_SYSCONFIG_NOSTDBY;
490 }
369 ehci_omap_writel(omap->uhh_base, OMAP_UHH_SYSCONFIG, reg); 491 ehci_omap_writel(omap->uhh_base, OMAP_UHH_SYSCONFIG, reg);
370 492
371 reg = ehci_omap_readl(omap->uhh_base, OMAP_UHH_HOSTCONFIG); 493 reg = ehci_omap_readl(omap->uhh_base, OMAP_UHH_HOSTCONFIG);
@@ -376,40 +498,56 @@ static int omap_start_ehc(struct ehci_hcd_omap *omap, struct usb_hcd *hcd)
376 | OMAP_UHH_HOSTCONFIG_INCR16_BURST_EN); 498 | OMAP_UHH_HOSTCONFIG_INCR16_BURST_EN);
377 reg &= ~OMAP_UHH_HOSTCONFIG_INCRX_ALIGN_EN; 499 reg &= ~OMAP_UHH_HOSTCONFIG_INCRX_ALIGN_EN;
378 500
379 if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_UNKNOWN) 501 if (is_omap_ehci_rev1(omap)) {
380 reg &= ~OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS; 502 if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_UNKNOWN)
381 if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_UNKNOWN) 503 reg &= ~OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS;
382 reg &= ~OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS; 504 if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_UNKNOWN)
383 if (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_UNKNOWN) 505 reg &= ~OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS;
384 reg &= ~OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS; 506 if (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_UNKNOWN)
385 507 reg &= ~OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS;
386 /* Bypass the TLL module for PHY mode operation */ 508
387 if (cpu_is_omap3430() && (omap_rev() <= OMAP3430_REV_ES2_1)) { 509 /* Bypass the TLL module for PHY mode operation */
388 dev_dbg(omap->dev, "OMAP3 ES version <= ES2.1\n"); 510 if (cpu_is_omap3430() && (omap_rev() <= OMAP3430_REV_ES2_1)) {
389 if ((omap->port_mode[0] == EHCI_HCD_OMAP_MODE_PHY) || 511 dev_dbg(omap->dev, "OMAP3 ES version <= ES2.1\n");
390 (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_PHY) || 512 if (is_ehci_phy_mode(omap->port_mode[0]) ||
391 (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_PHY)) 513 is_ehci_phy_mode(omap->port_mode[1]) ||
392 reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_BYPASS; 514 is_ehci_phy_mode(omap->port_mode[2]))
393 else 515 reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
394 reg |= OMAP_UHH_HOSTCONFIG_ULPI_BYPASS; 516 else
395 } else { 517 reg |= OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
396 dev_dbg(omap->dev, "OMAP3 ES version > ES2.1\n"); 518 } else {
397 if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_PHY) 519 dev_dbg(omap->dev, "OMAP3 ES version > ES2.1\n");
398 reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS; 520 if (is_ehci_phy_mode(omap->port_mode[0]))
399 else if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_TLL) 521 reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS;
400 reg |= OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS; 522 else if (is_ehci_tll_mode(omap->port_mode[0]))
401 523 reg |= OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS;
402 if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_PHY) 524
403 reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS; 525 if (is_ehci_phy_mode(omap->port_mode[1]))
404 else if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_TLL) 526 reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS;
405 reg |= OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS; 527 else if (is_ehci_tll_mode(omap->port_mode[1]))
406 528 reg |= OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS;
407 if (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_PHY) 529
408 reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS; 530 if (is_ehci_phy_mode(omap->port_mode[2]))
409 else if (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_TLL) 531 reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS;
410 reg |= OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS; 532 else if (is_ehci_tll_mode(omap->port_mode[2]))
533 reg |= OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS;
534 }
535 } else if (is_omap_ehci_rev2(omap)) {
536 /* Clear port mode fields for PHY mode*/
537 reg &= ~OMAP4_P1_MODE_CLEAR;
538 reg &= ~OMAP4_P2_MODE_CLEAR;
539
540 if (is_ehci_tll_mode(omap->port_mode[0]))
541 reg |= OMAP4_P1_MODE_TLL;
542 else if (is_ehci_hsic_mode(omap->port_mode[0]))
543 reg |= OMAP4_P1_MODE_HSIC;
411 544
545 if (is_ehci_tll_mode(omap->port_mode[1]))
546 reg |= OMAP4_P2_MODE_TLL;
547 else if (is_ehci_hsic_mode(omap->port_mode[1]))
548 reg |= OMAP4_P2_MODE_HSIC;
412 } 549 }
550
413 ehci_omap_writel(omap->uhh_base, OMAP_UHH_HOSTCONFIG, reg); 551 ehci_omap_writel(omap->uhh_base, OMAP_UHH_HOSTCONFIG, reg);
414 dev_dbg(omap->dev, "UHH setup done, uhh_hostconfig=%x\n", reg); 552 dev_dbg(omap->dev, "UHH setup done, uhh_hostconfig=%x\n", reg);
415 553
@@ -438,7 +576,7 @@ static int omap_start_ehc(struct ehci_hcd_omap *omap, struct usb_hcd *hcd)
438 tll_ch_mask |= OMAP_TLL_CHANNEL_3_EN_MASK; 576 tll_ch_mask |= OMAP_TLL_CHANNEL_3_EN_MASK;
439 577
440 /* Enable UTMI mode for required TLL channels */ 578 /* Enable UTMI mode for required TLL channels */
441 omap_usb_utmi_init(omap, tll_ch_mask); 579 omap_usb_utmi_init(omap, tll_ch_mask, OMAP_TLL_CHANNEL_COUNT);
442 } 580 }
443 581
444 if (omap->phy_reset) { 582 if (omap->phy_reset) {
@@ -464,6 +602,14 @@ static int omap_start_ehc(struct ehci_hcd_omap *omap, struct usb_hcd *hcd)
464 return 0; 602 return 0;
465 603
466err_sys_status: 604err_sys_status:
605 clk_disable(omap->utmi_p2_fck);
606 clk_put(omap->utmi_p2_fck);
607 clk_disable(omap->xclk60mhsp2_ck);
608 clk_put(omap->xclk60mhsp2_ck);
609 clk_disable(omap->utmi_p1_fck);
610 clk_put(omap->utmi_p1_fck);
611 clk_disable(omap->xclk60mhsp1_ck);
612 clk_put(omap->xclk60mhsp1_ck);
467 clk_disable(omap->usbtll_ick); 613 clk_disable(omap->usbtll_ick);
468 clk_put(omap->usbtll_ick); 614 clk_put(omap->usbtll_ick);
469 615
@@ -472,8 +618,8 @@ err_tll_ick:
472 clk_put(omap->usbtll_fck); 618 clk_put(omap->usbtll_fck);
473 619
474err_tll_fck: 620err_tll_fck:
475 clk_disable(omap->usbhost1_48m_fck); 621 clk_disable(omap->usbhost_fs_fck);
476 clk_put(omap->usbhost1_48m_fck); 622 clk_put(omap->usbhost_fs_fck);
477 623
478 if (omap->phy_reset) { 624 if (omap->phy_reset) {
479 if (gpio_is_valid(omap->reset_gpio_port[0])) 625 if (gpio_is_valid(omap->reset_gpio_port[0]))
@@ -484,8 +630,8 @@ err_tll_fck:
484 } 630 }
485 631
486err_host_48m_fck: 632err_host_48m_fck:
487 clk_disable(omap->usbhost2_120m_fck); 633 clk_disable(omap->usbhost_hs_fck);
488 clk_put(omap->usbhost2_120m_fck); 634 clk_put(omap->usbhost_hs_fck);
489 635
490err_host_120m_fck: 636err_host_120m_fck:
491 clk_disable(omap->usbhost_ick); 637 clk_disable(omap->usbhost_ick);
@@ -503,6 +649,8 @@ static void omap_stop_ehc(struct ehci_hcd_omap *omap, struct usb_hcd *hcd)
503 649
504 /* Reset OMAP modules for insmod/rmmod to work */ 650 /* Reset OMAP modules for insmod/rmmod to work */
505 ehci_omap_writel(omap->uhh_base, OMAP_UHH_SYSCONFIG, 651 ehci_omap_writel(omap->uhh_base, OMAP_UHH_SYSCONFIG,
652 is_omap_ehci_rev2(omap) ?
653 OMAP4_UHH_SYSCONFIG_SOFTRESET :
506 OMAP_UHH_SYSCONFIG_SOFTRESET); 654 OMAP_UHH_SYSCONFIG_SOFTRESET);
507 while (!(ehci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS) 655 while (!(ehci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS)
508 & (1 << 0))) { 656 & (1 << 0))) {
@@ -550,16 +698,16 @@ static void omap_stop_ehc(struct ehci_hcd_omap *omap, struct usb_hcd *hcd)
550 omap->usbhost_ick = NULL; 698 omap->usbhost_ick = NULL;
551 } 699 }
552 700
553 if (omap->usbhost1_48m_fck != NULL) { 701 if (omap->usbhost_fs_fck != NULL) {
554 clk_disable(omap->usbhost1_48m_fck); 702 clk_disable(omap->usbhost_fs_fck);
555 clk_put(omap->usbhost1_48m_fck); 703 clk_put(omap->usbhost_fs_fck);
556 omap->usbhost1_48m_fck = NULL; 704 omap->usbhost_fs_fck = NULL;
557 } 705 }
558 706
559 if (omap->usbhost2_120m_fck != NULL) { 707 if (omap->usbhost_hs_fck != NULL) {
560 clk_disable(omap->usbhost2_120m_fck); 708 clk_disable(omap->usbhost_hs_fck);
561 clk_put(omap->usbhost2_120m_fck); 709 clk_put(omap->usbhost_hs_fck);
562 omap->usbhost2_120m_fck = NULL; 710 omap->usbhost_hs_fck = NULL;
563 } 711 }
564 712
565 if (omap->usbtll_ick != NULL) { 713 if (omap->usbtll_ick != NULL) {
@@ -568,6 +716,32 @@ static void omap_stop_ehc(struct ehci_hcd_omap *omap, struct usb_hcd *hcd)
568 omap->usbtll_ick = NULL; 716 omap->usbtll_ick = NULL;
569 } 717 }
570 718
719 if (is_omap_ehci_rev2(omap)) {
720 if (omap->xclk60mhsp1_ck != NULL) {
721 clk_disable(omap->xclk60mhsp1_ck);
722 clk_put(omap->xclk60mhsp1_ck);
723 omap->xclk60mhsp1_ck = NULL;
724 }
725
726 if (omap->utmi_p1_fck != NULL) {
727 clk_disable(omap->utmi_p1_fck);
728 clk_put(omap->utmi_p1_fck);
729 omap->utmi_p1_fck = NULL;
730 }
731
732 if (omap->xclk60mhsp2_ck != NULL) {
733 clk_disable(omap->xclk60mhsp2_ck);
734 clk_put(omap->xclk60mhsp2_ck);
735 omap->xclk60mhsp2_ck = NULL;
736 }
737
738 if (omap->utmi_p2_fck != NULL) {
739 clk_disable(omap->utmi_p2_fck);
740 clk_put(omap->utmi_p2_fck);
741 omap->utmi_p2_fck = NULL;
742 }
743 }
744
571 if (omap->phy_reset) { 745 if (omap->phy_reset) {
572 if (gpio_is_valid(omap->reset_gpio_port[0])) 746 if (gpio_is_valid(omap->reset_gpio_port[0]))
573 gpio_free(omap->reset_gpio_port[0]); 747 gpio_free(omap->reset_gpio_port[0]);
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 655f3c9f88bf..76179c39c0e3 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -22,6 +22,9 @@
22#error "This file is PCI bus glue. CONFIG_PCI must be defined." 22#error "This file is PCI bus glue. CONFIG_PCI must be defined."
23#endif 23#endif
24 24
25/* defined here to avoid adding to pci_ids.h for single instance use */
26#define PCI_DEVICE_ID_INTEL_CE4100_USB 0x2e70
27
25/*-------------------------------------------------------------------------*/ 28/*-------------------------------------------------------------------------*/
26 29
27/* called after powerup, by probe or system-pm "wakeup" */ 30/* called after powerup, by probe or system-pm "wakeup" */
@@ -41,6 +44,35 @@ static int ehci_pci_reinit(struct ehci_hcd *ehci, struct pci_dev *pdev)
41 return 0; 44 return 0;
42} 45}
43 46
47static int ehci_quirk_amd_SB800(struct ehci_hcd *ehci)
48{
49 struct pci_dev *amd_smbus_dev;
50 u8 rev = 0;
51
52 amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL);
53 if (!amd_smbus_dev)
54 return 0;
55
56 pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev);
57 if (rev < 0x40) {
58 pci_dev_put(amd_smbus_dev);
59 amd_smbus_dev = NULL;
60 return 0;
61 }
62
63 if (!amd_nb_dev)
64 amd_nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL);
65 if (!amd_nb_dev)
66 ehci_err(ehci, "QUIRK: unable to get AMD NB device\n");
67
68 ehci_info(ehci, "QUIRK: Enable AMD SB800 L1 fix\n");
69
70 pci_dev_put(amd_smbus_dev);
71 amd_smbus_dev = NULL;
72
73 return 1;
74}
75
44/* called during probe() after chip reset completes */ 76/* called during probe() after chip reset completes */
45static int ehci_pci_setup(struct usb_hcd *hcd) 77static int ehci_pci_setup(struct usb_hcd *hcd)
46{ 78{
@@ -99,6 +131,9 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
99 /* cache this readonly data; minimize chip reads */ 131 /* cache this readonly data; minimize chip reads */
100 ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params); 132 ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
101 133
134 if (ehci_quirk_amd_SB800(ehci))
135 ehci->amd_l1_fix = 1;
136
102 retval = ehci_halt(ehci); 137 retval = ehci_halt(ehci);
103 if (retval) 138 if (retval)
104 return retval; 139 return retval;
@@ -137,6 +172,10 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
137 ehci_info(ehci, "disable lpm for langwell/penwell\n"); 172 ehci_info(ehci, "disable lpm for langwell/penwell\n");
138 ehci->has_lpm = 0; 173 ehci->has_lpm = 0;
139 } 174 }
175 if (pdev->device == PCI_DEVICE_ID_INTEL_CE4100_USB) {
176 hcd->has_tt = 1;
177 tdi_reset(ehci);
178 }
140 break; 179 break;
141 case PCI_VENDOR_ID_TDI: 180 case PCI_VENDOR_ID_TDI:
142 if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) { 181 if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) {
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index d9f78eb26572..aa46f57f9ec8 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -1590,6 +1590,63 @@ itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
1590 *hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD); 1590 *hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
1591} 1591}
1592 1592
1593#define AB_REG_BAR_LOW 0xe0
1594#define AB_REG_BAR_HIGH 0xe1
1595#define AB_INDX(addr) ((addr) + 0x00)
1596#define AB_DATA(addr) ((addr) + 0x04)
1597#define NB_PCIE_INDX_ADDR 0xe0
1598#define NB_PCIE_INDX_DATA 0xe4
1599#define NB_PIF0_PWRDOWN_0 0x01100012
1600#define NB_PIF0_PWRDOWN_1 0x01100013
1601
1602static void ehci_quirk_amd_L1(struct ehci_hcd *ehci, int disable)
1603{
1604 u32 addr, addr_low, addr_high, val;
1605
1606 outb_p(AB_REG_BAR_LOW, 0xcd6);
1607 addr_low = inb_p(0xcd7);
1608 outb_p(AB_REG_BAR_HIGH, 0xcd6);
1609 addr_high = inb_p(0xcd7);
1610 addr = addr_high << 8 | addr_low;
1611 outl_p(0x30, AB_INDX(addr));
1612 outl_p(0x40, AB_DATA(addr));
1613 outl_p(0x34, AB_INDX(addr));
1614 val = inl_p(AB_DATA(addr));
1615
1616 if (disable) {
1617 val &= ~0x8;
1618 val |= (1 << 4) | (1 << 9);
1619 } else {
1620 val |= 0x8;
1621 val &= ~((1 << 4) | (1 << 9));
1622 }
1623 outl_p(val, AB_DATA(addr));
1624
1625 if (amd_nb_dev) {
1626 addr = NB_PIF0_PWRDOWN_0;
1627 pci_write_config_dword(amd_nb_dev, NB_PCIE_INDX_ADDR, addr);
1628 pci_read_config_dword(amd_nb_dev, NB_PCIE_INDX_DATA, &val);
1629 if (disable)
1630 val &= ~(0x3f << 7);
1631 else
1632 val |= 0x3f << 7;
1633
1634 pci_write_config_dword(amd_nb_dev, NB_PCIE_INDX_DATA, val);
1635
1636 addr = NB_PIF0_PWRDOWN_1;
1637 pci_write_config_dword(amd_nb_dev, NB_PCIE_INDX_ADDR, addr);
1638 pci_read_config_dword(amd_nb_dev, NB_PCIE_INDX_DATA, &val);
1639 if (disable)
1640 val &= ~(0x3f << 7);
1641 else
1642 val |= 0x3f << 7;
1643
1644 pci_write_config_dword(amd_nb_dev, NB_PCIE_INDX_DATA, val);
1645 }
1646
1647 return;
1648}
1649
1593/* fit urb's itds into the selected schedule slot; activate as needed */ 1650/* fit urb's itds into the selected schedule slot; activate as needed */
1594static int 1651static int
1595itd_link_urb ( 1652itd_link_urb (
@@ -1616,6 +1673,12 @@ itd_link_urb (
1616 urb->interval, 1673 urb->interval,
1617 next_uframe >> 3, next_uframe & 0x7); 1674 next_uframe >> 3, next_uframe & 0x7);
1618 } 1675 }
1676
1677 if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
1678 if (ehci->amd_l1_fix == 1)
1679 ehci_quirk_amd_L1(ehci, 1);
1680 }
1681
1619 ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++; 1682 ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
1620 1683
1621 /* fill iTDs uframe by uframe */ 1684 /* fill iTDs uframe by uframe */
@@ -1740,6 +1803,11 @@ itd_complete (
1740 (void) disable_periodic(ehci); 1803 (void) disable_periodic(ehci);
1741 ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--; 1804 ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
1742 1805
1806 if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
1807 if (ehci->amd_l1_fix == 1)
1808 ehci_quirk_amd_L1(ehci, 0);
1809 }
1810
1743 if (unlikely(list_is_singular(&stream->td_list))) { 1811 if (unlikely(list_is_singular(&stream->td_list))) {
1744 ehci_to_hcd(ehci)->self.bandwidth_allocated 1812 ehci_to_hcd(ehci)->self.bandwidth_allocated
1745 -= stream->bandwidth; 1813 -= stream->bandwidth;
@@ -2025,6 +2093,12 @@ sitd_link_urb (
2025 (next_uframe >> 3) & (ehci->periodic_size - 1), 2093 (next_uframe >> 3) & (ehci->periodic_size - 1),
2026 stream->interval, hc32_to_cpu(ehci, stream->splits)); 2094 stream->interval, hc32_to_cpu(ehci, stream->splits));
2027 } 2095 }
2096
2097 if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
2098 if (ehci->amd_l1_fix == 1)
2099 ehci_quirk_amd_L1(ehci, 1);
2100 }
2101
2028 ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++; 2102 ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
2029 2103
2030 /* fill sITDs frame by frame */ 2104 /* fill sITDs frame by frame */
@@ -2125,6 +2199,11 @@ sitd_complete (
2125 (void) disable_periodic(ehci); 2199 (void) disable_periodic(ehci);
2126 ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--; 2200 ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
2127 2201
2202 if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
2203 if (ehci->amd_l1_fix == 1)
2204 ehci_quirk_amd_L1(ehci, 0);
2205 }
2206
2128 if (list_is_singular(&stream->td_list)) { 2207 if (list_is_singular(&stream->td_list)) {
2129 ehci_to_hcd(ehci)->self.bandwidth_allocated 2208 ehci_to_hcd(ehci)->self.bandwidth_allocated
2130 -= stream->bandwidth; 2209 -= stream->bandwidth;
diff --git a/drivers/usb/host/ehci-sh.c b/drivers/usb/host/ehci-sh.c
new file mode 100644
index 000000000000..595f70f42b52
--- /dev/null
+++ b/drivers/usb/host/ehci-sh.c
@@ -0,0 +1,243 @@
1/*
2 * SuperH EHCI host controller driver
3 *
4 * Copyright (C) 2010 Paul Mundt
5 *
6 * Based on ohci-sh.c and ehci-atmel.c.
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11 */
12#include <linux/platform_device.h>
13#include <linux/clk.h>
14
15struct ehci_sh_priv {
16 struct clk *iclk, *fclk;
17 struct usb_hcd *hcd;
18};
19
20static int ehci_sh_reset(struct usb_hcd *hcd)
21{
22 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
23 int ret;
24
25 ehci->caps = hcd->regs;
26 ehci->regs = hcd->regs + HC_LENGTH(ehci_readl(ehci,
27 &ehci->caps->hc_capbase));
28
29 dbg_hcs_params(ehci, "reset");
30 dbg_hcc_params(ehci, "reset");
31
32 ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
33
34 ret = ehci_halt(ehci);
35 if (unlikely(ret))
36 return ret;
37
38 ret = ehci_init(hcd);
39 if (unlikely(ret))
40 return ret;
41
42 ehci->sbrn = 0x20;
43
44 ehci_reset(ehci);
45 ehci_port_power(ehci, 0);
46
47 return ret;
48}
49
50static const struct hc_driver ehci_sh_hc_driver = {
51 .description = hcd_name,
52 .product_desc = "SuperH EHCI",
53 .hcd_priv_size = sizeof(struct ehci_hcd),
54
55 /*
56 * generic hardware linkage
57 */
58 .irq = ehci_irq,
59 .flags = HCD_USB2 | HCD_MEMORY,
60
61 /*
62 * basic lifecycle operations
63 */
64 .reset = ehci_sh_reset,
65 .start = ehci_run,
66 .stop = ehci_stop,
67 .shutdown = ehci_shutdown,
68
69 /*
70 * managing i/o requests and associated device resources
71 */
72 .urb_enqueue = ehci_urb_enqueue,
73 .urb_dequeue = ehci_urb_dequeue,
74 .endpoint_disable = ehci_endpoint_disable,
75 .endpoint_reset = ehci_endpoint_reset,
76
77 /*
78 * scheduling support
79 */
80 .get_frame_number = ehci_get_frame,
81
82 /*
83 * root hub support
84 */
85 .hub_status_data = ehci_hub_status_data,
86 .hub_control = ehci_hub_control,
87
88#ifdef CONFIG_PM
89 .bus_suspend = ehci_bus_suspend,
90 .bus_resume = ehci_bus_resume,
91#endif
92
93 .relinquish_port = ehci_relinquish_port,
94 .port_handed_over = ehci_port_handed_over,
95 .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
96};
97
98static int ehci_hcd_sh_probe(struct platform_device *pdev)
99{
100 const struct hc_driver *driver = &ehci_sh_hc_driver;
101 struct resource *res;
102 struct ehci_sh_priv *priv;
103 struct usb_hcd *hcd;
104 int irq, ret;
105
106 if (usb_disabled())
107 return -ENODEV;
108
109 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
110 if (!res) {
111 dev_err(&pdev->dev,
112 "Found HC with no register addr. Check %s setup!\n",
113 dev_name(&pdev->dev));
114 ret = -ENODEV;
115 goto fail_create_hcd;
116 }
117
118 irq = platform_get_irq(pdev, 0);
119 if (irq <= 0) {
120 dev_err(&pdev->dev,
121 "Found HC with no IRQ. Check %s setup!\n",
122 dev_name(&pdev->dev));
123 ret = -ENODEV;
124 goto fail_create_hcd;
125 }
126
127 /* initialize hcd */
128 hcd = usb_create_hcd(&ehci_sh_hc_driver, &pdev->dev,
129 dev_name(&pdev->dev));
130 if (!hcd) {
131 ret = -ENOMEM;
132 goto fail_create_hcd;
133 }
134
135 hcd->rsrc_start = res->start;
136 hcd->rsrc_len = resource_size(res);
137
138 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
139 driver->description)) {
140 dev_dbg(&pdev->dev, "controller already in use\n");
141 ret = -EBUSY;
142 goto fail_request_resource;
143 }
144
145 hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len);
146 if (hcd->regs == NULL) {
147 dev_dbg(&pdev->dev, "error mapping memory\n");
148 ret = -ENXIO;
149 goto fail_ioremap;
150 }
151
152 priv = kmalloc(sizeof(struct ehci_sh_priv), GFP_KERNEL);
153 if (!priv) {
154 dev_dbg(&pdev->dev, "error allocating priv data\n");
155 ret = -ENOMEM;
156 goto fail_alloc;
157 }
158
159 /* These are optional, we don't care if they fail */
160 priv->fclk = clk_get(&pdev->dev, "usb_fck");
161 if (IS_ERR(priv->fclk))
162 priv->fclk = NULL;
163
164 priv->iclk = clk_get(&pdev->dev, "usb_ick");
165 if (IS_ERR(priv->iclk))
166 priv->iclk = NULL;
167
168 clk_enable(priv->fclk);
169 clk_enable(priv->iclk);
170
171 ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
172 if (ret != 0) {
173 dev_err(&pdev->dev, "Failed to add hcd");
174 goto fail_add_hcd;
175 }
176
177 priv->hcd = hcd;
178 platform_set_drvdata(pdev, priv);
179
180 return ret;
181
182fail_add_hcd:
183 clk_disable(priv->iclk);
184 clk_disable(priv->fclk);
185
186 clk_put(priv->iclk);
187 clk_put(priv->fclk);
188
189 kfree(priv);
190fail_alloc:
191 iounmap(hcd->regs);
192fail_ioremap:
193 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
194fail_request_resource:
195 usb_put_hcd(hcd);
196fail_create_hcd:
197 dev_err(&pdev->dev, "init %s fail, %d\n", dev_name(&pdev->dev), ret);
198
199 return ret;
200}
201
202static int __exit ehci_hcd_sh_remove(struct platform_device *pdev)
203{
204 struct ehci_sh_priv *priv = platform_get_drvdata(pdev);
205 struct usb_hcd *hcd = priv->hcd;
206
207 usb_remove_hcd(hcd);
208 iounmap(hcd->regs);
209 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
210 usb_put_hcd(hcd);
211 platform_set_drvdata(pdev, NULL);
212
213 clk_disable(priv->fclk);
214 clk_disable(priv->iclk);
215
216 clk_put(priv->fclk);
217 clk_put(priv->iclk);
218
219 kfree(priv);
220
221 return 0;
222}
223
224static void ehci_hcd_sh_shutdown(struct platform_device *pdev)
225{
226 struct ehci_sh_priv *priv = platform_get_drvdata(pdev);
227 struct usb_hcd *hcd = priv->hcd;
228
229 if (hcd->driver->shutdown)
230 hcd->driver->shutdown(hcd);
231}
232
233static struct platform_driver ehci_hcd_sh_driver = {
234 .probe = ehci_hcd_sh_probe,
235 .remove = __exit_p(ehci_hcd_sh_remove),
236 .shutdown = ehci_hcd_sh_shutdown,
237 .driver = {
238 .name = "sh_ehci",
239 .owner = THIS_MODULE,
240 },
241};
242
243MODULE_ALIAS("platform:sh_ehci");
diff --git a/drivers/usb/host/ehci-spear.c b/drivers/usb/host/ehci-spear.c
new file mode 100644
index 000000000000..75c00873443d
--- /dev/null
+++ b/drivers/usb/host/ehci-spear.c
@@ -0,0 +1,212 @@
1/*
2* Driver for EHCI HCD on SPEAR SOC
3*
4* Copyright (C) 2010 ST Micro Electronics,
5* Deepak Sikri <deepak.sikri@st.com>
6*
7* Based on various ehci-*.c drivers
8*
9* This file is subject to the terms and conditions of the GNU General Public
10* License. See the file COPYING in the main directory of this archive for
11* more details.
12*/
13
14#include <linux/platform_device.h>
15#include <linux/clk.h>
16
17struct spear_ehci {
18 struct ehci_hcd ehci;
19 struct clk *clk;
20};
21
22#define to_spear_ehci(hcd) (struct spear_ehci *)hcd_to_ehci(hcd)
23
24static void spear_start_ehci(struct spear_ehci *ehci)
25{
26 clk_enable(ehci->clk);
27}
28
29static void spear_stop_ehci(struct spear_ehci *ehci)
30{
31 clk_disable(ehci->clk);
32}
33
34static int ehci_spear_setup(struct usb_hcd *hcd)
35{
36 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
37 int retval = 0;
38
39 /* registers start at offset 0x0 */
40 ehci->caps = hcd->regs;
41 ehci->regs = hcd->regs + HC_LENGTH(ehci_readl(ehci,
42 &ehci->caps->hc_capbase));
43 /* cache this readonly data; minimize chip reads */
44 ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
45 retval = ehci_halt(ehci);
46 if (retval)
47 return retval;
48
49 retval = ehci_init(hcd);
50 if (retval)
51 return retval;
52
53 ehci_reset(ehci);
54 ehci_port_power(ehci, 0);
55
56 return retval;
57}
58
59static const struct hc_driver ehci_spear_hc_driver = {
60 .description = hcd_name,
61 .product_desc = "SPEAr EHCI",
62 .hcd_priv_size = sizeof(struct spear_ehci),
63
64 /* generic hardware linkage */
65 .irq = ehci_irq,
66 .flags = HCD_MEMORY | HCD_USB2,
67
68 /* basic lifecycle operations */
69 .reset = ehci_spear_setup,
70 .start = ehci_run,
71 .stop = ehci_stop,
72 .shutdown = ehci_shutdown,
73
74 /* managing i/o requests and associated device resources */
75 .urb_enqueue = ehci_urb_enqueue,
76 .urb_dequeue = ehci_urb_dequeue,
77 .endpoint_disable = ehci_endpoint_disable,
78 .endpoint_reset = ehci_endpoint_reset,
79
80 /* scheduling support */
81 .get_frame_number = ehci_get_frame,
82
83 /* root hub support */
84 .hub_status_data = ehci_hub_status_data,
85 .hub_control = ehci_hub_control,
86 .bus_suspend = ehci_bus_suspend,
87 .bus_resume = ehci_bus_resume,
88 .relinquish_port = ehci_relinquish_port,
89 .port_handed_over = ehci_port_handed_over,
90 .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
91};
92
93static int spear_ehci_hcd_drv_probe(struct platform_device *pdev)
94{
95 struct usb_hcd *hcd ;
96 struct spear_ehci *ehci;
97 struct resource *res;
98 struct clk *usbh_clk;
99 const struct hc_driver *driver = &ehci_spear_hc_driver;
100 int *pdata = pdev->dev.platform_data;
101 int irq, retval;
102 char clk_name[20] = "usbh_clk";
103
104 if (pdata == NULL)
105 return -EFAULT;
106
107 if (usb_disabled())
108 return -ENODEV;
109
110 irq = platform_get_irq(pdev, 0);
111 if (irq < 0) {
112 retval = irq;
113 goto fail_irq_get;
114 }
115
116 if (*pdata >= 0)
117 sprintf(clk_name, "usbh.%01d_clk", *pdata);
118
119 usbh_clk = clk_get(NULL, clk_name);
120 if (IS_ERR(usbh_clk)) {
121 dev_err(&pdev->dev, "Error getting interface clock\n");
122 retval = PTR_ERR(usbh_clk);
123 goto fail_get_usbh_clk;
124 }
125
126 hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
127 if (!hcd) {
128 retval = -ENOMEM;
129 goto fail_create_hcd;
130 }
131
132 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
133 if (!res) {
134 retval = -ENODEV;
135 goto fail_request_resource;
136 }
137
138 hcd->rsrc_start = res->start;
139 hcd->rsrc_len = resource_size(res);
140 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
141 driver->description)) {
142 retval = -EBUSY;
143 goto fail_request_resource;
144 }
145
146 hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
147 if (hcd->regs == NULL) {
148 dev_dbg(&pdev->dev, "error mapping memory\n");
149 retval = -ENOMEM;
150 goto fail_ioremap;
151 }
152
153 ehci = (struct spear_ehci *)hcd_to_ehci(hcd);
154 ehci->clk = usbh_clk;
155
156 spear_start_ehci(ehci);
157 retval = usb_add_hcd(hcd, irq, IRQF_SHARED | IRQF_DISABLED);
158 if (retval)
159 goto fail_add_hcd;
160
161 return retval;
162
163fail_add_hcd:
164 spear_stop_ehci(ehci);
165 iounmap(hcd->regs);
166fail_ioremap:
167 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
168fail_request_resource:
169 usb_put_hcd(hcd);
170fail_create_hcd:
171 clk_put(usbh_clk);
172fail_get_usbh_clk:
173fail_irq_get:
174 dev_err(&pdev->dev, "init fail, %d\n", retval);
175
176 return retval ;
177}
178
179static int spear_ehci_hcd_drv_remove(struct platform_device *pdev)
180{
181 struct usb_hcd *hcd = platform_get_drvdata(pdev);
182 struct spear_ehci *ehci_p = to_spear_ehci(hcd);
183
184 if (!hcd)
185 return 0;
186 if (in_interrupt())
187 BUG();
188 usb_remove_hcd(hcd);
189
190 if (ehci_p->clk)
191 spear_stop_ehci(ehci_p);
192 iounmap(hcd->regs);
193 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
194 usb_put_hcd(hcd);
195
196 if (ehci_p->clk)
197 clk_put(ehci_p->clk);
198
199 return 0;
200}
201
202static struct platform_driver spear_ehci_hcd_driver = {
203 .probe = spear_ehci_hcd_drv_probe,
204 .remove = spear_ehci_hcd_drv_remove,
205 .shutdown = usb_hcd_platform_shutdown,
206 .driver = {
207 .name = "spear-ehci",
208 .bus = &platform_bus_type
209 }
210};
211
212MODULE_ALIAS("platform:spear-ehci");
diff --git a/drivers/usb/host/ehci-vt8500.c b/drivers/usb/host/ehci-vt8500.c
new file mode 100644
index 000000000000..20168062035a
--- /dev/null
+++ b/drivers/usb/host/ehci-vt8500.c
@@ -0,0 +1,172 @@
1/*
2 * drivers/usb/host/ehci-vt8500.c
3 *
4 * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
5 *
6 * Based on ehci-au1xxx.c
7 *
8 * This software is licensed under the terms of the GNU General Public
9 * License version 2, as published by the Free Software Foundation, and
10 * may be copied, distributed, and modified under those terms.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 */
18
19#include <linux/platform_device.h>
20
21static int ehci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
22{
23 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
24 int rc = 0;
25
26 if (!udev->parent) /* udev is root hub itself, impossible */
27 rc = -1;
28 /* we only support lpm device connected to root hub yet */
29 if (ehci->has_lpm && !udev->parent->parent) {
30 rc = ehci_lpm_set_da(ehci, udev->devnum, udev->portnum);
31 if (!rc)
32 rc = ehci_lpm_check(ehci, udev->portnum);
33 }
34 return rc;
35}
36
37static const struct hc_driver vt8500_ehci_hc_driver = {
38 .description = hcd_name,
39 .product_desc = "VT8500 EHCI",
40 .hcd_priv_size = sizeof(struct ehci_hcd),
41
42 /*
43 * generic hardware linkage
44 */
45 .irq = ehci_irq,
46 .flags = HCD_MEMORY | HCD_USB2,
47
48 /*
49 * basic lifecycle operations
50 */
51 .reset = ehci_init,
52 .start = ehci_run,
53 .stop = ehci_stop,
54 .shutdown = ehci_shutdown,
55
56 /*
57 * managing i/o requests and associated device resources
58 */
59 .urb_enqueue = ehci_urb_enqueue,
60 .urb_dequeue = ehci_urb_dequeue,
61 .endpoint_disable = ehci_endpoint_disable,
62 .endpoint_reset = ehci_endpoint_reset,
63
64 /*
65 * scheduling support
66 */
67 .get_frame_number = ehci_get_frame,
68
69 /*
70 * root hub support
71 */
72 .hub_status_data = ehci_hub_status_data,
73 .hub_control = ehci_hub_control,
74 .bus_suspend = ehci_bus_suspend,
75 .bus_resume = ehci_bus_resume,
76 .relinquish_port = ehci_relinquish_port,
77 .port_handed_over = ehci_port_handed_over,
78
79 /*
80 * call back when device connected and addressed
81 */
82 .update_device = ehci_update_device,
83
84 .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
85};
86
87static int vt8500_ehci_drv_probe(struct platform_device *pdev)
88{
89 struct usb_hcd *hcd;
90 struct ehci_hcd *ehci;
91 struct resource *res;
92 int ret;
93
94 if (usb_disabled())
95 return -ENODEV;
96
97 if (pdev->resource[1].flags != IORESOURCE_IRQ) {
98 pr_debug("resource[1] is not IORESOURCE_IRQ");
99 return -ENOMEM;
100 }
101 hcd = usb_create_hcd(&vt8500_ehci_hc_driver, &pdev->dev, "VT8500");
102 if (!hcd)
103 return -ENOMEM;
104
105 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
106 hcd->rsrc_start = res->start;
107 hcd->rsrc_len = resource_size(res);
108
109 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
110 pr_debug("request_mem_region failed");
111 ret = -EBUSY;
112 goto err1;
113 }
114
115 hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
116 if (!hcd->regs) {
117 pr_debug("ioremap failed");
118 ret = -ENOMEM;
119 goto err2;
120 }
121
122 ehci = hcd_to_ehci(hcd);
123 ehci->caps = hcd->regs;
124 ehci->regs = hcd->regs + HC_LENGTH(readl(&ehci->caps->hc_capbase));
125
126 dbg_hcs_params(ehci, "reset");
127 dbg_hcc_params(ehci, "reset");
128
129 /* cache this readonly data; minimize chip reads */
130 ehci->hcs_params = readl(&ehci->caps->hcs_params);
131
132 ehci_port_power(ehci, 1);
133
134 ret = usb_add_hcd(hcd, pdev->resource[1].start,
135 IRQF_DISABLED | IRQF_SHARED);
136 if (ret == 0) {
137 platform_set_drvdata(pdev, hcd);
138 return ret;
139 }
140
141 iounmap(hcd->regs);
142err2:
143 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
144err1:
145 usb_put_hcd(hcd);
146 return ret;
147}
148
149static int vt8500_ehci_drv_remove(struct platform_device *pdev)
150{
151 struct usb_hcd *hcd = platform_get_drvdata(pdev);
152
153 usb_remove_hcd(hcd);
154 iounmap(hcd->regs);
155 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
156 usb_put_hcd(hcd);
157 platform_set_drvdata(pdev, NULL);
158
159 return 0;
160}
161
162static struct platform_driver vt8500_ehci_driver = {
163 .probe = vt8500_ehci_drv_probe,
164 .remove = vt8500_ehci_drv_remove,
165 .shutdown = usb_hcd_platform_shutdown,
166 .driver = {
167 .name = "vt8500-ehci",
168 .owner = THIS_MODULE,
169 }
170};
171
172MODULE_ALIAS("platform:vt8500-ehci");
diff --git a/drivers/usb/host/ehci-w90x900.c b/drivers/usb/host/ehci-w90x900.c
index cfa21ea20f82..6bc35809a5c6 100644
--- a/drivers/usb/host/ehci-w90x900.c
+++ b/drivers/usb/host/ehci-w90x900.c
@@ -130,6 +130,7 @@ static const struct hc_driver ehci_w90x900_hc_driver = {
130 .urb_enqueue = ehci_urb_enqueue, 130 .urb_enqueue = ehci_urb_enqueue,
131 .urb_dequeue = ehci_urb_dequeue, 131 .urb_dequeue = ehci_urb_dequeue,
132 .endpoint_disable = ehci_endpoint_disable, 132 .endpoint_disable = ehci_endpoint_disable,
133 .endpoint_reset = ehci_endpoint_reset,
133 134
134 /* 135 /*
135 * scheduling support 136 * scheduling support
@@ -147,6 +148,8 @@ static const struct hc_driver ehci_w90x900_hc_driver = {
147#endif 148#endif
148 .relinquish_port = ehci_relinquish_port, 149 .relinquish_port = ehci_relinquish_port,
149 .port_handed_over = ehci_port_handed_over, 150 .port_handed_over = ehci_port_handed_over,
151
152 .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
150}; 153};
151 154
152static int __devinit ehci_w90x900_probe(struct platform_device *pdev) 155static int __devinit ehci_w90x900_probe(struct platform_device *pdev)
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index 6c8076ad821d..e8f4f36fdf0b 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -117,6 +117,7 @@ static const struct hc_driver ehci_xilinx_of_hc_driver = {
117 .urb_enqueue = ehci_urb_enqueue, 117 .urb_enqueue = ehci_urb_enqueue,
118 .urb_dequeue = ehci_urb_dequeue, 118 .urb_dequeue = ehci_urb_dequeue,
119 .endpoint_disable = ehci_endpoint_disable, 119 .endpoint_disable = ehci_endpoint_disable,
120 .endpoint_reset = ehci_endpoint_reset,
120 121
121 /* 122 /*
122 * scheduling support 123 * scheduling support
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index ba8eab366b82..799ac16a54b4 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -131,6 +131,7 @@ struct ehci_hcd { /* one per controller */
131 unsigned has_amcc_usb23:1; 131 unsigned has_amcc_usb23:1;
132 unsigned need_io_watchdog:1; 132 unsigned need_io_watchdog:1;
133 unsigned broken_periodic:1; 133 unsigned broken_periodic:1;
134 unsigned amd_l1_fix:1;
134 unsigned fs_i_thresh:1; /* Intel iso scheduling */ 135 unsigned fs_i_thresh:1; /* Intel iso scheduling */
135 unsigned use_dummy_qh:1; /* AMD Frame List table quirk*/ 136 unsigned use_dummy_qh:1; /* AMD Frame List table quirk*/
136 137
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 5cb6731ba443..9751647665df 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -1081,6 +1081,11 @@ MODULE_LICENSE ("GPL");
1081#define OF_PLATFORM_DRIVER ohci_hcd_ppc_of_driver 1081#define OF_PLATFORM_DRIVER ohci_hcd_ppc_of_driver
1082#endif 1082#endif
1083 1083
1084#ifdef CONFIG_PLAT_SPEAR
1085#include "ohci-spear.c"
1086#define PLATFORM_DRIVER spear_ohci_hcd_driver
1087#endif
1088
1084#ifdef CONFIG_PPC_PS3 1089#ifdef CONFIG_PPC_PS3
1085#include "ohci-ps3.c" 1090#include "ohci-ps3.c"
1086#define PS3_SYSTEM_BUS_DRIVER ps3_ohci_driver 1091#define PS3_SYSTEM_BUS_DRIVER ps3_ohci_driver
diff --git a/drivers/usb/host/ohci-omap3.c b/drivers/usb/host/ohci-omap3.c
index 2cc8a504b18c..a37d5993e4e3 100644
--- a/drivers/usb/host/ohci-omap3.c
+++ b/drivers/usb/host/ohci-omap3.c
@@ -648,7 +648,7 @@ static int __devinit ohci_hcd_omap3_probe(struct platform_device *pdev)
648 648
649 ret = omap3_start_ohci(omap, hcd); 649 ret = omap3_start_ohci(omap, hcd);
650 if (ret) { 650 if (ret) {
651 dev_dbg(&pdev->dev, "failed to start ehci\n"); 651 dev_dbg(&pdev->dev, "failed to start ohci\n");
652 goto err_start; 652 goto err_start;
653 } 653 }
654 654
diff --git a/drivers/usb/host/ohci-sh.c b/drivers/usb/host/ohci-sh.c
index 0b35d22cc70e..f47867ff78c7 100644
--- a/drivers/usb/host/ohci-sh.c
+++ b/drivers/usb/host/ohci-sh.c
@@ -109,7 +109,7 @@ static int ohci_hcd_sh_probe(struct platform_device *pdev)
109 hcd->regs = (void __iomem *)res->start; 109 hcd->regs = (void __iomem *)res->start;
110 hcd->rsrc_start = res->start; 110 hcd->rsrc_start = res->start;
111 hcd->rsrc_len = resource_size(res); 111 hcd->rsrc_len = resource_size(res);
112 ret = usb_add_hcd(hcd, irq, IRQF_DISABLED); 112 ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
113 if (ret != 0) { 113 if (ret != 0) {
114 err("Failed to add hcd"); 114 err("Failed to add hcd");
115 usb_put_hcd(hcd); 115 usb_put_hcd(hcd);
diff --git a/drivers/usb/host/ohci-spear.c b/drivers/usb/host/ohci-spear.c
new file mode 100644
index 000000000000..4fd4bea9ac7a
--- /dev/null
+++ b/drivers/usb/host/ohci-spear.c
@@ -0,0 +1,240 @@
1/*
2* OHCI HCD (Host Controller Driver) for USB.
3*
4* Copyright (C) 2010 ST Microelectronics.
5* Deepak Sikri<deepak.sikri@st.com>
6*
7* Based on various ohci-*.c drivers
8*
9* This file is licensed under the terms of the GNU General Public
10* License version 2. This program is licensed "as is" without any
11* warranty of any kind, whether express or implied.
12*/
13
14#include <linux/signal.h>
15#include <linux/platform_device.h>
16#include <linux/clk.h>
17
/*
 * Per-controller private state.  The generic OHCI state must be the
 * first member so that hcd_to_ohci() on the hcd private area also
 * yields a valid struct spear_ohci pointer (see to_spear_ohci below).
 */
struct spear_ohci {
	struct ohci_hcd ohci;	/* must stay first */
	struct clk *clk;	/* per-port usbh bus/interface clock */
};

/* Convert a usb_hcd into our wrapper around its embedded ohci_hcd. */
#define to_spear_ohci(hcd)	(struct spear_ohci *)hcd_to_ohci(hcd)
24
/* Ungate the controller's bus clock before register access. */
static void spear_start_ohci(struct spear_ohci *ohci)
{
	/* NOTE(review): clk_enable() return value ignored — presumably
	 * cannot fail on SPEAr; confirm against the platform clock code. */
	clk_enable(ohci->clk);
}
29
/* Gate the controller's bus clock; counterpart of spear_start_ohci(). */
static void spear_stop_ohci(struct spear_ohci *ohci)
{
	clk_disable(ohci->clk);
}
34
35static int __devinit ohci_spear_start(struct usb_hcd *hcd)
36{
37 struct ohci_hcd *ohci = hcd_to_ohci(hcd);
38 int ret;
39
40 ret = ohci_init(ohci);
41 if (ret < 0)
42 return ret;
43 ohci->regs = hcd->regs;
44
45 ret = ohci_run(ohci);
46 if (ret < 0) {
47 dev_err(hcd->self.controller, "can't start\n");
48 ohci_stop(hcd);
49 return ret;
50 }
51
52 create_debug_files(ohci);
53
54#ifdef DEBUG
55 ohci_dump(ohci, 1);
56#endif
57 return 0;
58}
59
/*
 * hc_driver for the SPEAr OHCI controller: all callbacks come from the
 * generic OHCI core except .start, which is SPEAr-specific.
 * hcd_priv_size reserves room for struct spear_ohci inside the hcd.
 */
static const struct hc_driver ohci_spear_hc_driver = {
	.description = hcd_name,
	.product_desc = "SPEAr OHCI",
	.hcd_priv_size = sizeof(struct spear_ohci),

	/* generic hardware linkage */
	.irq = ohci_irq,
	.flags = HCD_USB11 | HCD_MEMORY,

	/* basic lifecycle operations */
	.start = ohci_spear_start,
	.stop = ohci_stop,
	.shutdown = ohci_shutdown,
#ifdef CONFIG_PM
	.bus_suspend = ohci_bus_suspend,
	.bus_resume = ohci_bus_resume,
#endif

	/* managing i/o requests and associated device resources */
	.urb_enqueue = ohci_urb_enqueue,
	.urb_dequeue = ohci_urb_dequeue,
	.endpoint_disable = ohci_endpoint_disable,

	/* scheduling support */
	.get_frame_number = ohci_get_frame,

	/* root hub support */
	.hub_status_data = ohci_hub_status_data,
	.hub_control = ohci_hub_control,

	.start_port_reset = ohci_start_port_reset,
};
92
93static int spear_ohci_hcd_drv_probe(struct platform_device *pdev)
94{
95 const struct hc_driver *driver = &ohci_spear_hc_driver;
96 struct usb_hcd *hcd = NULL;
97 struct clk *usbh_clk;
98 struct spear_ohci *ohci_p;
99 struct resource *res;
100 int retval, irq;
101 int *pdata = pdev->dev.platform_data;
102 char clk_name[20] = "usbh_clk";
103
104 if (pdata == NULL)
105 return -EFAULT;
106
107 irq = platform_get_irq(pdev, 0);
108 if (irq < 0) {
109 retval = irq;
110 goto fail_irq_get;
111 }
112
113 if (*pdata >= 0)
114 sprintf(clk_name, "usbh.%01d_clk", *pdata);
115
116 usbh_clk = clk_get(NULL, clk_name);
117 if (IS_ERR(usbh_clk)) {
118 dev_err(&pdev->dev, "Error getting interface clock\n");
119 retval = PTR_ERR(usbh_clk);
120 goto fail_get_usbh_clk;
121 }
122
123 hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
124 if (!hcd) {
125 retval = -ENOMEM;
126 goto fail_create_hcd;
127 }
128
129 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
130 if (!res) {
131 retval = -ENODEV;
132 goto fail_request_resource;
133 }
134
135 hcd->rsrc_start = pdev->resource[0].start;
136 hcd->rsrc_len = resource_size(res);
137 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
138 dev_dbg(&pdev->dev, "request_mem_region failed\n");
139 retval = -EBUSY;
140 goto fail_request_resource;
141 }
142
143 hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
144 if (!hcd->regs) {
145 dev_dbg(&pdev->dev, "ioremap failed\n");
146 retval = -ENOMEM;
147 goto fail_ioremap;
148 }
149
150 ohci_p = (struct spear_ohci *)hcd_to_ohci(hcd);
151 ohci_p->clk = usbh_clk;
152 spear_start_ohci(ohci_p);
153 ohci_hcd_init(hcd_to_ohci(hcd));
154
155 retval = usb_add_hcd(hcd, platform_get_irq(pdev, 0), IRQF_DISABLED);
156 if (retval == 0)
157 return retval;
158
159 spear_stop_ohci(ohci_p);
160 iounmap(hcd->regs);
161fail_ioremap:
162 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
163fail_request_resource:
164 usb_put_hcd(hcd);
165fail_create_hcd:
166 clk_put(usbh_clk);
167fail_get_usbh_clk:
168fail_irq_get:
169 dev_err(&pdev->dev, "init fail, %d\n", retval);
170
171 return retval;
172}
173
/*
 * Remove: unregister the HCD and release everything probe acquired, in
 * reverse order (HCD teardown, clock gate, iounmap, mem region, hcd
 * refcount, clk refcount).  The clock is gated only after
 * usb_remove_hcd() so the core can still touch registers while shutting
 * down.
 */
static int spear_ohci_hcd_drv_remove(struct platform_device *pdev)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct spear_ohci *ohci_p = to_spear_ohci(hcd);

	usb_remove_hcd(hcd);
	if (ohci_p->clk)
		spear_stop_ohci(ohci_p);

	iounmap(hcd->regs);
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
	usb_put_hcd(hcd);

	if (ohci_p->clk)
		clk_put(ohci_p->clk);
	platform_set_drvdata(pdev, NULL);
	return 0;
}
192
193#if defined(CONFIG_PM)
/*
 * Legacy platform-bus suspend: honour the OHCI core's minimum interval
 * between root-hub state changes, gate the bus clock and mark the host
 * controller suspended.  Always returns 0.
 */
static int spear_ohci_hcd_drv_suspend(struct platform_device *dev,
		pm_message_t message)
{
	struct usb_hcd *hcd = platform_get_drvdata(dev);
	struct ohci_hcd *ohci = hcd_to_ohci(hcd);
	struct spear_ohci *ohci_p = to_spear_ohci(hcd);

	/* wait out a state change that is still settling */
	if (time_before(jiffies, ohci->next_statechange))
		msleep(5);
	ohci->next_statechange = jiffies;

	spear_stop_ohci(ohci_p);
	ohci_to_hcd(ohci)->state = HC_STATE_SUSPENDED;
	return 0;
}
209
/*
 * Legacy platform-bus resume: re-enable the bus clock and let the OHCI
 * core finish controller resume.  Mirrors the suspend path above.
 * Always returns 0.
 */
static int spear_ohci_hcd_drv_resume(struct platform_device *dev)
{
	struct usb_hcd *hcd = platform_get_drvdata(dev);
	struct ohci_hcd *ohci = hcd_to_ohci(hcd);
	struct spear_ohci *ohci_p = to_spear_ohci(hcd);

	/* wait out a state change that is still settling */
	if (time_before(jiffies, ohci->next_statechange))
		msleep(5);
	ohci->next_statechange = jiffies;

	spear_start_ohci(ohci_p);
	ohci_finish_controller_resume(hcd);
	return 0;
}
#endif
225
/*
 * Driver definition to register with the platform bus; binds against
 * platform devices named "spear-ohci".
 */
static struct platform_driver spear_ohci_hcd_driver = {
	.probe =	spear_ohci_hcd_drv_probe,
	.remove =	spear_ohci_hcd_drv_remove,
#ifdef CONFIG_PM
	/* legacy (non-dev_pm_ops) power-management callbacks */
	.suspend =	spear_ohci_hcd_drv_suspend,
	.resume =	spear_ohci_hcd_drv_resume,
#endif
	.driver = {
		.owner = THIS_MODULE,
		.name = "spear-ohci",
	},
};
239
240MODULE_ALIAS("platform:spear-ohci");
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index f52d04db28f4..cee867829ec9 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -569,7 +569,7 @@ static int uhci_init(struct usb_hcd *hcd)
569 */ 569 */
570static void uhci_shutdown(struct pci_dev *pdev) 570static void uhci_shutdown(struct pci_dev *pdev)
571{ 571{
572 struct usb_hcd *hcd = (struct usb_hcd *) pci_get_drvdata(pdev); 572 struct usb_hcd *hcd = pci_get_drvdata(pdev);
573 573
574 uhci_hc_died(hcd_to_uhci(hcd)); 574 uhci_hc_died(hcd_to_uhci(hcd));
575} 575}
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index 2090b45eb606..af77abb5c68b 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -29,7 +29,7 @@ static void uhci_set_next_interrupt(struct uhci_hcd *uhci)
29{ 29{
30 if (uhci->is_stopped) 30 if (uhci->is_stopped)
31 mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies); 31 mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
32 uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC); 32 uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC);
33} 33}
34 34
35static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci) 35static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
@@ -195,7 +195,9 @@ static inline void uhci_remove_td_from_frame_list(struct uhci_hcd *uhci,
195 } else { 195 } else {
196 struct uhci_td *ntd; 196 struct uhci_td *ntd;
197 197
198 ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list); 198 ntd = list_entry(td->fl_list.next,
199 struct uhci_td,
200 fl_list);
199 uhci->frame[td->frame] = LINK_TO_TD(ntd); 201 uhci->frame[td->frame] = LINK_TO_TD(ntd);
200 uhci->frame_cpu[td->frame] = ntd; 202 uhci->frame_cpu[td->frame] = ntd;
201 } 203 }
@@ -728,7 +730,7 @@ static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci,
728 730
729 urbp->urb = urb; 731 urbp->urb = urb;
730 urb->hcpriv = urbp; 732 urb->hcpriv = urbp;
731 733
732 INIT_LIST_HEAD(&urbp->node); 734 INIT_LIST_HEAD(&urbp->node);
733 INIT_LIST_HEAD(&urbp->td_list); 735 INIT_LIST_HEAD(&urbp->td_list);
734 736
@@ -846,7 +848,7 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
846 848
847 /* Alternate Data0/1 (start with Data1) */ 849 /* Alternate Data0/1 (start with Data1) */
848 destination ^= TD_TOKEN_TOGGLE; 850 destination ^= TD_TOKEN_TOGGLE;
849 851
850 uhci_add_td_to_urbp(td, urbp); 852 uhci_add_td_to_urbp(td, urbp);
851 uhci_fill_td(td, status, destination | uhci_explen(pktsze), 853 uhci_fill_td(td, status, destination | uhci_explen(pktsze),
852 data); 854 data);
@@ -857,7 +859,7 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
857 } 859 }
858 860
859 /* 861 /*
860 * Build the final TD for control status 862 * Build the final TD for control status
861 */ 863 */
862 td = uhci_alloc_td(uhci); 864 td = uhci_alloc_td(uhci);
863 if (!td) 865 if (!td)
diff --git a/drivers/usb/host/whci/hcd.c b/drivers/usb/host/whci/hcd.c
index 72b6892fda67..9546f6cd01f0 100644
--- a/drivers/usb/host/whci/hcd.c
+++ b/drivers/usb/host/whci/hcd.c
@@ -356,7 +356,7 @@ static void __exit whci_hc_driver_exit(void)
356module_exit(whci_hc_driver_exit); 356module_exit(whci_hc_driver_exit);
357 357
358/* PCI device ID's that we handle (so it gets loaded) */ 358/* PCI device ID's that we handle (so it gets loaded) */
359static struct pci_device_id whci_hcd_id_table[] = { 359static struct pci_device_id __used whci_hcd_id_table[] = {
360 { PCI_DEVICE_CLASS(PCI_CLASS_WIRELESS_WHCI, ~0) }, 360 { PCI_DEVICE_CLASS(PCI_CLASS_WIRELESS_WHCI, ~0) },
361 { /* empty last entry */ } 361 { /* empty last entry */ }
362}; 362};
diff --git a/drivers/usb/misc/usbled.c b/drivers/usb/misc/usbled.c
index c96f51de1696..1732d9bc097e 100644
--- a/drivers/usb/misc/usbled.c
+++ b/drivers/usb/misc/usbled.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * USB LED driver - 1.1 2 * USB LED driver
3 * 3 *
4 * Copyright (C) 2004 Greg Kroah-Hartman (greg@kroah.com) 4 * Copyright (C) 2004 Greg Kroah-Hartman (greg@kroah.com)
5 * 5 *
@@ -20,12 +20,17 @@
20#define DRIVER_AUTHOR "Greg Kroah-Hartman, greg@kroah.com" 20#define DRIVER_AUTHOR "Greg Kroah-Hartman, greg@kroah.com"
21#define DRIVER_DESC "USB LED Driver" 21#define DRIVER_DESC "USB LED Driver"
22 22
23#define VENDOR_ID 0x0fc5 23enum led_type {
24#define PRODUCT_ID 0x1223 24 DELCOM_VISUAL_SIGNAL_INDICATOR,
25 DREAM_CHEEKY_WEBMAIL_NOTIFIER,
26};
25 27
26/* table of devices that work with this driver */ 28/* table of devices that work with this driver */
27static const struct usb_device_id id_table[] = { 29static const struct usb_device_id id_table[] = {
28 { USB_DEVICE(VENDOR_ID, PRODUCT_ID) }, 30 { USB_DEVICE(0x0fc5, 0x1223),
31 .driver_info = DELCOM_VISUAL_SIGNAL_INDICATOR },
32 { USB_DEVICE(0x1d34, 0x0004),
33 .driver_info = DREAM_CHEEKY_WEBMAIL_NOTIFIER },
29 { }, 34 { },
30}; 35};
31MODULE_DEVICE_TABLE (usb, id_table); 36MODULE_DEVICE_TABLE (usb, id_table);
@@ -35,15 +40,12 @@ struct usb_led {
35 unsigned char blue; 40 unsigned char blue;
36 unsigned char red; 41 unsigned char red;
37 unsigned char green; 42 unsigned char green;
43 enum led_type type;
38}; 44};
39 45
40#define BLUE 0x04
41#define RED 0x02
42#define GREEN 0x01
43static void change_color(struct usb_led *led) 46static void change_color(struct usb_led *led)
44{ 47{
45 int retval; 48 int retval;
46 unsigned char color = 0x07;
47 unsigned char *buffer; 49 unsigned char *buffer;
48 50
49 buffer = kmalloc(8, GFP_KERNEL); 51 buffer = kmalloc(8, GFP_KERNEL);
@@ -52,25 +54,59 @@ static void change_color(struct usb_led *led)
52 return; 54 return;
53 } 55 }
54 56
55 if (led->blue) 57 switch (led->type) {
56 color &= ~(BLUE); 58 case DELCOM_VISUAL_SIGNAL_INDICATOR: {
57 if (led->red) 59 unsigned char color = 0x07;
58 color &= ~(RED); 60
59 if (led->green) 61 if (led->blue)
60 color &= ~(GREEN); 62 color &= ~0x04;
61 dev_dbg(&led->udev->dev, 63 if (led->red)
62 "blue = %d, red = %d, green = %d, color = %.2x\n", 64 color &= ~0x02;
63 led->blue, led->red, led->green, color); 65 if (led->green)
64 66 color &= ~0x01;
65 retval = usb_control_msg(led->udev, 67 dev_dbg(&led->udev->dev,
66 usb_sndctrlpipe(led->udev, 0), 68 "blue = %d, red = %d, green = %d, color = %.2x\n",
67 0x12, 69 led->blue, led->red, led->green, color);
68 0xc8, 70
69 (0x02 * 0x100) + 0x0a, 71 retval = usb_control_msg(led->udev,
70 (0x00 * 0x100) + color, 72 usb_sndctrlpipe(led->udev, 0),
71 buffer, 73 0x12,
72 8, 74 0xc8,
73 2000); 75 (0x02 * 0x100) + 0x0a,
76 (0x00 * 0x100) + color,
77 buffer,
78 8,
79 2000);
80 break;
81 }
82
83 case DREAM_CHEEKY_WEBMAIL_NOTIFIER:
84 dev_dbg(&led->udev->dev,
85 "red = %d, green = %d, blue = %d\n",
86 led->red, led->green, led->blue);
87
88 buffer[0] = led->red;
89 buffer[1] = led->green;
90 buffer[2] = led->blue;
91 buffer[3] = buffer[4] = buffer[5] = 0;
92 buffer[6] = 0x1a;
93 buffer[7] = 0x05;
94
95 retval = usb_control_msg(led->udev,
96 usb_sndctrlpipe(led->udev, 0),
97 0x09,
98 0x21,
99 0x200,
100 0,
101 buffer,
102 8,
103 2000);
104 break;
105
106 default:
107 dev_err(&led->udev->dev, "unknown device type %d\n", led->type);
108 }
109
74 if (retval) 110 if (retval)
75 dev_dbg(&led->udev->dev, "retval = %d\n", retval); 111 dev_dbg(&led->udev->dev, "retval = %d\n", retval);
76 kfree(buffer); 112 kfree(buffer);
@@ -107,11 +143,12 @@ static int led_probe(struct usb_interface *interface, const struct usb_device_id
107 143
108 dev = kzalloc(sizeof(struct usb_led), GFP_KERNEL); 144 dev = kzalloc(sizeof(struct usb_led), GFP_KERNEL);
109 if (dev == NULL) { 145 if (dev == NULL) {
110 dev_err(&interface->dev, "Out of memory\n"); 146 dev_err(&interface->dev, "out of memory\n");
111 goto error_mem; 147 goto error_mem;
112 } 148 }
113 149
114 dev->udev = usb_get_dev(udev); 150 dev->udev = usb_get_dev(udev);
151 dev->type = id->driver_info;
115 152
116 usb_set_intfdata (interface, dev); 153 usb_set_intfdata (interface, dev);
117 154
@@ -125,6 +162,31 @@ static int led_probe(struct usb_interface *interface, const struct usb_device_id
125 if (retval) 162 if (retval)
126 goto error; 163 goto error;
127 164
165 if (dev->type == DREAM_CHEEKY_WEBMAIL_NOTIFIER) {
166 unsigned char *enable;
167
168 enable = kmemdup("\x1f\x02\0\x5f\0\0\x1a\x03", 8, GFP_KERNEL);
169 if (!enable) {
170 dev_err(&interface->dev, "out of memory\n");
171 retval = -ENOMEM;
172 goto error;
173 }
174
175 retval = usb_control_msg(udev,
176 usb_sndctrlpipe(udev, 0),
177 0x09,
178 0x21,
179 0x200,
180 0,
181 enable,
182 8,
183 2000);
184
185 kfree(enable);
186 if (retval != 8)
187 goto error;
188 }
189
128 dev_info(&interface->dev, "USB LED device now attached\n"); 190 dev_info(&interface->dev, "USB LED device now attached\n");
129 return 0; 191 return 0;
130 192
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index c436e1e2c3b6..a09dbd243eb3 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -436,6 +436,28 @@ static unsigned int mon_bin_get_data(const struct mon_reader_bin *rp,
436 return length; 436 return length;
437} 437}
438 438
439/*
440 * This is the look-ahead pass in case of 'C Zi', when actual_length cannot
441 * be used to determine the length of the whole contiguous buffer.
442 */
443static unsigned int mon_bin_collate_isodesc(const struct mon_reader_bin *rp,
444 struct urb *urb, unsigned int ndesc)
445{
446 struct usb_iso_packet_descriptor *fp;
447 unsigned int length;
448
449 length = 0;
450 fp = urb->iso_frame_desc;
451 while (ndesc-- != 0) {
452 if (fp->actual_length != 0) {
453 if (fp->offset + fp->actual_length > length)
454 length = fp->offset + fp->actual_length;
455 }
456 fp++;
457 }
458 return length;
459}
460
439static void mon_bin_get_isodesc(const struct mon_reader_bin *rp, 461static void mon_bin_get_isodesc(const struct mon_reader_bin *rp,
440 unsigned int offset, struct urb *urb, char ev_type, unsigned int ndesc) 462 unsigned int offset, struct urb *urb, char ev_type, unsigned int ndesc)
441{ 463{
@@ -478,6 +500,10 @@ static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb,
478 /* 500 /*
479 * Find the maximum allowable length, then allocate space. 501 * Find the maximum allowable length, then allocate space.
480 */ 502 */
503 urb_length = (ev_type == 'S') ?
504 urb->transfer_buffer_length : urb->actual_length;
505 length = urb_length;
506
481 if (usb_endpoint_xfer_isoc(epd)) { 507 if (usb_endpoint_xfer_isoc(epd)) {
482 if (urb->number_of_packets < 0) { 508 if (urb->number_of_packets < 0) {
483 ndesc = 0; 509 ndesc = 0;
@@ -486,14 +512,16 @@ static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb,
486 } else { 512 } else {
487 ndesc = urb->number_of_packets; 513 ndesc = urb->number_of_packets;
488 } 514 }
515 if (ev_type == 'C' && usb_urb_dir_in(urb))
516 length = mon_bin_collate_isodesc(rp, urb, ndesc);
489 } else { 517 } else {
490 ndesc = 0; 518 ndesc = 0;
491 } 519 }
492 lendesc = ndesc*sizeof(struct mon_bin_isodesc); 520 lendesc = ndesc*sizeof(struct mon_bin_isodesc);
493 521
494 urb_length = (ev_type == 'S') ? 522 /* not an issue unless there's a subtle bug in a HCD somewhere */
495 urb->transfer_buffer_length : urb->actual_length; 523 if (length >= urb->transfer_buffer_length)
496 length = urb_length; 524 length = urb->transfer_buffer_length;
497 525
498 if (length >= rp->b_size/5) 526 if (length >= rp->b_size/5)
499 length = rp->b_size/5; 527 length = rp->b_size/5;
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 341a37a469bd..4cbb7e4b368d 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -12,6 +12,7 @@ config USB_MUSB_HDRC
12 depends on (ARM || (BF54x && !BF544) || (BF52x && !BF522 && !BF523)) 12 depends on (ARM || (BF54x && !BF544) || (BF52x && !BF522 && !BF523))
13 select NOP_USB_XCEIV if (ARCH_DAVINCI || MACH_OMAP3EVM || BLACKFIN) 13 select NOP_USB_XCEIV if (ARCH_DAVINCI || MACH_OMAP3EVM || BLACKFIN)
14 select TWL4030_USB if MACH_OMAP_3430SDP 14 select TWL4030_USB if MACH_OMAP_3430SDP
15 select TWL6030_USB if MACH_OMAP_4430SDP || MACH_OMAP4_PANDA
15 select USB_OTG_UTILS 16 select USB_OTG_UTILS
16 tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)' 17 tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)'
17 help 18 help
@@ -30,57 +31,41 @@ config USB_MUSB_HDRC
30 If you do not know what this is, please say N. 31 If you do not know what this is, please say N.
31 32
32 To compile this driver as a module, choose M here; the 33 To compile this driver as a module, choose M here; the
33 module will be called "musb_hdrc". 34 module will be called "musb-hdrc".
34 35
35config USB_MUSB_SOC 36choice
36 boolean 37 prompt "Platform Glue Layer"
37 depends on USB_MUSB_HDRC 38 depends on USB_MUSB_HDRC
38 default y if ARCH_DAVINCI
39 default y if ARCH_OMAP2430
40 default y if ARCH_OMAP3
41 default y if ARCH_OMAP4
42 default y if (BF54x && !BF544)
43 default y if (BF52x && !BF522 && !BF523)
44 39
45comment "DaVinci 35x and 644x USB support" 40config USB_MUSB_DAVINCI
46 depends on USB_MUSB_HDRC && ARCH_DAVINCI_DMx 41 bool "DaVinci"
42 depends on ARCH_DAVINCI_DMx
47 43
48comment "DA8xx/OMAP-L1x USB support" 44config USB_MUSB_DA8XX
49 depends on USB_MUSB_HDRC && ARCH_DAVINCI_DA8XX 45 bool "DA8xx/OMAP-L1x"
46 depends on ARCH_DAVINCI_DA8XX
50 47
51comment "OMAP 243x high speed USB support" 48config USB_MUSB_TUSB6010
52 depends on USB_MUSB_HDRC && ARCH_OMAP2430 49 bool "TUSB6010"
50 depends on ARCH_OMAP
53 51
54comment "OMAP 343x high speed USB support" 52config USB_MUSB_OMAP2PLUS
55 depends on USB_MUSB_HDRC && ARCH_OMAP3 53 bool "OMAP2430 and onwards"
54 depends on ARCH_OMAP2PLUS
56 55
57comment "OMAP 44xx high speed USB support" 56config USB_MUSB_AM35X
58 depends on USB_MUSB_HDRC && ARCH_OMAP4 57 bool "AM35x"
58 depends on ARCH_OMAP
59 59
60comment "Blackfin high speed USB Support" 60config USB_MUSB_BLACKFIN
61 depends on USB_MUSB_HDRC && ((BF54x && !BF544) || (BF52x && !BF522 && !BF523)) 61 bool "Blackfin"
62 depends on (BF54x && !BF544) || (BF52x && ! BF522 && !BF523)
62 63
63config USB_MUSB_AM35X 64config USB_MUSB_UX500
64 bool 65 bool "U8500 and U5500"
65 depends on USB_MUSB_HDRC && !ARCH_OMAP2430 && !ARCH_OMAP4 66 depends on (ARCH_U8500 && AB8500_USB) || (ARCH_U5500)
66 select NOP_USB_XCEIV 67
67 default MACH_OMAP3517EVM 68endchoice
68 help
69 Select this option if your platform is based on AM35x. As
70 AM35x has an updated MUSB with CPPI4.1 DMA so this config
71 is introduced to differentiate musb ip between OMAP3x and
72 AM35x platforms.
73
74config USB_TUSB6010
75 boolean "TUSB 6010 support"
76 depends on USB_MUSB_HDRC && !USB_MUSB_SOC
77 select NOP_USB_XCEIV
78 default y
79 help
80 The TUSB 6010 chip, from Texas Instruments, connects a discrete
81 HDRC core using a 16-bit parallel bus (NOR flash style) or VLYNQ
82 (a high speed serial link). It can use system-specific external
83 DMA controllers.
84 69
85choice 70choice
86 prompt "Driver Mode" 71 prompt "Driver Mode"
@@ -158,7 +143,7 @@ config USB_MUSB_HDRC_HCD
158config MUSB_PIO_ONLY 143config MUSB_PIO_ONLY
159 bool 'Disable DMA (always use PIO)' 144 bool 'Disable DMA (always use PIO)'
160 depends on USB_MUSB_HDRC 145 depends on USB_MUSB_HDRC
161 default USB_TUSB6010 || ARCH_DAVINCI_DA8XX || USB_MUSB_AM35X 146 default USB_MUSB_TUSB6010 || USB_MUSB_DA8XX || USB_MUSB_AM35X
162 help 147 help
163 All data is copied between memory and FIFO by the CPU. 148 All data is copied between memory and FIFO by the CPU.
164 DMA controllers are ignored. 149 DMA controllers are ignored.
@@ -171,21 +156,21 @@ config MUSB_PIO_ONLY
171config USB_INVENTRA_DMA 156config USB_INVENTRA_DMA
172 bool 157 bool
173 depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY 158 depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
174 default ARCH_OMAP2430 || ARCH_OMAP3 || BLACKFIN || ARCH_OMAP4 159 default USB_MUSB_OMAP2PLUS || USB_MUSB_BLACKFIN
175 help 160 help
176 Enable DMA transfers using Mentor's engine. 161 Enable DMA transfers using Mentor's engine.
177 162
178config USB_TI_CPPI_DMA 163config USB_TI_CPPI_DMA
179 bool 164 bool
180 depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY 165 depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
181 default ARCH_DAVINCI 166 default USB_MUSB_DAVINCI
182 help 167 help
183 Enable DMA transfers when TI CPPI DMA is available. 168 Enable DMA transfers when TI CPPI DMA is available.
184 169
185config USB_TUSB_OMAP_DMA 170config USB_TUSB_OMAP_DMA
186 bool 171 bool
187 depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY 172 depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
188 depends on USB_TUSB6010 173 depends on USB_MUSB_TUSB6010
189 depends on ARCH_OMAP 174 depends on ARCH_OMAP
190 default y 175 default y
191 help 176 help
diff --git a/drivers/usb/musb/Makefile b/drivers/usb/musb/Makefile
index ce164e8998d8..74df5284894f 100644
--- a/drivers/usb/musb/Makefile
+++ b/drivers/usb/musb/Makefile
@@ -8,22 +8,19 @@ obj-$(CONFIG_USB_MUSB_HDRC) += musb_hdrc.o
8 8
9musb_hdrc-y := musb_core.o 9musb_hdrc-y := musb_core.o
10 10
11musb_hdrc-$(CONFIG_ARCH_DAVINCI_DMx) += davinci.o
12musb_hdrc-$(CONFIG_ARCH_DAVINCI_DA8XX) += da8xx.o
13musb_hdrc-$(CONFIG_USB_TUSB6010) += tusb6010.o
14musb_hdrc-$(CONFIG_ARCH_OMAP2430) += omap2430.o
15ifeq ($(CONFIG_USB_MUSB_AM35X),y)
16 musb_hdrc-$(CONFIG_ARCH_OMAP3430) += am35x.o
17else
18 musb_hdrc-$(CONFIG_ARCH_OMAP3430) += omap2430.o
19endif
20musb_hdrc-$(CONFIG_ARCH_OMAP4) += omap2430.o
21musb_hdrc-$(CONFIG_BF54x) += blackfin.o
22musb_hdrc-$(CONFIG_BF52x) += blackfin.o
23musb_hdrc-$(CONFIG_USB_GADGET_MUSB_HDRC) += musb_gadget_ep0.o musb_gadget.o 11musb_hdrc-$(CONFIG_USB_GADGET_MUSB_HDRC) += musb_gadget_ep0.o musb_gadget.o
24musb_hdrc-$(CONFIG_USB_MUSB_HDRC_HCD) += musb_virthub.o musb_host.o 12musb_hdrc-$(CONFIG_USB_MUSB_HDRC_HCD) += musb_virthub.o musb_host.o
25musb_hdrc-$(CONFIG_DEBUG_FS) += musb_debugfs.o 13musb_hdrc-$(CONFIG_DEBUG_FS) += musb_debugfs.o
26 14
15# Hardware Glue Layer
16obj-$(CONFIG_USB_MUSB_OMAP2PLUS) += omap2430.o
17obj-$(CONFIG_USB_MUSB_AM35X) += am35x.o
18obj-$(CONFIG_USB_MUSB_TUSB6010) += tusb6010.o
19obj-$(CONFIG_USB_MUSB_DAVINCI) += davinci.o
20obj-$(CONFIG_USB_MUSB_DA8XX) += da8xx.o
21obj-$(CONFIG_USB_MUSB_BLACKFIN) += blackfin.o
22obj-$(CONFIG_USB_MUSB_UX500) += ux500.o
23
27# the kconfig must guarantee that only one of the 24# the kconfig must guarantee that only one of the
28# possible I/O schemes will be enabled at a time ... 25# possible I/O schemes will be enabled at a time ...
29# PIO only, or DMA (several potential schemes). 26# PIO only, or DMA (several potential schemes).
diff --git a/drivers/usb/musb/am35x.c b/drivers/usb/musb/am35x.c
index b0aabf3a606f..d5a3da37c90c 100644
--- a/drivers/usb/musb/am35x.c
+++ b/drivers/usb/musb/am35x.c
@@ -29,8 +29,9 @@
29#include <linux/init.h> 29#include <linux/init.h>
30#include <linux/clk.h> 30#include <linux/clk.h>
31#include <linux/io.h> 31#include <linux/io.h>
32#include <linux/platform_device.h>
33#include <linux/dma-mapping.h>
32 34
33#include <plat/control.h>
34#include <plat/usb.h> 35#include <plat/usb.h>
35 36
36#include "musb_core.h" 37#include "musb_core.h"
@@ -80,51 +81,18 @@
80 81
81#define USB_MENTOR_CORE_OFFSET 0x400 82#define USB_MENTOR_CORE_OFFSET 0x400
82 83
83static inline void phy_on(void) 84struct am35x_glue {
84{ 85 struct device *dev;
85 unsigned long timeout = jiffies + msecs_to_jiffies(100); 86 struct platform_device *musb;
86 u32 devconf2; 87 struct clk *phy_clk;
87 88 struct clk *clk;
88 /* 89};
89 * Start the on-chip PHY and its PLL. 90#define glue_to_musb(g) platform_get_drvdata(g->musb)
90 */
91 devconf2 = omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2);
92
93 devconf2 &= ~(CONF2_RESET | CONF2_PHYPWRDN | CONF2_OTGPWRDN);
94 devconf2 |= CONF2_PHY_PLLON;
95
96 omap_ctrl_writel(devconf2, AM35XX_CONTROL_DEVCONF2);
97
98 DBG(1, "Waiting for PHY clock good...\n");
99 while (!(omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2)
100 & CONF2_PHYCLKGD)) {
101 cpu_relax();
102
103 if (time_after(jiffies, timeout)) {
104 DBG(1, "musb PHY clock good timed out\n");
105 break;
106 }
107 }
108}
109
110static inline void phy_off(void)
111{
112 u32 devconf2;
113
114 /*
115 * Power down the on-chip PHY.
116 */
117 devconf2 = omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2);
118
119 devconf2 &= ~CONF2_PHY_PLLON;
120 devconf2 |= CONF2_PHYPWRDN | CONF2_OTGPWRDN;
121 omap_ctrl_writel(devconf2, AM35XX_CONTROL_DEVCONF2);
122}
123 91
124/* 92/*
125 * musb_platform_enable - enable interrupts 93 * am35x_musb_enable - enable interrupts
126 */ 94 */
127void musb_platform_enable(struct musb *musb) 95static void am35x_musb_enable(struct musb *musb)
128{ 96{
129 void __iomem *reg_base = musb->ctrl_base; 97 void __iomem *reg_base = musb->ctrl_base;
130 u32 epmask; 98 u32 epmask;
@@ -143,9 +111,9 @@ void musb_platform_enable(struct musb *musb)
143} 111}
144 112
145/* 113/*
146 * musb_platform_disable - disable HDRC and flush interrupts 114 * am35x_musb_disable - disable HDRC and flush interrupts
147 */ 115 */
148void musb_platform_disable(struct musb *musb) 116static void am35x_musb_disable(struct musb *musb)
149{ 117{
150 void __iomem *reg_base = musb->ctrl_base; 118 void __iomem *reg_base = musb->ctrl_base;
151 119
@@ -162,7 +130,7 @@ void musb_platform_disable(struct musb *musb)
162#define portstate(stmt) 130#define portstate(stmt)
163#endif 131#endif
164 132
165static void am35x_set_vbus(struct musb *musb, int is_on) 133static void am35x_musb_set_vbus(struct musb *musb, int is_on)
166{ 134{
167 WARN_ON(is_on && is_peripheral_active(musb)); 135 WARN_ON(is_on && is_peripheral_active(musb));
168} 136}
@@ -221,7 +189,7 @@ static void otg_timer(unsigned long _musb)
221 spin_unlock_irqrestore(&musb->lock, flags); 189 spin_unlock_irqrestore(&musb->lock, flags);
222} 190}
223 191
224void musb_platform_try_idle(struct musb *musb, unsigned long timeout) 192static void am35x_musb_try_idle(struct musb *musb, unsigned long timeout)
225{ 193{
226 static unsigned long last_timer; 194 static unsigned long last_timer;
227 195
@@ -251,13 +219,16 @@ void musb_platform_try_idle(struct musb *musb, unsigned long timeout)
251 mod_timer(&otg_workaround, timeout); 219 mod_timer(&otg_workaround, timeout);
252} 220}
253 221
254static irqreturn_t am35x_interrupt(int irq, void *hci) 222static irqreturn_t am35x_musb_interrupt(int irq, void *hci)
255{ 223{
256 struct musb *musb = hci; 224 struct musb *musb = hci;
257 void __iomem *reg_base = musb->ctrl_base; 225 void __iomem *reg_base = musb->ctrl_base;
226 struct device *dev = musb->controller;
227 struct musb_hdrc_platform_data *plat = dev->platform_data;
228 struct omap_musb_board_data *data = plat->board_data;
258 unsigned long flags; 229 unsigned long flags;
259 irqreturn_t ret = IRQ_NONE; 230 irqreturn_t ret = IRQ_NONE;
260 u32 epintr, usbintr, lvl_intr; 231 u32 epintr, usbintr;
261 232
262 spin_lock_irqsave(&musb->lock, flags); 233 spin_lock_irqsave(&musb->lock, flags);
263 234
@@ -346,9 +317,8 @@ eoi:
346 /* EOI needs to be written for the IRQ to be re-asserted. */ 317 /* EOI needs to be written for the IRQ to be re-asserted. */
347 if (ret == IRQ_HANDLED || epintr || usbintr) { 318 if (ret == IRQ_HANDLED || epintr || usbintr) {
348 /* clear level interrupt */ 319 /* clear level interrupt */
349 lvl_intr = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR); 320 if (data->clear_irq)
350 lvl_intr |= AM35XX_USBOTGSS_INT_CLR; 321 data->clear_irq();
351 omap_ctrl_writel(lvl_intr, AM35XX_CONTROL_LVL_INTR_CLEAR);
352 /* write EOI */ 322 /* write EOI */
353 musb_writel(reg_base, USB_END_OF_INTR_REG, 0); 323 musb_writel(reg_base, USB_END_OF_INTR_REG, 0);
354 } 324 }
@@ -362,137 +332,85 @@ eoi:
362 return ret; 332 return ret;
363} 333}
364 334
365int musb_platform_set_mode(struct musb *musb, u8 musb_mode) 335static int am35x_musb_set_mode(struct musb *musb, u8 musb_mode)
366{ 336{
367 u32 devconf2 = omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2); 337 struct device *dev = musb->controller;
338 struct musb_hdrc_platform_data *plat = dev->platform_data;
339 struct omap_musb_board_data *data = plat->board_data;
340 int retval = 0;
368 341
369 devconf2 &= ~CONF2_OTGMODE; 342 if (data->set_mode)
370 switch (musb_mode) { 343 data->set_mode(musb_mode);
371#ifdef CONFIG_USB_MUSB_HDRC_HCD 344 else
372 case MUSB_HOST: /* Force VBUS valid, ID = 0 */ 345 retval = -EIO;
373 devconf2 |= CONF2_FORCE_HOST;
374 break;
375#endif
376#ifdef CONFIG_USB_GADGET_MUSB_HDRC
377 case MUSB_PERIPHERAL: /* Force VBUS valid, ID = 1 */
378 devconf2 |= CONF2_FORCE_DEVICE;
379 break;
380#endif
381#ifdef CONFIG_USB_MUSB_OTG
382 case MUSB_OTG: /* Don't override the VBUS/ID comparators */
383 devconf2 |= CONF2_NO_OVERRIDE;
384 break;
385#endif
386 default:
387 DBG(2, "Trying to set unsupported mode %u\n", musb_mode);
388 }
389 346
390 omap_ctrl_writel(devconf2, AM35XX_CONTROL_DEVCONF2); 347 return retval;
391 return 0;
392} 348}
393 349
394int __init musb_platform_init(struct musb *musb, void *board_data) 350static int am35x_musb_init(struct musb *musb)
395{ 351{
352 struct device *dev = musb->controller;
353 struct musb_hdrc_platform_data *plat = dev->platform_data;
354 struct omap_musb_board_data *data = plat->board_data;
396 void __iomem *reg_base = musb->ctrl_base; 355 void __iomem *reg_base = musb->ctrl_base;
397 u32 rev, lvl_intr, sw_reset; 356 u32 rev;
398 int status;
399 357
400 musb->mregs += USB_MENTOR_CORE_OFFSET; 358 musb->mregs += USB_MENTOR_CORE_OFFSET;
401 359
402 clk_enable(musb->clock);
403 DBG(2, "musb->clock=%lud\n", clk_get_rate(musb->clock));
404
405 musb->phy_clock = clk_get(musb->controller, "fck");
406 if (IS_ERR(musb->phy_clock)) {
407 status = PTR_ERR(musb->phy_clock);
408 goto exit0;
409 }
410 clk_enable(musb->phy_clock);
411 DBG(2, "musb->phy_clock=%lud\n", clk_get_rate(musb->phy_clock));
412
413 /* Returns zero if e.g. not clocked */ 360 /* Returns zero if e.g. not clocked */
414 rev = musb_readl(reg_base, USB_REVISION_REG); 361 rev = musb_readl(reg_base, USB_REVISION_REG);
415 if (!rev) { 362 if (!rev)
416 status = -ENODEV; 363 return -ENODEV;
417 goto exit1;
418 }
419 364
420 usb_nop_xceiv_register(); 365 usb_nop_xceiv_register();
421 musb->xceiv = otg_get_transceiver(); 366 musb->xceiv = otg_get_transceiver();
422 if (!musb->xceiv) { 367 if (!musb->xceiv)
423 status = -ENODEV; 368 return -ENODEV;
424 goto exit1;
425 }
426 369
427 if (is_host_enabled(musb)) 370 if (is_host_enabled(musb))
428 setup_timer(&otg_workaround, otg_timer, (unsigned long) musb); 371 setup_timer(&otg_workaround, otg_timer, (unsigned long) musb);
429 372
430 musb->board_set_vbus = am35x_set_vbus; 373 /* Reset the musb */
431 374 if (data->reset)
432 /* Global reset */ 375 data->reset();
433 sw_reset = omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET);
434
435 sw_reset |= AM35XX_USBOTGSS_SW_RST;
436 omap_ctrl_writel(sw_reset, AM35XX_CONTROL_IP_SW_RESET);
437
438 sw_reset &= ~AM35XX_USBOTGSS_SW_RST;
439 omap_ctrl_writel(sw_reset, AM35XX_CONTROL_IP_SW_RESET);
440 376
441 /* Reset the controller */ 377 /* Reset the controller */
442 musb_writel(reg_base, USB_CTRL_REG, AM35X_SOFT_RESET_MASK); 378 musb_writel(reg_base, USB_CTRL_REG, AM35X_SOFT_RESET_MASK);
443 379
444 /* Start the on-chip PHY and its PLL. */ 380 /* Start the on-chip PHY and its PLL. */
445 phy_on(); 381 if (data->set_phy_power)
382 data->set_phy_power(1);
446 383
447 msleep(5); 384 msleep(5);
448 385
449 musb->isr = am35x_interrupt; 386 musb->isr = am35x_musb_interrupt;
450 387
451 /* clear level interrupt */ 388 /* clear level interrupt */
452 lvl_intr = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR); 389 if (data->clear_irq)
453 lvl_intr |= AM35XX_USBOTGSS_INT_CLR; 390 data->clear_irq();
454 omap_ctrl_writel(lvl_intr, AM35XX_CONTROL_LVL_INTR_CLEAR); 391
455 return 0; 392 return 0;
456exit1:
457 clk_disable(musb->phy_clock);
458 clk_put(musb->phy_clock);
459exit0:
460 clk_disable(musb->clock);
461 return status;
462} 393}
463 394
464int musb_platform_exit(struct musb *musb) 395static int am35x_musb_exit(struct musb *musb)
465{ 396{
397 struct device *dev = musb->controller;
398 struct musb_hdrc_platform_data *plat = dev->platform_data;
399 struct omap_musb_board_data *data = plat->board_data;
400
466 if (is_host_enabled(musb)) 401 if (is_host_enabled(musb))
467 del_timer_sync(&otg_workaround); 402 del_timer_sync(&otg_workaround);
468 403
469 phy_off(); 404 /* Shutdown the on-chip PHY and its PLL. */
405 if (data->set_phy_power)
406 data->set_phy_power(0);
470 407
471 otg_put_transceiver(musb->xceiv); 408 otg_put_transceiver(musb->xceiv);
472 usb_nop_xceiv_unregister(); 409 usb_nop_xceiv_unregister();
473 410
474 clk_disable(musb->clock);
475
476 clk_disable(musb->phy_clock);
477 clk_put(musb->phy_clock);
478
479 return 0; 411 return 0;
480} 412}
481 413
482#ifdef CONFIG_PM
483void musb_platform_save_context(struct musb *musb,
484 struct musb_context_registers *musb_context)
485{
486 phy_off();
487}
488
489void musb_platform_restore_context(struct musb *musb,
490 struct musb_context_registers *musb_context)
491{
492 phy_on();
493}
494#endif
495
496/* AM35x supports only 32bit read operation */ 414/* AM35x supports only 32bit read operation */
497void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst) 415void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
498{ 416{
@@ -522,3 +440,215 @@ void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
522 memcpy(dst, &val, len); 440 memcpy(dst, &val, len);
523 } 441 }
524} 442}
443
444static const struct musb_platform_ops am35x_ops = {
445 .init = am35x_musb_init,
446 .exit = am35x_musb_exit,
447
448 .enable = am35x_musb_enable,
449 .disable = am35x_musb_disable,
450
451 .set_mode = am35x_musb_set_mode,
452 .try_idle = am35x_musb_try_idle,
453
454 .set_vbus = am35x_musb_set_vbus,
455};
456
457static u64 am35x_dmamask = DMA_BIT_MASK(32);
458
459static int __init am35x_probe(struct platform_device *pdev)
460{
461 struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data;
462 struct platform_device *musb;
463 struct am35x_glue *glue;
464
465 struct clk *phy_clk;
466 struct clk *clk;
467
468 int ret = -ENOMEM;
469
470 glue = kzalloc(sizeof(*glue), GFP_KERNEL);
471 if (!glue) {
472 dev_err(&pdev->dev, "failed to allocate glue context\n");
473 goto err0;
474 }
475
476 musb = platform_device_alloc("musb-hdrc", -1);
477 if (!musb) {
478 dev_err(&pdev->dev, "failed to allocate musb device\n");
479 goto err1;
480 }
481
482 phy_clk = clk_get(&pdev->dev, "fck");
483 if (IS_ERR(phy_clk)) {
484 dev_err(&pdev->dev, "failed to get PHY clock\n");
485 ret = PTR_ERR(phy_clk);
486 goto err2;
487 }
488
489 clk = clk_get(&pdev->dev, "ick");
490 if (IS_ERR(clk)) {
491 dev_err(&pdev->dev, "failed to get clock\n");
492 ret = PTR_ERR(clk);
493 goto err3;
494 }
495
496 ret = clk_enable(phy_clk);
497 if (ret) {
498 dev_err(&pdev->dev, "failed to enable PHY clock\n");
499 goto err4;
500 }
501
502 ret = clk_enable(clk);
503 if (ret) {
504 dev_err(&pdev->dev, "failed to enable clock\n");
505 goto err5;
506 }
507
508 musb->dev.parent = &pdev->dev;
509 musb->dev.dma_mask = &am35x_dmamask;
510 musb->dev.coherent_dma_mask = am35x_dmamask;
511
512 glue->dev = &pdev->dev;
513 glue->musb = musb;
514 glue->phy_clk = phy_clk;
515 glue->clk = clk;
516
517 pdata->platform_ops = &am35x_ops;
518
519 platform_set_drvdata(pdev, glue);
520
521 ret = platform_device_add_resources(musb, pdev->resource,
522 pdev->num_resources);
523 if (ret) {
524 dev_err(&pdev->dev, "failed to add resources\n");
525 goto err6;
526 }
527
528 ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
529 if (ret) {
530 dev_err(&pdev->dev, "failed to add platform_data\n");
531 goto err6;
532 }
533
534 ret = platform_device_add(musb);
535 if (ret) {
536 dev_err(&pdev->dev, "failed to register musb device\n");
537 goto err6;
538 }
539
540 return 0;
541
542err6:
543 clk_disable(clk);
544
545err5:
546 clk_disable(phy_clk);
547
548err4:
549 clk_put(clk);
550
551err3:
552 clk_put(phy_clk);
553
554err2:
555 platform_device_put(musb);
556
557err1:
558 kfree(glue);
559
560err0:
561 return ret;
562}
563
564static int __exit am35x_remove(struct platform_device *pdev)
565{
566 struct am35x_glue *glue = platform_get_drvdata(pdev);
567
568 platform_device_del(glue->musb);
569 platform_device_put(glue->musb);
570 clk_disable(glue->clk);
571 clk_disable(glue->phy_clk);
572 clk_put(glue->clk);
573 clk_put(glue->phy_clk);
574 kfree(glue);
575
576 return 0;
577}
578
579#ifdef CONFIG_PM
580static int am35x_suspend(struct device *dev)
581{
582 struct am35x_glue *glue = dev_get_drvdata(dev);
583 struct musb_hdrc_platform_data *plat = dev->platform_data;
584 struct omap_musb_board_data *data = plat->board_data;
585
586 /* Shutdown the on-chip PHY and its PLL. */
587 if (data->set_phy_power)
588 data->set_phy_power(0);
589
590 clk_disable(glue->phy_clk);
591 clk_disable(glue->clk);
592
593 return 0;
594}
595
596static int am35x_resume(struct device *dev)
597{
598 struct am35x_glue *glue = dev_get_drvdata(dev);
599 struct musb_hdrc_platform_data *plat = dev->platform_data;
600 struct omap_musb_board_data *data = plat->board_data;
601 int ret;
602
603 /* Start the on-chip PHY and its PLL. */
604 if (data->set_phy_power)
605 data->set_phy_power(1);
606
607 ret = clk_enable(glue->phy_clk);
608 if (ret) {
609 dev_err(dev, "failed to enable PHY clock\n");
610 return ret;
611 }
612
613 ret = clk_enable(glue->clk);
614 if (ret) {
615 dev_err(dev, "failed to enable clock\n");
616 return ret;
617 }
618
619 return 0;
620}
621
622static struct dev_pm_ops am35x_pm_ops = {
623 .suspend = am35x_suspend,
624 .resume = am35x_resume,
625};
626
627#define DEV_PM_OPS &am35x_pm_ops
628#else
629#define DEV_PM_OPS NULL
630#endif
631
632static struct platform_driver am35x_driver = {
633 .remove = __exit_p(am35x_remove),
634 .driver = {
635 .name = "musb-am35x",
636 .pm = DEV_PM_OPS,
637 },
638};
639
640MODULE_DESCRIPTION("AM35x MUSB Glue Layer");
641MODULE_AUTHOR("Ajay Kumar Gupta <ajay.gupta@ti.com>");
642MODULE_LICENSE("GPL v2");
643
644static int __init am35x_init(void)
645{
646 return platform_driver_probe(&am35x_driver, am35x_probe);
647}
648subsys_initcall(am35x_init);
649
650static void __exit am35x_exit(void)
651{
652 platform_driver_unregister(&am35x_driver);
653}
654module_exit(am35x_exit);
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index fcb5206a65bd..eeba228eb2af 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -15,12 +15,20 @@
15#include <linux/list.h> 15#include <linux/list.h>
16#include <linux/gpio.h> 16#include <linux/gpio.h>
17#include <linux/io.h> 17#include <linux/io.h>
18#include <linux/platform_device.h>
19#include <linux/dma-mapping.h>
18 20
19#include <asm/cacheflush.h> 21#include <asm/cacheflush.h>
20 22
21#include "musb_core.h" 23#include "musb_core.h"
22#include "blackfin.h" 24#include "blackfin.h"
23 25
26struct bfin_glue {
27 struct device *dev;
28 struct platform_device *musb;
29};
30#define glue_to_musb(g) platform_get_drvdata(g->musb)
31
24/* 32/*
25 * Load an endpoint's FIFO 33 * Load an endpoint's FIFO
26 */ 34 */
@@ -278,7 +286,7 @@ static void musb_conn_timer_handler(unsigned long _musb)
278 DBG(4, "state is %s\n", otg_state_string(musb)); 286 DBG(4, "state is %s\n", otg_state_string(musb));
279} 287}
280 288
281void musb_platform_enable(struct musb *musb) 289static void bfin_musb_enable(struct musb *musb)
282{ 290{
283 if (!is_otg_enabled(musb) && is_host_enabled(musb)) { 291 if (!is_otg_enabled(musb) && is_host_enabled(musb)) {
284 mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY); 292 mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY);
@@ -286,11 +294,11 @@ void musb_platform_enable(struct musb *musb)
286 } 294 }
287} 295}
288 296
289void musb_platform_disable(struct musb *musb) 297static void bfin_musb_disable(struct musb *musb)
290{ 298{
291} 299}
292 300
293static void bfin_set_vbus(struct musb *musb, int is_on) 301static void bfin_musb_set_vbus(struct musb *musb, int is_on)
294{ 302{
295 int value = musb->config->gpio_vrsel_active; 303 int value = musb->config->gpio_vrsel_active;
296 if (!is_on) 304 if (!is_on)
@@ -303,28 +311,28 @@ static void bfin_set_vbus(struct musb *musb, int is_on)
303 musb_readb(musb->mregs, MUSB_DEVCTL)); 311 musb_readb(musb->mregs, MUSB_DEVCTL));
304} 312}
305 313
306static int bfin_set_power(struct otg_transceiver *x, unsigned mA) 314static int bfin_musb_set_power(struct otg_transceiver *x, unsigned mA)
307{ 315{
308 return 0; 316 return 0;
309} 317}
310 318
311void musb_platform_try_idle(struct musb *musb, unsigned long timeout) 319static void bfin_musb_try_idle(struct musb *musb, unsigned long timeout)
312{ 320{
313 if (!is_otg_enabled(musb) && is_host_enabled(musb)) 321 if (!is_otg_enabled(musb) && is_host_enabled(musb))
314 mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY); 322 mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY);
315} 323}
316 324
317int musb_platform_get_vbus_status(struct musb *musb) 325static int bfin_musb_get_vbus_status(struct musb *musb)
318{ 326{
319 return 0; 327 return 0;
320} 328}
321 329
322int musb_platform_set_mode(struct musb *musb, u8 musb_mode) 330static int bfin_musb_set_mode(struct musb *musb, u8 musb_mode)
323{ 331{
324 return -EIO; 332 return -EIO;
325} 333}
326 334
327static void musb_platform_reg_init(struct musb *musb) 335static void bfin_musb_reg_init(struct musb *musb)
328{ 336{
329 if (ANOMALY_05000346) { 337 if (ANOMALY_05000346) {
330 bfin_write_USB_APHY_CALIB(ANOMALY_05000346_value); 338 bfin_write_USB_APHY_CALIB(ANOMALY_05000346_value);
@@ -362,7 +370,7 @@ static void musb_platform_reg_init(struct musb *musb)
362 SSYNC(); 370 SSYNC();
363} 371}
364 372
365int __init musb_platform_init(struct musb *musb, void *board_data) 373static int bfin_musb_init(struct musb *musb)
366{ 374{
367 375
368 /* 376 /*
@@ -386,25 +394,124 @@ int __init musb_platform_init(struct musb *musb, void *board_data)
386 return -ENODEV; 394 return -ENODEV;
387 } 395 }
388 396
389 musb_platform_reg_init(musb); 397 bfin_musb_reg_init(musb);
390 398
391 if (is_host_enabled(musb)) { 399 if (is_host_enabled(musb)) {
392 musb->board_set_vbus = bfin_set_vbus;
393 setup_timer(&musb_conn_timer, 400 setup_timer(&musb_conn_timer,
394 musb_conn_timer_handler, (unsigned long) musb); 401 musb_conn_timer_handler, (unsigned long) musb);
395 } 402 }
396 if (is_peripheral_enabled(musb)) 403 if (is_peripheral_enabled(musb))
397 musb->xceiv->set_power = bfin_set_power; 404 musb->xceiv->set_power = bfin_musb_set_power;
398 405
399 musb->isr = blackfin_interrupt; 406 musb->isr = blackfin_interrupt;
400 407
401 return 0; 408 return 0;
402} 409}
403 410
411static int bfin_musb_exit(struct musb *musb)
412{
413 gpio_free(musb->config->gpio_vrsel);
414
415 otg_put_transceiver(musb->xceiv);
416 usb_nop_xceiv_unregister();
417 return 0;
418}
419
420static const struct musb_platform_ops bfin_ops = {
421 .init = bfin_musb_init,
422 .exit = bfin_musb_exit,
423
424 .enable = bfin_musb_enable,
425 .disable = bfin_musb_disable,
426
427 .set_mode = bfin_musb_set_mode,
428 .try_idle = bfin_musb_try_idle,
429
430 .vbus_status = bfin_musb_vbus_status,
431 .set_vbus = bfin_musb_set_vbus,
432};
433
434static u64 bfin_dmamask = DMA_BIT_MASK(32);
435
436static int __init bfin_probe(struct platform_device *pdev)
437{
438 struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data;
439 struct platform_device *musb;
440 struct bfin_glue *glue;
441
442 int ret = -ENOMEM;
443
444 glue = kzalloc(sizeof(*glue), GFP_KERNEL);
445 if (!glue) {
446 dev_err(&pdev->dev, "failed to allocate glue context\n");
447 goto err0;
448 }
449
450 musb = platform_device_alloc("musb-hdrc", -1);
451 if (!musb) {
452 dev_err(&pdev->dev, "failed to allocate musb device\n");
453 goto err1;
454 }
455
456 musb->dev.parent = &pdev->dev;
457 musb->dev.dma_mask = &bfin_dmamask;
458 musb->dev.coherent_dma_mask = bfin_dmamask;
459
460 glue->dev = &pdev->dev;
461 glue->musb = musb;
462
463 pdata->platform_ops = &bfin_ops;
464
465 platform_set_drvdata(pdev, glue);
466
467 ret = platform_device_add_resources(musb, pdev->resource,
468 pdev->num_resources);
469 if (ret) {
470 dev_err(&pdev->dev, "failed to add resources\n");
471 goto err2;
472 }
473
474 ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
475 if (ret) {
476 dev_err(&pdev->dev, "failed to add platform_data\n");
477 goto err2;
478 }
479
480 ret = platform_device_add(musb);
481 if (ret) {
482 dev_err(&pdev->dev, "failed to register musb device\n");
483 goto err2;
484 }
485
486 return 0;
487
488err2:
489 platform_device_put(musb);
490
491err1:
492 kfree(glue);
493
494err0:
495 return ret;
496}
497
498static int __exit bfin_remove(struct platform_device *pdev)
499{
500 struct bfin_glue *glue = platform_get_drvdata(pdev);
501
502 platform_device_del(glue->musb);
503 platform_device_put(glue->musb);
504 kfree(glue);
505
506 return 0;
507}
508
404#ifdef CONFIG_PM 509#ifdef CONFIG_PM
405void musb_platform_save_context(struct musb *musb, 510static int bfin_suspend(struct device *dev)
406 struct musb_context_registers *musb_context)
407{ 511{
512 struct bfin_glue *glue = dev_get_drvdata(dev);
513 struct musb *musb = glue_to_musb(glue);
514
408 if (is_host_active(musb)) 515 if (is_host_active(musb))
409 /* 516 /*
410 * During hibernate gpio_vrsel will change from high to low 517 * During hibernate gpio_vrsel will change from high to low
@@ -413,20 +520,50 @@ void musb_platform_save_context(struct musb *musb,
413 * wakeup event. 520 * wakeup event.
414 */ 521 */
415 gpio_set_value(musb->config->gpio_vrsel, 0); 522 gpio_set_value(musb->config->gpio_vrsel, 0);
523
524 return 0;
416} 525}
417 526
418void musb_platform_restore_context(struct musb *musb, 527static int bfin_resume(struct device *dev)
419 struct musb_context_registers *musb_context)
420{ 528{
421 musb_platform_reg_init(musb); 529 struct bfin_glue *glue = dev_get_drvdata(dev);
530 struct musb *musb = glue_to_musb(glue);
531
532 bfin_musb_reg_init(musb);
533
534 return 0;
422} 535}
536
537static struct dev_pm_ops bfin_pm_ops = {
538 .suspend = bfin_suspend,
539 .resume = bfin_resume,
540};
541
542#define DEV_PM_OPS &bfin_pm_op,
543#else
544#define DEV_PM_OPS NULL
423#endif 545#endif
424 546
425int musb_platform_exit(struct musb *musb) 547static struct platform_driver bfin_driver = {
548 .remove = __exit_p(bfin_remove),
549 .driver = {
550 .name = "musb-bfin",
551 .pm = DEV_PM_OPS,
552 },
553};
554
555MODULE_DESCRIPTION("Blackfin MUSB Glue Layer");
556MODULE_AUTHOR("Bryan Wy <cooloney@kernel.org>");
557MODULE_LICENSE("GPL v2");
558
559static int __init bfin_init(void)
426{ 560{
427 gpio_free(musb->config->gpio_vrsel); 561 return platform_driver_probe(&bfin_driver, bfin_probe);
562}
563subsys_initcall(bfin_init);
428 564
429 otg_put_transceiver(musb->xceiv); 565static void __exit bfin_exit(void)
430 usb_nop_xceiv_unregister(); 566{
431 return 0; 567 platform_driver_unregister(&bfin_driver);
432} 568}
569module_exit(bfin_exit);
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index f5a65ff0ac2b..de55a3c3259a 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -1308,7 +1308,7 @@ dma_controller_create(struct musb *musb, void __iomem *mregs)
1308 struct cppi *controller; 1308 struct cppi *controller;
1309 struct device *dev = musb->controller; 1309 struct device *dev = musb->controller;
1310 struct platform_device *pdev = to_platform_device(dev); 1310 struct platform_device *pdev = to_platform_device(dev);
1311 int irq = platform_get_irq(pdev, 1); 1311 int irq = platform_get_irq_byname(pdev, "dma");
1312 1312
1313 controller = kzalloc(sizeof *controller, GFP_KERNEL); 1313 controller = kzalloc(sizeof *controller, GFP_KERNEL);
1314 if (!controller) 1314 if (!controller)
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index 84427bebbf62..69a0da3c8f09 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -29,6 +29,8 @@
29#include <linux/init.h> 29#include <linux/init.h>
30#include <linux/clk.h> 30#include <linux/clk.h>
31#include <linux/io.h> 31#include <linux/io.h>
32#include <linux/platform_device.h>
33#include <linux/dma-mapping.h>
32 34
33#include <mach/da8xx.h> 35#include <mach/da8xx.h>
34#include <mach/usb.h> 36#include <mach/usb.h>
@@ -78,6 +80,12 @@
78 80
79#define CFGCHIP2 IO_ADDRESS(DA8XX_SYSCFG0_BASE + DA8XX_CFGCHIP2_REG) 81#define CFGCHIP2 IO_ADDRESS(DA8XX_SYSCFG0_BASE + DA8XX_CFGCHIP2_REG)
80 82
83struct da8xx_glue {
84 struct device *dev;
85 struct platform_device *musb;
86 struct clk *clk;
87};
88
81/* 89/*
82 * REVISIT (PM): we should be able to keep the PHY in low power mode most 90 * REVISIT (PM): we should be able to keep the PHY in low power mode most
83 * of the time (24 MHz oscillator and PLL off, etc.) by setting POWER.D0 91 * of the time (24 MHz oscillator and PLL off, etc.) by setting POWER.D0
@@ -131,9 +139,9 @@ static inline void phy_off(void)
131 */ 139 */
132 140
133/** 141/**
134 * musb_platform_enable - enable interrupts 142 * da8xx_musb_enable - enable interrupts
135 */ 143 */
136void musb_platform_enable(struct musb *musb) 144static void da8xx_musb_enable(struct musb *musb)
137{ 145{
138 void __iomem *reg_base = musb->ctrl_base; 146 void __iomem *reg_base = musb->ctrl_base;
139 u32 mask; 147 u32 mask;
@@ -151,9 +159,9 @@ void musb_platform_enable(struct musb *musb)
151} 159}
152 160
153/** 161/**
154 * musb_platform_disable - disable HDRC and flush interrupts 162 * da8xx_musb_disable - disable HDRC and flush interrupts
155 */ 163 */
156void musb_platform_disable(struct musb *musb) 164static void da8xx_musb_disable(struct musb *musb)
157{ 165{
158 void __iomem *reg_base = musb->ctrl_base; 166 void __iomem *reg_base = musb->ctrl_base;
159 167
@@ -170,7 +178,7 @@ void musb_platform_disable(struct musb *musb)
170#define portstate(stmt) 178#define portstate(stmt)
171#endif 179#endif
172 180
173static void da8xx_set_vbus(struct musb *musb, int is_on) 181static void da8xx_musb_set_vbus(struct musb *musb, int is_on)
174{ 182{
175 WARN_ON(is_on && is_peripheral_active(musb)); 183 WARN_ON(is_on && is_peripheral_active(musb));
176} 184}
@@ -252,7 +260,7 @@ static void otg_timer(unsigned long _musb)
252 spin_unlock_irqrestore(&musb->lock, flags); 260 spin_unlock_irqrestore(&musb->lock, flags);
253} 261}
254 262
255void musb_platform_try_idle(struct musb *musb, unsigned long timeout) 263static void da8xx_musb_try_idle(struct musb *musb, unsigned long timeout)
256{ 264{
257 static unsigned long last_timer; 265 static unsigned long last_timer;
258 266
@@ -282,7 +290,7 @@ void musb_platform_try_idle(struct musb *musb, unsigned long timeout)
282 mod_timer(&otg_workaround, timeout); 290 mod_timer(&otg_workaround, timeout);
283} 291}
284 292
285static irqreturn_t da8xx_interrupt(int irq, void *hci) 293static irqreturn_t da8xx_musb_interrupt(int irq, void *hci)
286{ 294{
287 struct musb *musb = hci; 295 struct musb *musb = hci;
288 void __iomem *reg_base = musb->ctrl_base; 296 void __iomem *reg_base = musb->ctrl_base;
@@ -380,7 +388,7 @@ static irqreturn_t da8xx_interrupt(int irq, void *hci)
380 return ret; 388 return ret;
381} 389}
382 390
383int musb_platform_set_mode(struct musb *musb, u8 musb_mode) 391static int da8xx_musb_set_mode(struct musb *musb, u8 musb_mode)
384{ 392{
385 u32 cfgchip2 = __raw_readl(CFGCHIP2); 393 u32 cfgchip2 = __raw_readl(CFGCHIP2);
386 394
@@ -409,15 +417,13 @@ int musb_platform_set_mode(struct musb *musb, u8 musb_mode)
409 return 0; 417 return 0;
410} 418}
411 419
412int __init musb_platform_init(struct musb *musb, void *board_data) 420static int da8xx_musb_init(struct musb *musb)
413{ 421{
414 void __iomem *reg_base = musb->ctrl_base; 422 void __iomem *reg_base = musb->ctrl_base;
415 u32 rev; 423 u32 rev;
416 424
417 musb->mregs += DA8XX_MENTOR_CORE_OFFSET; 425 musb->mregs += DA8XX_MENTOR_CORE_OFFSET;
418 426
419 clk_enable(musb->clock);
420
421 /* Returns zero if e.g. not clocked */ 427 /* Returns zero if e.g. not clocked */
422 rev = musb_readl(reg_base, DA8XX_USB_REVISION_REG); 428 rev = musb_readl(reg_base, DA8XX_USB_REVISION_REG);
423 if (!rev) 429 if (!rev)
@@ -431,8 +437,6 @@ int __init musb_platform_init(struct musb *musb, void *board_data)
431 if (is_host_enabled(musb)) 437 if (is_host_enabled(musb))
432 setup_timer(&otg_workaround, otg_timer, (unsigned long)musb); 438 setup_timer(&otg_workaround, otg_timer, (unsigned long)musb);
433 439
434 musb->board_set_vbus = da8xx_set_vbus;
435
436 /* Reset the controller */ 440 /* Reset the controller */
437 musb_writel(reg_base, DA8XX_USB_CTRL_REG, DA8XX_SOFT_RESET_MASK); 441 musb_writel(reg_base, DA8XX_USB_CTRL_REG, DA8XX_SOFT_RESET_MASK);
438 442
@@ -446,14 +450,13 @@ int __init musb_platform_init(struct musb *musb, void *board_data)
446 rev, __raw_readl(CFGCHIP2), 450 rev, __raw_readl(CFGCHIP2),
447 musb_readb(reg_base, DA8XX_USB_CTRL_REG)); 451 musb_readb(reg_base, DA8XX_USB_CTRL_REG));
448 452
449 musb->isr = da8xx_interrupt; 453 musb->isr = da8xx_musb_interrupt;
450 return 0; 454 return 0;
451fail: 455fail:
452 clk_disable(musb->clock);
453 return -ENODEV; 456 return -ENODEV;
454} 457}
455 458
456int musb_platform_exit(struct musb *musb) 459static int da8xx_musb_exit(struct musb *musb)
457{ 460{
458 if (is_host_enabled(musb)) 461 if (is_host_enabled(musb))
459 del_timer_sync(&otg_workaround); 462 del_timer_sync(&otg_workaround);
@@ -463,7 +466,140 @@ int musb_platform_exit(struct musb *musb)
463 otg_put_transceiver(musb->xceiv); 466 otg_put_transceiver(musb->xceiv);
464 usb_nop_xceiv_unregister(); 467 usb_nop_xceiv_unregister();
465 468
466 clk_disable(musb->clock); 469 return 0;
470}
471
472static const struct musb_platform_ops da8xx_ops = {
473 .init = da8xx_musb_init,
474 .exit = da8xx_musb_exit,
475
476 .enable = da8xx_musb_enable,
477 .disable = da8xx_musb_disable,
478
479 .set_mode = da8xx_musb_set_mode,
480 .try_idle = da8xx_musb_try_idle,
481
482 .set_vbus = da8xx_musb_set_vbus,
483};
484
485static u64 da8xx_dmamask = DMA_BIT_MASK(32);
486
487static int __init da8xx_probe(struct platform_device *pdev)
488{
489 struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data;
490 struct platform_device *musb;
491 struct da8xx_glue *glue;
492
493 struct clk *clk;
494
495 int ret = -ENOMEM;
496
497 glue = kzalloc(sizeof(*glue), GFP_KERNEL);
498 if (!glue) {
499 dev_err(&pdev->dev, "failed to allocate glue context\n");
500 goto err0;
501 }
502
503 musb = platform_device_alloc("musb-hdrc", -1);
504 if (!musb) {
505 dev_err(&pdev->dev, "failed to allocate musb device\n");
506 goto err1;
507 }
508
509 clk = clk_get(&pdev->dev, "usb20");
510 if (IS_ERR(clk)) {
511 dev_err(&pdev->dev, "failed to get clock\n");
512 ret = PTR_ERR(clk);
513 goto err2;
514 }
515
516 ret = clk_enable(clk);
517 if (ret) {
518 dev_err(&pdev->dev, "failed to enable clock\n");
519 goto err3;
520 }
521
522 musb->dev.parent = &pdev->dev;
523 musb->dev.dma_mask = &da8xx_dmamask;
524 musb->dev.coherent_dma_mask = da8xx_dmamask;
525
526 glue->dev = &pdev->dev;
527 glue->musb = musb;
528 glue->clk = clk;
529
530 pdata->platform_ops = &da8xx_ops;
531
532 platform_set_drvdata(pdev, glue);
533
534 ret = platform_device_add_resources(musb, pdev->resource,
535 pdev->num_resources);
536 if (ret) {
537 dev_err(&pdev->dev, "failed to add resources\n");
538 goto err4;
539 }
540
541 ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
542 if (ret) {
543 dev_err(&pdev->dev, "failed to add platform_data\n");
544 goto err4;
545 }
546
547 ret = platform_device_add(musb);
548 if (ret) {
549 dev_err(&pdev->dev, "failed to register musb device\n");
550 goto err4;
551 }
552
553 return 0;
554
555err4:
556 clk_disable(clk);
557
558err3:
559 clk_put(clk);
560
561err2:
562 platform_device_put(musb);
563
564err1:
565 kfree(glue);
566
567err0:
568 return ret;
569}
570
571static int __exit da8xx_remove(struct platform_device *pdev)
572{
573 struct da8xx_glue *glue = platform_get_drvdata(pdev);
574
575 platform_device_del(glue->musb);
576 platform_device_put(glue->musb);
577 clk_disable(glue->clk);
578 clk_put(glue->clk);
579 kfree(glue);
467 580
468 return 0; 581 return 0;
469} 582}
583
584static struct platform_driver da8xx_driver = {
585 .remove = __exit_p(da8xx_remove),
586 .driver = {
587 .name = "musb-da8xx",
588 },
589};
590
591MODULE_DESCRIPTION("DA8xx/OMAP-L1x MUSB Glue Layer");
592MODULE_AUTHOR("Sergei Shtylyov <sshtylyov@ru.mvista.com>");
593MODULE_LICENSE("GPL v2");
594
595static int __init da8xx_init(void)
596{
597 return platform_driver_probe(&da8xx_driver, da8xx_probe);
598}
599subsys_initcall(da8xx_init);
600
601static void __exit da8xx_exit(void)
602{
603 platform_driver_unregister(&da8xx_driver);
604}
605module_exit(da8xx_exit);
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
index 6e67629f50cc..e6de097fb7e8 100644
--- a/drivers/usb/musb/davinci.c
+++ b/drivers/usb/musb/davinci.c
@@ -30,6 +30,8 @@
30#include <linux/clk.h> 30#include <linux/clk.h>
31#include <linux/io.h> 31#include <linux/io.h>
32#include <linux/gpio.h> 32#include <linux/gpio.h>
33#include <linux/platform_device.h>
34#include <linux/dma-mapping.h>
33 35
34#include <mach/hardware.h> 36#include <mach/hardware.h>
35#include <mach/memory.h> 37#include <mach/memory.h>
@@ -51,6 +53,12 @@
51#define USB_PHY_CTRL IO_ADDRESS(USBPHY_CTL_PADDR) 53#define USB_PHY_CTRL IO_ADDRESS(USBPHY_CTL_PADDR)
52#define DM355_DEEPSLEEP IO_ADDRESS(DM355_DEEPSLEEP_PADDR) 54#define DM355_DEEPSLEEP IO_ADDRESS(DM355_DEEPSLEEP_PADDR)
53 55
56struct davinci_glue {
57 struct device *dev;
58 struct platform_device *musb;
59 struct clk *clk;
60};
61
54/* REVISIT (PM) we should be able to keep the PHY in low power mode most 62/* REVISIT (PM) we should be able to keep the PHY in low power mode most
55 * of the time (24 MHZ oscillator and PLL off, etc) by setting POWER.D0 63 * of the time (24 MHZ oscillator and PLL off, etc) by setting POWER.D0
56 * and, when in host mode, autosuspending idle root ports... PHYPLLON 64 * and, when in host mode, autosuspending idle root ports... PHYPLLON
@@ -83,7 +91,7 @@ static inline void phy_off(void)
83 91
84static int dma_off = 1; 92static int dma_off = 1;
85 93
86void musb_platform_enable(struct musb *musb) 94static void davinci_musb_enable(struct musb *musb)
87{ 95{
88 u32 tmp, old, val; 96 u32 tmp, old, val;
89 97
@@ -116,7 +124,7 @@ void musb_platform_enable(struct musb *musb)
116/* 124/*
117 * Disable the HDRC and flush interrupts 125 * Disable the HDRC and flush interrupts
118 */ 126 */
119void musb_platform_disable(struct musb *musb) 127static void davinci_musb_disable(struct musb *musb)
120{ 128{
121 /* because we don't set CTRLR.UINT, "important" to: 129 /* because we don't set CTRLR.UINT, "important" to:
122 * - not read/write INTRUSB/INTRUSBE 130 * - not read/write INTRUSB/INTRUSBE
@@ -167,7 +175,7 @@ static void evm_deferred_drvvbus(struct work_struct *ignored)
167 175
168#endif /* EVM */ 176#endif /* EVM */
169 177
170static void davinci_source_power(struct musb *musb, int is_on, int immediate) 178static void davinci_musb_source_power(struct musb *musb, int is_on, int immediate)
171{ 179{
172#ifdef CONFIG_MACH_DAVINCI_EVM 180#ifdef CONFIG_MACH_DAVINCI_EVM
173 if (is_on) 181 if (is_on)
@@ -190,10 +198,10 @@ static void davinci_source_power(struct musb *musb, int is_on, int immediate)
190#endif 198#endif
191} 199}
192 200
193static void davinci_set_vbus(struct musb *musb, int is_on) 201static void davinci_musb_set_vbus(struct musb *musb, int is_on)
194{ 202{
195 WARN_ON(is_on && is_peripheral_active(musb)); 203 WARN_ON(is_on && is_peripheral_active(musb));
196 davinci_source_power(musb, is_on, 0); 204 davinci_musb_source_power(musb, is_on, 0);
197} 205}
198 206
199 207
@@ -259,7 +267,7 @@ static void otg_timer(unsigned long _musb)
259 spin_unlock_irqrestore(&musb->lock, flags); 267 spin_unlock_irqrestore(&musb->lock, flags);
260} 268}
261 269
262static irqreturn_t davinci_interrupt(int irq, void *__hci) 270static irqreturn_t davinci_musb_interrupt(int irq, void *__hci)
263{ 271{
264 unsigned long flags; 272 unsigned long flags;
265 irqreturn_t retval = IRQ_NONE; 273 irqreturn_t retval = IRQ_NONE;
@@ -345,7 +353,7 @@ static irqreturn_t davinci_interrupt(int irq, void *__hci)
345 /* NOTE: this must complete poweron within 100 msec 353 /* NOTE: this must complete poweron within 100 msec
346 * (OTG_TIME_A_WAIT_VRISE) but we don't check for that. 354 * (OTG_TIME_A_WAIT_VRISE) but we don't check for that.
347 */ 355 */
348 davinci_source_power(musb, drvvbus, 0); 356 davinci_musb_source_power(musb, drvvbus, 0);
349 DBG(2, "VBUS %s (%s)%s, devctl %02x\n", 357 DBG(2, "VBUS %s (%s)%s, devctl %02x\n",
350 drvvbus ? "on" : "off", 358 drvvbus ? "on" : "off",
351 otg_state_string(musb), 359 otg_state_string(musb),
@@ -370,13 +378,13 @@ static irqreturn_t davinci_interrupt(int irq, void *__hci)
370 return retval; 378 return retval;
371} 379}
372 380
373int musb_platform_set_mode(struct musb *musb, u8 mode) 381static int davinci_musb_set_mode(struct musb *musb, u8 mode)
374{ 382{
375 /* EVM can't do this (right?) */ 383 /* EVM can't do this (right?) */
376 return -EIO; 384 return -EIO;
377} 385}
378 386
379int __init musb_platform_init(struct musb *musb, void *board_data) 387static int davinci_musb_init(struct musb *musb)
380{ 388{
381 void __iomem *tibase = musb->ctrl_base; 389 void __iomem *tibase = musb->ctrl_base;
382 u32 revision; 390 u32 revision;
@@ -388,8 +396,6 @@ int __init musb_platform_init(struct musb *musb, void *board_data)
388 396
389 musb->mregs += DAVINCI_BASE_OFFSET; 397 musb->mregs += DAVINCI_BASE_OFFSET;
390 398
391 clk_enable(musb->clock);
392
393 /* returns zero if e.g. not clocked */ 399 /* returns zero if e.g. not clocked */
394 revision = musb_readl(tibase, DAVINCI_USB_VERSION_REG); 400 revision = musb_readl(tibase, DAVINCI_USB_VERSION_REG);
395 if (revision == 0) 401 if (revision == 0)
@@ -398,8 +404,7 @@ int __init musb_platform_init(struct musb *musb, void *board_data)
398 if (is_host_enabled(musb)) 404 if (is_host_enabled(musb))
399 setup_timer(&otg_workaround, otg_timer, (unsigned long) musb); 405 setup_timer(&otg_workaround, otg_timer, (unsigned long) musb);
400 406
401 musb->board_set_vbus = davinci_set_vbus; 407 davinci_musb_source_power(musb, 0, 1);
402 davinci_source_power(musb, 0, 1);
403 408
404 /* dm355 EVM swaps D+/D- for signal integrity, and 409 /* dm355 EVM swaps D+/D- for signal integrity, and
405 * is clocked from the main 24 MHz crystal. 410 * is clocked from the main 24 MHz crystal.
@@ -440,18 +445,16 @@ int __init musb_platform_init(struct musb *musb, void *board_data)
440 revision, __raw_readl(USB_PHY_CTRL), 445 revision, __raw_readl(USB_PHY_CTRL),
441 musb_readb(tibase, DAVINCI_USB_CTRL_REG)); 446 musb_readb(tibase, DAVINCI_USB_CTRL_REG));
442 447
443 musb->isr = davinci_interrupt; 448 musb->isr = davinci_musb_interrupt;
444 return 0; 449 return 0;
445 450
446fail: 451fail:
447 clk_disable(musb->clock);
448
449 otg_put_transceiver(musb->xceiv); 452 otg_put_transceiver(musb->xceiv);
450 usb_nop_xceiv_unregister(); 453 usb_nop_xceiv_unregister();
451 return -ENODEV; 454 return -ENODEV;
452} 455}
453 456
454int musb_platform_exit(struct musb *musb) 457static int davinci_musb_exit(struct musb *musb)
455{ 458{
456 if (is_host_enabled(musb)) 459 if (is_host_enabled(musb))
457 del_timer_sync(&otg_workaround); 460 del_timer_sync(&otg_workaround);
@@ -465,7 +468,7 @@ int musb_platform_exit(struct musb *musb)
465 __raw_writel(deepsleep, DM355_DEEPSLEEP); 468 __raw_writel(deepsleep, DM355_DEEPSLEEP);
466 } 469 }
467 470
468 davinci_source_power(musb, 0 /*off*/, 1); 471 davinci_musb_source_power(musb, 0 /*off*/, 1);
469 472
470 /* delay, to avoid problems with module reload */ 473 /* delay, to avoid problems with module reload */
471 if (is_host_enabled(musb) && musb->xceiv->default_a) { 474 if (is_host_enabled(musb) && musb->xceiv->default_a) {
@@ -495,10 +498,141 @@ int musb_platform_exit(struct musb *musb)
495 498
496 phy_off(); 499 phy_off();
497 500
498 clk_disable(musb->clock);
499
500 otg_put_transceiver(musb->xceiv); 501 otg_put_transceiver(musb->xceiv);
501 usb_nop_xceiv_unregister(); 502 usb_nop_xceiv_unregister();
502 503
503 return 0; 504 return 0;
504} 505}
506
507static const struct musb_platform_ops davinci_ops = {
508 .init = davinci_musb_init,
509 .exit = davinci_musb_exit,
510
511 .enable = davinci_musb_enable,
512 .disable = davinci_musb_disable,
513
514 .set_mode = davinci_musb_set_mode,
515
516 .set_vbus = davinci_musb_set_vbus,
517};
518
519static u64 davinci_dmamask = DMA_BIT_MASK(32);
520
521static int __init davinci_probe(struct platform_device *pdev)
522{
523 struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data;
524 struct platform_device *musb;
525 struct davinci_glue *glue;
526 struct clk *clk;
527
528 int ret = -ENOMEM;
529
530 glue = kzalloc(sizeof(*glue), GFP_KERNEL);
531 if (!glue) {
532 dev_err(&pdev->dev, "failed to allocate glue context\n");
533 goto err0;
534 }
535
536 musb = platform_device_alloc("musb-hdrc", -1);
537 if (!musb) {
538 dev_err(&pdev->dev, "failed to allocate musb device\n");
539 goto err1;
540 }
541
542 clk = clk_get(&pdev->dev, "usb");
543 if (IS_ERR(clk)) {
544 dev_err(&pdev->dev, "failed to get clock\n");
545 ret = PTR_ERR(clk);
546 goto err2;
547 }
548
549 ret = clk_enable(clk);
550 if (ret) {
551 dev_err(&pdev->dev, "failed to enable clock\n");
552 goto err3;
553 }
554
555 musb->dev.parent = &pdev->dev;
556 musb->dev.dma_mask = &davinci_dmamask;
557 musb->dev.coherent_dma_mask = davinci_dmamask;
558
559 glue->dev = &pdev->dev;
560 glue->musb = musb;
561 glue->clk = clk;
562
563 pdata->platform_ops = &davinci_ops;
564
565 platform_set_drvdata(pdev, glue);
566
567 ret = platform_device_add_resources(musb, pdev->resource,
568 pdev->num_resources);
569 if (ret) {
570 dev_err(&pdev->dev, "failed to add resources\n");
571 goto err4;
572 }
573
574 ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
575 if (ret) {
576 dev_err(&pdev->dev, "failed to add platform_data\n");
577 goto err4;
578 }
579
580 ret = platform_device_add(musb);
581 if (ret) {
582 dev_err(&pdev->dev, "failed to register musb device\n");
583 goto err4;
584 }
585
586 return 0;
587
588err4:
589 clk_disable(clk);
590
591err3:
592 clk_put(clk);
593
594err2:
595 platform_device_put(musb);
596
597err1:
598 kfree(glue);
599
600err0:
601 return ret;
602}
603
604static int __exit davinci_remove(struct platform_device *pdev)
605{
606 struct davinci_glue *glue = platform_get_drvdata(pdev);
607
608 platform_device_del(glue->musb);
609 platform_device_put(glue->musb);
610 clk_disable(glue->clk);
611 clk_put(glue->clk);
612 kfree(glue);
613
614 return 0;
615}
616
617static struct platform_driver davinci_driver = {
618 .remove = __exit_p(davinci_remove),
619 .driver = {
620 .name = "musb-davinci",
621 },
622};
623
624MODULE_DESCRIPTION("DaVinci MUSB Glue Layer");
625MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
626MODULE_LICENSE("GPL v2");
627
628static int __init davinci_init(void)
629{
630 return platform_driver_probe(&davinci_driver, davinci_probe);
631}
632subsys_initcall(davinci_init);
633
634static void __exit davinci_exit(void)
635{
636 platform_driver_unregister(&davinci_driver);
637}
638module_exit(davinci_exit);
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 99beebce8550..07cf394e491b 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -99,19 +99,8 @@
99#include <linux/platform_device.h> 99#include <linux/platform_device.h>
100#include <linux/io.h> 100#include <linux/io.h>
101 101
102#ifdef CONFIG_ARM
103#include <mach/hardware.h>
104#include <mach/memory.h>
105#include <asm/mach-types.h>
106#endif
107
108#include "musb_core.h" 102#include "musb_core.h"
109 103
110
111#ifdef CONFIG_ARCH_DAVINCI
112#include "davinci.h"
113#endif
114
115#define TA_WAIT_BCON(m) max_t(int, (m)->a_wait_bcon, OTG_TIME_A_WAIT_BCON) 104#define TA_WAIT_BCON(m) max_t(int, (m)->a_wait_bcon, OTG_TIME_A_WAIT_BCON)
116 105
117 106
@@ -126,7 +115,7 @@ MODULE_PARM_DESC(debug, "Debug message level. Default = 0");
126 115
127#define DRIVER_INFO DRIVER_DESC ", v" MUSB_VERSION 116#define DRIVER_INFO DRIVER_DESC ", v" MUSB_VERSION
128 117
129#define MUSB_DRIVER_NAME "musb_hdrc" 118#define MUSB_DRIVER_NAME "musb-hdrc"
130const char musb_driver_name[] = MUSB_DRIVER_NAME; 119const char musb_driver_name[] = MUSB_DRIVER_NAME;
131 120
132MODULE_DESCRIPTION(DRIVER_INFO); 121MODULE_DESCRIPTION(DRIVER_INFO);
@@ -230,7 +219,7 @@ static struct otg_io_access_ops musb_ulpi_access = {
230 219
231/*-------------------------------------------------------------------------*/ 220/*-------------------------------------------------------------------------*/
232 221
233#if !defined(CONFIG_USB_TUSB6010) && !defined(CONFIG_BLACKFIN) 222#if !defined(CONFIG_USB_MUSB_TUSB6010) && !defined(CONFIG_USB_MUSB_BLACKFIN)
234 223
235/* 224/*
236 * Load an endpoint's FIFO 225 * Load an endpoint's FIFO
@@ -390,7 +379,7 @@ void musb_otg_timer_func(unsigned long data)
390 case OTG_STATE_A_SUSPEND: 379 case OTG_STATE_A_SUSPEND:
391 case OTG_STATE_A_WAIT_BCON: 380 case OTG_STATE_A_WAIT_BCON:
392 DBG(1, "HNP: %s timeout\n", otg_state_string(musb)); 381 DBG(1, "HNP: %s timeout\n", otg_state_string(musb));
393 musb_set_vbus(musb, 0); 382 musb_platform_set_vbus(musb, 0);
394 musb->xceiv->state = OTG_STATE_A_WAIT_VFALL; 383 musb->xceiv->state = OTG_STATE_A_WAIT_VFALL;
395 break; 384 break;
396 default: 385 default:
@@ -571,7 +560,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
571 musb->ep0_stage = MUSB_EP0_START; 560 musb->ep0_stage = MUSB_EP0_START;
572 musb->xceiv->state = OTG_STATE_A_IDLE; 561 musb->xceiv->state = OTG_STATE_A_IDLE;
573 MUSB_HST_MODE(musb); 562 MUSB_HST_MODE(musb);
574 musb_set_vbus(musb, 1); 563 musb_platform_set_vbus(musb, 1);
575 564
576 handled = IRQ_HANDLED; 565 handled = IRQ_HANDLED;
577 } 566 }
@@ -642,7 +631,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
642 631
643 /* go through A_WAIT_VFALL then start a new session */ 632 /* go through A_WAIT_VFALL then start a new session */
644 if (!ignore) 633 if (!ignore)
645 musb_set_vbus(musb, 0); 634 musb_platform_set_vbus(musb, 0);
646 handled = IRQ_HANDLED; 635 handled = IRQ_HANDLED;
647 } 636 }
648 637
@@ -1049,8 +1038,6 @@ static void musb_shutdown(struct platform_device *pdev)
1049 spin_lock_irqsave(&musb->lock, flags); 1038 spin_lock_irqsave(&musb->lock, flags);
1050 musb_platform_disable(musb); 1039 musb_platform_disable(musb);
1051 musb_generic_disable(musb); 1040 musb_generic_disable(musb);
1052 if (musb->clock)
1053 clk_put(musb->clock);
1054 spin_unlock_irqrestore(&musb->lock, flags); 1041 spin_unlock_irqrestore(&musb->lock, flags);
1055 1042
1056 if (!is_otg_enabled(musb) && is_host_enabled(musb)) 1043 if (!is_otg_enabled(musb) && is_host_enabled(musb))
@@ -1074,10 +1061,11 @@ static void musb_shutdown(struct platform_device *pdev)
1074 * We don't currently use dynamic fifo setup capability to do anything 1061 * We don't currently use dynamic fifo setup capability to do anything
1075 * more than selecting one of a bunch of predefined configurations. 1062 * more than selecting one of a bunch of predefined configurations.
1076 */ 1063 */
1077#if defined(CONFIG_USB_TUSB6010) || \ 1064#if defined(CONFIG_USB_MUSB_TUSB6010) || defined(CONFIG_USB_MUSB_OMAP2PLUS) \
1078 defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) \ 1065 || defined(CONFIG_USB_MUSB_AM35X)
1079 || defined(CONFIG_ARCH_OMAP4)
1080static ushort __initdata fifo_mode = 4; 1066static ushort __initdata fifo_mode = 4;
1067#elif defined(CONFIG_USB_MUSB_UX500)
1068static ushort __initdata fifo_mode = 5;
1081#else 1069#else
1082static ushort __initdata fifo_mode = 2; 1070static ushort __initdata fifo_mode = 2;
1083#endif 1071#endif
@@ -1501,7 +1489,7 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
1501 struct musb_hw_ep *hw_ep = musb->endpoints + i; 1489 struct musb_hw_ep *hw_ep = musb->endpoints + i;
1502 1490
1503 hw_ep->fifo = MUSB_FIFO_OFFSET(i) + mbase; 1491 hw_ep->fifo = MUSB_FIFO_OFFSET(i) + mbase;
1504#ifdef CONFIG_USB_TUSB6010 1492#ifdef CONFIG_USB_MUSB_TUSB6010
1505 hw_ep->fifo_async = musb->async + 0x400 + MUSB_FIFO_OFFSET(i); 1493 hw_ep->fifo_async = musb->async + 0x400 + MUSB_FIFO_OFFSET(i);
1506 hw_ep->fifo_sync = musb->sync + 0x400 + MUSB_FIFO_OFFSET(i); 1494 hw_ep->fifo_sync = musb->sync + 0x400 + MUSB_FIFO_OFFSET(i);
1507 hw_ep->fifo_sync_va = 1495 hw_ep->fifo_sync_va =
@@ -1548,7 +1536,8 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
1548/*-------------------------------------------------------------------------*/ 1536/*-------------------------------------------------------------------------*/
1549 1537
1550#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430) || \ 1538#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430) || \
1551 defined(CONFIG_ARCH_OMAP4) 1539 defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_ARCH_U8500) || \
1540 defined(CONFIG_ARCH_U5500)
1552 1541
1553static irqreturn_t generic_interrupt(int irq, void *__hci) 1542static irqreturn_t generic_interrupt(int irq, void *__hci)
1554{ 1543{
@@ -1904,6 +1893,7 @@ allocate_instance(struct device *dev,
1904 } 1893 }
1905 1894
1906 musb->controller = dev; 1895 musb->controller = dev;
1896
1907 return musb; 1897 return musb;
1908} 1898}
1909 1899
@@ -2000,30 +1990,14 @@ bad_config:
2000 spin_lock_init(&musb->lock); 1990 spin_lock_init(&musb->lock);
2001 musb->board_mode = plat->mode; 1991 musb->board_mode = plat->mode;
2002 musb->board_set_power = plat->set_power; 1992 musb->board_set_power = plat->set_power;
2003 musb->set_clock = plat->set_clock;
2004 musb->min_power = plat->min_power; 1993 musb->min_power = plat->min_power;
2005 1994 musb->ops = plat->platform_ops;
2006 /* Clock usage is chip-specific ... functional clock (DaVinci,
2007 * OMAP2430), or PHY ref (some TUSB6010 boards). All this core
2008 * code does is make sure a clock handle is available; platform
2009 * code manages it during start/stop and suspend/resume.
2010 */
2011 if (plat->clock) {
2012 musb->clock = clk_get(dev, plat->clock);
2013 if (IS_ERR(musb->clock)) {
2014 status = PTR_ERR(musb->clock);
2015 musb->clock = NULL;
2016 goto fail1;
2017 }
2018 }
2019 1995
2020 /* The musb_platform_init() call: 1996 /* The musb_platform_init() call:
2021 * - adjusts musb->mregs and musb->isr if needed, 1997 * - adjusts musb->mregs and musb->isr if needed,
2022 * - may initialize an integrated tranceiver 1998 * - may initialize an integrated tranceiver
2023 * - initializes musb->xceiv, usually by otg_get_transceiver() 1999 * - initializes musb->xceiv, usually by otg_get_transceiver()
2024 * - activates clocks.
2025 * - stops powering VBUS 2000 * - stops powering VBUS
2026 * - assigns musb->board_set_vbus if host mode is enabled
2027 * 2001 *
2028 * There are various transciever configurations. Blackfin, 2002 * There are various transciever configurations. Blackfin,
2029 * DaVinci, TUSB60x0, and others integrate them. OMAP3 uses 2003 * DaVinci, TUSB60x0, and others integrate them. OMAP3 uses
@@ -2031,9 +2005,9 @@ bad_config:
2031 * isp1504, non-OTG, etc) mostly hooking up through ULPI. 2005 * isp1504, non-OTG, etc) mostly hooking up through ULPI.
2032 */ 2006 */
2033 musb->isr = generic_interrupt; 2007 musb->isr = generic_interrupt;
2034 status = musb_platform_init(musb, plat->board_data); 2008 status = musb_platform_init(musb);
2035 if (status < 0) 2009 if (status < 0)
2036 goto fail2; 2010 goto fail1;
2037 2011
2038 if (!musb->isr) { 2012 if (!musb->isr) {
2039 status = -ENODEV; 2013 status = -ENODEV;
@@ -2186,10 +2160,6 @@ fail3:
2186 device_init_wakeup(dev, 0); 2160 device_init_wakeup(dev, 0);
2187 musb_platform_exit(musb); 2161 musb_platform_exit(musb);
2188 2162
2189fail2:
2190 if (musb->clock)
2191 clk_put(musb->clock);
2192
2193fail1: 2163fail1:
2194 dev_err(musb->controller, 2164 dev_err(musb->controller,
2195 "musb_init_controller failed with status %d\n", status); 2165 "musb_init_controller failed with status %d\n", status);
@@ -2215,7 +2185,7 @@ static u64 *orig_dma_mask;
2215static int __init musb_probe(struct platform_device *pdev) 2185static int __init musb_probe(struct platform_device *pdev)
2216{ 2186{
2217 struct device *dev = &pdev->dev; 2187 struct device *dev = &pdev->dev;
2218 int irq = platform_get_irq(pdev, 0); 2188 int irq = platform_get_irq_byname(pdev, "mc");
2219 int status; 2189 int status;
2220 struct resource *iomem; 2190 struct resource *iomem;
2221 void __iomem *base; 2191 void __iomem *base;
@@ -2265,144 +2235,138 @@ static int __exit musb_remove(struct platform_device *pdev)
2265 2235
2266#ifdef CONFIG_PM 2236#ifdef CONFIG_PM
2267 2237
2268static struct musb_context_registers musb_context; 2238static void musb_save_context(struct musb *musb)
2269
2270void musb_save_context(struct musb *musb)
2271{ 2239{
2272 int i; 2240 int i;
2273 void __iomem *musb_base = musb->mregs; 2241 void __iomem *musb_base = musb->mregs;
2274 void __iomem *epio; 2242 void __iomem *epio;
2275 2243
2276 if (is_host_enabled(musb)) { 2244 if (is_host_enabled(musb)) {
2277 musb_context.frame = musb_readw(musb_base, MUSB_FRAME); 2245 musb->context.frame = musb_readw(musb_base, MUSB_FRAME);
2278 musb_context.testmode = musb_readb(musb_base, MUSB_TESTMODE); 2246 musb->context.testmode = musb_readb(musb_base, MUSB_TESTMODE);
2279 musb_context.busctl = musb_read_ulpi_buscontrol(musb->mregs); 2247 musb->context.busctl = musb_read_ulpi_buscontrol(musb->mregs);
2280 } 2248 }
2281 musb_context.power = musb_readb(musb_base, MUSB_POWER); 2249 musb->context.power = musb_readb(musb_base, MUSB_POWER);
2282 musb_context.intrtxe = musb_readw(musb_base, MUSB_INTRTXE); 2250 musb->context.intrtxe = musb_readw(musb_base, MUSB_INTRTXE);
2283 musb_context.intrrxe = musb_readw(musb_base, MUSB_INTRRXE); 2251 musb->context.intrrxe = musb_readw(musb_base, MUSB_INTRRXE);
2284 musb_context.intrusbe = musb_readb(musb_base, MUSB_INTRUSBE); 2252 musb->context.intrusbe = musb_readb(musb_base, MUSB_INTRUSBE);
2285 musb_context.index = musb_readb(musb_base, MUSB_INDEX); 2253 musb->context.index = musb_readb(musb_base, MUSB_INDEX);
2286 musb_context.devctl = musb_readb(musb_base, MUSB_DEVCTL); 2254 musb->context.devctl = musb_readb(musb_base, MUSB_DEVCTL);
2287 2255
2288 for (i = 0; i < musb->config->num_eps; ++i) { 2256 for (i = 0; i < musb->config->num_eps; ++i) {
2289 epio = musb->endpoints[i].regs; 2257 epio = musb->endpoints[i].regs;
2290 musb_context.index_regs[i].txmaxp = 2258 musb->context.index_regs[i].txmaxp =
2291 musb_readw(epio, MUSB_TXMAXP); 2259 musb_readw(epio, MUSB_TXMAXP);
2292 musb_context.index_regs[i].txcsr = 2260 musb->context.index_regs[i].txcsr =
2293 musb_readw(epio, MUSB_TXCSR); 2261 musb_readw(epio, MUSB_TXCSR);
2294 musb_context.index_regs[i].rxmaxp = 2262 musb->context.index_regs[i].rxmaxp =
2295 musb_readw(epio, MUSB_RXMAXP); 2263 musb_readw(epio, MUSB_RXMAXP);
2296 musb_context.index_regs[i].rxcsr = 2264 musb->context.index_regs[i].rxcsr =
2297 musb_readw(epio, MUSB_RXCSR); 2265 musb_readw(epio, MUSB_RXCSR);
2298 2266
2299 if (musb->dyn_fifo) { 2267 if (musb->dyn_fifo) {
2300 musb_context.index_regs[i].txfifoadd = 2268 musb->context.index_regs[i].txfifoadd =
2301 musb_read_txfifoadd(musb_base); 2269 musb_read_txfifoadd(musb_base);
2302 musb_context.index_regs[i].rxfifoadd = 2270 musb->context.index_regs[i].rxfifoadd =
2303 musb_read_rxfifoadd(musb_base); 2271 musb_read_rxfifoadd(musb_base);
2304 musb_context.index_regs[i].txfifosz = 2272 musb->context.index_regs[i].txfifosz =
2305 musb_read_txfifosz(musb_base); 2273 musb_read_txfifosz(musb_base);
2306 musb_context.index_regs[i].rxfifosz = 2274 musb->context.index_regs[i].rxfifosz =
2307 musb_read_rxfifosz(musb_base); 2275 musb_read_rxfifosz(musb_base);
2308 } 2276 }
2309 if (is_host_enabled(musb)) { 2277 if (is_host_enabled(musb)) {
2310 musb_context.index_regs[i].txtype = 2278 musb->context.index_regs[i].txtype =
2311 musb_readb(epio, MUSB_TXTYPE); 2279 musb_readb(epio, MUSB_TXTYPE);
2312 musb_context.index_regs[i].txinterval = 2280 musb->context.index_regs[i].txinterval =
2313 musb_readb(epio, MUSB_TXINTERVAL); 2281 musb_readb(epio, MUSB_TXINTERVAL);
2314 musb_context.index_regs[i].rxtype = 2282 musb->context.index_regs[i].rxtype =
2315 musb_readb(epio, MUSB_RXTYPE); 2283 musb_readb(epio, MUSB_RXTYPE);
2316 musb_context.index_regs[i].rxinterval = 2284 musb->context.index_regs[i].rxinterval =
2317 musb_readb(epio, MUSB_RXINTERVAL); 2285 musb_readb(epio, MUSB_RXINTERVAL);
2318 2286
2319 musb_context.index_regs[i].txfunaddr = 2287 musb->context.index_regs[i].txfunaddr =
2320 musb_read_txfunaddr(musb_base, i); 2288 musb_read_txfunaddr(musb_base, i);
2321 musb_context.index_regs[i].txhubaddr = 2289 musb->context.index_regs[i].txhubaddr =
2322 musb_read_txhubaddr(musb_base, i); 2290 musb_read_txhubaddr(musb_base, i);
2323 musb_context.index_regs[i].txhubport = 2291 musb->context.index_regs[i].txhubport =
2324 musb_read_txhubport(musb_base, i); 2292 musb_read_txhubport(musb_base, i);
2325 2293
2326 musb_context.index_regs[i].rxfunaddr = 2294 musb->context.index_regs[i].rxfunaddr =
2327 musb_read_rxfunaddr(musb_base, i); 2295 musb_read_rxfunaddr(musb_base, i);
2328 musb_context.index_regs[i].rxhubaddr = 2296 musb->context.index_regs[i].rxhubaddr =
2329 musb_read_rxhubaddr(musb_base, i); 2297 musb_read_rxhubaddr(musb_base, i);
2330 musb_context.index_regs[i].rxhubport = 2298 musb->context.index_regs[i].rxhubport =
2331 musb_read_rxhubport(musb_base, i); 2299 musb_read_rxhubport(musb_base, i);
2332 } 2300 }
2333 } 2301 }
2334
2335 musb_platform_save_context(musb, &musb_context);
2336} 2302}
2337 2303
2338void musb_restore_context(struct musb *musb) 2304static void musb_restore_context(struct musb *musb)
2339{ 2305{
2340 int i; 2306 int i;
2341 void __iomem *musb_base = musb->mregs; 2307 void __iomem *musb_base = musb->mregs;
2342 void __iomem *ep_target_regs; 2308 void __iomem *ep_target_regs;
2343 void __iomem *epio; 2309 void __iomem *epio;
2344 2310
2345 musb_platform_restore_context(musb, &musb_context);
2346
2347 if (is_host_enabled(musb)) { 2311 if (is_host_enabled(musb)) {
2348 musb_writew(musb_base, MUSB_FRAME, musb_context.frame); 2312 musb_writew(musb_base, MUSB_FRAME, musb->context.frame);
2349 musb_writeb(musb_base, MUSB_TESTMODE, musb_context.testmode); 2313 musb_writeb(musb_base, MUSB_TESTMODE, musb->context.testmode);
2350 musb_write_ulpi_buscontrol(musb->mregs, musb_context.busctl); 2314 musb_write_ulpi_buscontrol(musb->mregs, musb->context.busctl);
2351 } 2315 }
2352 musb_writeb(musb_base, MUSB_POWER, musb_context.power); 2316 musb_writeb(musb_base, MUSB_POWER, musb->context.power);
2353 musb_writew(musb_base, MUSB_INTRTXE, musb_context.intrtxe); 2317 musb_writew(musb_base, MUSB_INTRTXE, musb->context.intrtxe);
2354 musb_writew(musb_base, MUSB_INTRRXE, musb_context.intrrxe); 2318 musb_writew(musb_base, MUSB_INTRRXE, musb->context.intrrxe);
2355 musb_writeb(musb_base, MUSB_INTRUSBE, musb_context.intrusbe); 2319 musb_writeb(musb_base, MUSB_INTRUSBE, musb->context.intrusbe);
2356 musb_writeb(musb_base, MUSB_DEVCTL, musb_context.devctl); 2320 musb_writeb(musb_base, MUSB_DEVCTL, musb->context.devctl);
2357 2321
2358 for (i = 0; i < musb->config->num_eps; ++i) { 2322 for (i = 0; i < musb->config->num_eps; ++i) {
2359 epio = musb->endpoints[i].regs; 2323 epio = musb->endpoints[i].regs;
2360 musb_writew(epio, MUSB_TXMAXP, 2324 musb_writew(epio, MUSB_TXMAXP,
2361 musb_context.index_regs[i].txmaxp); 2325 musb->context.index_regs[i].txmaxp);
2362 musb_writew(epio, MUSB_TXCSR, 2326 musb_writew(epio, MUSB_TXCSR,
2363 musb_context.index_regs[i].txcsr); 2327 musb->context.index_regs[i].txcsr);
2364 musb_writew(epio, MUSB_RXMAXP, 2328 musb_writew(epio, MUSB_RXMAXP,
2365 musb_context.index_regs[i].rxmaxp); 2329 musb->context.index_regs[i].rxmaxp);
2366 musb_writew(epio, MUSB_RXCSR, 2330 musb_writew(epio, MUSB_RXCSR,
2367 musb_context.index_regs[i].rxcsr); 2331 musb->context.index_regs[i].rxcsr);
2368 2332
2369 if (musb->dyn_fifo) { 2333 if (musb->dyn_fifo) {
2370 musb_write_txfifosz(musb_base, 2334 musb_write_txfifosz(musb_base,
2371 musb_context.index_regs[i].txfifosz); 2335 musb->context.index_regs[i].txfifosz);
2372 musb_write_rxfifosz(musb_base, 2336 musb_write_rxfifosz(musb_base,
2373 musb_context.index_regs[i].rxfifosz); 2337 musb->context.index_regs[i].rxfifosz);
2374 musb_write_txfifoadd(musb_base, 2338 musb_write_txfifoadd(musb_base,
2375 musb_context.index_regs[i].txfifoadd); 2339 musb->context.index_regs[i].txfifoadd);
2376 musb_write_rxfifoadd(musb_base, 2340 musb_write_rxfifoadd(musb_base,
2377 musb_context.index_regs[i].rxfifoadd); 2341 musb->context.index_regs[i].rxfifoadd);
2378 } 2342 }
2379 2343
2380 if (is_host_enabled(musb)) { 2344 if (is_host_enabled(musb)) {
2381 musb_writeb(epio, MUSB_TXTYPE, 2345 musb_writeb(epio, MUSB_TXTYPE,
2382 musb_context.index_regs[i].txtype); 2346 musb->context.index_regs[i].txtype);
2383 musb_writeb(epio, MUSB_TXINTERVAL, 2347 musb_writeb(epio, MUSB_TXINTERVAL,
2384 musb_context.index_regs[i].txinterval); 2348 musb->context.index_regs[i].txinterval);
2385 musb_writeb(epio, MUSB_RXTYPE, 2349 musb_writeb(epio, MUSB_RXTYPE,
2386 musb_context.index_regs[i].rxtype); 2350 musb->context.index_regs[i].rxtype);
2387 musb_writeb(epio, MUSB_RXINTERVAL, 2351 musb_writeb(epio, MUSB_RXINTERVAL,
2388 2352
2389 musb_context.index_regs[i].rxinterval); 2353 musb->context.index_regs[i].rxinterval);
2390 musb_write_txfunaddr(musb_base, i, 2354 musb_write_txfunaddr(musb_base, i,
2391 musb_context.index_regs[i].txfunaddr); 2355 musb->context.index_regs[i].txfunaddr);
2392 musb_write_txhubaddr(musb_base, i, 2356 musb_write_txhubaddr(musb_base, i,
2393 musb_context.index_regs[i].txhubaddr); 2357 musb->context.index_regs[i].txhubaddr);
2394 musb_write_txhubport(musb_base, i, 2358 musb_write_txhubport(musb_base, i,
2395 musb_context.index_regs[i].txhubport); 2359 musb->context.index_regs[i].txhubport);
2396 2360
2397 ep_target_regs = 2361 ep_target_regs =
2398 musb_read_target_reg_base(i, musb_base); 2362 musb_read_target_reg_base(i, musb_base);
2399 2363
2400 musb_write_rxfunaddr(ep_target_regs, 2364 musb_write_rxfunaddr(ep_target_regs,
2401 musb_context.index_regs[i].rxfunaddr); 2365 musb->context.index_regs[i].rxfunaddr);
2402 musb_write_rxhubaddr(ep_target_regs, 2366 musb_write_rxhubaddr(ep_target_regs,
2403 musb_context.index_regs[i].rxhubaddr); 2367 musb->context.index_regs[i].rxhubaddr);
2404 musb_write_rxhubport(ep_target_regs, 2368 musb_write_rxhubport(ep_target_regs,
2405 musb_context.index_regs[i].rxhubport); 2369 musb->context.index_regs[i].rxhubport);
2406 } 2370 }
2407 } 2371 }
2408} 2372}
@@ -2427,12 +2391,6 @@ static int musb_suspend(struct device *dev)
2427 2391
2428 musb_save_context(musb); 2392 musb_save_context(musb);
2429 2393
2430 if (musb->clock) {
2431 if (musb->set_clock)
2432 musb->set_clock(musb->clock, 0);
2433 else
2434 clk_disable(musb->clock);
2435 }
2436 spin_unlock_irqrestore(&musb->lock, flags); 2394 spin_unlock_irqrestore(&musb->lock, flags);
2437 return 0; 2395 return 0;
2438} 2396}
@@ -2442,13 +2400,6 @@ static int musb_resume_noirq(struct device *dev)
2442 struct platform_device *pdev = to_platform_device(dev); 2400 struct platform_device *pdev = to_platform_device(dev);
2443 struct musb *musb = dev_to_musb(&pdev->dev); 2401 struct musb *musb = dev_to_musb(&pdev->dev);
2444 2402
2445 if (musb->clock) {
2446 if (musb->set_clock)
2447 musb->set_clock(musb->clock, 1);
2448 else
2449 clk_enable(musb->clock);
2450 }
2451
2452 musb_restore_context(musb); 2403 musb_restore_context(musb);
2453 2404
2454 /* for static cmos like DaVinci, register values were preserved 2405 /* for static cmos like DaVinci, register values were preserved
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index febaabcc2b35..d0c236f8e191 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -222,7 +222,7 @@ enum musb_g_ep0_state {
222#endif 222#endif
223 223
224/* TUSB mapping: "flat" plus ep0 special cases */ 224/* TUSB mapping: "flat" plus ep0 special cases */
225#if defined(CONFIG_USB_TUSB6010) 225#if defined(CONFIG_USB_MUSB_TUSB6010)
226#define musb_ep_select(_mbase, _epnum) \ 226#define musb_ep_select(_mbase, _epnum) \
227 musb_writeb((_mbase), MUSB_INDEX, (_epnum)) 227 musb_writeb((_mbase), MUSB_INDEX, (_epnum))
228#define MUSB_EP_OFFSET MUSB_TUSB_OFFSET 228#define MUSB_EP_OFFSET MUSB_TUSB_OFFSET
@@ -253,6 +253,29 @@ enum musb_g_ep0_state {
253 253
254/******************************** TYPES *************************************/ 254/******************************** TYPES *************************************/
255 255
256/**
257 * struct musb_platform_ops - Operations passed to musb_core by HW glue layer
258 * @init: turns on clocks, sets up platform-specific registers, etc
259 * @exit: undoes @init
260 * @set_mode: forcefully changes operating mode
261 * @try_ilde: tries to idle the IP
262 * @vbus_status: returns vbus status if possible
263 * @set_vbus: forces vbus status
264 */
265struct musb_platform_ops {
266 int (*init)(struct musb *musb);
267 int (*exit)(struct musb *musb);
268
269 void (*enable)(struct musb *musb);
270 void (*disable)(struct musb *musb);
271
272 int (*set_mode)(struct musb *musb, u8 mode);
273 void (*try_idle)(struct musb *musb, unsigned long timeout);
274
275 int (*vbus_status)(struct musb *musb);
276 void (*set_vbus)(struct musb *musb, int on);
277};
278
256/* 279/*
257 * struct musb_hw_ep - endpoint hardware (bidirectional) 280 * struct musb_hw_ep - endpoint hardware (bidirectional)
258 * 281 *
@@ -263,7 +286,7 @@ struct musb_hw_ep {
263 void __iomem *fifo; 286 void __iomem *fifo;
264 void __iomem *regs; 287 void __iomem *regs;
265 288
266#ifdef CONFIG_USB_TUSB6010 289#ifdef CONFIG_USB_MUSB_TUSB6010
267 void __iomem *conf; 290 void __iomem *conf;
268#endif 291#endif
269 292
@@ -280,7 +303,7 @@ struct musb_hw_ep {
280 struct dma_channel *tx_channel; 303 struct dma_channel *tx_channel;
281 struct dma_channel *rx_channel; 304 struct dma_channel *rx_channel;
282 305
283#ifdef CONFIG_USB_TUSB6010 306#ifdef CONFIG_USB_MUSB_TUSB6010
284 /* TUSB has "asynchronous" and "synchronous" dma modes */ 307 /* TUSB has "asynchronous" and "synchronous" dma modes */
285 dma_addr_t fifo_async; 308 dma_addr_t fifo_async;
286 dma_addr_t fifo_sync; 309 dma_addr_t fifo_sync;
@@ -323,14 +346,43 @@ static inline struct usb_request *next_out_request(struct musb_hw_ep *hw_ep)
323#endif 346#endif
324} 347}
325 348
349struct musb_csr_regs {
350 /* FIFO registers */
351 u16 txmaxp, txcsr, rxmaxp, rxcsr;
352 u16 rxfifoadd, txfifoadd;
353 u8 txtype, txinterval, rxtype, rxinterval;
354 u8 rxfifosz, txfifosz;
355 u8 txfunaddr, txhubaddr, txhubport;
356 u8 rxfunaddr, rxhubaddr, rxhubport;
357};
358
359struct musb_context_registers {
360
361#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \
362 defined(CONFIG_ARCH_OMAP4)
363 u32 otg_sysconfig, otg_forcestandby;
364#endif
365 u8 power;
366 u16 intrtxe, intrrxe;
367 u8 intrusbe;
368 u16 frame;
369 u8 index, testmode;
370
371 u8 devctl, busctl, misc;
372
373 struct musb_csr_regs index_regs[MUSB_C_NUM_EPS];
374};
375
326/* 376/*
327 * struct musb - Driver instance data. 377 * struct musb - Driver instance data.
328 */ 378 */
329struct musb { 379struct musb {
330 /* device lock */ 380 /* device lock */
331 spinlock_t lock; 381 spinlock_t lock;
332 struct clk *clock; 382
333 struct clk *phy_clock; 383 const struct musb_platform_ops *ops;
384 struct musb_context_registers context;
385
334 irqreturn_t (*isr)(int, void *); 386 irqreturn_t (*isr)(int, void *);
335 struct work_struct irq_work; 387 struct work_struct irq_work;
336 u16 hwvers; 388 u16 hwvers;
@@ -359,11 +411,7 @@ struct musb {
359 411
360 struct timer_list otg_timer; 412 struct timer_list otg_timer;
361#endif 413#endif
362 414 struct notifier_block nb;
363 /* called with IRQs blocked; ON/nonzero implies starting a session,
364 * and waiting at least a_wait_vrise_tmout.
365 */
366 void (*board_set_vbus)(struct musb *, int is_on);
367 415
368 struct dma_controller *dma_controller; 416 struct dma_controller *dma_controller;
369 417
@@ -371,7 +419,7 @@ struct musb {
371 void __iomem *ctrl_base; 419 void __iomem *ctrl_base;
372 void __iomem *mregs; 420 void __iomem *mregs;
373 421
374#ifdef CONFIG_USB_TUSB6010 422#ifdef CONFIG_USB_MUSB_TUSB6010
375 dma_addr_t async; 423 dma_addr_t async;
376 dma_addr_t sync; 424 dma_addr_t sync;
377 void __iomem *sync_va; 425 void __iomem *sync_va;
@@ -398,8 +446,6 @@ struct musb {
398 u8 board_mode; /* enum musb_mode */ 446 u8 board_mode; /* enum musb_mode */
399 int (*board_set_power)(int state); 447 int (*board_set_power)(int state);
400 448
401 int (*set_clock)(struct clk *clk, int is_active);
402
403 u8 min_power; /* vbus for periph, in mA/2 */ 449 u8 min_power; /* vbus for periph, in mA/2 */
404 450
405 bool is_host; 451 bool is_host;
@@ -458,52 +504,6 @@ struct musb {
458#endif 504#endif
459}; 505};
460 506
461#ifdef CONFIG_PM
462struct musb_csr_regs {
463 /* FIFO registers */
464 u16 txmaxp, txcsr, rxmaxp, rxcsr;
465 u16 rxfifoadd, txfifoadd;
466 u8 txtype, txinterval, rxtype, rxinterval;
467 u8 rxfifosz, txfifosz;
468 u8 txfunaddr, txhubaddr, txhubport;
469 u8 rxfunaddr, rxhubaddr, rxhubport;
470};
471
472struct musb_context_registers {
473
474#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \
475 defined(CONFIG_ARCH_OMAP4)
476 u32 otg_sysconfig, otg_forcestandby;
477#endif
478 u8 power;
479 u16 intrtxe, intrrxe;
480 u8 intrusbe;
481 u16 frame;
482 u8 index, testmode;
483
484 u8 devctl, busctl, misc;
485
486 struct musb_csr_regs index_regs[MUSB_C_NUM_EPS];
487};
488
489#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \
490 defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_BLACKFIN)
491extern void musb_platform_save_context(struct musb *musb,
492 struct musb_context_registers *musb_context);
493extern void musb_platform_restore_context(struct musb *musb,
494 struct musb_context_registers *musb_context);
495#else
496#define musb_platform_save_context(m, x) do {} while (0)
497#define musb_platform_restore_context(m, x) do {} while (0)
498#endif
499
500#endif
501
502static inline void musb_set_vbus(struct musb *musb, int is_on)
503{
504 musb->board_set_vbus(musb, is_on);
505}
506
507#ifdef CONFIG_USB_GADGET_MUSB_HDRC 507#ifdef CONFIG_USB_GADGET_MUSB_HDRC
508static inline struct musb *gadget_to_musb(struct usb_gadget *g) 508static inline struct musb *gadget_to_musb(struct usb_gadget *g)
509{ 509{
@@ -592,29 +592,63 @@ extern void musb_load_testpacket(struct musb *);
592 592
593extern irqreturn_t musb_interrupt(struct musb *); 593extern irqreturn_t musb_interrupt(struct musb *);
594 594
595extern void musb_platform_enable(struct musb *musb);
596extern void musb_platform_disable(struct musb *musb);
597
598extern void musb_hnp_stop(struct musb *musb); 595extern void musb_hnp_stop(struct musb *musb);
599 596
600extern int musb_platform_set_mode(struct musb *musb, u8 musb_mode); 597static inline void musb_platform_set_vbus(struct musb *musb, int is_on)
598{
599 if (musb->ops->set_vbus)
600 musb->ops->set_vbus(musb, is_on);
601}
601 602
602#if defined(CONFIG_USB_TUSB6010) || defined(CONFIG_BLACKFIN) || \ 603static inline void musb_platform_enable(struct musb *musb)
603 defined(CONFIG_ARCH_DAVINCI_DA8XX) || \ 604{
604 defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \ 605 if (musb->ops->enable)
605 defined(CONFIG_ARCH_OMAP4) 606 musb->ops->enable(musb);
606extern void musb_platform_try_idle(struct musb *musb, unsigned long timeout); 607}
607#else
608#define musb_platform_try_idle(x, y) do {} while (0)
609#endif
610 608
611#if defined(CONFIG_USB_TUSB6010) || defined(CONFIG_BLACKFIN) 609static inline void musb_platform_disable(struct musb *musb)
612extern int musb_platform_get_vbus_status(struct musb *musb); 610{
613#else 611 if (musb->ops->disable)
614#define musb_platform_get_vbus_status(x) 0 612 musb->ops->disable(musb);
615#endif 613}
614
615static inline int musb_platform_set_mode(struct musb *musb, u8 mode)
616{
617 if (!musb->ops->set_mode)
618 return 0;
619
620 return musb->ops->set_mode(musb, mode);
621}
622
623static inline void musb_platform_try_idle(struct musb *musb,
624 unsigned long timeout)
625{
626 if (musb->ops->try_idle)
627 musb->ops->try_idle(musb, timeout);
628}
629
630static inline int musb_platform_get_vbus_status(struct musb *musb)
631{
632 if (!musb->ops->vbus_status)
633 return 0;
616 634
617extern int __init musb_platform_init(struct musb *musb, void *board_data); 635 return musb->ops->vbus_status(musb);
618extern int musb_platform_exit(struct musb *musb); 636}
637
638static inline int musb_platform_init(struct musb *musb)
639{
640 if (!musb->ops->init)
641 return -EINVAL;
642
643 return musb->ops->init(musb);
644}
645
646static inline int musb_platform_exit(struct musb *musb)
647{
648 if (!musb->ops->exit)
649 return -EINVAL;
650
651 return musb->ops->exit(musb);
652}
619 653
620#endif /* __MUSB_CORE_H__ */ 654#endif /* __MUSB_CORE_H__ */
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 9d6ade82b9f2..9b162dfaa4fb 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1136,13 +1136,16 @@ struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
1136 struct musb_request *request = NULL; 1136 struct musb_request *request = NULL;
1137 1137
1138 request = kzalloc(sizeof *request, gfp_flags); 1138 request = kzalloc(sizeof *request, gfp_flags);
1139 if (request) { 1139 if (!request) {
1140 INIT_LIST_HEAD(&request->request.list); 1140 DBG(4, "not enough memory\n");
1141 request->request.dma = DMA_ADDR_INVALID; 1141 return NULL;
1142 request->epnum = musb_ep->current_epnum;
1143 request->ep = musb_ep;
1144 } 1142 }
1145 1143
1144 INIT_LIST_HEAD(&request->request.list);
1145 request->request.dma = DMA_ADDR_INVALID;
1146 request->epnum = musb_ep->current_epnum;
1147 request->ep = musb_ep;
1148
1146 return &request->request; 1149 return &request->request;
1147} 1150}
1148 1151
diff --git a/drivers/usb/musb/musb_io.h b/drivers/usb/musb/musb_io.h
index b06e9ef00cfc..03c6ccdbb3be 100644
--- a/drivers/usb/musb/musb_io.h
+++ b/drivers/usb/musb/musb_io.h
@@ -74,7 +74,7 @@ static inline void musb_writel(void __iomem *addr, unsigned offset, u32 data)
74 { __raw_writel(data, addr + offset); } 74 { __raw_writel(data, addr + offset); }
75 75
76 76
77#ifdef CONFIG_USB_TUSB6010 77#ifdef CONFIG_USB_MUSB_TUSB6010
78 78
79/* 79/*
80 * TUSB6010 doesn't allow 8-bit access; 16-bit access is the minimum. 80 * TUSB6010 doesn't allow 8-bit access; 16-bit access is the minimum.
@@ -114,7 +114,7 @@ static inline u8 musb_readb(const void __iomem *addr, unsigned offset)
114static inline void musb_writeb(void __iomem *addr, unsigned offset, u8 data) 114static inline void musb_writeb(void __iomem *addr, unsigned offset, u8 data)
115 { __raw_writeb(data, addr + offset); } 115 { __raw_writeb(data, addr + offset); }
116 116
117#endif /* CONFIG_USB_TUSB6010 */ 117#endif /* CONFIG_USB_MUSB_TUSB6010 */
118 118
119#else 119#else
120 120
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h
index 5a727c5b8676..82410703dcd3 100644
--- a/drivers/usb/musb/musb_regs.h
+++ b/drivers/usb/musb/musb_regs.h
@@ -234,7 +234,7 @@
234#define MUSB_TESTMODE 0x0F /* 8 bit */ 234#define MUSB_TESTMODE 0x0F /* 8 bit */
235 235
236/* Get offset for a given FIFO from musb->mregs */ 236/* Get offset for a given FIFO from musb->mregs */
237#ifdef CONFIG_USB_TUSB6010 237#ifdef CONFIG_USB_MUSB_TUSB6010
238#define MUSB_FIFO_OFFSET(epnum) (0x200 + ((epnum) * 0x20)) 238#define MUSB_FIFO_OFFSET(epnum) (0x200 + ((epnum) * 0x20))
239#else 239#else
240#define MUSB_FIFO_OFFSET(epnum) (0x20 + ((epnum) * 4)) 240#define MUSB_FIFO_OFFSET(epnum) (0x20 + ((epnum) * 4))
@@ -295,7 +295,7 @@
295#define MUSB_FLAT_OFFSET(_epnum, _offset) \ 295#define MUSB_FLAT_OFFSET(_epnum, _offset) \
296 (0x100 + (0x10*(_epnum)) + (_offset)) 296 (0x100 + (0x10*(_epnum)) + (_offset))
297 297
298#ifdef CONFIG_USB_TUSB6010 298#ifdef CONFIG_USB_MUSB_TUSB6010
299/* TUSB6010 EP0 configuration register is special */ 299/* TUSB6010 EP0 configuration register is special */
300#define MUSB_TUSB_OFFSET(_epnum, _offset) \ 300#define MUSB_TUSB_OFFSET(_epnum, _offset) \
301 (0x10 + _offset) 301 (0x10 + _offset)
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index 43233c397b6e..b46d1877e28e 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -276,7 +276,7 @@ int musb_hub_control(
276 break; 276 break;
277 case USB_PORT_FEAT_POWER: 277 case USB_PORT_FEAT_POWER:
278 if (!(is_otg_enabled(musb) && hcd->self.is_b_host)) 278 if (!(is_otg_enabled(musb) && hcd->self.is_b_host))
279 musb_set_vbus(musb, 0); 279 musb_platform_set_vbus(musb, 0);
280 break; 280 break;
281 case USB_PORT_FEAT_C_CONNECTION: 281 case USB_PORT_FEAT_C_CONNECTION:
282 case USB_PORT_FEAT_C_ENABLE: 282 case USB_PORT_FEAT_C_ENABLE:
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index 563114d613d6..0144a2d481fd 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -377,7 +377,7 @@ dma_controller_create(struct musb *musb, void __iomem *base)
377 struct musb_dma_controller *controller; 377 struct musb_dma_controller *controller;
378 struct device *dev = musb->controller; 378 struct device *dev = musb->controller;
379 struct platform_device *pdev = to_platform_device(dev); 379 struct platform_device *pdev = to_platform_device(dev);
380 int irq = platform_get_irq(pdev, 1); 380 int irq = platform_get_irq_byname(pdev, "dma");
381 381
382 if (irq == 0) { 382 if (irq == 0) {
383 dev_err(dev, "No DMA interrupt line!\n"); 383 dev_err(dev, "No DMA interrupt line!\n");
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index ed618bde1eec..a3f12333fc41 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -31,10 +31,18 @@
31#include <linux/list.h> 31#include <linux/list.h>
32#include <linux/clk.h> 32#include <linux/clk.h>
33#include <linux/io.h> 33#include <linux/io.h>
34#include <linux/platform_device.h>
35#include <linux/dma-mapping.h>
34 36
35#include "musb_core.h" 37#include "musb_core.h"
36#include "omap2430.h" 38#include "omap2430.h"
37 39
40struct omap2430_glue {
41 struct device *dev;
42 struct platform_device *musb;
43 struct clk *clk;
44};
45#define glue_to_musb(g) platform_get_drvdata(g->musb)
38 46
39static struct timer_list musb_idle_timer; 47static struct timer_list musb_idle_timer;
40 48
@@ -49,12 +57,8 @@ static void musb_do_idle(unsigned long _musb)
49 57
50 spin_lock_irqsave(&musb->lock, flags); 58 spin_lock_irqsave(&musb->lock, flags);
51 59
52 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
53
54 switch (musb->xceiv->state) { 60 switch (musb->xceiv->state) {
55 case OTG_STATE_A_WAIT_BCON: 61 case OTG_STATE_A_WAIT_BCON:
56 devctl &= ~MUSB_DEVCTL_SESSION;
57 musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
58 62
59 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); 63 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
60 if (devctl & MUSB_DEVCTL_BDEVICE) { 64 if (devctl & MUSB_DEVCTL_BDEVICE) {
@@ -98,7 +102,7 @@ static void musb_do_idle(unsigned long _musb)
98} 102}
99 103
100 104
101void musb_platform_try_idle(struct musb *musb, unsigned long timeout) 105static void omap2430_musb_try_idle(struct musb *musb, unsigned long timeout)
102{ 106{
103 unsigned long default_timeout = jiffies + msecs_to_jiffies(3); 107 unsigned long default_timeout = jiffies + msecs_to_jiffies(3);
104 static unsigned long last_timer; 108 static unsigned long last_timer;
@@ -131,15 +135,11 @@ void musb_platform_try_idle(struct musb *musb, unsigned long timeout)
131 mod_timer(&musb_idle_timer, timeout); 135 mod_timer(&musb_idle_timer, timeout);
132} 136}
133 137
134void musb_platform_enable(struct musb *musb) 138static void omap2430_musb_set_vbus(struct musb *musb, int is_on)
135{
136}
137void musb_platform_disable(struct musb *musb)
138{
139}
140static void omap_set_vbus(struct musb *musb, int is_on)
141{ 139{
142 u8 devctl; 140 u8 devctl;
141 unsigned long timeout = jiffies + msecs_to_jiffies(1000);
142 int ret = 1;
143 /* HDRC controls CPEN, but beware current surges during device 143 /* HDRC controls CPEN, but beware current surges during device
144 * connect. They can trigger transient overcurrent conditions 144 * connect. They can trigger transient overcurrent conditions
145 * that must be ignored. 145 * that must be ignored.
@@ -148,12 +148,35 @@ static void omap_set_vbus(struct musb *musb, int is_on)
148 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); 148 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
149 149
150 if (is_on) { 150 if (is_on) {
151 musb->is_active = 1; 151 if (musb->xceiv->state == OTG_STATE_A_IDLE) {
152 musb->xceiv->default_a = 1; 152 /* start the session */
153 musb->xceiv->state = OTG_STATE_A_WAIT_VRISE; 153 devctl |= MUSB_DEVCTL_SESSION;
154 devctl |= MUSB_DEVCTL_SESSION; 154 musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
155 155 /*
156 MUSB_HST_MODE(musb); 156 * Wait for the musb to set as A device to enable the
157 * VBUS
158 */
159 while (musb_readb(musb->mregs, MUSB_DEVCTL) & 0x80) {
160
161 cpu_relax();
162
163 if (time_after(jiffies, timeout)) {
164 dev_err(musb->controller,
165 "configured as A device timeout");
166 ret = -EINVAL;
167 break;
168 }
169 }
170
171 if (ret && musb->xceiv->set_vbus)
172 otg_set_vbus(musb->xceiv, 1);
173 } else {
174 musb->is_active = 1;
175 musb->xceiv->default_a = 1;
176 musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
177 devctl |= MUSB_DEVCTL_SESSION;
178 MUSB_HST_MODE(musb);
179 }
157 } else { 180 } else {
158 musb->is_active = 0; 181 musb->is_active = 0;
159 182
@@ -175,9 +198,7 @@ static void omap_set_vbus(struct musb *musb, int is_on)
175 musb_readb(musb->mregs, MUSB_DEVCTL)); 198 musb_readb(musb->mregs, MUSB_DEVCTL));
176} 199}
177 200
178static int musb_platform_resume(struct musb *musb); 201static int omap2430_musb_set_mode(struct musb *musb, u8 musb_mode)
179
180int musb_platform_set_mode(struct musb *musb, u8 musb_mode)
181{ 202{
182 u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); 203 u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
183 204
@@ -187,10 +208,94 @@ int musb_platform_set_mode(struct musb *musb, u8 musb_mode)
187 return 0; 208 return 0;
188} 209}
189 210
190int __init musb_platform_init(struct musb *musb, void *board_data) 211static inline void omap2430_low_level_exit(struct musb *musb)
191{ 212{
192 u32 l; 213 u32 l;
193 struct omap_musb_board_data *data = board_data; 214
215 /* in any role */
216 l = musb_readl(musb->mregs, OTG_FORCESTDBY);
217 l |= ENABLEFORCE; /* enable MSTANDBY */
218 musb_writel(musb->mregs, OTG_FORCESTDBY, l);
219
220 l = musb_readl(musb->mregs, OTG_SYSCONFIG);
221 l |= ENABLEWAKEUP; /* enable wakeup */
222 musb_writel(musb->mregs, OTG_SYSCONFIG, l);
223}
224
225static inline void omap2430_low_level_init(struct musb *musb)
226{
227 u32 l;
228
229 l = musb_readl(musb->mregs, OTG_SYSCONFIG);
230 l &= ~ENABLEWAKEUP; /* disable wakeup */
231 musb_writel(musb->mregs, OTG_SYSCONFIG, l);
232
233 l = musb_readl(musb->mregs, OTG_FORCESTDBY);
234 l &= ~ENABLEFORCE; /* disable MSTANDBY */
235 musb_writel(musb->mregs, OTG_FORCESTDBY, l);
236}
237
238/* blocking notifier support */
239static int musb_otg_notifications(struct notifier_block *nb,
240 unsigned long event, void *unused)
241{
242 struct musb *musb = container_of(nb, struct musb, nb);
243 struct device *dev = musb->controller;
244 struct musb_hdrc_platform_data *pdata = dev->platform_data;
245 struct omap_musb_board_data *data = pdata->board_data;
246
247 switch (event) {
248 case USB_EVENT_ID:
249 DBG(4, "ID GND\n");
250
251 if (is_otg_enabled(musb)) {
252#ifdef CONFIG_USB_GADGET_MUSB_HDRC
253 if (musb->gadget_driver) {
254 otg_init(musb->xceiv);
255
256 if (data->interface_type ==
257 MUSB_INTERFACE_UTMI)
258 omap2430_musb_set_vbus(musb, 1);
259
260 }
261#endif
262 } else {
263 otg_init(musb->xceiv);
264 if (data->interface_type ==
265 MUSB_INTERFACE_UTMI)
266 omap2430_musb_set_vbus(musb, 1);
267 }
268 break;
269
270 case USB_EVENT_VBUS:
271 DBG(4, "VBUS Connect\n");
272
273 otg_init(musb->xceiv);
274 break;
275
276 case USB_EVENT_NONE:
277 DBG(4, "VBUS Disconnect\n");
278
279 if (data->interface_type == MUSB_INTERFACE_UTMI) {
280 if (musb->xceiv->set_vbus)
281 otg_set_vbus(musb->xceiv, 0);
282 }
283 otg_shutdown(musb->xceiv);
284 break;
285 default:
286 DBG(4, "ID float\n");
287 return NOTIFY_DONE;
288 }
289
290 return NOTIFY_OK;
291}
292
293static int omap2430_musb_init(struct musb *musb)
294{
295 u32 l, status = 0;
296 struct device *dev = musb->controller;
297 struct musb_hdrc_platform_data *plat = dev->platform_data;
298 struct omap_musb_board_data *data = plat->board_data;
194 299
195 /* We require some kind of external transceiver, hooked 300 /* We require some kind of external transceiver, hooked
196 * up through ULPI. TWL4030-family PMICs include one, 301 * up through ULPI. TWL4030-family PMICs include one,
@@ -202,7 +307,7 @@ int __init musb_platform_init(struct musb *musb, void *board_data)
202 return -ENODEV; 307 return -ENODEV;
203 } 308 }
204 309
205 musb_platform_resume(musb); 310 omap2430_low_level_init(musb);
206 311
207 l = musb_readl(musb->mregs, OTG_SYSCONFIG); 312 l = musb_readl(musb->mregs, OTG_SYSCONFIG);
208 l &= ~ENABLEWAKEUP; /* disable wakeup */ 313 l &= ~ENABLEWAKEUP; /* disable wakeup */
@@ -239,87 +344,214 @@ int __init musb_platform_init(struct musb *musb, void *board_data)
239 musb_readl(musb->mregs, OTG_INTERFSEL), 344 musb_readl(musb->mregs, OTG_INTERFSEL),
240 musb_readl(musb->mregs, OTG_SIMENABLE)); 345 musb_readl(musb->mregs, OTG_SIMENABLE));
241 346
242 if (is_host_enabled(musb)) 347 musb->nb.notifier_call = musb_otg_notifications;
243 musb->board_set_vbus = omap_set_vbus; 348 status = otg_register_notifier(musb->xceiv, &musb->nb);
349
350 if (status)
351 DBG(1, "notification register failed\n");
352
353 /* check whether cable is already connected */
354 if (musb->xceiv->state ==OTG_STATE_B_IDLE)
355 musb_otg_notifications(&musb->nb, 1,
356 musb->xceiv->gadget);
244 357
245 setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb); 358 setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb);
246 359
247 return 0; 360 return 0;
248} 361}
249 362
250#ifdef CONFIG_PM 363static int omap2430_musb_exit(struct musb *musb)
251void musb_platform_save_context(struct musb *musb,
252 struct musb_context_registers *musb_context)
253{ 364{
254 musb_context->otg_sysconfig = musb_readl(musb->mregs, OTG_SYSCONFIG);
255 musb_context->otg_forcestandby = musb_readl(musb->mregs, OTG_FORCESTDBY);
256}
257 365
258void musb_platform_restore_context(struct musb *musb, 366 omap2430_low_level_exit(musb);
259 struct musb_context_registers *musb_context) 367 otg_put_transceiver(musb->xceiv);
260{ 368
261 musb_writel(musb->mregs, OTG_SYSCONFIG, musb_context->otg_sysconfig); 369 return 0;
262 musb_writel(musb->mregs, OTG_FORCESTDBY, musb_context->otg_forcestandby);
263} 370}
264#endif
265 371
266static int musb_platform_suspend(struct musb *musb) 372static const struct musb_platform_ops omap2430_ops = {
373 .init = omap2430_musb_init,
374 .exit = omap2430_musb_exit,
375
376 .set_mode = omap2430_musb_set_mode,
377 .try_idle = omap2430_musb_try_idle,
378
379 .set_vbus = omap2430_musb_set_vbus,
380};
381
382static u64 omap2430_dmamask = DMA_BIT_MASK(32);
383
384static int __init omap2430_probe(struct platform_device *pdev)
267{ 385{
268 u32 l; 386 struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data;
387 struct platform_device *musb;
388 struct omap2430_glue *glue;
389 struct clk *clk;
269 390
270 if (!musb->clock) 391 int ret = -ENOMEM;
271 return 0;
272 392
273 /* in any role */ 393 glue = kzalloc(sizeof(*glue), GFP_KERNEL);
274 l = musb_readl(musb->mregs, OTG_FORCESTDBY); 394 if (!glue) {
275 l |= ENABLEFORCE; /* enable MSTANDBY */ 395 dev_err(&pdev->dev, "failed to allocate glue context\n");
276 musb_writel(musb->mregs, OTG_FORCESTDBY, l); 396 goto err0;
397 }
277 398
278 l = musb_readl(musb->mregs, OTG_SYSCONFIG); 399 musb = platform_device_alloc("musb-hdrc", -1);
279 l |= ENABLEWAKEUP; /* enable wakeup */ 400 if (!musb) {
280 musb_writel(musb->mregs, OTG_SYSCONFIG, l); 401 dev_err(&pdev->dev, "failed to allocate musb device\n");
402 goto err1;
403 }
281 404
282 otg_set_suspend(musb->xceiv, 1); 405 clk = clk_get(&pdev->dev, "ick");
406 if (IS_ERR(clk)) {
407 dev_err(&pdev->dev, "failed to get clock\n");
408 ret = PTR_ERR(clk);
409 goto err2;
410 }
283 411
284 if (musb->set_clock) 412 ret = clk_enable(clk);
285 musb->set_clock(musb->clock, 0); 413 if (ret) {
286 else 414 dev_err(&pdev->dev, "failed to enable clock\n");
287 clk_disable(musb->clock); 415 goto err3;
416 }
417
418 musb->dev.parent = &pdev->dev;
419 musb->dev.dma_mask = &omap2430_dmamask;
420 musb->dev.coherent_dma_mask = omap2430_dmamask;
421
422 glue->dev = &pdev->dev;
423 glue->musb = musb;
424 glue->clk = clk;
425
426 pdata->platform_ops = &omap2430_ops;
427
428 platform_set_drvdata(pdev, glue);
429
430 ret = platform_device_add_resources(musb, pdev->resource,
431 pdev->num_resources);
432 if (ret) {
433 dev_err(&pdev->dev, "failed to add resources\n");
434 goto err4;
435 }
436
437 ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
438 if (ret) {
439 dev_err(&pdev->dev, "failed to add platform_data\n");
440 goto err4;
441 }
442
443 ret = platform_device_add(musb);
444 if (ret) {
445 dev_err(&pdev->dev, "failed to register musb device\n");
446 goto err4;
447 }
288 448
289 return 0; 449 return 0;
450
451err4:
452 clk_disable(clk);
453
454err3:
455 clk_put(clk);
456
457err2:
458 platform_device_put(musb);
459
460err1:
461 kfree(glue);
462
463err0:
464 return ret;
290} 465}
291 466
292static int musb_platform_resume(struct musb *musb) 467static int __exit omap2430_remove(struct platform_device *pdev)
293{ 468{
294 u32 l; 469 struct omap2430_glue *glue = platform_get_drvdata(pdev);
295 470
296 if (!musb->clock) 471 platform_device_del(glue->musb);
297 return 0; 472 platform_device_put(glue->musb);
473 clk_disable(glue->clk);
474 clk_put(glue->clk);
475 kfree(glue);
298 476
299 otg_set_suspend(musb->xceiv, 0); 477 return 0;
478}
300 479
301 if (musb->set_clock) 480#ifdef CONFIG_PM
302 musb->set_clock(musb->clock, 1); 481static void omap2430_save_context(struct musb *musb)
303 else 482{
304 clk_enable(musb->clock); 483 musb->context.otg_sysconfig = musb_readl(musb->mregs, OTG_SYSCONFIG);
484 musb->context.otg_forcestandby = musb_readl(musb->mregs, OTG_FORCESTDBY);
485}
305 486
306 l = musb_readl(musb->mregs, OTG_SYSCONFIG); 487static void omap2430_restore_context(struct musb *musb)
307 l &= ~ENABLEWAKEUP; /* disable wakeup */ 488{
308 musb_writel(musb->mregs, OTG_SYSCONFIG, l); 489 musb_writel(musb->mregs, OTG_SYSCONFIG, musb->context.otg_sysconfig);
490 musb_writel(musb->mregs, OTG_FORCESTDBY, musb->context.otg_forcestandby);
491}
309 492
310 l = musb_readl(musb->mregs, OTG_FORCESTDBY); 493static int omap2430_suspend(struct device *dev)
311 l &= ~ENABLEFORCE; /* disable MSTANDBY */ 494{
312 musb_writel(musb->mregs, OTG_FORCESTDBY, l); 495 struct omap2430_glue *glue = dev_get_drvdata(dev);
496 struct musb *musb = glue_to_musb(glue);
497
498 omap2430_low_level_exit(musb);
499 otg_set_suspend(musb->xceiv, 1);
500 omap2430_save_context(musb);
501 clk_disable(glue->clk);
313 502
314 return 0; 503 return 0;
315} 504}
316 505
317 506static int omap2430_resume(struct device *dev)
318int musb_platform_exit(struct musb *musb)
319{ 507{
508 struct omap2430_glue *glue = dev_get_drvdata(dev);
509 struct musb *musb = glue_to_musb(glue);
510 int ret;
511
512 ret = clk_enable(glue->clk);
513 if (ret) {
514 dev_err(dev, "faled to enable clock\n");
515 return ret;
516 }
320 517
321 musb_platform_suspend(musb); 518 omap2430_low_level_init(musb);
519 omap2430_restore_context(musb);
520 otg_set_suspend(musb->xceiv, 0);
322 521
323 otg_put_transceiver(musb->xceiv);
324 return 0; 522 return 0;
325} 523}
524
525static struct dev_pm_ops omap2430_pm_ops = {
526 .suspend = omap2430_suspend,
527 .resume = omap2430_resume,
528};
529
530#define DEV_PM_OPS (&omap2430_pm_ops)
531#else
532#define DEV_PM_OPS NULL
533#endif
534
535static struct platform_driver omap2430_driver = {
536 .remove = __exit_p(omap2430_remove),
537 .driver = {
538 .name = "musb-omap2430",
539 .pm = DEV_PM_OPS,
540 },
541};
542
543MODULE_DESCRIPTION("OMAP2PLUS MUSB Glue Layer");
544MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
545MODULE_LICENSE("GPL v2");
546
547static int __init omap2430_init(void)
548{
549 return platform_driver_probe(&omap2430_driver, omap2430_probe);
550}
551subsys_initcall(omap2430_init);
552
553static void __exit omap2430_exit(void)
554{
555 platform_driver_unregister(&omap2430_driver);
556}
557module_exit(omap2430_exit);
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index bde40efc7046..2ba3b070ed0b 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -21,10 +21,16 @@
21#include <linux/usb.h> 21#include <linux/usb.h>
22#include <linux/irq.h> 22#include <linux/irq.h>
23#include <linux/platform_device.h> 23#include <linux/platform_device.h>
24#include <linux/dma-mapping.h>
24 25
25#include "musb_core.h" 26#include "musb_core.h"
26 27
27static void tusb_source_power(struct musb *musb, int is_on); 28struct tusb6010_glue {
29 struct device *dev;
30 struct platform_device *musb;
31};
32
33static void tusb_musb_set_vbus(struct musb *musb, int is_on);
28 34
29#define TUSB_REV_MAJOR(reg_val) ((reg_val >> 4) & 0xf) 35#define TUSB_REV_MAJOR(reg_val) ((reg_val >> 4) & 0xf)
30#define TUSB_REV_MINOR(reg_val) (reg_val & 0xf) 36#define TUSB_REV_MINOR(reg_val) (reg_val & 0xf)
@@ -50,7 +56,7 @@ u8 tusb_get_revision(struct musb *musb)
50 return rev; 56 return rev;
51} 57}
52 58
53static int __init tusb_print_revision(struct musb *musb) 59static int tusb_print_revision(struct musb *musb)
54{ 60{
55 void __iomem *tbase = musb->ctrl_base; 61 void __iomem *tbase = musb->ctrl_base;
56 u8 rev; 62 u8 rev;
@@ -275,17 +281,6 @@ static int tusb_draw_power(struct otg_transceiver *x, unsigned mA)
275 void __iomem *tbase = musb->ctrl_base; 281 void __iomem *tbase = musb->ctrl_base;
276 u32 reg; 282 u32 reg;
277 283
278 /*
279 * Keep clock active when enabled. Note that this is not tied to
280 * drawing VBUS, as with OTG mA can be less than musb->min_power.
281 */
282 if (musb->set_clock) {
283 if (mA)
284 musb->set_clock(musb->clock, 1);
285 else
286 musb->set_clock(musb->clock, 0);
287 }
288
289 /* tps65030 seems to consume max 100mA, with maybe 60mA available 284 /* tps65030 seems to consume max 100mA, with maybe 60mA available
290 * (measured on one board) for things other than tps and tusb. 285 * (measured on one board) for things other than tps and tusb.
291 * 286 *
@@ -348,7 +343,7 @@ static void tusb_set_clock_source(struct musb *musb, unsigned mode)
348 * USB link is not suspended ... and tells us the relevant wakeup 343 * USB link is not suspended ... and tells us the relevant wakeup
349 * events. SW_EN for voltage is handled separately. 344 * events. SW_EN for voltage is handled separately.
350 */ 345 */
351void tusb_allow_idle(struct musb *musb, u32 wakeup_enables) 346static void tusb_allow_idle(struct musb *musb, u32 wakeup_enables)
352{ 347{
353 void __iomem *tbase = musb->ctrl_base; 348 void __iomem *tbase = musb->ctrl_base;
354 u32 reg; 349 u32 reg;
@@ -385,7 +380,7 @@ void tusb_allow_idle(struct musb *musb, u32 wakeup_enables)
385/* 380/*
386 * Updates cable VBUS status. Caller must take care of locking. 381 * Updates cable VBUS status. Caller must take care of locking.
387 */ 382 */
388int musb_platform_get_vbus_status(struct musb *musb) 383static int tusb_musb_vbus_status(struct musb *musb)
389{ 384{
390 void __iomem *tbase = musb->ctrl_base; 385 void __iomem *tbase = musb->ctrl_base;
391 u32 otg_stat, prcm_mngmt; 386 u32 otg_stat, prcm_mngmt;
@@ -431,7 +426,7 @@ static void musb_do_idle(unsigned long _musb)
431 } 426 }
432 /* FALLTHROUGH */ 427 /* FALLTHROUGH */
433 case OTG_STATE_A_IDLE: 428 case OTG_STATE_A_IDLE:
434 tusb_source_power(musb, 0); 429 tusb_musb_set_vbus(musb, 0);
435 default: 430 default:
436 break; 431 break;
437 } 432 }
@@ -475,7 +470,7 @@ done:
475 * we don't want to treat that full speed J as a wakeup event. 470 * we don't want to treat that full speed J as a wakeup event.
476 * ... peripherals must draw only suspend current after 10 msec. 471 * ... peripherals must draw only suspend current after 10 msec.
477 */ 472 */
478void musb_platform_try_idle(struct musb *musb, unsigned long timeout) 473static void tusb_musb_try_idle(struct musb *musb, unsigned long timeout)
479{ 474{
480 unsigned long default_timeout = jiffies + msecs_to_jiffies(3); 475 unsigned long default_timeout = jiffies + msecs_to_jiffies(3);
481 static unsigned long last_timer; 476 static unsigned long last_timer;
@@ -515,7 +510,7 @@ void musb_platform_try_idle(struct musb *musb, unsigned long timeout)
515 | TUSB_DEV_OTG_TIMER_ENABLE) \ 510 | TUSB_DEV_OTG_TIMER_ENABLE) \
516 : 0) 511 : 0)
517 512
518static void tusb_source_power(struct musb *musb, int is_on) 513static void tusb_musb_set_vbus(struct musb *musb, int is_on)
519{ 514{
520 void __iomem *tbase = musb->ctrl_base; 515 void __iomem *tbase = musb->ctrl_base;
521 u32 conf, prcm, timer; 516 u32 conf, prcm, timer;
@@ -531,8 +526,6 @@ static void tusb_source_power(struct musb *musb, int is_on)
531 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); 526 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
532 527
533 if (is_on) { 528 if (is_on) {
534 if (musb->set_clock)
535 musb->set_clock(musb->clock, 1);
536 timer = OTG_TIMER_MS(OTG_TIME_A_WAIT_VRISE); 529 timer = OTG_TIMER_MS(OTG_TIME_A_WAIT_VRISE);
537 musb->xceiv->default_a = 1; 530 musb->xceiv->default_a = 1;
538 musb->xceiv->state = OTG_STATE_A_WAIT_VRISE; 531 musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
@@ -571,8 +564,6 @@ static void tusb_source_power(struct musb *musb, int is_on)
571 564
572 devctl &= ~MUSB_DEVCTL_SESSION; 565 devctl &= ~MUSB_DEVCTL_SESSION;
573 conf &= ~TUSB_DEV_CONF_USB_HOST_MODE; 566 conf &= ~TUSB_DEV_CONF_USB_HOST_MODE;
574 if (musb->set_clock)
575 musb->set_clock(musb->clock, 0);
576 } 567 }
577 prcm &= ~(TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN); 568 prcm &= ~(TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN);
578 569
@@ -599,7 +590,7 @@ static void tusb_source_power(struct musb *musb, int is_on)
599 * and peripheral modes in non-OTG configurations by reconfiguring hardware 590 * and peripheral modes in non-OTG configurations by reconfiguring hardware
600 * and then setting musb->board_mode. For now, only support OTG mode. 591 * and then setting musb->board_mode. For now, only support OTG mode.
601 */ 592 */
602int musb_platform_set_mode(struct musb *musb, u8 musb_mode) 593static int tusb_musb_set_mode(struct musb *musb, u8 musb_mode)
603{ 594{
604 void __iomem *tbase = musb->ctrl_base; 595 void __iomem *tbase = musb->ctrl_base;
605 u32 otg_stat, phy_otg_ctrl, phy_otg_ena, dev_conf; 596 u32 otg_stat, phy_otg_ctrl, phy_otg_ena, dev_conf;
@@ -677,7 +668,7 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
677 default_a = is_host_enabled(musb); 668 default_a = is_host_enabled(musb);
678 DBG(2, "Default-%c\n", default_a ? 'A' : 'B'); 669 DBG(2, "Default-%c\n", default_a ? 'A' : 'B');
679 musb->xceiv->default_a = default_a; 670 musb->xceiv->default_a = default_a;
680 tusb_source_power(musb, default_a); 671 tusb_musb_set_vbus(musb, default_a);
681 672
682 /* Don't allow idling immediately */ 673 /* Don't allow idling immediately */
683 if (default_a) 674 if (default_a)
@@ -722,7 +713,7 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
722 switch (musb->xceiv->state) { 713 switch (musb->xceiv->state) {
723 case OTG_STATE_A_IDLE: 714 case OTG_STATE_A_IDLE:
724 DBG(2, "Got SRP, turning on VBUS\n"); 715 DBG(2, "Got SRP, turning on VBUS\n");
725 musb_set_vbus(musb, 1); 716 musb_platform_set_vbus(musb, 1);
726 717
727 /* CONNECT can wake if a_wait_bcon is set */ 718 /* CONNECT can wake if a_wait_bcon is set */
728 if (musb->a_wait_bcon != 0) 719 if (musb->a_wait_bcon != 0)
@@ -748,11 +739,11 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
748 */ 739 */
749 if (musb->vbuserr_retry) { 740 if (musb->vbuserr_retry) {
750 musb->vbuserr_retry--; 741 musb->vbuserr_retry--;
751 tusb_source_power(musb, 1); 742 tusb_musb_set_vbus(musb, 1);
752 } else { 743 } else {
753 musb->vbuserr_retry 744 musb->vbuserr_retry
754 = VBUSERR_RETRY_COUNT; 745 = VBUSERR_RETRY_COUNT;
755 tusb_source_power(musb, 0); 746 tusb_musb_set_vbus(musb, 0);
756 } 747 }
757 break; 748 break;
758 default: 749 default:
@@ -786,7 +777,7 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
786 } else { 777 } else {
787 /* REVISIT report overcurrent to hub? */ 778 /* REVISIT report overcurrent to hub? */
788 ERR("vbus too slow, devctl %02x\n", devctl); 779 ERR("vbus too slow, devctl %02x\n", devctl);
789 tusb_source_power(musb, 0); 780 tusb_musb_set_vbus(musb, 0);
790 } 781 }
791 break; 782 break;
792 case OTG_STATE_A_WAIT_BCON: 783 case OTG_STATE_A_WAIT_BCON:
@@ -807,7 +798,7 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
807 return idle_timeout; 798 return idle_timeout;
808} 799}
809 800
810static irqreturn_t tusb_interrupt(int irq, void *__hci) 801static irqreturn_t tusb_musb_interrupt(int irq, void *__hci)
811{ 802{
812 struct musb *musb = __hci; 803 struct musb *musb = __hci;
813 void __iomem *tbase = musb->ctrl_base; 804 void __iomem *tbase = musb->ctrl_base;
@@ -911,7 +902,7 @@ static irqreturn_t tusb_interrupt(int irq, void *__hci)
911 musb_writel(tbase, TUSB_INT_SRC_CLEAR, 902 musb_writel(tbase, TUSB_INT_SRC_CLEAR,
912 int_src & ~TUSB_INT_MASK_RESERVED_BITS); 903 int_src & ~TUSB_INT_MASK_RESERVED_BITS);
913 904
914 musb_platform_try_idle(musb, idle_timeout); 905 tusb_musb_try_idle(musb, idle_timeout);
915 906
916 musb_writel(tbase, TUSB_INT_MASK, int_mask); 907 musb_writel(tbase, TUSB_INT_MASK, int_mask);
917 spin_unlock_irqrestore(&musb->lock, flags); 908 spin_unlock_irqrestore(&musb->lock, flags);
@@ -926,7 +917,7 @@ static int dma_off;
926 * REVISIT: 917 * REVISIT:
927 * - Check what is unnecessary in MGC_HdrcStart() 918 * - Check what is unnecessary in MGC_HdrcStart()
928 */ 919 */
929void musb_platform_enable(struct musb *musb) 920static void tusb_musb_enable(struct musb *musb)
930{ 921{
931 void __iomem *tbase = musb->ctrl_base; 922 void __iomem *tbase = musb->ctrl_base;
932 923
@@ -970,7 +961,7 @@ void musb_platform_enable(struct musb *musb)
970/* 961/*
971 * Disables TUSB6010. Caller must take care of locking. 962 * Disables TUSB6010. Caller must take care of locking.
972 */ 963 */
973void musb_platform_disable(struct musb *musb) 964static void tusb_musb_disable(struct musb *musb)
974{ 965{
975 void __iomem *tbase = musb->ctrl_base; 966 void __iomem *tbase = musb->ctrl_base;
976 967
@@ -995,7 +986,7 @@ void musb_platform_disable(struct musb *musb)
995 * Sets up TUSB6010 CPU interface specific signals and registers 986 * Sets up TUSB6010 CPU interface specific signals and registers
996 * Note: Settings optimized for OMAP24xx 987 * Note: Settings optimized for OMAP24xx
997 */ 988 */
998static void __init tusb_setup_cpu_interface(struct musb *musb) 989static void tusb_setup_cpu_interface(struct musb *musb)
999{ 990{
1000 void __iomem *tbase = musb->ctrl_base; 991 void __iomem *tbase = musb->ctrl_base;
1001 992
@@ -1022,7 +1013,7 @@ static void __init tusb_setup_cpu_interface(struct musb *musb)
1022 musb_writel(tbase, TUSB_WAIT_COUNT, 1); 1013 musb_writel(tbase, TUSB_WAIT_COUNT, 1);
1023} 1014}
1024 1015
1025static int __init tusb_start(struct musb *musb) 1016static int tusb_musb_start(struct musb *musb)
1026{ 1017{
1027 void __iomem *tbase = musb->ctrl_base; 1018 void __iomem *tbase = musb->ctrl_base;
1028 int ret = 0; 1019 int ret = 0;
@@ -1091,7 +1082,7 @@ err:
1091 return -ENODEV; 1082 return -ENODEV;
1092} 1083}
1093 1084
1094int __init musb_platform_init(struct musb *musb, void *board_data) 1085static int tusb_musb_init(struct musb *musb)
1095{ 1086{
1096 struct platform_device *pdev; 1087 struct platform_device *pdev;
1097 struct resource *mem; 1088 struct resource *mem;
@@ -1131,16 +1122,14 @@ int __init musb_platform_init(struct musb *musb, void *board_data)
1131 */ 1122 */
1132 musb->mregs += TUSB_BASE_OFFSET; 1123 musb->mregs += TUSB_BASE_OFFSET;
1133 1124
1134 ret = tusb_start(musb); 1125 ret = tusb_musb_start(musb);
1135 if (ret) { 1126 if (ret) {
1136 printk(KERN_ERR "Could not start tusb6010 (%d)\n", 1127 printk(KERN_ERR "Could not start tusb6010 (%d)\n",
1137 ret); 1128 ret);
1138 goto done; 1129 goto done;
1139 } 1130 }
1140 musb->isr = tusb_interrupt; 1131 musb->isr = tusb_musb_interrupt;
1141 1132
1142 if (is_host_enabled(musb))
1143 musb->board_set_vbus = tusb_source_power;
1144 if (is_peripheral_enabled(musb)) { 1133 if (is_peripheral_enabled(musb)) {
1145 musb->xceiv->set_power = tusb_draw_power; 1134 musb->xceiv->set_power = tusb_draw_power;
1146 the_musb = musb; 1135 the_musb = musb;
@@ -1159,7 +1148,7 @@ done:
1159 return ret; 1148 return ret;
1160} 1149}
1161 1150
1162int musb_platform_exit(struct musb *musb) 1151static int tusb_musb_exit(struct musb *musb)
1163{ 1152{
1164 del_timer_sync(&musb_idle_timer); 1153 del_timer_sync(&musb_idle_timer);
1165 the_musb = NULL; 1154 the_musb = NULL;
@@ -1173,3 +1162,115 @@ int musb_platform_exit(struct musb *musb)
1173 usb_nop_xceiv_unregister(); 1162 usb_nop_xceiv_unregister();
1174 return 0; 1163 return 0;
1175} 1164}
1165
1166static const struct musb_platform_ops tusb_ops = {
1167 .init = tusb_musb_init,
1168 .exit = tusb_musb_exit,
1169
1170 .enable = tusb_musb_enable,
1171 .disable = tusb_musb_disable,
1172
1173 .set_mode = tusb_musb_set_mode,
1174 .try_idle = tusb_musb_try_idle,
1175
1176 .vbus_status = tusb_musb_vbus_status,
1177 .set_vbus = tusb_musb_set_vbus,
1178};
1179
1180static u64 tusb_dmamask = DMA_BIT_MASK(32);
1181
1182static int __init tusb_probe(struct platform_device *pdev)
1183{
1184 struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data;
1185 struct platform_device *musb;
1186 struct tusb6010_glue *glue;
1187
1188 int ret = -ENOMEM;
1189
1190 glue = kzalloc(sizeof(*glue), GFP_KERNEL);
1191 if (!glue) {
1192 dev_err(&pdev->dev, "failed to allocate glue context\n");
1193 goto err0;
1194 }
1195
1196 musb = platform_device_alloc("musb-hdrc", -1);
1197 if (!musb) {
1198 dev_err(&pdev->dev, "failed to allocate musb device\n");
1199 goto err1;
1200 }
1201
1202 musb->dev.parent = &pdev->dev;
1203 musb->dev.dma_mask = &tusb_dmamask;
1204 musb->dev.coherent_dma_mask = tusb_dmamask;
1205
1206 glue->dev = &pdev->dev;
1207 glue->musb = musb;
1208
1209 pdata->platform_ops = &tusb_ops;
1210
1211 platform_set_drvdata(pdev, glue);
1212
1213 ret = platform_device_add_resources(musb, pdev->resource,
1214 pdev->num_resources);
1215 if (ret) {
1216 dev_err(&pdev->dev, "failed to add resources\n");
1217 goto err2;
1218 }
1219
1220 ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
1221 if (ret) {
1222 dev_err(&pdev->dev, "failed to add platform_data\n");
1223 goto err2;
1224 }
1225
1226 ret = platform_device_add(musb);
1227 if (ret) {
1228 dev_err(&pdev->dev, "failed to register musb device\n");
1229 goto err1;
1230 }
1231
1232 return 0;
1233
1234err2:
1235 platform_device_put(musb);
1236
1237err1:
1238 kfree(glue);
1239
1240err0:
1241 return ret;
1242}
1243
1244static int __exit tusb_remove(struct platform_device *pdev)
1245{
1246 struct tusb6010_glue *glue = platform_get_drvdata(pdev);
1247
1248 platform_device_del(glue->musb);
1249 platform_device_put(glue->musb);
1250 kfree(glue);
1251
1252 return 0;
1253}
1254
1255static struct platform_driver tusb_driver = {
1256 .remove = __exit_p(tusb_remove),
1257 .driver = {
1258 .name = "musb-tusb",
1259 },
1260};
1261
1262MODULE_DESCRIPTION("TUSB6010 MUSB Glue Layer");
1263MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
1264MODULE_LICENSE("GPL v2");
1265
1266static int __init tusb_init(void)
1267{
1268 return platform_driver_probe(&tusb_driver, tusb_probe);
1269}
1270subsys_initcall(tusb_init);
1271
1272static void __exit tusb_exit(void)
1273{
1274 platform_driver_unregister(&tusb_driver);
1275}
1276module_exit(tusb_exit);
diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c
new file mode 100644
index 000000000000..d6384e4aeef9
--- /dev/null
+++ b/drivers/usb/musb/ux500.c
@@ -0,0 +1,216 @@
1/*
2 * Copyright (C) 2010 ST-Ericsson AB
3 * Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>
4 *
5 * Based on omap2430.c
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22#include <linux/module.h>
23#include <linux/kernel.h>
24#include <linux/init.h>
25#include <linux/clk.h>
26#include <linux/io.h>
27#include <linux/platform_device.h>
28
29#include "musb_core.h"
30
31struct ux500_glue {
32 struct device *dev;
33 struct platform_device *musb;
34 struct clk *clk;
35};
36#define glue_to_musb(g) platform_get_drvdata(g->musb)
37
38static int ux500_musb_init(struct musb *musb)
39{
40 musb->xceiv = otg_get_transceiver();
41 if (!musb->xceiv) {
42 pr_err("HS USB OTG: no transceiver configured\n");
43 return -ENODEV;
44 }
45
46 return 0;
47}
48
49static int ux500_musb_exit(struct musb *musb)
50{
51 otg_put_transceiver(musb->xceiv);
52
53 return 0;
54}
55
56static const struct musb_platform_ops ux500_ops = {
57 .init = ux500_musb_init,
58 .exit = ux500_musb_exit,
59};
60
61static int __init ux500_probe(struct platform_device *pdev)
62{
63 struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data;
64 struct platform_device *musb;
65 struct ux500_glue *glue;
66 struct clk *clk;
67
68 int ret = -ENOMEM;
69
70 glue = kzalloc(sizeof(*glue), GFP_KERNEL);
71 if (!glue) {
72 dev_err(&pdev->dev, "failed to allocate glue context\n");
73 goto err0;
74 }
75
76 musb = platform_device_alloc("musb-hdrc", -1);
77 if (!musb) {
78 dev_err(&pdev->dev, "failed to allocate musb device\n");
79 goto err1;
80 }
81
82 clk = clk_get(&pdev->dev, "usb");
83 if (IS_ERR(clk)) {
84 dev_err(&pdev->dev, "failed to get clock\n");
85 ret = PTR_ERR(clk);
86 goto err2;
87 }
88
89 ret = clk_enable(clk);
90 if (ret) {
91 dev_err(&pdev->dev, "failed to enable clock\n");
92 goto err3;
93 }
94
95 musb->dev.parent = &pdev->dev;
96
97 glue->dev = &pdev->dev;
98 glue->musb = musb;
99 glue->clk = clk;
100
101 pdata->platform_ops = &ux500_ops;
102
103 platform_set_drvdata(pdev, glue);
104
105 ret = platform_device_add_resources(musb, pdev->resource,
106 pdev->num_resources);
107 if (ret) {
108 dev_err(&pdev->dev, "failed to add resources\n");
109 goto err4;
110 }
111
112 ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
113 if (ret) {
114 dev_err(&pdev->dev, "failed to add platform_data\n");
115 goto err4;
116 }
117
118 ret = platform_device_add(musb);
119 if (ret) {
120 dev_err(&pdev->dev, "failed to register musb device\n");
121 goto err4;
122 }
123
124 return 0;
125
126err4:
127 clk_disable(clk);
128
129err3:
130 clk_put(clk);
131
132err2:
133 platform_device_put(musb);
134
135err1:
136 kfree(glue);
137
138err0:
139 return ret;
140}
141
142static int __exit ux500_remove(struct platform_device *pdev)
143{
144 struct ux500_glue *glue = platform_get_drvdata(pdev);
145
146 platform_device_del(glue->musb);
147 platform_device_put(glue->musb);
148 clk_disable(glue->clk);
149 clk_put(glue->clk);
150 kfree(glue);
151
152 return 0;
153}
154
155#ifdef CONFIG_PM
156static int ux500_suspend(struct device *dev)
157{
158 struct ux500_glue *glue = dev_get_drvdata(dev);
159 struct musb *musb = glue_to_musb(glue);
160
161 otg_set_suspend(musb->xceiv, 1);
162 clk_disable(glue->clk);
163
164 return 0;
165}
166
167static int ux500_resume(struct device *dev)
168{
169 struct ux500_glue *glue = dev_get_drvdata(dev);
170 struct musb *musb = glue_to_musb(glue);
171 int ret;
172
173 ret = clk_enable(glue->clk);
174 if (ret) {
175 dev_err(dev, "failed to enable clock\n");
176 return ret;
177 }
178
179 otg_set_suspend(musb->xceiv, 0);
180
181 return 0;
182}
183
184static const struct dev_pm_ops ux500_pm_ops = {
185 .suspend = ux500_suspend,
186 .resume = ux500_resume,
187};
188
189#define DEV_PM_OPS (&ux500_pm_ops)
190#else
191#define DEV_PM_OPS NULL
192#endif
193
194static struct platform_driver ux500_driver = {
195 .remove = __exit_p(ux500_remove),
196 .driver = {
197 .name = "musb-ux500",
198 .pm = DEV_PM_OPS,
199 },
200};
201
202MODULE_DESCRIPTION("UX500 MUSB Glue Layer");
203MODULE_AUTHOR("Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>");
204MODULE_LICENSE("GPL v2");
205
206static int __init ux500_init(void)
207{
208 return platform_driver_probe(&ux500_driver, ux500_probe);
209}
210subsys_initcall(ux500_init);
211
212static void __exit ux500_exit(void)
213{
214 platform_driver_unregister(&ux500_driver);
215}
216module_exit(ux500_exit);
diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig
index 5ce07528cd0c..9fb875d5f09c 100644
--- a/drivers/usb/otg/Kconfig
+++ b/drivers/usb/otg/Kconfig
@@ -59,6 +59,18 @@ config TWL4030_USB
59 This transceiver supports high and full speed devices plus, 59 This transceiver supports high and full speed devices plus,
60 in host mode, low speed. 60 in host mode, low speed.
61 61
62config TWL6030_USB
63 tristate "TWL6030 USB Transceiver Driver"
64 depends on TWL4030_CORE
65 select USB_OTG_UTILS
66 help
67 Enable this to support the USB OTG transceiver on TWL6030
68 family chips. This TWL6030 transceiver has the VBUS and ID GND
69 and OTG SRP events capabilities. For all other transceiver functionality
70 UTMI PHY is embedded in OMAP4430. The internal PHY configurations APIs
71 are hooked to this driver through platform_data structure.
72 The definition of internal PHY APIs are in the mach-omap2 layer.
73
62config NOP_USB_XCEIV 74config NOP_USB_XCEIV
63 tristate "NOP USB Transceiver Driver" 75 tristate "NOP USB Transceiver Driver"
64 select USB_OTG_UTILS 76 select USB_OTG_UTILS
@@ -81,4 +93,24 @@ config USB_LANGWELL_OTG
81 To compile this driver as a module, choose M here: the 93 To compile this driver as a module, choose M here: the
82 module will be called langwell_otg. 94 module will be called langwell_otg.
83 95
96config USB_MSM_OTG_72K
97 tristate "OTG support for Qualcomm on-chip USB controller"
98 depends on (USB || USB_GADGET) && ARCH_MSM
99 select USB_OTG_UTILS
100 help
101 Enable this to support the USB OTG transceiver on MSM chips. It
102 handles PHY initialization, clock management, and workarounds
103 required after resetting the hardware and power management.
104 This driver is required even for peripheral only or host only
105 mode configurations.
106
107config AB8500_USB
108 tristate "AB8500 USB Transceiver Driver"
109 depends on AB8500_CORE
110 select USB_OTG_UTILS
111 help
112 Enable this to support the USB OTG transceiver in AB8500 chip.
113 This transceiver supports high and full speed devices plus,
114 in host mode, low speed.
115
84endif # USB || OTG 116endif # USB || OTG
diff --git a/drivers/usb/otg/Makefile b/drivers/usb/otg/Makefile
index 66f1b83e4fa7..a520e715cfd6 100644
--- a/drivers/usb/otg/Makefile
+++ b/drivers/usb/otg/Makefile
@@ -12,6 +12,9 @@ obj-$(CONFIG_USB_OTG_UTILS) += otg.o
12obj-$(CONFIG_USB_GPIO_VBUS) += gpio_vbus.o 12obj-$(CONFIG_USB_GPIO_VBUS) += gpio_vbus.o
13obj-$(CONFIG_ISP1301_OMAP) += isp1301_omap.o 13obj-$(CONFIG_ISP1301_OMAP) += isp1301_omap.o
14obj-$(CONFIG_TWL4030_USB) += twl4030-usb.o 14obj-$(CONFIG_TWL4030_USB) += twl4030-usb.o
15obj-$(CONFIG_TWL6030_USB) += twl6030-usb.o
15obj-$(CONFIG_USB_LANGWELL_OTG) += langwell_otg.o 16obj-$(CONFIG_USB_LANGWELL_OTG) += langwell_otg.o
16obj-$(CONFIG_NOP_USB_XCEIV) += nop-usb-xceiv.o 17obj-$(CONFIG_NOP_USB_XCEIV) += nop-usb-xceiv.o
17obj-$(CONFIG_USB_ULPI) += ulpi.o 18obj-$(CONFIG_USB_ULPI) += ulpi.o
19obj-$(CONFIG_USB_MSM_OTG_72K) += msm72k_otg.o
20obj-$(CONFIG_AB8500_USB) += ab8500-usb.o
diff --git a/drivers/usb/otg/ab8500-usb.c b/drivers/usb/otg/ab8500-usb.c
new file mode 100644
index 000000000000..d14736b3107b
--- /dev/null
+++ b/drivers/usb/otg/ab8500-usb.c
@@ -0,0 +1,585 @@
1/*
2 * drivers/usb/otg/ab8500_usb.c
3 *
4 * USB transceiver driver for AB8500 chip
5 *
6 * Copyright (C) 2010 ST-Ericsson AB
7 * Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 *
23 */
24
25#include <linux/module.h>
26#include <linux/platform_device.h>
27#include <linux/usb/otg.h>
28#include <linux/slab.h>
29#include <linux/notifier.h>
30#include <linux/interrupt.h>
31#include <linux/delay.h>
32#include <linux/mfd/abx500.h>
33#include <linux/mfd/ab8500.h>
34
35#define AB8500_MAIN_WD_CTRL_REG 0x01
36#define AB8500_USB_LINE_STAT_REG 0x80
37#define AB8500_USB_PHY_CTRL_REG 0x8A
38
39#define AB8500_BIT_OTG_STAT_ID (1 << 0)
40#define AB8500_BIT_PHY_CTRL_HOST_EN (1 << 0)
41#define AB8500_BIT_PHY_CTRL_DEVICE_EN (1 << 1)
42#define AB8500_BIT_WD_CTRL_ENABLE (1 << 0)
43#define AB8500_BIT_WD_CTRL_KICK (1 << 1)
44
45#define AB8500_V1x_LINK_STAT_WAIT (HZ/10)
46#define AB8500_WD_KICK_DELAY_US 100 /* usec */
47#define AB8500_WD_V11_DISABLE_DELAY_US 100 /* usec */
48#define AB8500_WD_V10_DISABLE_DELAY_MS 100 /* ms */
49
50/* Usb line status register */
51enum ab8500_usb_link_status {
52 USB_LINK_NOT_CONFIGURED = 0,
53 USB_LINK_STD_HOST_NC,
54 USB_LINK_STD_HOST_C_NS,
55 USB_LINK_STD_HOST_C_S,
56 USB_LINK_HOST_CHG_NM,
57 USB_LINK_HOST_CHG_HS,
58 USB_LINK_HOST_CHG_HS_CHIRP,
59 USB_LINK_DEDICATED_CHG,
60 USB_LINK_ACA_RID_A,
61 USB_LINK_ACA_RID_B,
62 USB_LINK_ACA_RID_C_NM,
63 USB_LINK_ACA_RID_C_HS,
64 USB_LINK_ACA_RID_C_HS_CHIRP,
65 USB_LINK_HM_IDGND,
66 USB_LINK_RESERVED,
67 USB_LINK_NOT_VALID_LINK
68};
69
70struct ab8500_usb {
71 struct otg_transceiver otg;
72 struct device *dev;
73 int irq_num_id_rise;
74 int irq_num_id_fall;
75 int irq_num_vbus_rise;
76 int irq_num_vbus_fall;
77 int irq_num_link_status;
78 unsigned vbus_draw;
79 struct delayed_work dwork;
80 struct work_struct phy_dis_work;
81 unsigned long link_status_wait;
82 int rev;
83};
84
85static inline struct ab8500_usb *xceiv_to_ab(struct otg_transceiver *x)
86{
87 return container_of(x, struct ab8500_usb, otg);
88}
89
90static void ab8500_usb_wd_workaround(struct ab8500_usb *ab)
91{
92 abx500_set_register_interruptible(ab->dev,
93 AB8500_SYS_CTRL2_BLOCK,
94 AB8500_MAIN_WD_CTRL_REG,
95 AB8500_BIT_WD_CTRL_ENABLE);
96
97 udelay(AB8500_WD_KICK_DELAY_US);
98
99 abx500_set_register_interruptible(ab->dev,
100 AB8500_SYS_CTRL2_BLOCK,
101 AB8500_MAIN_WD_CTRL_REG,
102 (AB8500_BIT_WD_CTRL_ENABLE
103 | AB8500_BIT_WD_CTRL_KICK));
104
105 if (ab->rev > 0x10) /* v1.1 v2.0 */
106 udelay(AB8500_WD_V11_DISABLE_DELAY_US);
107 else /* v1.0 */
108 msleep(AB8500_WD_V10_DISABLE_DELAY_MS);
109
110 abx500_set_register_interruptible(ab->dev,
111 AB8500_SYS_CTRL2_BLOCK,
112 AB8500_MAIN_WD_CTRL_REG,
113 0);
114}
115
116static void ab8500_usb_phy_ctrl(struct ab8500_usb *ab, bool sel_host,
117 bool enable)
118{
119 u8 ctrl_reg;
120 abx500_get_register_interruptible(ab->dev,
121 AB8500_USB,
122 AB8500_USB_PHY_CTRL_REG,
123 &ctrl_reg);
124 if (sel_host) {
125 if (enable)
126 ctrl_reg |= AB8500_BIT_PHY_CTRL_HOST_EN;
127 else
128 ctrl_reg &= ~AB8500_BIT_PHY_CTRL_HOST_EN;
129 } else {
130 if (enable)
131 ctrl_reg |= AB8500_BIT_PHY_CTRL_DEVICE_EN;
132 else
133 ctrl_reg &= ~AB8500_BIT_PHY_CTRL_DEVICE_EN;
134 }
135
136 abx500_set_register_interruptible(ab->dev,
137 AB8500_USB,
138 AB8500_USB_PHY_CTRL_REG,
139 ctrl_reg);
140
141 /* Needed to enable the phy.*/
142 if (enable)
143 ab8500_usb_wd_workaround(ab);
144}
145
146#define ab8500_usb_host_phy_en(ab) ab8500_usb_phy_ctrl(ab, true, true)
147#define ab8500_usb_host_phy_dis(ab) ab8500_usb_phy_ctrl(ab, true, false)
148#define ab8500_usb_peri_phy_en(ab) ab8500_usb_phy_ctrl(ab, false, true)
149#define ab8500_usb_peri_phy_dis(ab) ab8500_usb_phy_ctrl(ab, false, false)
150
151static int ab8500_usb_link_status_update(struct ab8500_usb *ab)
152{
153 u8 reg;
154 enum ab8500_usb_link_status lsts;
155 void *v = NULL;
156 enum usb_xceiv_events event;
157
158 abx500_get_register_interruptible(ab->dev,
159 AB8500_USB,
160 AB8500_USB_LINE_STAT_REG,
161 &reg);
162
163 lsts = (reg >> 3) & 0x0F;
164
165 switch (lsts) {
166 case USB_LINK_NOT_CONFIGURED:
167 case USB_LINK_RESERVED:
168 case USB_LINK_NOT_VALID_LINK:
169 /* TODO: Disable regulators. */
170 ab8500_usb_host_phy_dis(ab);
171 ab8500_usb_peri_phy_dis(ab);
172 ab->otg.state = OTG_STATE_B_IDLE;
173 ab->otg.default_a = false;
174 ab->vbus_draw = 0;
175 event = USB_EVENT_NONE;
176 break;
177
178 case USB_LINK_STD_HOST_NC:
179 case USB_LINK_STD_HOST_C_NS:
180 case USB_LINK_STD_HOST_C_S:
181 case USB_LINK_HOST_CHG_NM:
182 case USB_LINK_HOST_CHG_HS:
183 case USB_LINK_HOST_CHG_HS_CHIRP:
184 if (ab->otg.gadget) {
185 /* TODO: Enable regulators. */
186 ab8500_usb_peri_phy_en(ab);
187 v = ab->otg.gadget;
188 }
189 event = USB_EVENT_VBUS;
190 break;
191
192 case USB_LINK_HM_IDGND:
193 if (ab->otg.host) {
194 /* TODO: Enable regulators. */
195 ab8500_usb_host_phy_en(ab);
196 v = ab->otg.host;
197 }
198 ab->otg.state = OTG_STATE_A_IDLE;
199 ab->otg.default_a = true;
200 event = USB_EVENT_ID;
201 break;
202
203 case USB_LINK_ACA_RID_A:
204 case USB_LINK_ACA_RID_B:
205 /* TODO */
206 case USB_LINK_ACA_RID_C_NM:
207 case USB_LINK_ACA_RID_C_HS:
208 case USB_LINK_ACA_RID_C_HS_CHIRP:
209 case USB_LINK_DEDICATED_CHG:
210 /* TODO: vbus_draw */
211 event = USB_EVENT_CHARGER;
212 break;
213 }
214
215 blocking_notifier_call_chain(&ab->otg.notifier, event, v);
216
217 return 0;
218}
219
220static void ab8500_usb_delayed_work(struct work_struct *work)
221{
222 struct ab8500_usb *ab = container_of(work, struct ab8500_usb,
223 dwork.work);
224
225 ab8500_usb_link_status_update(ab);
226}
227
228static irqreturn_t ab8500_usb_v1x_common_irq(int irq, void *data)
229{
230 struct ab8500_usb *ab = (struct ab8500_usb *) data;
231
232 /* Wait for link status to become stable. */
233 schedule_delayed_work(&ab->dwork, ab->link_status_wait);
234
235 return IRQ_HANDLED;
236}
237
238static irqreturn_t ab8500_usb_v1x_vbus_fall_irq(int irq, void *data)
239{
240 struct ab8500_usb *ab = (struct ab8500_usb *) data;
241
242 /* Link status will not be updated till phy is disabled. */
243 ab8500_usb_peri_phy_dis(ab);
244
245 /* Wait for link status to become stable. */
246 schedule_delayed_work(&ab->dwork, ab->link_status_wait);
247
248 return IRQ_HANDLED;
249}
250
251static irqreturn_t ab8500_usb_v20_irq(int irq, void *data)
252{
253 struct ab8500_usb *ab = (struct ab8500_usb *) data;
254
255 ab8500_usb_link_status_update(ab);
256
257 return IRQ_HANDLED;
258}
259
260static void ab8500_usb_phy_disable_work(struct work_struct *work)
261{
262 struct ab8500_usb *ab = container_of(work, struct ab8500_usb,
263 phy_dis_work);
264
265 if (!ab->otg.host)
266 ab8500_usb_host_phy_dis(ab);
267
268 if (!ab->otg.gadget)
269 ab8500_usb_peri_phy_dis(ab);
270}
271
272static int ab8500_usb_set_power(struct otg_transceiver *otg, unsigned mA)
273{
274 struct ab8500_usb *ab;
275
276 if (!otg)
277 return -ENODEV;
278
279 ab = xceiv_to_ab(otg);
280
281 ab->vbus_draw = mA;
282
283 if (mA)
284 blocking_notifier_call_chain(&ab->otg.notifier,
285 USB_EVENT_ENUMERATED, ab->otg.gadget);
286 return 0;
287}
288
289/* TODO: Implement some way for charging or other drivers to read
290 * ab->vbus_draw.
291 */
292
293static int ab8500_usb_set_suspend(struct otg_transceiver *x, int suspend)
294{
295 /* TODO */
296 return 0;
297}
298
299static int ab8500_usb_set_peripheral(struct otg_transceiver *otg,
300 struct usb_gadget *gadget)
301{
302 struct ab8500_usb *ab;
303
304 if (!otg)
305 return -ENODEV;
306
307 ab = xceiv_to_ab(otg);
308
309 /* Some drivers call this function in atomic context.
310 * Do not update ab8500 registers directly till this
311 * is fixed.
312 */
313
314 if (!gadget) {
315 /* TODO: Disable regulators. */
316 ab->otg.gadget = NULL;
317 schedule_work(&ab->phy_dis_work);
318 } else {
319 ab->otg.gadget = gadget;
320 ab->otg.state = OTG_STATE_B_IDLE;
321
322 /* Phy will not be enabled if cable is already
323 * plugged-in. Schedule to enable phy.
324 * Use same delay to avoid any race condition.
325 */
326 schedule_delayed_work(&ab->dwork, ab->link_status_wait);
327 }
328
329 return 0;
330}
331
332static int ab8500_usb_set_host(struct otg_transceiver *otg,
333 struct usb_bus *host)
334{
335 struct ab8500_usb *ab;
336
337 if (!otg)
338 return -ENODEV;
339
340 ab = xceiv_to_ab(otg);
341
342 /* Some drivers call this function in atomic context.
343 * Do not update ab8500 registers directly till this
344 * is fixed.
345 */
346
347 if (!host) {
348 /* TODO: Disable regulators. */
349 ab->otg.host = NULL;
350 schedule_work(&ab->phy_dis_work);
351 } else {
352 ab->otg.host = host;
353 /* Phy will not be enabled if cable is already
354 * plugged-in. Schedule to enable phy.
355 * Use same delay to avoid any race condition.
356 */
357 schedule_delayed_work(&ab->dwork, ab->link_status_wait);
358 }
359
360 return 0;
361}
362
363static void ab8500_usb_irq_free(struct ab8500_usb *ab)
364{
365 if (ab->rev < 0x20) {
366 free_irq(ab->irq_num_id_rise, ab);
367 free_irq(ab->irq_num_id_fall, ab);
368 free_irq(ab->irq_num_vbus_rise, ab);
369 free_irq(ab->irq_num_vbus_fall, ab);
370 } else {
371 free_irq(ab->irq_num_link_status, ab);
372 }
373}
374
375static int ab8500_usb_v1x_res_setup(struct platform_device *pdev,
376 struct ab8500_usb *ab)
377{
378 int err;
379
380 ab->irq_num_id_rise = platform_get_irq_byname(pdev, "ID_WAKEUP_R");
381 if (ab->irq_num_id_rise < 0) {
382 dev_err(&pdev->dev, "ID rise irq not found\n");
383 return ab->irq_num_id_rise;
384 }
385 err = request_threaded_irq(ab->irq_num_id_rise, NULL,
386 ab8500_usb_v1x_common_irq,
387 IRQF_NO_SUSPEND | IRQF_SHARED,
388 "usb-id-rise", ab);
389 if (err < 0) {
390 dev_err(ab->dev, "request_irq failed for ID rise irq\n");
391 goto fail0;
392 }
393
394 ab->irq_num_id_fall = platform_get_irq_byname(pdev, "ID_WAKEUP_F");
395 if (ab->irq_num_id_fall < 0) {
396 dev_err(&pdev->dev, "ID fall irq not found\n");
397 return ab->irq_num_id_fall;
398 }
399 err = request_threaded_irq(ab->irq_num_id_fall, NULL,
400 ab8500_usb_v1x_common_irq,
401 IRQF_NO_SUSPEND | IRQF_SHARED,
402 "usb-id-fall", ab);
403 if (err < 0) {
404 dev_err(ab->dev, "request_irq failed for ID fall irq\n");
405 goto fail1;
406 }
407
408 ab->irq_num_vbus_rise = platform_get_irq_byname(pdev, "VBUS_DET_R");
409 if (ab->irq_num_vbus_rise < 0) {
410 dev_err(&pdev->dev, "VBUS rise irq not found\n");
411 return ab->irq_num_vbus_rise;
412 }
413 err = request_threaded_irq(ab->irq_num_vbus_rise, NULL,
414 ab8500_usb_v1x_common_irq,
415 IRQF_NO_SUSPEND | IRQF_SHARED,
416 "usb-vbus-rise", ab);
417 if (err < 0) {
418 dev_err(ab->dev, "request_irq failed for Vbus rise irq\n");
419 goto fail2;
420 }
421
422 ab->irq_num_vbus_fall = platform_get_irq_byname(pdev, "VBUS_DET_F");
423 if (ab->irq_num_vbus_fall < 0) {
424 dev_err(&pdev->dev, "VBUS fall irq not found\n");
425 return ab->irq_num_vbus_fall;
426 }
427 err = request_threaded_irq(ab->irq_num_vbus_fall, NULL,
428 ab8500_usb_v1x_vbus_fall_irq,
429 IRQF_NO_SUSPEND | IRQF_SHARED,
430 "usb-vbus-fall", ab);
431 if (err < 0) {
432 dev_err(ab->dev, "request_irq failed for Vbus fall irq\n");
433 goto fail3;
434 }
435
436 return 0;
437fail3:
438 free_irq(ab->irq_num_vbus_rise, ab);
439fail2:
440 free_irq(ab->irq_num_id_fall, ab);
441fail1:
442 free_irq(ab->irq_num_id_rise, ab);
443fail0:
444 return err;
445}
446
447static int ab8500_usb_v2_res_setup(struct platform_device *pdev,
448 struct ab8500_usb *ab)
449{
450 int err;
451
452 ab->irq_num_link_status = platform_get_irq_byname(pdev,
453 "USB_LINK_STATUS");
454 if (ab->irq_num_link_status < 0) {
455 dev_err(&pdev->dev, "Link status irq not found\n");
456 return ab->irq_num_link_status;
457 }
458
459 err = request_threaded_irq(ab->irq_num_link_status, NULL,
460 ab8500_usb_v20_irq,
461 IRQF_NO_SUSPEND | IRQF_SHARED,
462 "usb-link-status", ab);
463 if (err < 0) {
464 dev_err(ab->dev,
465 "request_irq failed for link status irq\n");
466 return err;
467 }
468
469 return 0;
470}
471
472static int __devinit ab8500_usb_probe(struct platform_device *pdev)
473{
474 struct ab8500_usb *ab;
475 int err;
476 int rev;
477
478 rev = abx500_get_chip_id(&pdev->dev);
479 if (rev < 0) {
480 dev_err(&pdev->dev, "Chip id read failed\n");
481 return rev;
482 } else if (rev < 0x10) {
483 dev_err(&pdev->dev, "Unsupported AB8500 chip\n");
484 return -ENODEV;
485 }
486
487 ab = kzalloc(sizeof *ab, GFP_KERNEL);
488 if (!ab)
489 return -ENOMEM;
490
491 ab->dev = &pdev->dev;
492 ab->rev = rev;
493 ab->otg.dev = ab->dev;
494 ab->otg.label = "ab8500";
495 ab->otg.state = OTG_STATE_UNDEFINED;
496 ab->otg.set_host = ab8500_usb_set_host;
497 ab->otg.set_peripheral = ab8500_usb_set_peripheral;
498 ab->otg.set_suspend = ab8500_usb_set_suspend;
499 ab->otg.set_power = ab8500_usb_set_power;
500
501 platform_set_drvdata(pdev, ab);
502
503 BLOCKING_INIT_NOTIFIER_HEAD(&ab->otg.notifier);
504
505 /* v1: Wait for link status to become stable.
506 * all: Updates form set_host and set_peripheral as they are atomic.
507 */
508 INIT_DELAYED_WORK(&ab->dwork, ab8500_usb_delayed_work);
509
510 /* all: Disable phy when called from set_host and set_peripheral */
511 INIT_WORK(&ab->phy_dis_work, ab8500_usb_phy_disable_work);
512
513 if (ab->rev < 0x20) {
514 err = ab8500_usb_v1x_res_setup(pdev, ab);
515 ab->link_status_wait = AB8500_V1x_LINK_STAT_WAIT;
516 } else {
517 err = ab8500_usb_v2_res_setup(pdev, ab);
518 }
519
520 if (err < 0)
521 goto fail0;
522
523 err = otg_set_transceiver(&ab->otg);
524 if (err) {
525 dev_err(&pdev->dev, "Can't register transceiver\n");
526 goto fail1;
527 }
528
529 dev_info(&pdev->dev, "AB8500 usb driver initialized\n");
530
531 return 0;
532fail1:
533 ab8500_usb_irq_free(ab);
534fail0:
535 kfree(ab);
536 return err;
537}
538
539static int __devexit ab8500_usb_remove(struct platform_device *pdev)
540{
541 struct ab8500_usb *ab = platform_get_drvdata(pdev);
542
543 ab8500_usb_irq_free(ab);
544
545 cancel_delayed_work_sync(&ab->dwork);
546
547 cancel_work_sync(&ab->phy_dis_work);
548
549 otg_set_transceiver(NULL);
550
551 ab8500_usb_host_phy_dis(ab);
552 ab8500_usb_peri_phy_dis(ab);
553
554 platform_set_drvdata(pdev, NULL);
555
556 kfree(ab);
557
558 return 0;
559}
560
561static struct platform_driver ab8500_usb_driver = {
562 .probe = ab8500_usb_probe,
563 .remove = __devexit_p(ab8500_usb_remove),
564 .driver = {
565 .name = "ab8500-usb",
566 .owner = THIS_MODULE,
567 },
568};
569
570static int __init ab8500_usb_init(void)
571{
572 return platform_driver_register(&ab8500_usb_driver);
573}
574subsys_initcall(ab8500_usb_init);
575
576static void __exit ab8500_usb_exit(void)
577{
578 platform_driver_unregister(&ab8500_usb_driver);
579}
580module_exit(ab8500_usb_exit);
581
582MODULE_ALIAS("platform:ab8500_usb");
583MODULE_AUTHOR("ST-Ericsson AB");
584MODULE_DESCRIPTION("AB8500 usb transceiver driver");
585MODULE_LICENSE("GPL");
diff --git a/drivers/usb/otg/msm72k_otg.c b/drivers/usb/otg/msm72k_otg.c
new file mode 100644
index 000000000000..1cd52edcd0c2
--- /dev/null
+++ b/drivers/usb/otg/msm72k_otg.c
@@ -0,0 +1,1125 @@
1/* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
15 * 02110-1301, USA.
16 *
17 */
18
19#include <linux/module.h>
20#include <linux/device.h>
21#include <linux/platform_device.h>
22#include <linux/clk.h>
23#include <linux/slab.h>
24#include <linux/interrupt.h>
25#include <linux/err.h>
26#include <linux/delay.h>
27#include <linux/io.h>
28#include <linux/ioport.h>
29#include <linux/uaccess.h>
30#include <linux/debugfs.h>
31#include <linux/seq_file.h>
32#include <linux/pm_runtime.h>
33
34#include <linux/usb.h>
35#include <linux/usb/otg.h>
36#include <linux/usb/ulpi.h>
37#include <linux/usb/gadget.h>
38#include <linux/usb/hcd.h>
39#include <linux/usb/msm_hsusb.h>
40#include <linux/usb/msm_hsusb_hw.h>
41
42#include <mach/clk.h>
43
44#define MSM_USB_BASE (motg->regs)
45#define DRIVER_NAME "msm_otg"
46
47#define ULPI_IO_TIMEOUT_USEC (10 * 1000)
48static int ulpi_read(struct otg_transceiver *otg, u32 reg)
49{
50 struct msm_otg *motg = container_of(otg, struct msm_otg, otg);
51 int cnt = 0;
52
53 /* initiate read operation */
54 writel(ULPI_RUN | ULPI_READ | ULPI_ADDR(reg),
55 USB_ULPI_VIEWPORT);
56
57 /* wait for completion */
58 while (cnt < ULPI_IO_TIMEOUT_USEC) {
59 if (!(readl(USB_ULPI_VIEWPORT) & ULPI_RUN))
60 break;
61 udelay(1);
62 cnt++;
63 }
64
65 if (cnt >= ULPI_IO_TIMEOUT_USEC) {
66 dev_err(otg->dev, "ulpi_read: timeout %08x\n",
67 readl(USB_ULPI_VIEWPORT));
68 return -ETIMEDOUT;
69 }
70 return ULPI_DATA_READ(readl(USB_ULPI_VIEWPORT));
71}
72
73static int ulpi_write(struct otg_transceiver *otg, u32 val, u32 reg)
74{
75 struct msm_otg *motg = container_of(otg, struct msm_otg, otg);
76 int cnt = 0;
77
78 /* initiate write operation */
79 writel(ULPI_RUN | ULPI_WRITE |
80 ULPI_ADDR(reg) | ULPI_DATA(val),
81 USB_ULPI_VIEWPORT);
82
83 /* wait for completion */
84 while (cnt < ULPI_IO_TIMEOUT_USEC) {
85 if (!(readl(USB_ULPI_VIEWPORT) & ULPI_RUN))
86 break;
87 udelay(1);
88 cnt++;
89 }
90
91 if (cnt >= ULPI_IO_TIMEOUT_USEC) {
92 dev_err(otg->dev, "ulpi_write: timeout\n");
93 return -ETIMEDOUT;
94 }
95 return 0;
96}
97
/* ULPI register accessors handed to the OTG core via otg->io_ops */
static struct otg_io_access_ops msm_otg_io_ops = {
	.read = ulpi_read,
	.write = ulpi_write,
};
102
103static void ulpi_init(struct msm_otg *motg)
104{
105 struct msm_otg_platform_data *pdata = motg->pdata;
106 int *seq = pdata->phy_init_seq;
107
108 if (!seq)
109 return;
110
111 while (seq[0] >= 0) {
112 dev_vdbg(motg->otg.dev, "ulpi: write 0x%02x to 0x%02x\n",
113 seq[0], seq[1]);
114 ulpi_write(&motg->otg, seq[0], seq[1]);
115 seq += 2;
116 }
117}
118
119static int msm_otg_link_clk_reset(struct msm_otg *motg, bool assert)
120{
121 int ret;
122
123 if (assert) {
124 ret = clk_reset(motg->clk, CLK_RESET_ASSERT);
125 if (ret)
126 dev_err(motg->otg.dev, "usb hs_clk assert failed\n");
127 } else {
128 ret = clk_reset(motg->clk, CLK_RESET_DEASSERT);
129 if (ret)
130 dev_err(motg->otg.dev, "usb hs_clk deassert failed\n");
131 }
132 return ret;
133}
134
135static int msm_otg_phy_clk_reset(struct msm_otg *motg)
136{
137 int ret;
138
139 ret = clk_reset(motg->phy_reset_clk, CLK_RESET_ASSERT);
140 if (ret) {
141 dev_err(motg->otg.dev, "usb phy clk assert failed\n");
142 return ret;
143 }
144 usleep_range(10000, 12000);
145 ret = clk_reset(motg->phy_reset_clk, CLK_RESET_DEASSERT);
146 if (ret)
147 dev_err(motg->otg.dev, "usb phy clk deassert failed\n");
148 return ret;
149}
150
/*
 * Full reset and re-calibration of the ULPI PHY.
 *
 * Sequence: reset the PHY clock while the link clock reset is
 * asserted, select the ULPI interface on the port, force the PHY out
 * of suspend (clear SUSPENDM, up to 3 attempts with a PHY clock reset
 * in between), re-reset the PHY clock to calibrate it, and finally
 * verify ULPI register access works by reading ULPI_DEBUG.
 *
 * Returns 0 on success or a negative errno (-ETIMEDOUT when the PHY
 * never becomes responsive).
 */
static int msm_otg_phy_reset(struct msm_otg *motg)
{
	u32 val;
	int ret;
	int retries;

	ret = msm_otg_link_clk_reset(motg, 1);
	if (ret)
		return ret;
	ret = msm_otg_phy_clk_reset(motg);
	if (ret)
		return ret;
	ret = msm_otg_link_clk_reset(motg, 0);
	if (ret)
		return ret;

	/* route the port through the ULPI PHY before any ULPI access */
	val = readl(USB_PORTSC) & ~PORTSC_PTS_MASK;
	writel(val | PORTSC_PTS_ULPI, USB_PORTSC);

	/* wake the PHY; retry with a PHY clock reset between attempts */
	for (retries = 3; retries > 0; retries--) {
		ret = ulpi_write(&motg->otg, ULPI_FUNC_CTRL_SUSPENDM,
				ULPI_CLR(ULPI_FUNC_CTRL));
		if (!ret)
			break;
		ret = msm_otg_phy_clk_reset(motg);
		if (ret)
			return ret;
	}
	if (!retries)
		return -ETIMEDOUT;

	/* This reset calibrates the phy, if the above write succeeded */
	ret = msm_otg_phy_clk_reset(motg);
	if (ret)
		return ret;

	/* sanity-check ULPI access; retry with a clock reset on timeout */
	for (retries = 3; retries > 0; retries--) {
		ret = ulpi_read(&motg->otg, ULPI_DEBUG);
		if (ret != -ETIMEDOUT)
			break;
		ret = msm_otg_phy_clk_reset(motg);
		if (ret)
			return ret;
	}
	if (!retries)
		return -ETIMEDOUT;

	dev_info(motg->otg.dev, "phy_reset: success\n");
	return 0;
}
201
#define LINK_RESET_TIMEOUT_USEC	(250 * 1000)
/*
 * otg_transceiver.init callback: reset the PHY, apply the board's
 * ULPI tuning sequence, reset the link controller and program the
 * OTG/ULPI interrupt enables according to the configured mode.
 * Returns 0 on success or a negative errno.
 */
static int msm_otg_reset(struct otg_transceiver *otg)
{
	struct msm_otg *motg = container_of(otg, struct msm_otg, otg);
	struct msm_otg_platform_data *pdata = motg->pdata;
	int cnt = 0;
	int ret;
	u32 val = 0;
	u32 ulpi_val = 0;

	ret = msm_otg_phy_reset(motg);
	if (ret) {
		dev_err(otg->dev, "phy_reset failed\n");
		return ret;
	}

	/* board-specific ULPI PHY register tuning, if any */
	ulpi_init(motg);

	/* reset the link controller and wait for completion */
	writel(USBCMD_RESET, USB_USBCMD);
	while (cnt < LINK_RESET_TIMEOUT_USEC) {
		if (!(readl(USB_USBCMD) & USBCMD_RESET))
			break;
		udelay(1);
		cnt++;
	}
	if (cnt >= LINK_RESET_TIMEOUT_USEC)
		return -ETIMEDOUT;

	/* select ULPI phy */
	writel(0x80000000, USB_PORTSC);

	msleep(100);

	/* default AHB burst length and mode */
	writel(0x0, USB_AHBBURST);
	writel(0x00, USB_AHBMODE);

	/* enable Id/B-session-valid interrupts when the PHY drives OTG */
	if (pdata->otg_control == OTG_PHY_CONTROL) {
		val = readl(USB_OTGSC);
		if (pdata->mode == USB_OTG) {
			ulpi_val = ULPI_INT_IDGRD | ULPI_INT_SESS_VALID;
			val |= OTGSC_IDIE | OTGSC_BSVIE;
		} else if (pdata->mode == USB_PERIPHERAL) {
			ulpi_val = ULPI_INT_SESS_VALID;
			val |= OTGSC_BSVIE;
		}
		writel(val, USB_OTGSC);
		ulpi_write(otg, ulpi_val, ULPI_USB_INT_EN_RISE);
		ulpi_write(otg, ulpi_val, ULPI_USB_INT_EN_FALL);
	}

	return 0;
}
254
#define PHY_SUSPEND_TIMEOUT_USEC	(500 * 1000)
/*
 * Enter low power mode (LPM): quiesce the PHY, set PORTSC.PHCD, gate
 * the clocks and mark the HCD hardware-inaccessible. Shared by the
 * runtime-PM and system-PM suspend paths.
 *
 * Returns 0 if already in LPM or on success; -ETIMEDOUT when the PHY
 * refuses to suspend (PHY and link are reset in that case).
 */
static int msm_otg_suspend(struct msm_otg *motg)
{
	struct otg_transceiver *otg = &motg->otg;
	struct usb_bus *bus = otg->host;
	struct msm_otg_platform_data *pdata = motg->pdata;
	int cnt = 0;

	if (atomic_read(&motg->in_lpm))
		return 0;

	disable_irq(motg->irq);
	/*
	 * Interrupt Latch Register auto-clear feature is not present
	 * in all PHY versions. Latch register is clear on read type.
	 * Clear latch register to avoid spurious wakeup from
	 * low power mode (LPM).
	 */
	ulpi_read(otg, 0x14);

	/*
	 * PHY comparators are disabled when PHY enters into low power
	 * mode (LPM). Keep PHY comparators ON in LPM only when we expect
	 * VBUS/Id notifications from USB PHY. Otherwise turn off USB
	 * PHY comparators. This save significant amount of power.
	 */
	if (pdata->otg_control == OTG_PHY_CONTROL)
		ulpi_write(otg, 0x01, 0x30);

	/*
	 * PLL is not turned off when PHY enters into low power mode (LPM).
	 * Disable PLL for maximum power savings.
	 */
	ulpi_write(otg, 0x08, 0x09);

	/*
	 * PHY may take some time or even fail to enter into low power
	 * mode (LPM). Hence poll for 500 msec and reset the PHY and link
	 * in failure case.
	 */
	writel(readl(USB_PORTSC) | PORTSC_PHCD, USB_PORTSC);
	while (cnt < PHY_SUSPEND_TIMEOUT_USEC) {
		if (readl(USB_PORTSC) & PORTSC_PHCD)
			break;
		udelay(1);
		cnt++;
	}

	if (cnt >= PHY_SUSPEND_TIMEOUT_USEC) {
		dev_err(otg->dev, "Unable to suspend PHY\n");
		msm_otg_reset(otg);
		enable_irq(motg->irq);
		return -ETIMEDOUT;
	}

	/*
	 * PHY has capability to generate interrupt asynchronously in low
	 * power mode (LPM). This interrupt is level triggered. So USB IRQ
	 * line must be disabled till async interrupt enable bit is cleared
	 * in USBCMD register. Assert STP (ULPI interface STOP signal) to
	 * block data communication from PHY.
	 */
	writel(readl(USB_USBCMD) | ASYNC_INTR_CTRL | ULPI_STP_CTRL, USB_USBCMD);

	clk_disable(motg->pclk);
	clk_disable(motg->clk);
	if (motg->core_clk)
		clk_disable(motg->core_clk);

	if (device_may_wakeup(otg->dev))
		enable_irq_wake(motg->irq);
	if (bus)
		clear_bit(HCD_FLAG_HW_ACCESSIBLE, &(bus_to_hcd(bus))->flags);

	atomic_set(&motg->in_lpm, 1);
	enable_irq(motg->irq);

	dev_info(otg->dev, "USB in low power mode\n");

	return 0;
}
336
#define PHY_RESUME_TIMEOUT_USEC	(100 * 1000)
/*
 * Leave low power mode: ungate the clocks, clear the async-interrupt
 * and STP controls, and bring the PHY out of PHCD. Shared by the
 * runtime-PM and system-PM resume paths.
 *
 * Returns 0 if not suspended or on success. A PHY that fails to
 * resume is reset; only cable re-insertion recovers in that case.
 */
static int msm_otg_resume(struct msm_otg *motg)
{
	struct otg_transceiver *otg = &motg->otg;
	struct usb_bus *bus = otg->host;
	int cnt = 0;
	unsigned temp;

	if (!atomic_read(&motg->in_lpm))
		return 0;

	clk_enable(motg->pclk);
	clk_enable(motg->clk);
	if (motg->core_clk)
		clk_enable(motg->core_clk);

	/* allow PHY communication again before touching PORTSC */
	temp = readl(USB_USBCMD);
	temp &= ~ASYNC_INTR_CTRL;
	temp &= ~ULPI_STP_CTRL;
	writel(temp, USB_USBCMD);

	/*
	 * PHY comes out of low power mode (LPM) in case of wakeup
	 * from asynchronous interrupt.
	 */
	if (!(readl(USB_PORTSC) & PORTSC_PHCD))
		goto skip_phy_resume;

	writel(readl(USB_PORTSC) & ~PORTSC_PHCD, USB_PORTSC);
	while (cnt < PHY_RESUME_TIMEOUT_USEC) {
		if (!(readl(USB_PORTSC) & PORTSC_PHCD))
			break;
		udelay(1);
		cnt++;
	}

	if (cnt >= PHY_RESUME_TIMEOUT_USEC) {
		/*
		 * This is a fatal error. Reset the link and
		 * PHY. USB state can not be restored. Re-insertion
		 * of USB cable is the only way to get USB working.
		 */
		dev_err(otg->dev, "Unable to resume USB."
				"Re-plugin the cable\n");
		msm_otg_reset(otg);
	}

skip_phy_resume:
	if (device_may_wakeup(otg->dev))
		disable_irq_wake(motg->irq);
	if (bus)
		set_bit(HCD_FLAG_HW_ACCESSIBLE, &(bus_to_hcd(bus))->flags);

	/* IRQ line was disabled in msm_otg_irq() on async wakeup */
	if (motg->async_int) {
		motg->async_int = 0;
		pm_runtime_put(otg->dev);
		enable_irq(motg->irq);
	}

	atomic_set(&motg->in_lpm, 0);

	dev_info(otg->dev, "USB exited from low power mode\n");

	return 0;
}
402
403static void msm_otg_start_host(struct otg_transceiver *otg, int on)
404{
405 struct msm_otg *motg = container_of(otg, struct msm_otg, otg);
406 struct msm_otg_platform_data *pdata = motg->pdata;
407 struct usb_hcd *hcd;
408
409 if (!otg->host)
410 return;
411
412 hcd = bus_to_hcd(otg->host);
413
414 if (on) {
415 dev_dbg(otg->dev, "host on\n");
416
417 if (pdata->vbus_power)
418 pdata->vbus_power(1);
419 /*
420 * Some boards have a switch cotrolled by gpio
421 * to enable/disable internal HUB. Enable internal
422 * HUB before kicking the host.
423 */
424 if (pdata->setup_gpio)
425 pdata->setup_gpio(OTG_STATE_A_HOST);
426#ifdef CONFIG_USB
427 usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
428#endif
429 } else {
430 dev_dbg(otg->dev, "host off\n");
431
432#ifdef CONFIG_USB
433 usb_remove_hcd(hcd);
434#endif
435 if (pdata->setup_gpio)
436 pdata->setup_gpio(OTG_STATE_UNDEFINED);
437 if (pdata->vbus_power)
438 pdata->vbus_power(0);
439 }
440}
441
442static int msm_otg_set_host(struct otg_transceiver *otg, struct usb_bus *host)
443{
444 struct msm_otg *motg = container_of(otg, struct msm_otg, otg);
445 struct usb_hcd *hcd;
446
447 /*
448 * Fail host registration if this board can support
449 * only peripheral configuration.
450 */
451 if (motg->pdata->mode == USB_PERIPHERAL) {
452 dev_info(otg->dev, "Host mode is not supported\n");
453 return -ENODEV;
454 }
455
456 if (!host) {
457 if (otg->state == OTG_STATE_A_HOST) {
458 pm_runtime_get_sync(otg->dev);
459 msm_otg_start_host(otg, 0);
460 otg->host = NULL;
461 otg->state = OTG_STATE_UNDEFINED;
462 schedule_work(&motg->sm_work);
463 } else {
464 otg->host = NULL;
465 }
466
467 return 0;
468 }
469
470 hcd = bus_to_hcd(host);
471 hcd->power_budget = motg->pdata->power_budget;
472
473 otg->host = host;
474 dev_dbg(otg->dev, "host driver registered w/ tranceiver\n");
475
476 /*
477 * Kick the state machine work, if peripheral is not supported
478 * or peripheral is already registered with us.
479 */
480 if (motg->pdata->mode == USB_HOST || otg->gadget) {
481 pm_runtime_get_sync(otg->dev);
482 schedule_work(&motg->sm_work);
483 }
484
485 return 0;
486}
487
488static void msm_otg_start_peripheral(struct otg_transceiver *otg, int on)
489{
490 struct msm_otg *motg = container_of(otg, struct msm_otg, otg);
491 struct msm_otg_platform_data *pdata = motg->pdata;
492
493 if (!otg->gadget)
494 return;
495
496 if (on) {
497 dev_dbg(otg->dev, "gadget on\n");
498 /*
499 * Some boards have a switch cotrolled by gpio
500 * to enable/disable internal HUB. Disable internal
501 * HUB before kicking the gadget.
502 */
503 if (pdata->setup_gpio)
504 pdata->setup_gpio(OTG_STATE_B_PERIPHERAL);
505 usb_gadget_vbus_connect(otg->gadget);
506 } else {
507 dev_dbg(otg->dev, "gadget off\n");
508 usb_gadget_vbus_disconnect(otg->gadget);
509 if (pdata->setup_gpio)
510 pdata->setup_gpio(OTG_STATE_UNDEFINED);
511 }
512
513}
514
515static int msm_otg_set_peripheral(struct otg_transceiver *otg,
516 struct usb_gadget *gadget)
517{
518 struct msm_otg *motg = container_of(otg, struct msm_otg, otg);
519
520 /*
521 * Fail peripheral registration if this board can support
522 * only host configuration.
523 */
524 if (motg->pdata->mode == USB_HOST) {
525 dev_info(otg->dev, "Peripheral mode is not supported\n");
526 return -ENODEV;
527 }
528
529 if (!gadget) {
530 if (otg->state == OTG_STATE_B_PERIPHERAL) {
531 pm_runtime_get_sync(otg->dev);
532 msm_otg_start_peripheral(otg, 0);
533 otg->gadget = NULL;
534 otg->state = OTG_STATE_UNDEFINED;
535 schedule_work(&motg->sm_work);
536 } else {
537 otg->gadget = NULL;
538 }
539
540 return 0;
541 }
542 otg->gadget = gadget;
543 dev_dbg(otg->dev, "peripheral driver registered w/ tranceiver\n");
544
545 /*
546 * Kick the state machine work, if host is not supported
547 * or host is already registered with us.
548 */
549 if (motg->pdata->mode == USB_PERIPHERAL || otg->host) {
550 pm_runtime_get_sync(otg->dev);
551 schedule_work(&motg->sm_work);
552 }
553
554 return 0;
555}
556
/*
 * We support OTG, Peripheral only and Host only configurations. In case
 * of OTG, mode switch (host-->peripheral/peripheral-->host) can happen
 * via Id pin status or user request (debugfs). Id/BSV interrupts are not
 * enabled when switch is controlled by user and default mode is supplied
 * by board file, which can be changed by userspace later.
 */
static void msm_otg_init_sm(struct msm_otg *motg)
{
	struct msm_otg_platform_data *pdata = motg->pdata;
	u32 otgsc = readl(USB_OTGSC);

	switch (pdata->mode) {
	case USB_OTG:
		if (pdata->otg_control == OTG_PHY_CONTROL) {
			/* seed ID/BSV inputs from the live OTGSC status */
			if (otgsc & OTGSC_ID)
				set_bit(ID, &motg->inputs);
			else
				clear_bit(ID, &motg->inputs);

			if (otgsc & OTGSC_BSV)
				set_bit(B_SESS_VLD, &motg->inputs);
			else
				clear_bit(B_SESS_VLD, &motg->inputs);
		} else if (pdata->otg_control == OTG_USER_CONTROL) {
			/* fake the inputs according to the board default */
			if (pdata->default_mode == USB_HOST) {
				clear_bit(ID, &motg->inputs);
			} else if (pdata->default_mode == USB_PERIPHERAL) {
				set_bit(ID, &motg->inputs);
				set_bit(B_SESS_VLD, &motg->inputs);
			} else {
				set_bit(ID, &motg->inputs);
				clear_bit(B_SESS_VLD, &motg->inputs);
			}
		}
		break;
	case USB_HOST:
		/* ID low forces A-host */
		clear_bit(ID, &motg->inputs);
		break;
	case USB_PERIPHERAL:
		/* ID high; session validity still tracks OTGSC */
		set_bit(ID, &motg->inputs);
		if (otgsc & OTGSC_BSV)
			set_bit(B_SESS_VLD, &motg->inputs);
		else
			clear_bit(B_SESS_VLD, &motg->inputs);
		break;
	default:
		break;
	}
}
607
/*
 * OTG state machine worker. Transitions between B_IDLE,
 * B_PERIPHERAL and A_HOST based on the ID and B_SESS_VLD inputs set
 * by the IRQ handler, debugfs writes or msm_otg_init_sm().
 */
static void msm_otg_sm_work(struct work_struct *w)
{
	struct msm_otg *motg = container_of(w, struct msm_otg, sm_work);
	struct otg_transceiver *otg = &motg->otg;

	switch (otg->state) {
	case OTG_STATE_UNDEFINED:
		dev_dbg(otg->dev, "OTG_STATE_UNDEFINED state\n");
		msm_otg_reset(otg);
		msm_otg_init_sm(motg);
		otg->state = OTG_STATE_B_IDLE;
		/* FALL THROUGH */
	case OTG_STATE_B_IDLE:
		dev_dbg(otg->dev, "OTG_STATE_B_IDLE state\n");
		if (!test_bit(ID, &motg->inputs) && otg->host) {
			/* disable BSV bit */
			writel(readl(USB_OTGSC) & ~OTGSC_BSVIE, USB_OTGSC);
			msm_otg_start_host(otg, 1);
			otg->state = OTG_STATE_A_HOST;
		} else if (test_bit(B_SESS_VLD, &motg->inputs) && otg->gadget) {
			msm_otg_start_peripheral(otg, 1);
			otg->state = OTG_STATE_B_PERIPHERAL;
		}
		/* drop the runtime-PM reference taken by whoever kicked us */
		pm_runtime_put_sync(otg->dev);
		break;
	case OTG_STATE_B_PERIPHERAL:
		dev_dbg(otg->dev, "OTG_STATE_B_PERIPHERAL state\n");
		if (!test_bit(B_SESS_VLD, &motg->inputs) ||
				!test_bit(ID, &motg->inputs)) {
			msm_otg_start_peripheral(otg, 0);
			otg->state = OTG_STATE_B_IDLE;
			msm_otg_reset(otg);
			/* re-run so B_IDLE evaluates the new inputs */
			schedule_work(w);
		}
		break;
	case OTG_STATE_A_HOST:
		dev_dbg(otg->dev, "OTG_STATE_A_HOST state\n");
		if (test_bit(ID, &motg->inputs)) {
			msm_otg_start_host(otg, 0);
			otg->state = OTG_STATE_B_IDLE;
			msm_otg_reset(otg);
			/* re-run so B_IDLE evaluates the new inputs */
			schedule_work(w);
		}
		break;
	default:
		break;
	}
}
656
/*
 * Shared USB interrupt handler. While in LPM the (level-triggered)
 * async PHY wakeup fires: disable the line and request a runtime
 * resume instead of touching the hardware. Otherwise translate OTGSC
 * Id/B-session status changes into state machine inputs and kick the
 * worker.
 */
static irqreturn_t msm_otg_irq(int irq, void *data)
{
	struct msm_otg *motg = data;
	struct otg_transceiver *otg = &motg->otg;
	u32 otgsc = 0;

	if (atomic_read(&motg->in_lpm)) {
		/* re-enabled in msm_otg_resume() via async_int */
		disable_irq_nosync(irq);
		motg->async_int = 1;
		pm_runtime_get(otg->dev);
		return IRQ_HANDLED;
	}

	otgsc = readl(USB_OTGSC);
	if (!(otgsc & (OTGSC_IDIS | OTGSC_BSVIS)))
		return IRQ_NONE;

	if ((otgsc & OTGSC_IDIS) && (otgsc & OTGSC_IDIE)) {
		if (otgsc & OTGSC_ID)
			set_bit(ID, &motg->inputs);
		else
			clear_bit(ID, &motg->inputs);
		dev_dbg(otg->dev, "ID set/clear\n");
		/* balanced by pm_runtime_put_sync() in the state machine */
		pm_runtime_get_noresume(otg->dev);
	} else if ((otgsc & OTGSC_BSVIS) && (otgsc & OTGSC_BSVIE)) {
		if (otgsc & OTGSC_BSV)
			set_bit(B_SESS_VLD, &motg->inputs);
		else
			clear_bit(B_SESS_VLD, &motg->inputs);
		dev_dbg(otg->dev, "BSV set/clear\n");
		pm_runtime_get_noresume(otg->dev);
	}

	/* write the status bits back to acknowledge the interrupt */
	writel(otgsc, USB_OTGSC);
	schedule_work(&motg->sm_work);
	return IRQ_HANDLED;
}
694
695static int msm_otg_mode_show(struct seq_file *s, void *unused)
696{
697 struct msm_otg *motg = s->private;
698 struct otg_transceiver *otg = &motg->otg;
699
700 switch (otg->state) {
701 case OTG_STATE_A_HOST:
702 seq_printf(s, "host\n");
703 break;
704 case OTG_STATE_B_PERIPHERAL:
705 seq_printf(s, "peripheral\n");
706 break;
707 default:
708 seq_printf(s, "none\n");
709 break;
710 }
711
712 return 0;
713}
714
/* debugfs open: bind the seq_file show routine to this msm_otg */
static int msm_otg_mode_open(struct inode *inode, struct file *file)
{
	return single_open(file, msm_otg_mode_show, inode->i_private);
}
719
/*
 * debugfs "mode" write: accepts "host", "peripheral" or "none" and
 * fakes the corresponding ID/B_SESS_VLD inputs, then kicks the state
 * machine. A requested mode is only honoured from states where the
 * transition makes sense; otherwise the write is silently ignored
 * (the byte count is still consumed).
 */
static ssize_t msm_otg_mode_write(struct file *file, const char __user *ubuf,
				size_t count, loff_t *ppos)
{
	struct msm_otg *motg = file->private_data;
	char buf[16];
	struct otg_transceiver *otg = &motg->otg;
	int status = count;
	enum usb_mode_type req_mode;

	/* zero-fill so the user string is always NUL terminated */
	memset(buf, 0x00, sizeof(buf));

	if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) {
		status = -EFAULT;
		goto out;
	}

	if (!strncmp(buf, "host", 4)) {
		req_mode = USB_HOST;
	} else if (!strncmp(buf, "peripheral", 10)) {
		req_mode = USB_PERIPHERAL;
	} else if (!strncmp(buf, "none", 4)) {
		req_mode = USB_NONE;
	} else {
		status = -EINVAL;
		goto out;
	}

	/* map the requested mode onto the state machine inputs */
	switch (req_mode) {
	case USB_NONE:
		switch (otg->state) {
		case OTG_STATE_A_HOST:
		case OTG_STATE_B_PERIPHERAL:
			set_bit(ID, &motg->inputs);
			clear_bit(B_SESS_VLD, &motg->inputs);
			break;
		default:
			goto out;
		}
		break;
	case USB_PERIPHERAL:
		switch (otg->state) {
		case OTG_STATE_B_IDLE:
		case OTG_STATE_A_HOST:
			set_bit(ID, &motg->inputs);
			set_bit(B_SESS_VLD, &motg->inputs);
			break;
		default:
			goto out;
		}
		break;
	case USB_HOST:
		switch (otg->state) {
		case OTG_STATE_B_IDLE:
		case OTG_STATE_B_PERIPHERAL:
			clear_bit(ID, &motg->inputs);
			break;
		default:
			goto out;
		}
		break;
	default:
		goto out;
	}

	/* reference is dropped by the state machine when it settles */
	pm_runtime_get_sync(otg->dev);
	schedule_work(&motg->sm_work);
out:
	return status;
}
789
790const struct file_operations msm_otg_mode_fops = {
791 .open = msm_otg_mode_open,
792 .read = seq_read,
793 .write = msm_otg_mode_write,
794 .llseek = seq_lseek,
795 .release = single_release,
796};
797
798static struct dentry *msm_otg_dbg_root;
799static struct dentry *msm_otg_dbg_mode;
800
801static int msm_otg_debugfs_init(struct msm_otg *motg)
802{
803 msm_otg_dbg_root = debugfs_create_dir("msm_otg", NULL);
804
805 if (!msm_otg_dbg_root || IS_ERR(msm_otg_dbg_root))
806 return -ENODEV;
807
808 msm_otg_dbg_mode = debugfs_create_file("mode", S_IRUGO | S_IWUSR,
809 msm_otg_dbg_root, motg, &msm_otg_mode_fops);
810 if (!msm_otg_dbg_mode) {
811 debugfs_remove(msm_otg_dbg_root);
812 msm_otg_dbg_root = NULL;
813 return -ENODEV;
814 }
815
816 return 0;
817}
818
/* Remove the debugfs entries; debugfs_remove() ignores NULL dentries */
static void msm_otg_debugfs_cleanup(void)
{
	debugfs_remove(msm_otg_dbg_mode);
	debugfs_remove(msm_otg_dbg_root);
}
824
825static int __init msm_otg_probe(struct platform_device *pdev)
826{
827 int ret = 0;
828 struct resource *res;
829 struct msm_otg *motg;
830 struct otg_transceiver *otg;
831
832 dev_info(&pdev->dev, "msm_otg probe\n");
833 if (!pdev->dev.platform_data) {
834 dev_err(&pdev->dev, "No platform data given. Bailing out\n");
835 return -ENODEV;
836 }
837
838 motg = kzalloc(sizeof(struct msm_otg), GFP_KERNEL);
839 if (!motg) {
840 dev_err(&pdev->dev, "unable to allocate msm_otg\n");
841 return -ENOMEM;
842 }
843
844 motg->pdata = pdev->dev.platform_data;
845 otg = &motg->otg;
846 otg->dev = &pdev->dev;
847
848 motg->phy_reset_clk = clk_get(&pdev->dev, "usb_phy_clk");
849 if (IS_ERR(motg->phy_reset_clk)) {
850 dev_err(&pdev->dev, "failed to get usb_phy_clk\n");
851 ret = PTR_ERR(motg->phy_reset_clk);
852 goto free_motg;
853 }
854
855 motg->clk = clk_get(&pdev->dev, "usb_hs_clk");
856 if (IS_ERR(motg->clk)) {
857 dev_err(&pdev->dev, "failed to get usb_hs_clk\n");
858 ret = PTR_ERR(motg->clk);
859 goto put_phy_reset_clk;
860 }
861
862 motg->pclk = clk_get(&pdev->dev, "usb_hs_pclk");
863 if (IS_ERR(motg->pclk)) {
864 dev_err(&pdev->dev, "failed to get usb_hs_pclk\n");
865 ret = PTR_ERR(motg->pclk);
866 goto put_clk;
867 }
868
869 /*
870 * USB core clock is not present on all MSM chips. This
871 * clock is introduced to remove the dependency on AXI
872 * bus frequency.
873 */
874 motg->core_clk = clk_get(&pdev->dev, "usb_hs_core_clk");
875 if (IS_ERR(motg->core_clk))
876 motg->core_clk = NULL;
877
878 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
879 if (!res) {
880 dev_err(&pdev->dev, "failed to get platform resource mem\n");
881 ret = -ENODEV;
882 goto put_core_clk;
883 }
884
885 motg->regs = ioremap(res->start, resource_size(res));
886 if (!motg->regs) {
887 dev_err(&pdev->dev, "ioremap failed\n");
888 ret = -ENOMEM;
889 goto put_core_clk;
890 }
891 dev_info(&pdev->dev, "OTG regs = %p\n", motg->regs);
892
893 motg->irq = platform_get_irq(pdev, 0);
894 if (!motg->irq) {
895 dev_err(&pdev->dev, "platform_get_irq failed\n");
896 ret = -ENODEV;
897 goto free_regs;
898 }
899
900 clk_enable(motg->clk);
901 clk_enable(motg->pclk);
902 if (motg->core_clk)
903 clk_enable(motg->core_clk);
904
905 writel(0, USB_USBINTR);
906 writel(0, USB_OTGSC);
907
908 INIT_WORK(&motg->sm_work, msm_otg_sm_work);
909 ret = request_irq(motg->irq, msm_otg_irq, IRQF_SHARED,
910 "msm_otg", motg);
911 if (ret) {
912 dev_err(&pdev->dev, "request irq failed\n");
913 goto disable_clks;
914 }
915
916 otg->init = msm_otg_reset;
917 otg->set_host = msm_otg_set_host;
918 otg->set_peripheral = msm_otg_set_peripheral;
919
920 otg->io_ops = &msm_otg_io_ops;
921
922 ret = otg_set_transceiver(&motg->otg);
923 if (ret) {
924 dev_err(&pdev->dev, "otg_set_transceiver failed\n");
925 goto free_irq;
926 }
927
928 platform_set_drvdata(pdev, motg);
929 device_init_wakeup(&pdev->dev, 1);
930
931 if (motg->pdata->mode == USB_OTG &&
932 motg->pdata->otg_control == OTG_USER_CONTROL) {
933 ret = msm_otg_debugfs_init(motg);
934 if (ret)
935 dev_dbg(&pdev->dev, "mode debugfs file is"
936 "not available\n");
937 }
938
939 pm_runtime_set_active(&pdev->dev);
940 pm_runtime_enable(&pdev->dev);
941
942 return 0;
943free_irq:
944 free_irq(motg->irq, motg);
945disable_clks:
946 clk_disable(motg->pclk);
947 clk_disable(motg->clk);
948free_regs:
949 iounmap(motg->regs);
950put_core_clk:
951 if (motg->core_clk)
952 clk_put(motg->core_clk);
953 clk_put(motg->pclk);
954put_clk:
955 clk_put(motg->clk);
956put_phy_reset_clk:
957 clk_put(motg->phy_reset_clk);
958free_motg:
959 kfree(motg);
960 return ret;
961}
962
/*
 * Driver removal. Refuses (-EBUSY) while a host or gadget is still
 * bound. Otherwise wakes the hardware, unregisters everything, parks
 * the PHY in low power mode and releases all resources.
 */
static int __devexit msm_otg_remove(struct platform_device *pdev)
{
	struct msm_otg *motg = platform_get_drvdata(pdev);
	struct otg_transceiver *otg = &motg->otg;
	int cnt = 0;

	if (otg->host || otg->gadget)
		return -EBUSY;

	msm_otg_debugfs_cleanup();
	cancel_work_sync(&motg->sm_work);

	/* the register writes below require the hardware to be awake */
	msm_otg_resume(motg);

	device_init_wakeup(&pdev->dev, 0);
	pm_runtime_disable(&pdev->dev);

	otg_set_transceiver(NULL);
	free_irq(motg->irq, motg);

	/*
	 * Put PHY in low power mode.
	 */
	ulpi_read(otg, 0x14);
	ulpi_write(otg, 0x08, 0x09);

	writel(readl(USB_PORTSC) | PORTSC_PHCD, USB_PORTSC);
	while (cnt < PHY_SUSPEND_TIMEOUT_USEC) {
		if (readl(USB_PORTSC) & PORTSC_PHCD)
			break;
		udelay(1);
		cnt++;
	}
	if (cnt >= PHY_SUSPEND_TIMEOUT_USEC)
		dev_err(otg->dev, "Unable to suspend PHY\n");

	clk_disable(motg->pclk);
	clk_disable(motg->clk);
	if (motg->core_clk)
		clk_disable(motg->core_clk);

	iounmap(motg->regs);
	pm_runtime_set_suspended(&pdev->dev);

	clk_put(motg->phy_reset_clk);
	clk_put(motg->pclk);
	clk_put(motg->clk);
	if (motg->core_clk)
		clk_put(motg->core_clk);

	kfree(motg);

	return 0;
}
1017
#ifdef CONFIG_PM_RUNTIME
/*
 * Runtime-PM idle: defer the actual suspend by one second and tell
 * the core not to suspend now (-EAGAIN).
 */
static int msm_otg_runtime_idle(struct device *dev)
{
	struct msm_otg *motg = dev_get_drvdata(dev);
	struct otg_transceiver *otg = &motg->otg;

	dev_dbg(dev, "OTG runtime idle\n");

	/*
	 * It is observed some times that a spurious interrupt
	 * comes when PHY is put into LPM immediately after PHY reset.
	 * This 1 sec delay also prevents entering into LPM immediately
	 * after asynchronous interrupt.
	 */
	if (otg->state != OTG_STATE_UNDEFINED)
		pm_schedule_suspend(dev, 1000);

	/* -EAGAIN: suspend was scheduled above, do not suspend now */
	return -EAGAIN;
}

/* Runtime-PM suspend: put PHY and link into low power mode */
static int msm_otg_runtime_suspend(struct device *dev)
{
	struct msm_otg *motg = dev_get_drvdata(dev);

	dev_dbg(dev, "OTG runtime suspend\n");
	return msm_otg_suspend(motg);
}

/* Runtime-PM resume: bring PHY and link out of low power mode */
static int msm_otg_runtime_resume(struct device *dev)
{
	struct msm_otg *motg = dev_get_drvdata(dev);

	dev_dbg(dev, "OTG runtime resume\n");
	return msm_otg_resume(motg);
}
#else
#define msm_otg_runtime_idle NULL
#define msm_otg_runtime_suspend NULL
#define msm_otg_runtime_resume NULL
#endif
1058
#ifdef CONFIG_PM
/* System suspend: shares the LPM entry path with runtime PM */
static int msm_otg_pm_suspend(struct device *dev)
{
	struct msm_otg *motg = dev_get_drvdata(dev);

	dev_dbg(dev, "OTG PM suspend\n");
	return msm_otg_suspend(motg);
}

/* System resume: exit LPM, then re-arm runtime PM as active */
static int msm_otg_pm_resume(struct device *dev)
{
	struct msm_otg *motg = dev_get_drvdata(dev);
	int ret;

	dev_dbg(dev, "OTG PM resume\n");

	ret = msm_otg_resume(motg);
	if (ret)
		return ret;

	/*
	 * Runtime PM Documentation recommends bringing the
	 * device to full powered state upon resume.
	 */
	pm_runtime_disable(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	return 0;
}
#else
#define msm_otg_pm_suspend NULL
#define msm_otg_pm_resume NULL
#endif
1093
/* System and runtime PM callbacks (runtime ops compile to NULL
 * stubs when CONFIG_PM_RUNTIME is not set) */
static const struct dev_pm_ops msm_otg_dev_pm_ops = {
	.runtime_suspend = msm_otg_runtime_suspend,
	.runtime_resume = msm_otg_runtime_resume,
	.runtime_idle = msm_otg_runtime_idle,
	.suspend = msm_otg_pm_suspend,
	.resume = msm_otg_pm_resume,
};
1101
/*
 * No .probe here: registration uses platform_driver_probe(), which
 * binds msm_otg_probe (marked __init) directly at init time.
 */
static struct platform_driver msm_otg_driver = {
	.remove = __devexit_p(msm_otg_remove),
	.driver = {
		.name = DRIVER_NAME,
		.owner = THIS_MODULE,
		.pm = &msm_otg_dev_pm_ops,
	},
};
1110
/*
 * platform_driver_probe() is used (instead of .probe) because the
 * device is known to exist at init time and the probe routine can
 * then live in __init memory.
 */
static int __init msm_otg_init(void)
{
	return platform_driver_probe(&msm_otg_driver, msm_otg_probe);
}

static void __exit msm_otg_exit(void)
{
	platform_driver_unregister(&msm_otg_driver);
}

module_init(msm_otg_init);
module_exit(msm_otg_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MSM USB transceiver driver");
diff --git a/drivers/usb/otg/twl4030-usb.c b/drivers/usb/otg/twl4030-usb.c
index d335f484fcd8..6ca505f333e4 100644
--- a/drivers/usb/otg/twl4030-usb.c
+++ b/drivers/usb/otg/twl4030-usb.c
@@ -678,7 +678,8 @@ static int __exit twl4030_usb_remove(struct platform_device *pdev)
678 /* disable complete OTG block */ 678 /* disable complete OTG block */
679 twl4030_usb_clear_bits(twl, POWER_CTRL, POWER_CTRL_OTG_ENAB); 679 twl4030_usb_clear_bits(twl, POWER_CTRL, POWER_CTRL_OTG_ENAB);
680 680
681 twl4030_phy_power(twl, 0); 681 if (!twl->asleep)
682 twl4030_phy_power(twl, 0);
682 regulator_put(twl->usb1v5); 683 regulator_put(twl->usb1v5);
683 regulator_put(twl->usb1v8); 684 regulator_put(twl->usb1v8);
684 regulator_put(twl->usb3v1); 685 regulator_put(twl->usb3v1);
diff --git a/drivers/usb/otg/twl6030-usb.c b/drivers/usb/otg/twl6030-usb.c
new file mode 100644
index 000000000000..28f770103640
--- /dev/null
+++ b/drivers/usb/otg/twl6030-usb.c
@@ -0,0 +1,493 @@
1/*
2 * twl6030_usb - TWL6030 USB transceiver, talking to OMAP OTG driver.
3 *
4 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * Author: Hema HK <hemahk@ti.com>
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 *
21 */
22
23#include <linux/module.h>
24#include <linux/init.h>
25#include <linux/interrupt.h>
26#include <linux/platform_device.h>
27#include <linux/io.h>
28#include <linux/usb/otg.h>
29#include <linux/i2c/twl.h>
30#include <linux/regulator/consumer.h>
31#include <linux/err.h>
32#include <linux/notifier.h>
33#include <linux/slab.h>
34
35/* usb register definitions */
36#define USB_VENDOR_ID_LSB 0x00
37#define USB_VENDOR_ID_MSB 0x01
38#define USB_PRODUCT_ID_LSB 0x02
39#define USB_PRODUCT_ID_MSB 0x03
40#define USB_VBUS_CTRL_SET 0x04
41#define USB_VBUS_CTRL_CLR 0x05
42#define USB_ID_CTRL_SET 0x06
43#define USB_ID_CTRL_CLR 0x07
44#define USB_VBUS_INT_SRC 0x08
45#define USB_VBUS_INT_LATCH_SET 0x09
46#define USB_VBUS_INT_LATCH_CLR 0x0A
47#define USB_VBUS_INT_EN_LO_SET 0x0B
48#define USB_VBUS_INT_EN_LO_CLR 0x0C
49#define USB_VBUS_INT_EN_HI_SET 0x0D
50#define USB_VBUS_INT_EN_HI_CLR 0x0E
51#define USB_ID_INT_SRC 0x0F
52#define USB_ID_INT_LATCH_SET 0x10
53#define USB_ID_INT_LATCH_CLR 0x11
54
55#define USB_ID_INT_EN_LO_SET 0x12
56#define USB_ID_INT_EN_LO_CLR 0x13
57#define USB_ID_INT_EN_HI_SET 0x14
58#define USB_ID_INT_EN_HI_CLR 0x15
59#define USB_OTG_ADP_CTRL 0x16
60#define USB_OTG_ADP_HIGH 0x17
61#define USB_OTG_ADP_LOW 0x18
62#define USB_OTG_ADP_RISE 0x19
63#define USB_OTG_REVISION 0x1A
64
65/* to be moved to LDO */
66#define TWL6030_MISC2 0xE5
67#define TWL6030_CFG_LDO_PD2 0xF5
68#define TWL6030_BACKUP_REG 0xFA
69
70#define STS_HW_CONDITIONS 0x21
71
72/* In module TWL6030_MODULE_PM_MASTER */
73#define STS_HW_CONDITIONS 0x21
74#define STS_USB_ID BIT(2)
75
76/* In module TWL6030_MODULE_PM_RECEIVER */
77#define VUSB_CFG_TRANS 0x71
78#define VUSB_CFG_STATE 0x72
79#define VUSB_CFG_VOLTAGE 0x73
80
81/* in module TWL6030_MODULE_MAIN_CHARGE */
82
83#define CHARGERUSB_CTRL1 0x8
84
85#define CONTROLLER_STAT1 0x03
86#define VBUS_DET BIT(2)
87
88struct twl6030_usb {
89 struct otg_transceiver otg;
90 struct device *dev;
91
92 /* for vbus reporting with irqs disabled */
93 spinlock_t lock;
94
95 struct regulator *usb3v3;
96
97 int irq1;
98 int irq2;
99 u8 linkstat;
100 u8 asleep;
101 bool irq_enabled;
102};
103
104#define xceiv_to_twl(x) container_of((x), struct twl6030_usb, otg);
105
106/*-------------------------------------------------------------------------*/
107
108static inline int twl6030_writeb(struct twl6030_usb *twl, u8 module,
109 u8 data, u8 address)
110{
111 int ret = 0;
112
113 ret = twl_i2c_write_u8(module, data, address);
114 if (ret < 0)
115 dev_err(twl->dev,
116 "Write[0x%x] Error %d\n", address, ret);
117 return ret;
118}
119
120static inline u8 twl6030_readb(struct twl6030_usb *twl, u8 module, u8 address)
121{
122 u8 data, ret = 0;
123
124 ret = twl_i2c_read_u8(module, &data, address);
125 if (ret >= 0)
126 ret = data;
127 else
128 dev_err(twl->dev,
129 "readb[0x%x,0x%x] Error %d\n",
130 module, address, ret);
131 return ret;
132}
133
134/*-------------------------------------------------------------------------*/
135static int twl6030_set_phy_clk(struct otg_transceiver *x, int on)
136{
137 struct twl6030_usb *twl;
138 struct device *dev;
139 struct twl4030_usb_data *pdata;
140
141 twl = xceiv_to_twl(x);
142 dev = twl->dev;
143 pdata = dev->platform_data;
144
145 pdata->phy_set_clock(twl->dev, on);
146
147 return 0;
148}
149
150static int twl6030_phy_init(struct otg_transceiver *x)
151{
152 u8 hw_state;
153 struct twl6030_usb *twl;
154 struct device *dev;
155 struct twl4030_usb_data *pdata;
156
157 twl = xceiv_to_twl(x);
158 dev = twl->dev;
159 pdata = dev->platform_data;
160
161 regulator_enable(twl->usb3v3);
162
163 hw_state = twl6030_readb(twl, TWL6030_MODULE_ID0, STS_HW_CONDITIONS);
164
165 if (hw_state & STS_USB_ID)
166 pdata->phy_power(twl->dev, 1, 1);
167 else
168 pdata->phy_power(twl->dev, 0, 1);
169
170 return 0;
171}
172
173static void twl6030_phy_shutdown(struct otg_transceiver *x)
174{
175 struct twl6030_usb *twl;
176 struct device *dev;
177 struct twl4030_usb_data *pdata;
178
179 twl = xceiv_to_twl(x);
180 dev = twl->dev;
181 pdata = dev->platform_data;
182 pdata->phy_power(twl->dev, 0, 0);
183 regulator_disable(twl->usb3v3);
184}
185
186static int twl6030_usb_ldo_init(struct twl6030_usb *twl)
187{
188
189 /* Set to OTG_REV 1.3 and turn on the ID_WAKEUP_COMP */
190 twl6030_writeb(twl, TWL6030_MODULE_ID0 , 0x1, TWL6030_BACKUP_REG);
191
192 /* Program CFG_LDO_PD2 register and set VUSB bit */
193 twl6030_writeb(twl, TWL6030_MODULE_ID0 , 0x1, TWL6030_CFG_LDO_PD2);
194
195 /* Program MISC2 register and set bit VUSB_IN_VBAT */
196 twl6030_writeb(twl, TWL6030_MODULE_ID0 , 0x10, TWL6030_MISC2);
197
198 twl->usb3v3 = regulator_get(twl->dev, "vusb");
199 if (IS_ERR(twl->usb3v3))
200 return -ENODEV;
201
202 regulator_enable(twl->usb3v3);
203
204 /* Program the VUSB_CFG_TRANS for ACTIVE state. */
205 twl6030_writeb(twl, TWL_MODULE_PM_RECEIVER, 0x3F,
206 VUSB_CFG_TRANS);
207
208 /* Program the VUSB_CFG_STATE register to ON on all groups. */
209 twl6030_writeb(twl, TWL_MODULE_PM_RECEIVER, 0xE1,
210 VUSB_CFG_STATE);
211
212 /* Program the USB_VBUS_CTRL_SET and set VBUS_ACT_COMP bit */
213 twl6030_writeb(twl, TWL_MODULE_USB, 0x4, USB_VBUS_CTRL_SET);
214
215 /*
216 * Program the USB_ID_CTRL_SET register to enable GND drive
217 * and the ID comparators
218 */
219 twl6030_writeb(twl, TWL_MODULE_USB, 0x14, USB_ID_CTRL_SET);
220
221 return 0;
222}
223
224static ssize_t twl6030_usb_vbus_show(struct device *dev,
225 struct device_attribute *attr, char *buf)
226{
227 struct twl6030_usb *twl = dev_get_drvdata(dev);
228 unsigned long flags;
229 int ret = -EINVAL;
230
231 spin_lock_irqsave(&twl->lock, flags);
232
233 switch (twl->linkstat) {
234 case USB_EVENT_VBUS:
235 ret = snprintf(buf, PAGE_SIZE, "vbus\n");
236 break;
237 case USB_EVENT_ID:
238 ret = snprintf(buf, PAGE_SIZE, "id\n");
239 break;
240 case USB_EVENT_NONE:
241 ret = snprintf(buf, PAGE_SIZE, "none\n");
242 break;
243 default:
244 ret = snprintf(buf, PAGE_SIZE, "UNKNOWN\n");
245 }
246 spin_unlock_irqrestore(&twl->lock, flags);
247
248 return ret;
249}
250static DEVICE_ATTR(vbus, 0444, twl6030_usb_vbus_show, NULL);
251
252static irqreturn_t twl6030_usb_irq(int irq, void *_twl)
253{
254 struct twl6030_usb *twl = _twl;
255 int status;
256 u8 vbus_state, hw_state;
257
258 hw_state = twl6030_readb(twl, TWL6030_MODULE_ID0, STS_HW_CONDITIONS);
259
260 vbus_state = twl6030_readb(twl, TWL_MODULE_MAIN_CHARGE,
261 CONTROLLER_STAT1);
262 if (!(hw_state & STS_USB_ID)) {
263 if (vbus_state & VBUS_DET) {
264 status = USB_EVENT_VBUS;
265 twl->otg.default_a = false;
266 twl->otg.state = OTG_STATE_B_IDLE;
267 } else {
268 status = USB_EVENT_NONE;
269 }
270 if (status >= 0) {
271 twl->linkstat = status;
272 blocking_notifier_call_chain(&twl->otg.notifier,
273 status, twl->otg.gadget);
274 }
275 }
276 sysfs_notify(&twl->dev->kobj, NULL, "vbus");
277
278 return IRQ_HANDLED;
279}
280
281static irqreturn_t twl6030_usbotg_irq(int irq, void *_twl)
282{
283 struct twl6030_usb *twl = _twl;
284 int status = USB_EVENT_NONE;
285 u8 hw_state;
286
287 hw_state = twl6030_readb(twl, TWL6030_MODULE_ID0, STS_HW_CONDITIONS);
288
289 if (hw_state & STS_USB_ID) {
290
291 twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_CLR, 0x1);
292 twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_SET,
293 0x10);
294 status = USB_EVENT_ID;
295 twl->otg.default_a = true;
296 twl->otg.state = OTG_STATE_A_IDLE;
297 blocking_notifier_call_chain(&twl->otg.notifier, status,
298 twl->otg.gadget);
299 } else {
300 twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_CLR,
301 0x10);
302 twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_SET,
303 0x1);
304 }
305 twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_LATCH_CLR, status);
306 twl->linkstat = status;
307
308 return IRQ_HANDLED;
309}
310
311static int twl6030_set_peripheral(struct otg_transceiver *x,
312 struct usb_gadget *gadget)
313{
314 struct twl6030_usb *twl;
315
316 if (!x)
317 return -ENODEV;
318
319 twl = xceiv_to_twl(x);
320 twl->otg.gadget = gadget;
321 if (!gadget)
322 twl->otg.state = OTG_STATE_UNDEFINED;
323
324 return 0;
325}
326
327static int twl6030_enable_irq(struct otg_transceiver *x)
328{
329 struct twl6030_usb *twl = xceiv_to_twl(x);
330
331 twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_SET, 0x1);
332 twl6030_interrupt_unmask(0x05, REG_INT_MSK_LINE_C);
333 twl6030_interrupt_unmask(0x05, REG_INT_MSK_STS_C);
334
335 twl6030_interrupt_unmask(TWL6030_CHARGER_CTRL_INT_MASK,
336 REG_INT_MSK_LINE_C);
337 twl6030_interrupt_unmask(TWL6030_CHARGER_CTRL_INT_MASK,
338 REG_INT_MSK_STS_C);
339 twl6030_usb_irq(twl->irq2, twl);
340 twl6030_usbotg_irq(twl->irq1, twl);
341
342 return 0;
343}
344
345static int twl6030_set_vbus(struct otg_transceiver *x, bool enabled)
346{
347 struct twl6030_usb *twl = xceiv_to_twl(x);
348
349 /*
350 * Start driving VBUS. Set OPA_MODE bit in CHARGERUSB_CTRL1
351 * register. This enables boost mode.
352 */
353 if (enabled)
354 twl6030_writeb(twl, TWL_MODULE_MAIN_CHARGE , 0x40,
355 CHARGERUSB_CTRL1);
356 else
357 twl6030_writeb(twl, TWL_MODULE_MAIN_CHARGE , 0x00,
358 CHARGERUSB_CTRL1);
359 return 0;
360}
361
362static int twl6030_set_host(struct otg_transceiver *x, struct usb_bus *host)
363{
364 struct twl6030_usb *twl;
365
366 if (!x)
367 return -ENODEV;
368
369 twl = xceiv_to_twl(x);
370 twl->otg.host = host;
371 if (!host)
372 twl->otg.state = OTG_STATE_UNDEFINED;
373 return 0;
374}
375
376static int __devinit twl6030_usb_probe(struct platform_device *pdev)
377{
378 struct twl6030_usb *twl;
379 int status, err;
380 struct twl4030_usb_data *pdata;
381 struct device *dev = &pdev->dev;
382 pdata = dev->platform_data;
383
384 twl = kzalloc(sizeof *twl, GFP_KERNEL);
385 if (!twl)
386 return -ENOMEM;
387
388 twl->dev = &pdev->dev;
389 twl->irq1 = platform_get_irq(pdev, 0);
390 twl->irq2 = platform_get_irq(pdev, 1);
391 twl->otg.dev = twl->dev;
392 twl->otg.label = "twl6030";
393 twl->otg.set_host = twl6030_set_host;
394 twl->otg.set_peripheral = twl6030_set_peripheral;
395 twl->otg.set_vbus = twl6030_set_vbus;
396 twl->otg.init = twl6030_phy_init;
397 twl->otg.shutdown = twl6030_phy_shutdown;
398
399 /* init spinlock for workqueue */
400 spin_lock_init(&twl->lock);
401
402 err = twl6030_usb_ldo_init(twl);
403 if (err) {
404 dev_err(&pdev->dev, "ldo init failed\n");
405 kfree(twl);
406 return err;
407 }
408 otg_set_transceiver(&twl->otg);
409
410 platform_set_drvdata(pdev, twl);
411 if (device_create_file(&pdev->dev, &dev_attr_vbus))
412 dev_warn(&pdev->dev, "could not create sysfs file\n");
413
414 BLOCKING_INIT_NOTIFIER_HEAD(&twl->otg.notifier);
415
416 twl->irq_enabled = true;
417 status = request_threaded_irq(twl->irq1, NULL, twl6030_usbotg_irq,
418 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
419 "twl6030_usb", twl);
420 if (status < 0) {
421 dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
422 twl->irq1, status);
423 device_remove_file(twl->dev, &dev_attr_vbus);
424 kfree(twl);
425 return status;
426 }
427
428 status = request_threaded_irq(twl->irq2, NULL, twl6030_usb_irq,
429 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
430 "twl6030_usb", twl);
431 if (status < 0) {
432 dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
433 twl->irq2, status);
434 free_irq(twl->irq1, twl);
435 device_remove_file(twl->dev, &dev_attr_vbus);
436 kfree(twl);
437 return status;
438 }
439
440 pdata->phy_init(dev);
441 twl6030_enable_irq(&twl->otg);
442 dev_info(&pdev->dev, "Initialized TWL6030 USB module\n");
443
444 return 0;
445}
446
447static int __exit twl6030_usb_remove(struct platform_device *pdev)
448{
449 struct twl6030_usb *twl = platform_get_drvdata(pdev);
450
451 struct twl4030_usb_data *pdata;
452 struct device *dev = &pdev->dev;
453 pdata = dev->platform_data;
454
455 twl6030_interrupt_mask(TWL6030_USBOTG_INT_MASK,
456 REG_INT_MSK_LINE_C);
457 twl6030_interrupt_mask(TWL6030_USBOTG_INT_MASK,
458 REG_INT_MSK_STS_C);
459 free_irq(twl->irq1, twl);
460 free_irq(twl->irq2, twl);
461 regulator_put(twl->usb3v3);
462 pdata->phy_exit(twl->dev);
463 device_remove_file(twl->dev, &dev_attr_vbus);
464 kfree(twl);
465
466 return 0;
467}
468
469static struct platform_driver twl6030_usb_driver = {
470 .probe = twl6030_usb_probe,
471 .remove = __exit_p(twl6030_usb_remove),
472 .driver = {
473 .name = "twl6030_usb",
474 .owner = THIS_MODULE,
475 },
476};
477
478static int __init twl6030_usb_init(void)
479{
480 return platform_driver_register(&twl6030_usb_driver);
481}
482subsys_initcall(twl6030_usb_init);
483
484static void __exit twl6030_usb_exit(void)
485{
486 platform_driver_unregister(&twl6030_usb_driver);
487}
488module_exit(twl6030_usb_exit);
489
490MODULE_ALIAS("platform:twl6030_usb");
491MODULE_AUTHOR("Hema HK <hemahk@ti.com>");
492MODULE_DESCRIPTION("TWL6030 USB transceiver driver");
493MODULE_LICENSE("GPL");
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 2dec50013528..a2668d089260 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -75,6 +75,7 @@ struct ftdi_private {
75 unsigned long last_dtr_rts; /* saved modem control outputs */ 75 unsigned long last_dtr_rts; /* saved modem control outputs */
76 wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */ 76 wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */
77 char prev_status, diff_status; /* Used for TIOCMIWAIT */ 77 char prev_status, diff_status; /* Used for TIOCMIWAIT */
78 char transmit_empty; /* If transmitter is empty or not */
78 struct usb_serial_port *port; 79 struct usb_serial_port *port;
79 __u16 interface; /* FT2232C, FT2232H or FT4232H port interface 80 __u16 interface; /* FT2232C, FT2232H or FT4232H port interface
80 (0 for FT232/245) */ 81 (0 for FT232/245) */
@@ -1323,6 +1324,23 @@ check_and_exit:
1323 return 0; 1324 return 0;
1324} 1325}
1325 1326
1327static int get_lsr_info(struct usb_serial_port *port,
1328 struct serial_struct __user *retinfo)
1329{
1330 struct ftdi_private *priv = usb_get_serial_port_data(port);
1331 unsigned int result = 0;
1332
1333 if (!retinfo)
1334 return -EFAULT;
1335
1336 if (priv->transmit_empty)
1337 result = TIOCSER_TEMT;
1338
1339 if (copy_to_user(retinfo, &result, sizeof(unsigned int)))
1340 return -EFAULT;
1341 return 0;
1342}
1343
1326 1344
1327/* Determine type of FTDI chip based on USB config and descriptor. */ 1345/* Determine type of FTDI chip based on USB config and descriptor. */
1328static void ftdi_determine_type(struct usb_serial_port *port) 1346static void ftdi_determine_type(struct usb_serial_port *port)
@@ -1872,6 +1890,12 @@ static int ftdi_process_packet(struct tty_struct *tty,
1872 tty_insert_flip_char(tty, 0, TTY_OVERRUN); 1890 tty_insert_flip_char(tty, 0, TTY_OVERRUN);
1873 } 1891 }
1874 1892
1893 /* save if the transmitter is empty or not */
1894 if (packet[1] & FTDI_RS_TEMT)
1895 priv->transmit_empty = 1;
1896 else
1897 priv->transmit_empty = 0;
1898
1875 len -= 2; 1899 len -= 2;
1876 if (!len) 1900 if (!len)
1877 return 0; /* status only */ 1901 return 0; /* status only */
@@ -2235,6 +2259,9 @@ static int ftdi_ioctl(struct tty_struct *tty, struct file *file,
2235 } 2259 }
2236 } 2260 }
2237 return 0; 2261 return 0;
2262 case TIOCSERGETLSR:
2263 return get_lsr_info(port, (struct serial_struct __user *)arg);
2264 break;
2238 default: 2265 default:
2239 break; 2266 break;
2240 } 2267 }
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index ef2977d3a613..cdfb1868caef 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -989,6 +989,7 @@ static struct usb_serial_driver option_1port_device = {
989 .set_termios = usb_wwan_set_termios, 989 .set_termios = usb_wwan_set_termios,
990 .tiocmget = usb_wwan_tiocmget, 990 .tiocmget = usb_wwan_tiocmget,
991 .tiocmset = usb_wwan_tiocmset, 991 .tiocmset = usb_wwan_tiocmset,
992 .ioctl = usb_wwan_ioctl,
992 .attach = usb_wwan_startup, 993 .attach = usb_wwan_startup,
993 .disconnect = usb_wwan_disconnect, 994 .disconnect = usb_wwan_disconnect,
994 .release = usb_wwan_release, 995 .release = usb_wwan_release,
diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c
index f5312dd3331b..8359ec798959 100644
--- a/drivers/usb/serial/ssu100.c
+++ b/drivers/usb/serial/ssu100.c
@@ -79,7 +79,6 @@ struct ssu100_port_private {
79 u8 shadowLSR; 79 u8 shadowLSR;
80 u8 shadowMSR; 80 u8 shadowMSR;
81 wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */ 81 wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */
82 unsigned short max_packet_size;
83 struct async_icount icount; 82 struct async_icount icount;
84}; 83};
85 84
@@ -464,36 +463,6 @@ static int ssu100_ioctl(struct tty_struct *tty, struct file *file,
464 return -ENOIOCTLCMD; 463 return -ENOIOCTLCMD;
465} 464}
466 465
467static void ssu100_set_max_packet_size(struct usb_serial_port *port)
468{
469 struct ssu100_port_private *priv = usb_get_serial_port_data(port);
470 struct usb_serial *serial = port->serial;
471 struct usb_device *udev = serial->dev;
472
473 struct usb_interface *interface = serial->interface;
474 struct usb_endpoint_descriptor *ep_desc = &interface->cur_altsetting->endpoint[1].desc;
475
476 unsigned num_endpoints;
477 int i;
478 unsigned long flags;
479
480 num_endpoints = interface->cur_altsetting->desc.bNumEndpoints;
481 dev_info(&udev->dev, "Number of endpoints %d\n", num_endpoints);
482
483 for (i = 0; i < num_endpoints; i++) {
484 dev_info(&udev->dev, "Endpoint %d MaxPacketSize %d\n", i+1,
485 interface->cur_altsetting->endpoint[i].desc.wMaxPacketSize);
486 ep_desc = &interface->cur_altsetting->endpoint[i].desc;
487 }
488
489 /* set max packet size based on descriptor */
490 spin_lock_irqsave(&priv->status_lock, flags);
491 priv->max_packet_size = ep_desc->wMaxPacketSize;
492 spin_unlock_irqrestore(&priv->status_lock, flags);
493
494 dev_info(&udev->dev, "Setting MaxPacketSize %d\n", priv->max_packet_size);
495}
496
497static int ssu100_attach(struct usb_serial *serial) 466static int ssu100_attach(struct usb_serial *serial)
498{ 467{
499 struct ssu100_port_private *priv; 468 struct ssu100_port_private *priv;
@@ -511,7 +480,6 @@ static int ssu100_attach(struct usb_serial *serial)
511 spin_lock_init(&priv->status_lock); 480 spin_lock_init(&priv->status_lock);
512 init_waitqueue_head(&priv->delta_msr_wait); 481 init_waitqueue_head(&priv->delta_msr_wait);
513 usb_set_serial_port_data(port, priv); 482 usb_set_serial_port_data(port, priv);
514 ssu100_set_max_packet_size(port);
515 483
516 return ssu100_initdevice(serial->dev); 484 return ssu100_initdevice(serial->dev);
517} 485}
@@ -641,13 +609,14 @@ static void ssu100_update_lsr(struct usb_serial_port *port, u8 lsr,
641 609
642} 610}
643 611
644static int ssu100_process_packet(struct tty_struct *tty, 612static int ssu100_process_packet(struct urb *urb,
645 struct usb_serial_port *port, 613 struct tty_struct *tty)
646 struct ssu100_port_private *priv,
647 char *packet, int len)
648{ 614{
649 int i; 615 struct usb_serial_port *port = urb->context;
616 char *packet = (char *)urb->transfer_buffer;
650 char flag = TTY_NORMAL; 617 char flag = TTY_NORMAL;
618 u32 len = urb->actual_length;
619 int i;
651 char *ch; 620 char *ch;
652 621
653 dbg("%s - port %d", __func__, port->number); 622 dbg("%s - port %d", __func__, port->number);
@@ -685,12 +654,8 @@ static int ssu100_process_packet(struct tty_struct *tty,
685static void ssu100_process_read_urb(struct urb *urb) 654static void ssu100_process_read_urb(struct urb *urb)
686{ 655{
687 struct usb_serial_port *port = urb->context; 656 struct usb_serial_port *port = urb->context;
688 struct ssu100_port_private *priv = usb_get_serial_port_data(port);
689 char *data = (char *)urb->transfer_buffer;
690 struct tty_struct *tty; 657 struct tty_struct *tty;
691 int count = 0; 658 int count;
692 int i;
693 int len;
694 659
695 dbg("%s", __func__); 660 dbg("%s", __func__);
696 661
@@ -698,10 +663,7 @@ static void ssu100_process_read_urb(struct urb *urb)
698 if (!tty) 663 if (!tty)
699 return; 664 return;
700 665
701 for (i = 0; i < urb->actual_length; i += priv->max_packet_size) { 666 count = ssu100_process_packet(urb, tty);
702 len = min_t(int, urb->actual_length - i, priv->max_packet_size);
703 count += ssu100_process_packet(tty, port, priv, &data[i], len);
704 }
705 667
706 if (count) 668 if (count)
707 tty_flip_buffer_push(tty); 669 tty_flip_buffer_push(tty);
@@ -717,8 +679,6 @@ static struct usb_serial_driver ssu100_device = {
717 .id_table = id_table, 679 .id_table = id_table,
718 .usb_driver = &ssu100_driver, 680 .usb_driver = &ssu100_driver,
719 .num_ports = 1, 681 .num_ports = 1,
720 .bulk_in_size = 256,
721 .bulk_out_size = 256,
722 .open = ssu100_open, 682 .open = ssu100_open,
723 .close = ssu100_close, 683 .close = ssu100_close,
724 .attach = ssu100_attach, 684 .attach = ssu100_attach,
diff --git a/drivers/usb/serial/usb-wwan.h b/drivers/usb/serial/usb-wwan.h
index 2be298a1305b..3ab77c5d9819 100644
--- a/drivers/usb/serial/usb-wwan.h
+++ b/drivers/usb/serial/usb-wwan.h
@@ -18,6 +18,8 @@ extern void usb_wwan_set_termios(struct tty_struct *tty,
18extern int usb_wwan_tiocmget(struct tty_struct *tty, struct file *file); 18extern int usb_wwan_tiocmget(struct tty_struct *tty, struct file *file);
19extern int usb_wwan_tiocmset(struct tty_struct *tty, struct file *file, 19extern int usb_wwan_tiocmset(struct tty_struct *tty, struct file *file,
20 unsigned int set, unsigned int clear); 20 unsigned int set, unsigned int clear);
21extern int usb_wwan_ioctl(struct tty_struct *tty, struct file *file,
22 unsigned int cmd, unsigned long arg);
21extern int usb_wwan_send_setup(struct usb_serial_port *port); 23extern int usb_wwan_send_setup(struct usb_serial_port *port);
22extern int usb_wwan_write(struct tty_struct *tty, struct usb_serial_port *port, 24extern int usb_wwan_write(struct tty_struct *tty, struct usb_serial_port *port,
23 const unsigned char *buf, int count); 25 const unsigned char *buf, int count);
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index fbc946797801..b004b2a485c3 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -31,8 +31,10 @@
31#include <linux/tty_flip.h> 31#include <linux/tty_flip.h>
32#include <linux/module.h> 32#include <linux/module.h>
33#include <linux/bitops.h> 33#include <linux/bitops.h>
34#include <linux/uaccess.h>
34#include <linux/usb.h> 35#include <linux/usb.h>
35#include <linux/usb/serial.h> 36#include <linux/usb/serial.h>
37#include <linux/serial.h>
36#include "usb-wwan.h" 38#include "usb-wwan.h"
37 39
38static int debug; 40static int debug;
@@ -123,6 +125,83 @@ int usb_wwan_tiocmset(struct tty_struct *tty, struct file *file,
123} 125}
124EXPORT_SYMBOL(usb_wwan_tiocmset); 126EXPORT_SYMBOL(usb_wwan_tiocmset);
125 127
128static int get_serial_info(struct usb_serial_port *port,
129 struct serial_struct __user *retinfo)
130{
131 struct serial_struct tmp;
132
133 if (!retinfo)
134 return -EFAULT;
135
136 memset(&tmp, 0, sizeof(tmp));
137 tmp.line = port->serial->minor;
138 tmp.port = port->number;
139 tmp.baud_base = tty_get_baud_rate(port->port.tty);
140 tmp.close_delay = port->port.close_delay / 10;
141 tmp.closing_wait = port->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
142 ASYNC_CLOSING_WAIT_NONE :
143 port->port.closing_wait / 10;
144
145 if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
146 return -EFAULT;
147 return 0;
148}
149
150static int set_serial_info(struct usb_serial_port *port,
151 struct serial_struct __user *newinfo)
152{
153 struct serial_struct new_serial;
154 unsigned int closing_wait, close_delay;
155 int retval = 0;
156
157 if (copy_from_user(&new_serial, newinfo, sizeof(new_serial)))
158 return -EFAULT;
159
160 close_delay = new_serial.close_delay * 10;
161 closing_wait = new_serial.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
162 ASYNC_CLOSING_WAIT_NONE : new_serial.closing_wait * 10;
163
164 mutex_lock(&port->port.mutex);
165
166 if (!capable(CAP_SYS_ADMIN)) {
167 if ((close_delay != port->port.close_delay) ||
168 (closing_wait != port->port.closing_wait))
169 retval = -EPERM;
170 else
171 retval = -EOPNOTSUPP;
172 } else {
173 port->port.close_delay = close_delay;
174 port->port.closing_wait = closing_wait;
175 }
176
177 mutex_unlock(&port->port.mutex);
178 return retval;
179}
180
181int usb_wwan_ioctl(struct tty_struct *tty, struct file *file,
182 unsigned int cmd, unsigned long arg)
183{
184 struct usb_serial_port *port = tty->driver_data;
185
186 dbg("%s cmd 0x%04x", __func__, cmd);
187
188 switch (cmd) {
189 case TIOCGSERIAL:
190 return get_serial_info(port,
191 (struct serial_struct __user *) arg);
192 case TIOCSSERIAL:
193 return set_serial_info(port,
194 (struct serial_struct __user *) arg);
195 default:
196 break;
197 }
198
199 dbg("%s arg not supported", __func__);
200
201 return -ENOIOCTLCMD;
202}
203EXPORT_SYMBOL(usb_wwan_ioctl);
204
126/* Write */ 205/* Write */
127int usb_wwan_write(struct tty_struct *tty, struct usb_serial_port *port, 206int usb_wwan_write(struct tty_struct *tty, struct usb_serial_port *port,
128 const unsigned char *buf, int count) 207 const unsigned char *buf, int count)
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 339fac3949df..23f0dd9c36d4 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -49,14 +49,17 @@ struct command_iu {
49 __u8 cdb[16]; /* XXX: Overflow-checking tools may misunderstand */ 49 __u8 cdb[16]; /* XXX: Overflow-checking tools may misunderstand */
50}; 50};
51 51
52/*
53 * Also used for the Read Ready and Write Ready IUs since they have the
54 * same first four bytes
55 */
52struct sense_iu { 56struct sense_iu {
53 __u8 iu_id; 57 __u8 iu_id;
54 __u8 rsvd1; 58 __u8 rsvd1;
55 __be16 tag; 59 __be16 tag;
56 __be16 status_qual; 60 __be16 status_qual;
57 __u8 status; 61 __u8 status;
58 __u8 service_response; 62 __u8 rsvd7[7];
59 __u8 rsvd8[6];
60 __be16 len; 63 __be16 len;
61 __u8 sense[SCSI_SENSE_BUFFERSIZE]; 64 __u8 sense[SCSI_SENSE_BUFFERSIZE];
62}; 65};
@@ -97,8 +100,8 @@ struct uas_dev_info {
97}; 100};
98 101
99enum { 102enum {
100 ALLOC_SENSE_URB = (1 << 0), 103 ALLOC_STATUS_URB = (1 << 0),
101 SUBMIT_SENSE_URB = (1 << 1), 104 SUBMIT_STATUS_URB = (1 << 1),
102 ALLOC_DATA_IN_URB = (1 << 2), 105 ALLOC_DATA_IN_URB = (1 << 2),
103 SUBMIT_DATA_IN_URB = (1 << 3), 106 SUBMIT_DATA_IN_URB = (1 << 3),
104 ALLOC_DATA_OUT_URB = (1 << 4), 107 ALLOC_DATA_OUT_URB = (1 << 4),
@@ -112,7 +115,7 @@ struct uas_cmd_info {
112 unsigned int state; 115 unsigned int state;
113 unsigned int stream; 116 unsigned int stream;
114 struct urb *cmd_urb; 117 struct urb *cmd_urb;
115 struct urb *sense_urb; 118 struct urb *status_urb;
116 struct urb *data_in_urb; 119 struct urb *data_in_urb;
117 struct urb *data_out_urb; 120 struct urb *data_out_urb;
118 struct list_head list; 121 struct list_head list;
@@ -138,7 +141,7 @@ static void uas_do_work(struct work_struct *work)
138 struct scsi_pointer *scp = (void *)cmdinfo; 141 struct scsi_pointer *scp = (void *)cmdinfo;
139 struct scsi_cmnd *cmnd = container_of(scp, 142 struct scsi_cmnd *cmnd = container_of(scp,
140 struct scsi_cmnd, SCp); 143 struct scsi_cmnd, SCp);
141 uas_submit_urbs(cmnd, cmnd->device->hostdata, GFP_KERNEL); 144 uas_submit_urbs(cmnd, cmnd->device->hostdata, GFP_NOIO);
142 } 145 }
143} 146}
144 147
@@ -204,7 +207,7 @@ static void uas_xfer_data(struct urb *urb, struct scsi_cmnd *cmnd,
204 struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp; 207 struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
205 int err; 208 int err;
206 209
207 cmdinfo->state = direction | SUBMIT_SENSE_URB; 210 cmdinfo->state = direction | SUBMIT_STATUS_URB;
208 err = uas_submit_urbs(cmnd, cmnd->device->hostdata, GFP_ATOMIC); 211 err = uas_submit_urbs(cmnd, cmnd->device->hostdata, GFP_ATOMIC);
209 if (err) { 212 if (err) {
210 spin_lock(&uas_work_lock); 213 spin_lock(&uas_work_lock);
@@ -294,7 +297,7 @@ static struct urb *uas_alloc_sense_urb(struct uas_dev_info *devinfo, gfp_t gfp,
294 if (!urb) 297 if (!urb)
295 goto out; 298 goto out;
296 299
297 iu = kmalloc(sizeof(*iu), gfp); 300 iu = kzalloc(sizeof(*iu), gfp);
298 if (!iu) 301 if (!iu)
299 goto free; 302 goto free;
300 303
@@ -325,7 +328,7 @@ static struct urb *uas_alloc_cmd_urb(struct uas_dev_info *devinfo, gfp_t gfp,
325 if (len < 0) 328 if (len < 0)
326 len = 0; 329 len = 0;
327 len = ALIGN(len, 4); 330 len = ALIGN(len, 4);
328 iu = kmalloc(sizeof(*iu) + len, gfp); 331 iu = kzalloc(sizeof(*iu) + len, gfp);
329 if (!iu) 332 if (!iu)
330 goto free; 333 goto free;
331 334
@@ -357,21 +360,21 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
357{ 360{
358 struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp; 361 struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
359 362
360 if (cmdinfo->state & ALLOC_SENSE_URB) { 363 if (cmdinfo->state & ALLOC_STATUS_URB) {
361 cmdinfo->sense_urb = uas_alloc_sense_urb(devinfo, gfp, cmnd, 364 cmdinfo->status_urb = uas_alloc_sense_urb(devinfo, gfp, cmnd,
362 cmdinfo->stream); 365 cmdinfo->stream);
363 if (!cmdinfo->sense_urb) 366 if (!cmdinfo->status_urb)
364 return SCSI_MLQUEUE_DEVICE_BUSY; 367 return SCSI_MLQUEUE_DEVICE_BUSY;
365 cmdinfo->state &= ~ALLOC_SENSE_URB; 368 cmdinfo->state &= ~ALLOC_STATUS_URB;
366 } 369 }
367 370
368 if (cmdinfo->state & SUBMIT_SENSE_URB) { 371 if (cmdinfo->state & SUBMIT_STATUS_URB) {
369 if (usb_submit_urb(cmdinfo->sense_urb, gfp)) { 372 if (usb_submit_urb(cmdinfo->status_urb, gfp)) {
370 scmd_printk(KERN_INFO, cmnd, 373 scmd_printk(KERN_INFO, cmnd,
371 "sense urb submission failure\n"); 374 "sense urb submission failure\n");
372 return SCSI_MLQUEUE_DEVICE_BUSY; 375 return SCSI_MLQUEUE_DEVICE_BUSY;
373 } 376 }
374 cmdinfo->state &= ~SUBMIT_SENSE_URB; 377 cmdinfo->state &= ~SUBMIT_STATUS_URB;
375 } 378 }
376 379
377 if (cmdinfo->state & ALLOC_DATA_IN_URB) { 380 if (cmdinfo->state & ALLOC_DATA_IN_URB) {
@@ -440,7 +443,7 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd,
440 443
441 BUILD_BUG_ON(sizeof(struct uas_cmd_info) > sizeof(struct scsi_pointer)); 444 BUILD_BUG_ON(sizeof(struct uas_cmd_info) > sizeof(struct scsi_pointer));
442 445
443 if (!cmdinfo->sense_urb && sdev->current_cmnd) 446 if (!cmdinfo->status_urb && sdev->current_cmnd)
444 return SCSI_MLQUEUE_DEVICE_BUSY; 447 return SCSI_MLQUEUE_DEVICE_BUSY;
445 448
446 if (blk_rq_tagged(cmnd->request)) { 449 if (blk_rq_tagged(cmnd->request)) {
@@ -452,7 +455,7 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd,
452 455
453 cmnd->scsi_done = done; 456 cmnd->scsi_done = done;
454 457
455 cmdinfo->state = ALLOC_SENSE_URB | SUBMIT_SENSE_URB | 458 cmdinfo->state = ALLOC_STATUS_URB | SUBMIT_STATUS_URB |
456 ALLOC_CMD_URB | SUBMIT_CMD_URB; 459 ALLOC_CMD_URB | SUBMIT_CMD_URB;
457 460
458 switch (cmnd->sc_data_direction) { 461 switch (cmnd->sc_data_direction) {
@@ -475,8 +478,8 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd,
475 err = uas_submit_urbs(cmnd, devinfo, GFP_ATOMIC); 478 err = uas_submit_urbs(cmnd, devinfo, GFP_ATOMIC);
476 if (err) { 479 if (err) {
477 /* If we did nothing, give up now */ 480 /* If we did nothing, give up now */
478 if (cmdinfo->state & SUBMIT_SENSE_URB) { 481 if (cmdinfo->state & SUBMIT_STATUS_URB) {
479 usb_free_urb(cmdinfo->sense_urb); 482 usb_free_urb(cmdinfo->status_urb);
480 return SCSI_MLQUEUE_DEVICE_BUSY; 483 return SCSI_MLQUEUE_DEVICE_BUSY;
481 } 484 }
482 spin_lock(&uas_work_lock); 485 spin_lock(&uas_work_lock);
@@ -578,6 +581,34 @@ static struct usb_device_id uas_usb_ids[] = {
578}; 581};
579MODULE_DEVICE_TABLE(usb, uas_usb_ids); 582MODULE_DEVICE_TABLE(usb, uas_usb_ids);
580 583
584static int uas_is_interface(struct usb_host_interface *intf)
585{
586 return (intf->desc.bInterfaceClass == USB_CLASS_MASS_STORAGE &&
587 intf->desc.bInterfaceSubClass == USB_SC_SCSI &&
588 intf->desc.bInterfaceProtocol == USB_PR_UAS);
589}
590
591static int uas_switch_interface(struct usb_device *udev,
592 struct usb_interface *intf)
593{
594 int i;
595
596 if (uas_is_interface(intf->cur_altsetting))
597 return 0;
598
599 for (i = 0; i < intf->num_altsetting; i++) {
600 struct usb_host_interface *alt = &intf->altsetting[i];
601 if (alt == intf->cur_altsetting)
602 continue;
603 if (uas_is_interface(alt))
604 return usb_set_interface(udev,
605 alt->desc.bInterfaceNumber,
606 alt->desc.bAlternateSetting);
607 }
608
609 return -ENODEV;
610}
611
581static void uas_configure_endpoints(struct uas_dev_info *devinfo) 612static void uas_configure_endpoints(struct uas_dev_info *devinfo)
582{ 613{
583 struct usb_host_endpoint *eps[4] = { }; 614 struct usb_host_endpoint *eps[4] = { };
@@ -651,13 +682,8 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
651 struct uas_dev_info *devinfo; 682 struct uas_dev_info *devinfo;
652 struct usb_device *udev = interface_to_usbdev(intf); 683 struct usb_device *udev = interface_to_usbdev(intf);
653 684
654 if (id->bInterfaceProtocol == 0x50) { 685 if (uas_switch_interface(udev, intf))
655 int ifnum = intf->cur_altsetting->desc.bInterfaceNumber; 686 return -ENODEV;
656/* XXX: Shouldn't assume that 1 is the alternative we want */
657 int ret = usb_set_interface(udev, ifnum, 1);
658 if (ret)
659 return -ENODEV;
660 }
661 687
662 devinfo = kmalloc(sizeof(struct uas_dev_info), GFP_KERNEL); 688 devinfo = kmalloc(sizeof(struct uas_dev_info), GFP_KERNEL);
663 if (!devinfo) 689 if (!devinfo)
diff --git a/drivers/uwb/i1480/i1480-est.c b/drivers/uwb/i1480/i1480-est.c
index f2eb4d8b76c9..d5de5e131d47 100644
--- a/drivers/uwb/i1480/i1480-est.c
+++ b/drivers/uwb/i1480/i1480-est.c
@@ -91,7 +91,7 @@ MODULE_LICENSE("GPL");
91 * 91 *
92 * [so we are loaded when this kind device is connected] 92 * [so we are loaded when this kind device is connected]
93 */ 93 */
94static struct usb_device_id i1480_est_id_table[] = { 94static struct usb_device_id __used i1480_est_id_table[] = {
95 { USB_DEVICE(0x8086, 0xdf3b), }, 95 { USB_DEVICE(0x8086, 0xdf3b), },
96 { USB_DEVICE(0x8086, 0x0c3b), }, 96 { USB_DEVICE(0x8086, 0x0c3b), },
97 { }, 97 { },
diff --git a/drivers/uwb/umc-dev.c b/drivers/uwb/umc-dev.c
index 43ea9982e687..ccd2184e05d2 100644
--- a/drivers/uwb/umc-dev.c
+++ b/drivers/uwb/umc-dev.c
@@ -54,11 +54,8 @@ int umc_device_register(struct umc_dev *umc)
54 54
55 err = request_resource(umc->resource.parent, &umc->resource); 55 err = request_resource(umc->resource.parent, &umc->resource);
56 if (err < 0) { 56 if (err < 0) {
57 dev_err(&umc->dev, "can't allocate resource range " 57 dev_err(&umc->dev, "can't allocate resource range %pR: %d\n",
58 "%016Lx to %016Lx: %d\n", 58 &umc->resource, err);
59 (unsigned long long)umc->resource.start,
60 (unsigned long long)umc->resource.end,
61 err);
62 goto error_request_resource; 59 goto error_request_resource;
63 } 60 }
64 61
diff --git a/drivers/uwb/whc-rc.c b/drivers/uwb/whc-rc.c
index 73495583c444..70a004aa19db 100644
--- a/drivers/uwb/whc-rc.c
+++ b/drivers/uwb/whc-rc.c
@@ -449,7 +449,7 @@ static int whcrc_post_reset(struct umc_dev *umc)
449} 449}
450 450
451/* PCI device ID's that we handle [so it gets loaded] */ 451/* PCI device ID's that we handle [so it gets loaded] */
452static struct pci_device_id whcrc_id_table[] = { 452static struct pci_device_id __used whcrc_id_table[] = {
453 { PCI_DEVICE_CLASS(PCI_CLASS_WIRELESS_WHCI, ~0) }, 453 { PCI_DEVICE_CLASS(PCI_CLASS_WIRELESS_WHCI, ~0) },
454 { /* empty last entry */ } 454 { /* empty last entry */ }
455}; 455};
diff --git a/fs/9p/acl.c b/fs/9p/acl.c
index 12d602351dbe..6e58c4ca1e6e 100644
--- a/fs/9p/acl.c
+++ b/fs/9p/acl.c
@@ -91,11 +91,14 @@ static struct posix_acl *v9fs_get_cached_acl(struct inode *inode, int type)
91 return acl; 91 return acl;
92} 92}
93 93
94int v9fs_check_acl(struct inode *inode, int mask) 94int v9fs_check_acl(struct inode *inode, int mask, unsigned int flags)
95{ 95{
96 struct posix_acl *acl; 96 struct posix_acl *acl;
97 struct v9fs_session_info *v9ses; 97 struct v9fs_session_info *v9ses;
98 98
99 if (flags & IPERM_FLAG_RCU)
100 return -ECHILD;
101
99 v9ses = v9fs_inode2v9ses(inode); 102 v9ses = v9fs_inode2v9ses(inode);
100 if ((v9ses->flags & V9FS_ACCESS_MASK) != V9FS_ACCESS_CLIENT) { 103 if ((v9ses->flags & V9FS_ACCESS_MASK) != V9FS_ACCESS_CLIENT) {
101 /* 104 /*
diff --git a/fs/9p/acl.h b/fs/9p/acl.h
index 59e18c2e8c7e..7ef3ac9f6d95 100644
--- a/fs/9p/acl.h
+++ b/fs/9p/acl.h
@@ -16,7 +16,7 @@
16 16
17#ifdef CONFIG_9P_FS_POSIX_ACL 17#ifdef CONFIG_9P_FS_POSIX_ACL
18extern int v9fs_get_acl(struct inode *, struct p9_fid *); 18extern int v9fs_get_acl(struct inode *, struct p9_fid *);
19extern int v9fs_check_acl(struct inode *inode, int mask); 19extern int v9fs_check_acl(struct inode *inode, int mask, unsigned int flags);
20extern int v9fs_acl_chmod(struct dentry *); 20extern int v9fs_acl_chmod(struct dentry *);
21extern int v9fs_set_create_acl(struct dentry *, 21extern int v9fs_set_create_acl(struct dentry *,
22 struct posix_acl *, struct posix_acl *); 22 struct posix_acl *, struct posix_acl *);
diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c
index cbf4e50f3933..466d2a4fc5cb 100644
--- a/fs/9p/vfs_dentry.c
+++ b/fs/9p/vfs_dentry.c
@@ -51,7 +51,7 @@
51 * 51 *
52 */ 52 */
53 53
54static int v9fs_dentry_delete(struct dentry *dentry) 54static int v9fs_dentry_delete(const struct dentry *dentry)
55{ 55{
56 P9_DPRINTK(P9_DEBUG_VFS, " dentry: %s (%p)\n", dentry->d_name.name, 56 P9_DPRINTK(P9_DEBUG_VFS, " dentry: %s (%p)\n", dentry->d_name.name,
57 dentry); 57 dentry);
@@ -68,7 +68,7 @@ static int v9fs_dentry_delete(struct dentry *dentry)
68 * 68 *
69 */ 69 */
70 70
71static int v9fs_cached_dentry_delete(struct dentry *dentry) 71static int v9fs_cached_dentry_delete(const struct dentry *dentry)
72{ 72{
73 struct inode *inode = dentry->d_inode; 73 struct inode *inode = dentry->d_inode;
74 P9_DPRINTK(P9_DEBUG_VFS, " dentry: %s (%p)\n", dentry->d_name.name, 74 P9_DPRINTK(P9_DEBUG_VFS, " dentry: %s (%p)\n", dentry->d_name.name,
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 34bf71b56542..59782981b225 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -237,10 +237,17 @@ struct inode *v9fs_alloc_inode(struct super_block *sb)
237 * 237 *
238 */ 238 */
239 239
240void v9fs_destroy_inode(struct inode *inode) 240static void v9fs_i_callback(struct rcu_head *head)
241{ 241{
242 struct inode *inode = container_of(head, struct inode, i_rcu);
243 INIT_LIST_HEAD(&inode->i_dentry);
242 kmem_cache_free(vcookie_cache, v9fs_inode2cookie(inode)); 244 kmem_cache_free(vcookie_cache, v9fs_inode2cookie(inode));
243} 245}
246
247void v9fs_destroy_inode(struct inode *inode)
248{
249 call_rcu(&inode->i_rcu, v9fs_i_callback);
250}
244#endif 251#endif
245 252
246/** 253/**
@@ -270,11 +277,11 @@ static struct dentry *v9fs_dentry_from_dir_inode(struct inode *inode)
270{ 277{
271 struct dentry *dentry; 278 struct dentry *dentry;
272 279
273 spin_lock(&dcache_lock); 280 spin_lock(&inode->i_lock);
274 /* Directory should have only one entry. */ 281 /* Directory should have only one entry. */
275 BUG_ON(S_ISDIR(inode->i_mode) && !list_is_singular(&inode->i_dentry)); 282 BUG_ON(S_ISDIR(inode->i_mode) && !list_is_singular(&inode->i_dentry));
276 dentry = list_entry(inode->i_dentry.next, struct dentry, d_alias); 283 dentry = list_entry(inode->i_dentry.next, struct dentry, d_alias);
277 spin_unlock(&dcache_lock); 284 spin_unlock(&inode->i_lock);
278 return dentry; 285 return dentry;
279} 286}
280 287
@@ -628,9 +635,9 @@ v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir,
628 } 635 }
629 636
630 if (v9ses->cache) 637 if (v9ses->cache)
631 dentry->d_op = &v9fs_cached_dentry_operations; 638 d_set_d_op(dentry, &v9fs_cached_dentry_operations);
632 else 639 else
633 dentry->d_op = &v9fs_dentry_operations; 640 d_set_d_op(dentry, &v9fs_dentry_operations);
634 641
635 d_instantiate(dentry, inode); 642 d_instantiate(dentry, inode);
636 err = v9fs_fid_add(dentry, fid); 643 err = v9fs_fid_add(dentry, fid);
@@ -742,7 +749,7 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
742 err); 749 err);
743 goto error; 750 goto error;
744 } 751 }
745 dentry->d_op = &v9fs_cached_dentry_operations; 752 d_set_d_op(dentry, &v9fs_cached_dentry_operations);
746 d_instantiate(dentry, inode); 753 d_instantiate(dentry, inode);
747 err = v9fs_fid_add(dentry, fid); 754 err = v9fs_fid_add(dentry, fid);
748 if (err < 0) 755 if (err < 0)
@@ -760,7 +767,7 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
760 err = PTR_ERR(inode); 767 err = PTR_ERR(inode);
761 goto error; 768 goto error;
762 } 769 }
763 dentry->d_op = &v9fs_dentry_operations; 770 d_set_d_op(dentry, &v9fs_dentry_operations);
764 d_instantiate(dentry, inode); 771 d_instantiate(dentry, inode);
765 } 772 }
766 /* Now set the ACL based on the default value */ 773 /* Now set the ACL based on the default value */
@@ -949,7 +956,7 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
949 err); 956 err);
950 goto error; 957 goto error;
951 } 958 }
952 dentry->d_op = &v9fs_cached_dentry_operations; 959 d_set_d_op(dentry, &v9fs_cached_dentry_operations);
953 d_instantiate(dentry, inode); 960 d_instantiate(dentry, inode);
954 err = v9fs_fid_add(dentry, fid); 961 err = v9fs_fid_add(dentry, fid);
955 if (err < 0) 962 if (err < 0)
@@ -966,7 +973,7 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
966 err = PTR_ERR(inode); 973 err = PTR_ERR(inode);
967 goto error; 974 goto error;
968 } 975 }
969 dentry->d_op = &v9fs_dentry_operations; 976 d_set_d_op(dentry, &v9fs_dentry_operations);
970 d_instantiate(dentry, inode); 977 d_instantiate(dentry, inode);
971 } 978 }
972 /* Now set the ACL based on the default value */ 979 /* Now set the ACL based on the default value */
@@ -1034,9 +1041,9 @@ static struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
1034 1041
1035inst_out: 1042inst_out:
1036 if (v9ses->cache) 1043 if (v9ses->cache)
1037 dentry->d_op = &v9fs_cached_dentry_operations; 1044 d_set_d_op(dentry, &v9fs_cached_dentry_operations);
1038 else 1045 else
1039 dentry->d_op = &v9fs_dentry_operations; 1046 d_set_d_op(dentry, &v9fs_dentry_operations);
1040 1047
1041 d_add(dentry, inode); 1048 d_add(dentry, inode);
1042 return NULL; 1049 return NULL;
@@ -1702,7 +1709,7 @@ v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry,
1702 err); 1709 err);
1703 goto error; 1710 goto error;
1704 } 1711 }
1705 dentry->d_op = &v9fs_cached_dentry_operations; 1712 d_set_d_op(dentry, &v9fs_cached_dentry_operations);
1706 d_instantiate(dentry, inode); 1713 d_instantiate(dentry, inode);
1707 err = v9fs_fid_add(dentry, fid); 1714 err = v9fs_fid_add(dentry, fid);
1708 if (err < 0) 1715 if (err < 0)
@@ -1715,7 +1722,7 @@ v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry,
1715 err = PTR_ERR(inode); 1722 err = PTR_ERR(inode);
1716 goto error; 1723 goto error;
1717 } 1724 }
1718 dentry->d_op = &v9fs_dentry_operations; 1725 d_set_d_op(dentry, &v9fs_dentry_operations);
1719 d_instantiate(dentry, inode); 1726 d_instantiate(dentry, inode);
1720 } 1727 }
1721 1728
@@ -1849,7 +1856,7 @@ v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir,
1849 ihold(old_dentry->d_inode); 1856 ihold(old_dentry->d_inode);
1850 } 1857 }
1851 1858
1852 dentry->d_op = old_dentry->d_op; 1859 d_set_d_op(dentry, old_dentry->d_op);
1853 d_instantiate(dentry, old_dentry->d_inode); 1860 d_instantiate(dentry, old_dentry->d_inode);
1854 1861
1855 return err; 1862 return err;
@@ -1973,7 +1980,7 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
1973 err); 1980 err);
1974 goto error; 1981 goto error;
1975 } 1982 }
1976 dentry->d_op = &v9fs_cached_dentry_operations; 1983 d_set_d_op(dentry, &v9fs_cached_dentry_operations);
1977 d_instantiate(dentry, inode); 1984 d_instantiate(dentry, inode);
1978 err = v9fs_fid_add(dentry, fid); 1985 err = v9fs_fid_add(dentry, fid);
1979 if (err < 0) 1986 if (err < 0)
@@ -1989,7 +1996,7 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
1989 err = PTR_ERR(inode); 1996 err = PTR_ERR(inode);
1990 goto error; 1997 goto error;
1991 } 1998 }
1992 dentry->d_op = &v9fs_dentry_operations; 1999 d_set_d_op(dentry, &v9fs_dentry_operations);
1993 d_instantiate(dentry, inode); 2000 d_instantiate(dentry, inode);
1994 } 2001 }
1995 /* Now set the ACL based on the default value */ 2002 /* Now set the ACL based on the default value */
diff --git a/fs/adfs/dir.c b/fs/adfs/dir.c
index f4287e4de744..bf7693c384f9 100644
--- a/fs/adfs/dir.c
+++ b/fs/adfs/dir.c
@@ -201,7 +201,8 @@ const struct file_operations adfs_dir_operations = {
201}; 201};
202 202
203static int 203static int
204adfs_hash(struct dentry *parent, struct qstr *qstr) 204adfs_hash(const struct dentry *parent, const struct inode *inode,
205 struct qstr *qstr)
205{ 206{
206 const unsigned int name_len = ADFS_SB(parent->d_sb)->s_namelen; 207 const unsigned int name_len = ADFS_SB(parent->d_sb)->s_namelen;
207 const unsigned char *name; 208 const unsigned char *name;
@@ -237,17 +238,19 @@ adfs_hash(struct dentry *parent, struct qstr *qstr)
237 * requirements of the underlying filesystem. 238 * requirements of the underlying filesystem.
238 */ 239 */
239static int 240static int
240adfs_compare(struct dentry *parent, struct qstr *entry, struct qstr *name) 241adfs_compare(const struct dentry *parent, const struct inode *pinode,
242 const struct dentry *dentry, const struct inode *inode,
243 unsigned int len, const char *str, const struct qstr *name)
241{ 244{
242 int i; 245 int i;
243 246
244 if (entry->len != name->len) 247 if (len != name->len)
245 return 1; 248 return 1;
246 249
247 for (i = 0; i < name->len; i++) { 250 for (i = 0; i < name->len; i++) {
248 char a, b; 251 char a, b;
249 252
250 a = entry->name[i]; 253 a = str[i];
251 b = name->name[i]; 254 b = name->name[i];
252 255
253 if (a >= 'A' && a <= 'Z') 256 if (a >= 'A' && a <= 'Z')
@@ -273,7 +276,7 @@ adfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
273 struct object_info obj; 276 struct object_info obj;
274 int error; 277 int error;
275 278
276 dentry->d_op = &adfs_dentry_operations; 279 d_set_d_op(dentry, &adfs_dentry_operations);
277 lock_kernel(); 280 lock_kernel();
278 error = adfs_dir_lookup_byname(dir, &dentry->d_name, &obj); 281 error = adfs_dir_lookup_byname(dir, &dentry->d_name, &obj);
279 if (error == 0) { 282 if (error == 0) {
diff --git a/fs/adfs/super.c b/fs/adfs/super.c
index 959dbff2d42d..a4041b52fbca 100644
--- a/fs/adfs/super.c
+++ b/fs/adfs/super.c
@@ -240,11 +240,18 @@ static struct inode *adfs_alloc_inode(struct super_block *sb)
240 return &ei->vfs_inode; 240 return &ei->vfs_inode;
241} 241}
242 242
243static void adfs_destroy_inode(struct inode *inode) 243static void adfs_i_callback(struct rcu_head *head)
244{ 244{
245 struct inode *inode = container_of(head, struct inode, i_rcu);
246 INIT_LIST_HEAD(&inode->i_dentry);
245 kmem_cache_free(adfs_inode_cachep, ADFS_I(inode)); 247 kmem_cache_free(adfs_inode_cachep, ADFS_I(inode));
246} 248}
247 249
250static void adfs_destroy_inode(struct inode *inode)
251{
252 call_rcu(&inode->i_rcu, adfs_i_callback);
253}
254
248static void init_once(void *foo) 255static void init_once(void *foo)
249{ 256{
250 struct adfs_inode_info *ei = (struct adfs_inode_info *) foo; 257 struct adfs_inode_info *ei = (struct adfs_inode_info *) foo;
@@ -477,7 +484,7 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
477 adfs_error(sb, "get root inode failed\n"); 484 adfs_error(sb, "get root inode failed\n");
478 goto error; 485 goto error;
479 } else 486 } else
480 sb->s_root->d_op = &adfs_dentry_operations; 487 d_set_d_op(sb->s_root, &adfs_dentry_operations);
481 unlock_kernel(); 488 unlock_kernel();
482 return 0; 489 return 0;
483 490
diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
index 7d0f0a30f7a3..3a4557e8325c 100644
--- a/fs/affs/amigaffs.c
+++ b/fs/affs/amigaffs.c
@@ -128,7 +128,7 @@ affs_fix_dcache(struct dentry *dentry, u32 entry_ino)
128 void *data = dentry->d_fsdata; 128 void *data = dentry->d_fsdata;
129 struct list_head *head, *next; 129 struct list_head *head, *next;
130 130
131 spin_lock(&dcache_lock); 131 spin_lock(&inode->i_lock);
132 head = &inode->i_dentry; 132 head = &inode->i_dentry;
133 next = head->next; 133 next = head->next;
134 while (next != head) { 134 while (next != head) {
@@ -139,7 +139,7 @@ affs_fix_dcache(struct dentry *dentry, u32 entry_ino)
139 } 139 }
140 next = next->next; 140 next = next->next;
141 } 141 }
142 spin_unlock(&dcache_lock); 142 spin_unlock(&inode->i_lock);
143} 143}
144 144
145 145
diff --git a/fs/affs/namei.c b/fs/affs/namei.c
index 914d1c0bc07a..944a4042fb65 100644
--- a/fs/affs/namei.c
+++ b/fs/affs/namei.c
@@ -13,11 +13,19 @@
13typedef int (*toupper_t)(int); 13typedef int (*toupper_t)(int);
14 14
15static int affs_toupper(int ch); 15static int affs_toupper(int ch);
16static int affs_hash_dentry(struct dentry *, struct qstr *); 16static int affs_hash_dentry(const struct dentry *,
17static int affs_compare_dentry(struct dentry *, struct qstr *, struct qstr *); 17 const struct inode *, struct qstr *);
18static int affs_compare_dentry(const struct dentry *parent,
19 const struct inode *pinode,
20 const struct dentry *dentry, const struct inode *inode,
21 unsigned int len, const char *str, const struct qstr *name);
18static int affs_intl_toupper(int ch); 22static int affs_intl_toupper(int ch);
19static int affs_intl_hash_dentry(struct dentry *, struct qstr *); 23static int affs_intl_hash_dentry(const struct dentry *,
20static int affs_intl_compare_dentry(struct dentry *, struct qstr *, struct qstr *); 24 const struct inode *, struct qstr *);
25static int affs_intl_compare_dentry(const struct dentry *parent,
26 const struct inode *pinode,
27 const struct dentry *dentry, const struct inode *inode,
28 unsigned int len, const char *str, const struct qstr *name);
21 29
22const struct dentry_operations affs_dentry_operations = { 30const struct dentry_operations affs_dentry_operations = {
23 .d_hash = affs_hash_dentry, 31 .d_hash = affs_hash_dentry,
@@ -58,13 +66,13 @@ affs_get_toupper(struct super_block *sb)
58 * Note: the dentry argument is the parent dentry. 66 * Note: the dentry argument is the parent dentry.
59 */ 67 */
60static inline int 68static inline int
61__affs_hash_dentry(struct dentry *dentry, struct qstr *qstr, toupper_t toupper) 69__affs_hash_dentry(struct qstr *qstr, toupper_t toupper)
62{ 70{
63 const u8 *name = qstr->name; 71 const u8 *name = qstr->name;
64 unsigned long hash; 72 unsigned long hash;
65 int i; 73 int i;
66 74
67 i = affs_check_name(qstr->name,qstr->len); 75 i = affs_check_name(qstr->name, qstr->len);
68 if (i) 76 if (i)
69 return i; 77 return i;
70 78
@@ -78,39 +86,41 @@ __affs_hash_dentry(struct dentry *dentry, struct qstr *qstr, toupper_t toupper)
78} 86}
79 87
80static int 88static int
81affs_hash_dentry(struct dentry *dentry, struct qstr *qstr) 89affs_hash_dentry(const struct dentry *dentry, const struct inode *inode,
90 struct qstr *qstr)
82{ 91{
83 return __affs_hash_dentry(dentry, qstr, affs_toupper); 92 return __affs_hash_dentry(qstr, affs_toupper);
84} 93}
85static int 94static int
86affs_intl_hash_dentry(struct dentry *dentry, struct qstr *qstr) 95affs_intl_hash_dentry(const struct dentry *dentry, const struct inode *inode,
96 struct qstr *qstr)
87{ 97{
88 return __affs_hash_dentry(dentry, qstr, affs_intl_toupper); 98 return __affs_hash_dentry(qstr, affs_intl_toupper);
89} 99}
90 100
91static inline int 101static inline int __affs_compare_dentry(unsigned int len,
92__affs_compare_dentry(struct dentry *dentry, struct qstr *a, struct qstr *b, toupper_t toupper) 102 const char *str, const struct qstr *name, toupper_t toupper)
93{ 103{
94 const u8 *aname = a->name; 104 const u8 *aname = str;
95 const u8 *bname = b->name; 105 const u8 *bname = name->name;
96 int len;
97 106
98 /* 'a' is the qstr of an already existing dentry, so the name 107 /*
99 * must be valid. 'b' must be validated first. 108 * 'str' is the name of an already existing dentry, so the name
109 * must be valid. 'name' must be validated first.
100 */ 110 */
101 111
102 if (affs_check_name(b->name,b->len)) 112 if (affs_check_name(name->name, name->len))
103 return 1; 113 return 1;
104 114
105 /* If the names are longer than the allowed 30 chars, 115 /*
116 * If the names are longer than the allowed 30 chars,
106 * the excess is ignored, so their length may differ. 117 * the excess is ignored, so their length may differ.
107 */ 118 */
108 len = a->len;
109 if (len >= 30) { 119 if (len >= 30) {
110 if (b->len < 30) 120 if (name->len < 30)
111 return 1; 121 return 1;
112 len = 30; 122 len = 30;
113 } else if (len != b->len) 123 } else if (len != name->len)
114 return 1; 124 return 1;
115 125
116 for (; len > 0; len--) 126 for (; len > 0; len--)
@@ -121,14 +131,18 @@ __affs_compare_dentry(struct dentry *dentry, struct qstr *a, struct qstr *b, tou
121} 131}
122 132
123static int 133static int
124affs_compare_dentry(struct dentry *dentry, struct qstr *a, struct qstr *b) 134affs_compare_dentry(const struct dentry *parent, const struct inode *pinode,
135 const struct dentry *dentry, const struct inode *inode,
136 unsigned int len, const char *str, const struct qstr *name)
125{ 137{
126 return __affs_compare_dentry(dentry, a, b, affs_toupper); 138 return __affs_compare_dentry(len, str, name, affs_toupper);
127} 139}
128static int 140static int
129affs_intl_compare_dentry(struct dentry *dentry, struct qstr *a, struct qstr *b) 141affs_intl_compare_dentry(const struct dentry *parent,const struct inode *pinode,
142 const struct dentry *dentry, const struct inode *inode,
143 unsigned int len, const char *str, const struct qstr *name)
130{ 144{
131 return __affs_compare_dentry(dentry, a, b, affs_intl_toupper); 145 return __affs_compare_dentry(len, str, name, affs_intl_toupper);
132} 146}
133 147
134/* 148/*
@@ -226,7 +240,7 @@ affs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
226 if (IS_ERR(inode)) 240 if (IS_ERR(inode))
227 return ERR_CAST(inode); 241 return ERR_CAST(inode);
228 } 242 }
229 dentry->d_op = AFFS_SB(sb)->s_flags & SF_INTL ? &affs_intl_dentry_operations : &affs_dentry_operations; 243 d_set_d_op(dentry, AFFS_SB(sb)->s_flags & SF_INTL ? &affs_intl_dentry_operations : &affs_dentry_operations);
230 d_add(dentry, inode); 244 d_add(dentry, inode);
231 return NULL; 245 return NULL;
232} 246}
diff --git a/fs/affs/super.c b/fs/affs/super.c
index 0cf7f4384cbd..d39081bbe7ce 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -95,11 +95,18 @@ static struct inode *affs_alloc_inode(struct super_block *sb)
95 return &i->vfs_inode; 95 return &i->vfs_inode;
96} 96}
97 97
98static void affs_destroy_inode(struct inode *inode) 98static void affs_i_callback(struct rcu_head *head)
99{ 99{
100 struct inode *inode = container_of(head, struct inode, i_rcu);
101 INIT_LIST_HEAD(&inode->i_dentry);
100 kmem_cache_free(affs_inode_cachep, AFFS_I(inode)); 102 kmem_cache_free(affs_inode_cachep, AFFS_I(inode));
101} 103}
102 104
105static void affs_destroy_inode(struct inode *inode)
106{
107 call_rcu(&inode->i_rcu, affs_i_callback);
108}
109
103static void init_once(void *foo) 110static void init_once(void *foo)
104{ 111{
105 struct affs_inode_info *ei = (struct affs_inode_info *) foo; 112 struct affs_inode_info *ei = (struct affs_inode_info *) foo;
@@ -475,7 +482,7 @@ got_root:
475 printk(KERN_ERR "AFFS: Get root inode failed\n"); 482 printk(KERN_ERR "AFFS: Get root inode failed\n");
476 goto out_error; 483 goto out_error;
477 } 484 }
478 sb->s_root->d_op = &affs_dentry_operations; 485 d_set_d_op(sb->s_root, &affs_dentry_operations);
479 486
480 pr_debug("AFFS: s_flags=%lX\n",sb->s_flags); 487 pr_debug("AFFS: s_flags=%lX\n",sb->s_flags);
481 return 0; 488 return 0;
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 5439e1bc9a86..34a3263d60a4 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -13,6 +13,7 @@
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/fs.h> 15#include <linux/fs.h>
16#include <linux/namei.h>
16#include <linux/pagemap.h> 17#include <linux/pagemap.h>
17#include <linux/ctype.h> 18#include <linux/ctype.h>
18#include <linux/sched.h> 19#include <linux/sched.h>
@@ -23,7 +24,7 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
23static int afs_dir_open(struct inode *inode, struct file *file); 24static int afs_dir_open(struct inode *inode, struct file *file);
24static int afs_readdir(struct file *file, void *dirent, filldir_t filldir); 25static int afs_readdir(struct file *file, void *dirent, filldir_t filldir);
25static int afs_d_revalidate(struct dentry *dentry, struct nameidata *nd); 26static int afs_d_revalidate(struct dentry *dentry, struct nameidata *nd);
26static int afs_d_delete(struct dentry *dentry); 27static int afs_d_delete(const struct dentry *dentry);
27static void afs_d_release(struct dentry *dentry); 28static void afs_d_release(struct dentry *dentry);
28static int afs_lookup_filldir(void *_cookie, const char *name, int nlen, 29static int afs_lookup_filldir(void *_cookie, const char *name, int nlen,
29 loff_t fpos, u64 ino, unsigned dtype); 30 loff_t fpos, u64 ino, unsigned dtype);
@@ -581,7 +582,7 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
581 } 582 }
582 583
583success: 584success:
584 dentry->d_op = &afs_fs_dentry_operations; 585 d_set_d_op(dentry, &afs_fs_dentry_operations);
585 586
586 d_add(dentry, inode); 587 d_add(dentry, inode);
587 _leave(" = 0 { vn=%u u=%u } -> { ino=%lu v=%llu }", 588 _leave(" = 0 { vn=%u u=%u } -> { ino=%lu v=%llu }",
@@ -607,6 +608,9 @@ static int afs_d_revalidate(struct dentry *dentry, struct nameidata *nd)
607 void *dir_version; 608 void *dir_version;
608 int ret; 609 int ret;
609 610
611 if (nd->flags & LOOKUP_RCU)
612 return -ECHILD;
613
610 vnode = AFS_FS_I(dentry->d_inode); 614 vnode = AFS_FS_I(dentry->d_inode);
611 615
612 if (dentry->d_inode) 616 if (dentry->d_inode)
@@ -730,7 +734,7 @@ out_bad:
730 * - called from dput() when d_count is going to 0. 734 * - called from dput() when d_count is going to 0.
731 * - return 1 to request dentry be unhashed, 0 otherwise 735 * - return 1 to request dentry be unhashed, 0 otherwise
732 */ 736 */
733static int afs_d_delete(struct dentry *dentry) 737static int afs_d_delete(const struct dentry *dentry)
734{ 738{
735 _enter("%s", dentry->d_name.name); 739 _enter("%s", dentry->d_name.name);
736 740
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index cca8eef736fc..6d4bc1c8ff60 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -624,7 +624,7 @@ extern void afs_clear_permits(struct afs_vnode *);
624extern void afs_cache_permit(struct afs_vnode *, struct key *, long); 624extern void afs_cache_permit(struct afs_vnode *, struct key *, long);
625extern void afs_zap_permits(struct rcu_head *); 625extern void afs_zap_permits(struct rcu_head *);
626extern struct key *afs_request_key(struct afs_cell *); 626extern struct key *afs_request_key(struct afs_cell *);
627extern int afs_permission(struct inode *, int); 627extern int afs_permission(struct inode *, int, unsigned int);
628 628
629/* 629/*
630 * server.c 630 * server.c
diff --git a/fs/afs/security.c b/fs/afs/security.c
index bb4ed144d0e4..f44b9d355377 100644
--- a/fs/afs/security.c
+++ b/fs/afs/security.c
@@ -285,13 +285,16 @@ static int afs_check_permit(struct afs_vnode *vnode, struct key *key,
285 * - AFS ACLs are attached to directories only, and a file is controlled by its 285 * - AFS ACLs are attached to directories only, and a file is controlled by its
286 * parent directory's ACL 286 * parent directory's ACL
287 */ 287 */
288int afs_permission(struct inode *inode, int mask) 288int afs_permission(struct inode *inode, int mask, unsigned int flags)
289{ 289{
290 struct afs_vnode *vnode = AFS_FS_I(inode); 290 struct afs_vnode *vnode = AFS_FS_I(inode);
291 afs_access_t uninitialized_var(access); 291 afs_access_t uninitialized_var(access);
292 struct key *key; 292 struct key *key;
293 int ret; 293 int ret;
294 294
295 if (flags & IPERM_FLAG_RCU)
296 return -ECHILD;
297
295 _enter("{{%x:%u},%lx},%x,", 298 _enter("{{%x:%u},%lx},%x,",
296 vnode->fid.vid, vnode->fid.vnode, vnode->flags, mask); 299 vnode->fid.vid, vnode->fid.vnode, vnode->flags, mask);
297 300
@@ -347,7 +350,7 @@ int afs_permission(struct inode *inode, int mask)
347 } 350 }
348 351
349 key_put(key); 352 key_put(key);
350 ret = generic_permission(inode, mask, NULL); 353 ret = generic_permission(inode, mask, flags, NULL);
351 _leave(" = %d", ret); 354 _leave(" = %d", ret);
352 return ret; 355 return ret;
353 356
diff --git a/fs/afs/super.c b/fs/afs/super.c
index 27201cffece4..f901a9d7c111 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -498,6 +498,14 @@ static struct inode *afs_alloc_inode(struct super_block *sb)
498 return &vnode->vfs_inode; 498 return &vnode->vfs_inode;
499} 499}
500 500
501static void afs_i_callback(struct rcu_head *head)
502{
503 struct inode *inode = container_of(head, struct inode, i_rcu);
504 struct afs_vnode *vnode = AFS_FS_I(inode);
505 INIT_LIST_HEAD(&inode->i_dentry);
506 kmem_cache_free(afs_inode_cachep, vnode);
507}
508
501/* 509/*
502 * destroy an AFS inode struct 510 * destroy an AFS inode struct
503 */ 511 */
@@ -511,7 +519,7 @@ static void afs_destroy_inode(struct inode *inode)
511 519
512 ASSERTCMP(vnode->server, ==, NULL); 520 ASSERTCMP(vnode->server, ==, NULL);
513 521
514 kmem_cache_free(afs_inode_cachep, vnode); 522 call_rcu(&inode->i_rcu, afs_i_callback);
515 atomic_dec(&afs_count_active_inodes); 523 atomic_dec(&afs_count_active_inodes);
516} 524}
517 525
diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c
index 57ce55b2564c..5fd38112a6ca 100644
--- a/fs/anon_inodes.c
+++ b/fs/anon_inodes.c
@@ -102,7 +102,7 @@ struct file *anon_inode_getfile(const char *name,
102 this.name = name; 102 this.name = name;
103 this.len = strlen(name); 103 this.len = strlen(name);
104 this.hash = 0; 104 this.hash = 0;
105 path.dentry = d_alloc(anon_inode_mnt->mnt_sb->s_root, &this); 105 path.dentry = d_alloc_pseudo(anon_inode_mnt->mnt_sb, &this);
106 if (!path.dentry) 106 if (!path.dentry)
107 goto err_module; 107 goto err_module;
108 108
@@ -113,7 +113,7 @@ struct file *anon_inode_getfile(const char *name,
113 */ 113 */
114 ihold(anon_inode_inode); 114 ihold(anon_inode_inode);
115 115
116 path.dentry->d_op = &anon_inodefs_dentry_operations; 116 d_set_d_op(path.dentry, &anon_inodefs_dentry_operations);
117 d_instantiate(path.dentry, anon_inode_inode); 117 d_instantiate(path.dentry, anon_inode_inode);
118 118
119 error = -ENFILE; 119 error = -ENFILE;
@@ -232,7 +232,7 @@ static int __init anon_inode_init(void)
232 return 0; 232 return 0;
233 233
234err_mntput: 234err_mntput:
235 mntput(anon_inode_mnt); 235 mntput_long(anon_inode_mnt);
236err_unregister_filesystem: 236err_unregister_filesystem:
237 unregister_filesystem(&anon_inode_fs_type); 237 unregister_filesystem(&anon_inode_fs_type);
238err_exit: 238err_exit:
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index 3d283abf67d7..0fffe1c24cec 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -16,6 +16,7 @@
16#include <linux/auto_fs4.h> 16#include <linux/auto_fs4.h>
17#include <linux/auto_dev-ioctl.h> 17#include <linux/auto_dev-ioctl.h>
18#include <linux/mutex.h> 18#include <linux/mutex.h>
19#include <linux/spinlock.h>
19#include <linux/list.h> 20#include <linux/list.h>
20 21
21/* This is the range of ioctl() numbers we claim as ours */ 22/* This is the range of ioctl() numbers we claim as ours */
@@ -60,6 +61,8 @@ do { \
60 current->pid, __func__, ##args); \ 61 current->pid, __func__, ##args); \
61} while (0) 62} while (0)
62 63
64extern spinlock_t autofs4_lock;
65
63/* Unified info structure. This is pointed to by both the dentry and 66/* Unified info structure. This is pointed to by both the dentry and
64 inode structures. Each file in the filesystem has an instance of this 67 inode structures. Each file in the filesystem has an instance of this
65 structure. It holds a reference to the dentry, so dentries are never 68 structure. It holds a reference to the dentry, so dentries are never
@@ -254,17 +257,15 @@ static inline int simple_positive(struct dentry *dentry)
254 return dentry->d_inode && !d_unhashed(dentry); 257 return dentry->d_inode && !d_unhashed(dentry);
255} 258}
256 259
257static inline int __simple_empty(struct dentry *dentry) 260static inline void __autofs4_add_expiring(struct dentry *dentry)
258{ 261{
259 struct dentry *child; 262 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
260 int ret = 0; 263 struct autofs_info *ino = autofs4_dentry_ino(dentry);
261 264 if (ino) {
262 list_for_each_entry(child, &dentry->d_subdirs, d_u.d_child) 265 if (list_empty(&ino->expiring))
263 if (simple_positive(child)) 266 list_add(&ino->expiring, &sbi->expiring_list);
264 goto out; 267 }
265 ret = 1; 268 return;
266out:
267 return ret;
268} 269}
269 270
270static inline void autofs4_add_expiring(struct dentry *dentry) 271static inline void autofs4_add_expiring(struct dentry *dentry)
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index a796c9417fb1..cc1d01365905 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -91,24 +91,64 @@ done:
91} 91}
92 92
93/* 93/*
94 * Calculate next entry in top down tree traversal. 94 * Calculate and dget next entry in top down tree traversal.
95 * From next_mnt in namespace.c - elegant.
96 */ 95 */
97static struct dentry *next_dentry(struct dentry *p, struct dentry *root) 96static struct dentry *get_next_positive_dentry(struct dentry *prev,
97 struct dentry *root)
98{ 98{
99 struct list_head *next = p->d_subdirs.next; 99 struct list_head *next;
100 struct dentry *p, *ret;
101
102 if (prev == NULL)
103 return dget(prev);
100 104
105 spin_lock(&autofs4_lock);
106relock:
107 p = prev;
108 spin_lock(&p->d_lock);
109again:
110 next = p->d_subdirs.next;
101 if (next == &p->d_subdirs) { 111 if (next == &p->d_subdirs) {
102 while (1) { 112 while (1) {
103 if (p == root) 113 struct dentry *parent;
114
115 if (p == root) {
116 spin_unlock(&p->d_lock);
117 spin_unlock(&autofs4_lock);
118 dput(prev);
104 return NULL; 119 return NULL;
120 }
121
122 parent = p->d_parent;
123 if (!spin_trylock(&parent->d_lock)) {
124 spin_unlock(&p->d_lock);
125 cpu_relax();
126 goto relock;
127 }
128 spin_unlock(&p->d_lock);
105 next = p->d_u.d_child.next; 129 next = p->d_u.d_child.next;
106 if (next != &p->d_parent->d_subdirs) 130 p = parent;
131 if (next != &parent->d_subdirs)
107 break; 132 break;
108 p = p->d_parent;
109 } 133 }
110 } 134 }
111 return list_entry(next, struct dentry, d_u.d_child); 135 ret = list_entry(next, struct dentry, d_u.d_child);
136
137 spin_lock_nested(&ret->d_lock, DENTRY_D_LOCK_NESTED);
138 /* Negative dentry - try next */
139 if (!simple_positive(ret)) {
140 spin_unlock(&ret->d_lock);
141 p = ret;
142 goto again;
143 }
144 dget_dlock(ret);
145 spin_unlock(&ret->d_lock);
146 spin_unlock(&p->d_lock);
147 spin_unlock(&autofs4_lock);
148
149 dput(prev);
150
151 return ret;
112} 152}
113 153
114/* 154/*
@@ -158,18 +198,11 @@ static int autofs4_tree_busy(struct vfsmount *mnt,
158 if (!simple_positive(top)) 198 if (!simple_positive(top))
159 return 1; 199 return 1;
160 200
161 spin_lock(&dcache_lock); 201 p = NULL;
162 for (p = top; p; p = next_dentry(p, top)) { 202 while ((p = get_next_positive_dentry(p, top))) {
163 /* Negative dentry - give up */
164 if (!simple_positive(p))
165 continue;
166
167 DPRINTK("dentry %p %.*s", 203 DPRINTK("dentry %p %.*s",
168 p, (int) p->d_name.len, p->d_name.name); 204 p, (int) p->d_name.len, p->d_name.name);
169 205
170 p = dget(p);
171 spin_unlock(&dcache_lock);
172
173 /* 206 /*
174 * Is someone visiting anywhere in the subtree ? 207 * Is someone visiting anywhere in the subtree ?
175 * If there's no mount we need to check the usage 208 * If there's no mount we need to check the usage
@@ -198,16 +231,13 @@ static int autofs4_tree_busy(struct vfsmount *mnt,
198 else 231 else
199 ino_count++; 232 ino_count++;
200 233
201 if (atomic_read(&p->d_count) > ino_count) { 234 if (p->d_count > ino_count) {
202 top_ino->last_used = jiffies; 235 top_ino->last_used = jiffies;
203 dput(p); 236 dput(p);
204 return 1; 237 return 1;
205 } 238 }
206 } 239 }
207 dput(p);
208 spin_lock(&dcache_lock);
209 } 240 }
210 spin_unlock(&dcache_lock);
211 241
212 /* Timeout of a tree mount is ultimately determined by its top dentry */ 242 /* Timeout of a tree mount is ultimately determined by its top dentry */
213 if (!autofs4_can_expire(top, timeout, do_now)) 243 if (!autofs4_can_expire(top, timeout, do_now))
@@ -226,32 +256,21 @@ static struct dentry *autofs4_check_leaves(struct vfsmount *mnt,
226 DPRINTK("parent %p %.*s", 256 DPRINTK("parent %p %.*s",
227 parent, (int)parent->d_name.len, parent->d_name.name); 257 parent, (int)parent->d_name.len, parent->d_name.name);
228 258
229 spin_lock(&dcache_lock); 259 p = NULL;
230 for (p = parent; p; p = next_dentry(p, parent)) { 260 while ((p = get_next_positive_dentry(p, parent))) {
231 /* Negative dentry - give up */
232 if (!simple_positive(p))
233 continue;
234
235 DPRINTK("dentry %p %.*s", 261 DPRINTK("dentry %p %.*s",
236 p, (int) p->d_name.len, p->d_name.name); 262 p, (int) p->d_name.len, p->d_name.name);
237 263
238 p = dget(p);
239 spin_unlock(&dcache_lock);
240
241 if (d_mountpoint(p)) { 264 if (d_mountpoint(p)) {
242 /* Can we umount this guy */ 265 /* Can we umount this guy */
243 if (autofs4_mount_busy(mnt, p)) 266 if (autofs4_mount_busy(mnt, p))
244 goto cont; 267 continue;
245 268
246 /* Can we expire this guy */ 269 /* Can we expire this guy */
247 if (autofs4_can_expire(p, timeout, do_now)) 270 if (autofs4_can_expire(p, timeout, do_now))
248 return p; 271 return p;
249 } 272 }
250cont:
251 dput(p);
252 spin_lock(&dcache_lock);
253 } 273 }
254 spin_unlock(&dcache_lock);
255 return NULL; 274 return NULL;
256} 275}
257 276
@@ -276,7 +295,9 @@ struct dentry *autofs4_expire_direct(struct super_block *sb,
276 struct autofs_info *ino = autofs4_dentry_ino(root); 295 struct autofs_info *ino = autofs4_dentry_ino(root);
277 if (d_mountpoint(root)) { 296 if (d_mountpoint(root)) {
278 ino->flags |= AUTOFS_INF_MOUNTPOINT; 297 ino->flags |= AUTOFS_INF_MOUNTPOINT;
279 root->d_mounted--; 298 spin_lock(&root->d_lock);
299 root->d_flags &= ~DCACHE_MOUNTED;
300 spin_unlock(&root->d_lock);
280 } 301 }
281 ino->flags |= AUTOFS_INF_EXPIRING; 302 ino->flags |= AUTOFS_INF_EXPIRING;
282 init_completion(&ino->expire_complete); 303 init_completion(&ino->expire_complete);
@@ -302,8 +323,8 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
302{ 323{
303 unsigned long timeout; 324 unsigned long timeout;
304 struct dentry *root = sb->s_root; 325 struct dentry *root = sb->s_root;
326 struct dentry *dentry;
305 struct dentry *expired = NULL; 327 struct dentry *expired = NULL;
306 struct list_head *next;
307 int do_now = how & AUTOFS_EXP_IMMEDIATE; 328 int do_now = how & AUTOFS_EXP_IMMEDIATE;
308 int exp_leaves = how & AUTOFS_EXP_LEAVES; 329 int exp_leaves = how & AUTOFS_EXP_LEAVES;
309 struct autofs_info *ino; 330 struct autofs_info *ino;
@@ -315,23 +336,8 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
315 now = jiffies; 336 now = jiffies;
316 timeout = sbi->exp_timeout; 337 timeout = sbi->exp_timeout;
317 338
318 spin_lock(&dcache_lock); 339 dentry = NULL;
319 next = root->d_subdirs.next; 340 while ((dentry = get_next_positive_dentry(dentry, root))) {
320
321 /* On exit from the loop expire is set to a dgot dentry
322 * to expire or it's NULL */
323 while ( next != &root->d_subdirs ) {
324 struct dentry *dentry = list_entry(next, struct dentry, d_u.d_child);
325
326 /* Negative dentry - give up */
327 if (!simple_positive(dentry)) {
328 next = next->next;
329 continue;
330 }
331
332 dentry = dget(dentry);
333 spin_unlock(&dcache_lock);
334
335 spin_lock(&sbi->fs_lock); 341 spin_lock(&sbi->fs_lock);
336 ino = autofs4_dentry_ino(dentry); 342 ino = autofs4_dentry_ino(dentry);
337 343
@@ -347,7 +353,7 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
347 353
348 /* Path walk currently on this dentry? */ 354 /* Path walk currently on this dentry? */
349 ino_count = atomic_read(&ino->count) + 2; 355 ino_count = atomic_read(&ino->count) + 2;
350 if (atomic_read(&dentry->d_count) > ino_count) 356 if (dentry->d_count > ino_count)
351 goto next; 357 goto next;
352 358
353 /* Can we umount this guy */ 359 /* Can we umount this guy */
@@ -369,7 +375,7 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
369 if (!exp_leaves) { 375 if (!exp_leaves) {
370 /* Path walk currently on this dentry? */ 376 /* Path walk currently on this dentry? */
371 ino_count = atomic_read(&ino->count) + 1; 377 ino_count = atomic_read(&ino->count) + 1;
372 if (atomic_read(&dentry->d_count) > ino_count) 378 if (dentry->d_count > ino_count)
373 goto next; 379 goto next;
374 380
375 if (!autofs4_tree_busy(mnt, dentry, timeout, do_now)) { 381 if (!autofs4_tree_busy(mnt, dentry, timeout, do_now)) {
@@ -383,7 +389,7 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
383 } else { 389 } else {
384 /* Path walk currently on this dentry? */ 390 /* Path walk currently on this dentry? */
385 ino_count = atomic_read(&ino->count) + 1; 391 ino_count = atomic_read(&ino->count) + 1;
386 if (atomic_read(&dentry->d_count) > ino_count) 392 if (dentry->d_count > ino_count)
387 goto next; 393 goto next;
388 394
389 expired = autofs4_check_leaves(mnt, dentry, timeout, do_now); 395 expired = autofs4_check_leaves(mnt, dentry, timeout, do_now);
@@ -394,11 +400,7 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
394 } 400 }
395next: 401next:
396 spin_unlock(&sbi->fs_lock); 402 spin_unlock(&sbi->fs_lock);
397 dput(dentry);
398 spin_lock(&dcache_lock);
399 next = next->next;
400 } 403 }
401 spin_unlock(&dcache_lock);
402 return NULL; 404 return NULL;
403 405
404found: 406found:
@@ -408,9 +410,13 @@ found:
408 ino->flags |= AUTOFS_INF_EXPIRING; 410 ino->flags |= AUTOFS_INF_EXPIRING;
409 init_completion(&ino->expire_complete); 411 init_completion(&ino->expire_complete);
410 spin_unlock(&sbi->fs_lock); 412 spin_unlock(&sbi->fs_lock);
411 spin_lock(&dcache_lock); 413 spin_lock(&autofs4_lock);
414 spin_lock(&expired->d_parent->d_lock);
415 spin_lock_nested(&expired->d_lock, DENTRY_D_LOCK_NESTED);
412 list_move(&expired->d_parent->d_subdirs, &expired->d_u.d_child); 416 list_move(&expired->d_parent->d_subdirs, &expired->d_u.d_child);
413 spin_unlock(&dcache_lock); 417 spin_unlock(&expired->d_lock);
418 spin_unlock(&expired->d_parent->d_lock);
419 spin_unlock(&autofs4_lock);
414 return expired; 420 return expired;
415} 421}
416 422
@@ -499,7 +505,14 @@ int autofs4_do_expire_multi(struct super_block *sb, struct vfsmount *mnt,
499 505
500 spin_lock(&sbi->fs_lock); 506 spin_lock(&sbi->fs_lock);
501 if (ino->flags & AUTOFS_INF_MOUNTPOINT) { 507 if (ino->flags & AUTOFS_INF_MOUNTPOINT) {
502 sb->s_root->d_mounted++; 508 spin_lock(&sb->s_root->d_lock);
509 /*
510 * If we haven't been expired away, then reset
511 * mounted status.
512 */
513 if (mnt->mnt_parent != mnt)
514 sb->s_root->d_flags |= DCACHE_MOUNTED;
515 spin_unlock(&sb->s_root->d_lock);
503 ino->flags &= ~AUTOFS_INF_MOUNTPOINT; 516 ino->flags &= ~AUTOFS_INF_MOUNTPOINT;
504 } 517 }
505 ino->flags &= ~AUTOFS_INF_EXPIRING; 518 ino->flags &= ~AUTOFS_INF_EXPIRING;
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index ac87e49fa706..a7bdb9dcac84 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -309,7 +309,7 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent)
309 goto fail_iput; 309 goto fail_iput;
310 pipe = NULL; 310 pipe = NULL;
311 311
312 root->d_op = &autofs4_sb_dentry_operations; 312 d_set_d_op(root, &autofs4_sb_dentry_operations);
313 root->d_fsdata = ino; 313 root->d_fsdata = ino;
314 314
315 /* Can this call block? */ 315 /* Can this call block? */
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index d34896cfb19f..651e4ef563b1 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -23,6 +23,8 @@
23 23
24#include "autofs_i.h" 24#include "autofs_i.h"
25 25
26DEFINE_SPINLOCK(autofs4_lock);
27
26static int autofs4_dir_symlink(struct inode *,struct dentry *,const char *); 28static int autofs4_dir_symlink(struct inode *,struct dentry *,const char *);
27static int autofs4_dir_unlink(struct inode *,struct dentry *); 29static int autofs4_dir_unlink(struct inode *,struct dentry *);
28static int autofs4_dir_rmdir(struct inode *,struct dentry *); 30static int autofs4_dir_rmdir(struct inode *,struct dentry *);
@@ -142,12 +144,15 @@ static int autofs4_dir_open(struct inode *inode, struct file *file)
142 * autofs file system so just let the libfs routines handle 144 * autofs file system so just let the libfs routines handle
143 * it. 145 * it.
144 */ 146 */
145 spin_lock(&dcache_lock); 147 spin_lock(&autofs4_lock);
148 spin_lock(&dentry->d_lock);
146 if (!d_mountpoint(dentry) && list_empty(&dentry->d_subdirs)) { 149 if (!d_mountpoint(dentry) && list_empty(&dentry->d_subdirs)) {
147 spin_unlock(&dcache_lock); 150 spin_unlock(&dentry->d_lock);
151 spin_unlock(&autofs4_lock);
148 return -ENOENT; 152 return -ENOENT;
149 } 153 }
150 spin_unlock(&dcache_lock); 154 spin_unlock(&dentry->d_lock);
155 spin_unlock(&autofs4_lock);
151 156
152out: 157out:
153 return dcache_dir_open(inode, file); 158 return dcache_dir_open(inode, file);
@@ -252,9 +257,11 @@ static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
252 /* We trigger a mount for almost all flags */ 257 /* We trigger a mount for almost all flags */
253 lookup_type = autofs4_need_mount(nd->flags); 258 lookup_type = autofs4_need_mount(nd->flags);
254 spin_lock(&sbi->fs_lock); 259 spin_lock(&sbi->fs_lock);
255 spin_lock(&dcache_lock); 260 spin_lock(&autofs4_lock);
261 spin_lock(&dentry->d_lock);
256 if (!(lookup_type || ino->flags & AUTOFS_INF_PENDING)) { 262 if (!(lookup_type || ino->flags & AUTOFS_INF_PENDING)) {
257 spin_unlock(&dcache_lock); 263 spin_unlock(&dentry->d_lock);
264 spin_unlock(&autofs4_lock);
258 spin_unlock(&sbi->fs_lock); 265 spin_unlock(&sbi->fs_lock);
259 goto follow; 266 goto follow;
260 } 267 }
@@ -266,7 +273,8 @@ static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
266 */ 273 */
267 if (ino->flags & AUTOFS_INF_PENDING || 274 if (ino->flags & AUTOFS_INF_PENDING ||
268 (!d_mountpoint(dentry) && list_empty(&dentry->d_subdirs))) { 275 (!d_mountpoint(dentry) && list_empty(&dentry->d_subdirs))) {
269 spin_unlock(&dcache_lock); 276 spin_unlock(&dentry->d_lock);
277 spin_unlock(&autofs4_lock);
270 spin_unlock(&sbi->fs_lock); 278 spin_unlock(&sbi->fs_lock);
271 279
272 status = try_to_fill_dentry(dentry, nd->flags); 280 status = try_to_fill_dentry(dentry, nd->flags);
@@ -275,7 +283,8 @@ static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
275 283
276 goto follow; 284 goto follow;
277 } 285 }
278 spin_unlock(&dcache_lock); 286 spin_unlock(&dentry->d_lock);
287 spin_unlock(&autofs4_lock);
279 spin_unlock(&sbi->fs_lock); 288 spin_unlock(&sbi->fs_lock);
280follow: 289follow:
281 /* 290 /*
@@ -306,12 +315,19 @@ out_error:
306 */ 315 */
307static int autofs4_revalidate(struct dentry *dentry, struct nameidata *nd) 316static int autofs4_revalidate(struct dentry *dentry, struct nameidata *nd)
308{ 317{
309 struct inode *dir = dentry->d_parent->d_inode; 318 struct inode *dir;
310 struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb); 319 struct autofs_sb_info *sbi;
311 int oz_mode = autofs4_oz_mode(sbi); 320 int oz_mode;
312 int flags = nd ? nd->flags : 0; 321 int flags = nd ? nd->flags : 0;
313 int status = 1; 322 int status = 1;
314 323
324 if (flags & LOOKUP_RCU)
325 return -ECHILD;
326
327 dir = dentry->d_parent->d_inode;
328 sbi = autofs4_sbi(dir->i_sb);
329 oz_mode = autofs4_oz_mode(sbi);
330
315 /* Pending dentry */ 331 /* Pending dentry */
316 spin_lock(&sbi->fs_lock); 332 spin_lock(&sbi->fs_lock);
317 if (autofs4_ispending(dentry)) { 333 if (autofs4_ispending(dentry)) {
@@ -346,12 +362,14 @@ static int autofs4_revalidate(struct dentry *dentry, struct nameidata *nd)
346 return 0; 362 return 0;
347 363
348 /* Check for a non-mountpoint directory with no contents */ 364 /* Check for a non-mountpoint directory with no contents */
349 spin_lock(&dcache_lock); 365 spin_lock(&autofs4_lock);
366 spin_lock(&dentry->d_lock);
350 if (S_ISDIR(dentry->d_inode->i_mode) && 367 if (S_ISDIR(dentry->d_inode->i_mode) &&
351 !d_mountpoint(dentry) && list_empty(&dentry->d_subdirs)) { 368 !d_mountpoint(dentry) && list_empty(&dentry->d_subdirs)) {
352 DPRINTK("dentry=%p %.*s, emptydir", 369 DPRINTK("dentry=%p %.*s, emptydir",
353 dentry, dentry->d_name.len, dentry->d_name.name); 370 dentry, dentry->d_name.len, dentry->d_name.name);
354 spin_unlock(&dcache_lock); 371 spin_unlock(&dentry->d_lock);
372 spin_unlock(&autofs4_lock);
355 373
356 /* The daemon never causes a mount to trigger */ 374 /* The daemon never causes a mount to trigger */
357 if (oz_mode) 375 if (oz_mode)
@@ -367,7 +385,8 @@ static int autofs4_revalidate(struct dentry *dentry, struct nameidata *nd)
367 385
368 return status; 386 return status;
369 } 387 }
370 spin_unlock(&dcache_lock); 388 spin_unlock(&dentry->d_lock);
389 spin_unlock(&autofs4_lock);
371 390
372 return 1; 391 return 1;
373} 392}
@@ -422,7 +441,7 @@ static struct dentry *autofs4_lookup_active(struct dentry *dentry)
422 const unsigned char *str = name->name; 441 const unsigned char *str = name->name;
423 struct list_head *p, *head; 442 struct list_head *p, *head;
424 443
425 spin_lock(&dcache_lock); 444 spin_lock(&autofs4_lock);
426 spin_lock(&sbi->lookup_lock); 445 spin_lock(&sbi->lookup_lock);
427 head = &sbi->active_list; 446 head = &sbi->active_list;
428 list_for_each(p, head) { 447 list_for_each(p, head) {
@@ -436,7 +455,7 @@ static struct dentry *autofs4_lookup_active(struct dentry *dentry)
436 spin_lock(&active->d_lock); 455 spin_lock(&active->d_lock);
437 456
438 /* Already gone? */ 457 /* Already gone? */
439 if (atomic_read(&active->d_count) == 0) 458 if (active->d_count == 0)
440 goto next; 459 goto next;
441 460
442 qstr = &active->d_name; 461 qstr = &active->d_name;
@@ -452,17 +471,17 @@ static struct dentry *autofs4_lookup_active(struct dentry *dentry)
452 goto next; 471 goto next;
453 472
454 if (d_unhashed(active)) { 473 if (d_unhashed(active)) {
455 dget(active); 474 dget_dlock(active);
456 spin_unlock(&active->d_lock); 475 spin_unlock(&active->d_lock);
457 spin_unlock(&sbi->lookup_lock); 476 spin_unlock(&sbi->lookup_lock);
458 spin_unlock(&dcache_lock); 477 spin_unlock(&autofs4_lock);
459 return active; 478 return active;
460 } 479 }
461next: 480next:
462 spin_unlock(&active->d_lock); 481 spin_unlock(&active->d_lock);
463 } 482 }
464 spin_unlock(&sbi->lookup_lock); 483 spin_unlock(&sbi->lookup_lock);
465 spin_unlock(&dcache_lock); 484 spin_unlock(&autofs4_lock);
466 485
467 return NULL; 486 return NULL;
468} 487}
@@ -477,7 +496,7 @@ static struct dentry *autofs4_lookup_expiring(struct dentry *dentry)
477 const unsigned char *str = name->name; 496 const unsigned char *str = name->name;
478 struct list_head *p, *head; 497 struct list_head *p, *head;
479 498
480 spin_lock(&dcache_lock); 499 spin_lock(&autofs4_lock);
481 spin_lock(&sbi->lookup_lock); 500 spin_lock(&sbi->lookup_lock);
482 head = &sbi->expiring_list; 501 head = &sbi->expiring_list;
483 list_for_each(p, head) { 502 list_for_each(p, head) {
@@ -507,17 +526,17 @@ static struct dentry *autofs4_lookup_expiring(struct dentry *dentry)
507 goto next; 526 goto next;
508 527
509 if (d_unhashed(expiring)) { 528 if (d_unhashed(expiring)) {
510 dget(expiring); 529 dget_dlock(expiring);
511 spin_unlock(&expiring->d_lock); 530 spin_unlock(&expiring->d_lock);
512 spin_unlock(&sbi->lookup_lock); 531 spin_unlock(&sbi->lookup_lock);
513 spin_unlock(&dcache_lock); 532 spin_unlock(&autofs4_lock);
514 return expiring; 533 return expiring;
515 } 534 }
516next: 535next:
517 spin_unlock(&expiring->d_lock); 536 spin_unlock(&expiring->d_lock);
518 } 537 }
519 spin_unlock(&sbi->lookup_lock); 538 spin_unlock(&sbi->lookup_lock);
520 spin_unlock(&dcache_lock); 539 spin_unlock(&autofs4_lock);
521 540
522 return NULL; 541 return NULL;
523} 542}
@@ -559,7 +578,7 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s
559 * we check for the hashed dentry and return the newly 578 * we check for the hashed dentry and return the newly
560 * hashed dentry. 579 * hashed dentry.
561 */ 580 */
562 dentry->d_op = &autofs4_root_dentry_operations; 581 d_set_d_op(dentry, &autofs4_root_dentry_operations);
563 582
564 /* 583 /*
565 * And we need to ensure that the same dentry is used for 584 * And we need to ensure that the same dentry is used for
@@ -698,9 +717,9 @@ static int autofs4_dir_symlink(struct inode *dir,
698 d_add(dentry, inode); 717 d_add(dentry, inode);
699 718
700 if (dir == dir->i_sb->s_root->d_inode) 719 if (dir == dir->i_sb->s_root->d_inode)
701 dentry->d_op = &autofs4_root_dentry_operations; 720 d_set_d_op(dentry, &autofs4_root_dentry_operations);
702 else 721 else
703 dentry->d_op = &autofs4_dentry_operations; 722 d_set_d_op(dentry, &autofs4_dentry_operations);
704 723
705 dentry->d_fsdata = ino; 724 dentry->d_fsdata = ino;
706 ino->dentry = dget(dentry); 725 ino->dentry = dget(dentry);
@@ -753,12 +772,12 @@ static int autofs4_dir_unlink(struct inode *dir, struct dentry *dentry)
753 772
754 dir->i_mtime = CURRENT_TIME; 773 dir->i_mtime = CURRENT_TIME;
755 774
756 spin_lock(&dcache_lock); 775 spin_lock(&autofs4_lock);
757 autofs4_add_expiring(dentry); 776 autofs4_add_expiring(dentry);
758 spin_lock(&dentry->d_lock); 777 spin_lock(&dentry->d_lock);
759 __d_drop(dentry); 778 __d_drop(dentry);
760 spin_unlock(&dentry->d_lock); 779 spin_unlock(&dentry->d_lock);
761 spin_unlock(&dcache_lock); 780 spin_unlock(&autofs4_lock);
762 781
763 return 0; 782 return 0;
764} 783}
@@ -775,16 +794,20 @@ static int autofs4_dir_rmdir(struct inode *dir, struct dentry *dentry)
775 if (!autofs4_oz_mode(sbi)) 794 if (!autofs4_oz_mode(sbi))
776 return -EACCES; 795 return -EACCES;
777 796
778 spin_lock(&dcache_lock); 797 spin_lock(&autofs4_lock);
798 spin_lock(&sbi->lookup_lock);
799 spin_lock(&dentry->d_lock);
779 if (!list_empty(&dentry->d_subdirs)) { 800 if (!list_empty(&dentry->d_subdirs)) {
780 spin_unlock(&dcache_lock); 801 spin_unlock(&dentry->d_lock);
802 spin_unlock(&sbi->lookup_lock);
803 spin_unlock(&autofs4_lock);
781 return -ENOTEMPTY; 804 return -ENOTEMPTY;
782 } 805 }
783 autofs4_add_expiring(dentry); 806 __autofs4_add_expiring(dentry);
784 spin_lock(&dentry->d_lock); 807 spin_unlock(&sbi->lookup_lock);
785 __d_drop(dentry); 808 __d_drop(dentry);
786 spin_unlock(&dentry->d_lock); 809 spin_unlock(&dentry->d_lock);
787 spin_unlock(&dcache_lock); 810 spin_unlock(&autofs4_lock);
788 811
789 if (atomic_dec_and_test(&ino->count)) { 812 if (atomic_dec_and_test(&ino->count)) {
790 p_ino = autofs4_dentry_ino(dentry->d_parent); 813 p_ino = autofs4_dentry_ino(dentry->d_parent);
@@ -829,9 +852,9 @@ static int autofs4_dir_mkdir(struct inode *dir, struct dentry *dentry, int mode)
829 d_add(dentry, inode); 852 d_add(dentry, inode);
830 853
831 if (dir == dir->i_sb->s_root->d_inode) 854 if (dir == dir->i_sb->s_root->d_inode)
832 dentry->d_op = &autofs4_root_dentry_operations; 855 d_set_d_op(dentry, &autofs4_root_dentry_operations);
833 else 856 else
834 dentry->d_op = &autofs4_dentry_operations; 857 d_set_d_op(dentry, &autofs4_dentry_operations);
835 858
836 dentry->d_fsdata = ino; 859 dentry->d_fsdata = ino;
837 ino->dentry = dget(dentry); 860 ino->dentry = dget(dentry);
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
index 2341375386f8..c5f8459c905e 100644
--- a/fs/autofs4/waitq.c
+++ b/fs/autofs4/waitq.c
@@ -186,16 +186,26 @@ static int autofs4_getpath(struct autofs_sb_info *sbi,
186{ 186{
187 struct dentry *root = sbi->sb->s_root; 187 struct dentry *root = sbi->sb->s_root;
188 struct dentry *tmp; 188 struct dentry *tmp;
189 char *buf = *name; 189 char *buf;
190 char *p; 190 char *p;
191 int len = 0; 191 int len;
192 unsigned seq;
192 193
193 spin_lock(&dcache_lock); 194rename_retry:
195 buf = *name;
196 len = 0;
197
198 seq = read_seqbegin(&rename_lock);
199 rcu_read_lock();
200 spin_lock(&autofs4_lock);
194 for (tmp = dentry ; tmp != root ; tmp = tmp->d_parent) 201 for (tmp = dentry ; tmp != root ; tmp = tmp->d_parent)
195 len += tmp->d_name.len + 1; 202 len += tmp->d_name.len + 1;
196 203
197 if (!len || --len > NAME_MAX) { 204 if (!len || --len > NAME_MAX) {
198 spin_unlock(&dcache_lock); 205 spin_unlock(&autofs4_lock);
206 rcu_read_unlock();
207 if (read_seqretry(&rename_lock, seq))
208 goto rename_retry;
199 return 0; 209 return 0;
200 } 210 }
201 211
@@ -208,7 +218,10 @@ static int autofs4_getpath(struct autofs_sb_info *sbi,
208 p -= tmp->d_name.len; 218 p -= tmp->d_name.len;
209 strncpy(p, tmp->d_name.name, tmp->d_name.len); 219 strncpy(p, tmp->d_name.name, tmp->d_name.len);
210 } 220 }
211 spin_unlock(&dcache_lock); 221 spin_unlock(&autofs4_lock);
222 rcu_read_unlock();
223 if (read_seqretry(&rename_lock, seq))
224 goto rename_retry;
212 225
213 return len; 226 return len;
214} 227}
diff --git a/fs/bad_inode.c b/fs/bad_inode.c
index f024d8aaddef..9ad2369d9e35 100644
--- a/fs/bad_inode.c
+++ b/fs/bad_inode.c
@@ -229,8 +229,11 @@ static int bad_inode_readlink(struct dentry *dentry, char __user *buffer,
229 return -EIO; 229 return -EIO;
230} 230}
231 231
232static int bad_inode_permission(struct inode *inode, int mask) 232static int bad_inode_permission(struct inode *inode, int mask, unsigned int flags)
233{ 233{
234 if (flags & IPERM_FLAG_RCU)
235 return -ECHILD;
236
234 return -EIO; 237 return -EIO;
235} 238}
236 239
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index aa4e7c7ae3c6..de93581b79a2 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -284,12 +284,18 @@ befs_alloc_inode(struct super_block *sb)
284 return &bi->vfs_inode; 284 return &bi->vfs_inode;
285} 285}
286 286
287static void 287static void befs_i_callback(struct rcu_head *head)
288befs_destroy_inode(struct inode *inode)
289{ 288{
289 struct inode *inode = container_of(head, struct inode, i_rcu);
290 INIT_LIST_HEAD(&inode->i_dentry);
290 kmem_cache_free(befs_inode_cachep, BEFS_I(inode)); 291 kmem_cache_free(befs_inode_cachep, BEFS_I(inode));
291} 292}
292 293
294static void befs_destroy_inode(struct inode *inode)
295{
296 call_rcu(&inode->i_rcu, befs_i_callback);
297}
298
293static void init_once(void *foo) 299static void init_once(void *foo)
294{ 300{
295 struct befs_inode_info *bi = (struct befs_inode_info *) foo; 301 struct befs_inode_info *bi = (struct befs_inode_info *) foo;
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index 76db6d7d49bb..a8e37f81d097 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -248,11 +248,18 @@ static struct inode *bfs_alloc_inode(struct super_block *sb)
248 return &bi->vfs_inode; 248 return &bi->vfs_inode;
249} 249}
250 250
251static void bfs_destroy_inode(struct inode *inode) 251static void bfs_i_callback(struct rcu_head *head)
252{ 252{
253 struct inode *inode = container_of(head, struct inode, i_rcu);
254 INIT_LIST_HEAD(&inode->i_dentry);
253 kmem_cache_free(bfs_inode_cachep, BFS_I(inode)); 255 kmem_cache_free(bfs_inode_cachep, BFS_I(inode));
254} 256}
255 257
258static void bfs_destroy_inode(struct inode *inode)
259{
260 call_rcu(&inode->i_rcu, bfs_i_callback);
261}
262
256static void init_once(void *foo) 263static void init_once(void *foo)
257{ 264{
258 struct bfs_inode_info *bi = foo; 265 struct bfs_inode_info *bi = foo;
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 4230252fd689..771f23527010 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -409,13 +409,20 @@ static struct inode *bdev_alloc_inode(struct super_block *sb)
409 return &ei->vfs_inode; 409 return &ei->vfs_inode;
410} 410}
411 411
412static void bdev_destroy_inode(struct inode *inode) 412static void bdev_i_callback(struct rcu_head *head)
413{ 413{
414 struct inode *inode = container_of(head, struct inode, i_rcu);
414 struct bdev_inode *bdi = BDEV_I(inode); 415 struct bdev_inode *bdi = BDEV_I(inode);
415 416
417 INIT_LIST_HEAD(&inode->i_dentry);
416 kmem_cache_free(bdev_cachep, bdi); 418 kmem_cache_free(bdev_cachep, bdi);
417} 419}
418 420
421static void bdev_destroy_inode(struct inode *inode)
422{
423 call_rcu(&inode->i_rcu, bdev_i_callback);
424}
425
419static void init_once(void *foo) 426static void init_once(void *foo)
420{ 427{
421 struct bdev_inode *ei = (struct bdev_inode *) foo; 428 struct bdev_inode *ei = (struct bdev_inode *) foo;
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index 2222d161c7b6..6ae2c8cac9d5 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -185,18 +185,23 @@ static int btrfs_xattr_acl_set(struct dentry *dentry, const char *name,
185 return ret; 185 return ret;
186} 186}
187 187
188int btrfs_check_acl(struct inode *inode, int mask) 188int btrfs_check_acl(struct inode *inode, int mask, unsigned int flags)
189{ 189{
190 struct posix_acl *acl;
191 int error = -EAGAIN; 190 int error = -EAGAIN;
192 191
193 acl = btrfs_get_acl(inode, ACL_TYPE_ACCESS); 192 if (flags & IPERM_FLAG_RCU) {
193 if (!negative_cached_acl(inode, ACL_TYPE_ACCESS))
194 error = -ECHILD;
194 195
195 if (IS_ERR(acl)) 196 } else {
196 return PTR_ERR(acl); 197 struct posix_acl *acl;
197 if (acl) { 198 acl = btrfs_get_acl(inode, ACL_TYPE_ACCESS);
198 error = posix_acl_permission(inode, acl, mask); 199 if (IS_ERR(acl))
199 posix_acl_release(acl); 200 return PTR_ERR(acl);
201 if (acl) {
202 error = posix_acl_permission(inode, acl, mask);
203 posix_acl_release(acl);
204 }
200 } 205 }
201 206
202 return error; 207 return error;
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index af52f6d7a4d8..a142d204b526 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -2544,7 +2544,7 @@ int btrfs_sync_fs(struct super_block *sb, int wait);
2544 2544
2545/* acl.c */ 2545/* acl.c */
2546#ifdef CONFIG_BTRFS_FS_POSIX_ACL 2546#ifdef CONFIG_BTRFS_FS_POSIX_ACL
2547int btrfs_check_acl(struct inode *inode, int mask); 2547int btrfs_check_acl(struct inode *inode, int mask, unsigned int flags);
2548#else 2548#else
2549#define btrfs_check_acl NULL 2549#define btrfs_check_acl NULL
2550#endif 2550#endif
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index 659f532d26a0..0ccf9a8afcdf 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -110,7 +110,7 @@ static struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
110 110
111 dentry = d_obtain_alias(inode); 111 dentry = d_obtain_alias(inode);
112 if (!IS_ERR(dentry)) 112 if (!IS_ERR(dentry))
113 dentry->d_op = &btrfs_dentry_operations; 113 d_set_d_op(dentry, &btrfs_dentry_operations);
114 return dentry; 114 return dentry;
115fail: 115fail:
116 srcu_read_unlock(&fs_info->subvol_srcu, index); 116 srcu_read_unlock(&fs_info->subvol_srcu, index);
@@ -225,7 +225,7 @@ static struct dentry *btrfs_get_parent(struct dentry *child)
225 key.offset = 0; 225 key.offset = 0;
226 dentry = d_obtain_alias(btrfs_iget(root->fs_info->sb, &key, root, NULL)); 226 dentry = d_obtain_alias(btrfs_iget(root->fs_info->sb, &key, root, NULL));
227 if (!IS_ERR(dentry)) 227 if (!IS_ERR(dentry))
228 dentry->d_op = &btrfs_dentry_operations; 228 d_set_d_op(dentry, &btrfs_dentry_operations);
229 return dentry; 229 return dentry;
230fail: 230fail:
231 btrfs_free_path(path); 231 btrfs_free_path(path);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 72f31ecb5c90..a0ff46a47895 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -4084,7 +4084,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
4084 int index; 4084 int index;
4085 int ret; 4085 int ret;
4086 4086
4087 dentry->d_op = &btrfs_dentry_operations; 4087 d_set_d_op(dentry, &btrfs_dentry_operations);
4088 4088
4089 if (dentry->d_name.len > BTRFS_NAME_LEN) 4089 if (dentry->d_name.len > BTRFS_NAME_LEN)
4090 return ERR_PTR(-ENAMETOOLONG); 4090 return ERR_PTR(-ENAMETOOLONG);
@@ -4127,7 +4127,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
4127 return inode; 4127 return inode;
4128} 4128}
4129 4129
4130static int btrfs_dentry_delete(struct dentry *dentry) 4130static int btrfs_dentry_delete(const struct dentry *dentry)
4131{ 4131{
4132 struct btrfs_root *root; 4132 struct btrfs_root *root;
4133 4133
@@ -6495,6 +6495,13 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
6495 return inode; 6495 return inode;
6496} 6496}
6497 6497
6498static void btrfs_i_callback(struct rcu_head *head)
6499{
6500 struct inode *inode = container_of(head, struct inode, i_rcu);
6501 INIT_LIST_HEAD(&inode->i_dentry);
6502 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
6503}
6504
6498void btrfs_destroy_inode(struct inode *inode) 6505void btrfs_destroy_inode(struct inode *inode)
6499{ 6506{
6500 struct btrfs_ordered_extent *ordered; 6507 struct btrfs_ordered_extent *ordered;
@@ -6564,7 +6571,7 @@ void btrfs_destroy_inode(struct inode *inode)
6564 inode_tree_del(inode); 6571 inode_tree_del(inode);
6565 btrfs_drop_extent_cache(inode, 0, (u64)-1, 0); 6572 btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
6566free: 6573free:
6567 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); 6574 call_rcu(&inode->i_rcu, btrfs_i_callback);
6568} 6575}
6569 6576
6570int btrfs_drop_inode(struct inode *inode) 6577int btrfs_drop_inode(struct inode *inode)
@@ -7204,11 +7211,11 @@ static int btrfs_set_page_dirty(struct page *page)
7204 return __set_page_dirty_nobuffers(page); 7211 return __set_page_dirty_nobuffers(page);
7205} 7212}
7206 7213
7207static int btrfs_permission(struct inode *inode, int mask) 7214static int btrfs_permission(struct inode *inode, int mask, unsigned int flags)
7208{ 7215{
7209 if ((BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) && (mask & MAY_WRITE)) 7216 if ((BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) && (mask & MAY_WRITE))
7210 return -EACCES; 7217 return -EACCES;
7211 return generic_permission(inode, mask, btrfs_check_acl); 7218 return generic_permission(inode, mask, flags, btrfs_check_acl);
7212} 7219}
7213 7220
7214static const struct inode_operations btrfs_dir_inode_operations = { 7221static const struct inode_operations btrfs_dir_inode_operations = {
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index d902948a90d8..fa7ca04ee816 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -42,11 +42,11 @@ int ceph_init_dentry(struct dentry *dentry)
42 42
43 if (dentry->d_parent == NULL || /* nfs fh_to_dentry */ 43 if (dentry->d_parent == NULL || /* nfs fh_to_dentry */
44 ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP) 44 ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP)
45 dentry->d_op = &ceph_dentry_ops; 45 d_set_d_op(dentry, &ceph_dentry_ops);
46 else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR) 46 else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR)
47 dentry->d_op = &ceph_snapdir_dentry_ops; 47 d_set_d_op(dentry, &ceph_snapdir_dentry_ops);
48 else 48 else
49 dentry->d_op = &ceph_snap_dentry_ops; 49 d_set_d_op(dentry, &ceph_snap_dentry_ops);
50 50
51 di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS | __GFP_ZERO); 51 di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS | __GFP_ZERO);
52 if (!di) 52 if (!di)
@@ -112,7 +112,7 @@ static int __dcache_readdir(struct file *filp,
112 dout("__dcache_readdir %p at %llu (last %p)\n", dir, filp->f_pos, 112 dout("__dcache_readdir %p at %llu (last %p)\n", dir, filp->f_pos,
113 last); 113 last);
114 114
115 spin_lock(&dcache_lock); 115 spin_lock(&parent->d_lock);
116 116
117 /* start at beginning? */ 117 /* start at beginning? */
118 if (filp->f_pos == 2 || last == NULL || 118 if (filp->f_pos == 2 || last == NULL ||
@@ -136,6 +136,7 @@ more:
136 fi->at_end = 1; 136 fi->at_end = 1;
137 goto out_unlock; 137 goto out_unlock;
138 } 138 }
139 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
139 if (!d_unhashed(dentry) && dentry->d_inode && 140 if (!d_unhashed(dentry) && dentry->d_inode &&
140 ceph_snap(dentry->d_inode) != CEPH_SNAPDIR && 141 ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
141 ceph_ino(dentry->d_inode) != CEPH_INO_CEPH && 142 ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
@@ -145,13 +146,15 @@ more:
145 dentry->d_name.len, dentry->d_name.name, di->offset, 146 dentry->d_name.len, dentry->d_name.name, di->offset,
146 filp->f_pos, d_unhashed(dentry) ? " unhashed" : "", 147 filp->f_pos, d_unhashed(dentry) ? " unhashed" : "",
147 !dentry->d_inode ? " null" : ""); 148 !dentry->d_inode ? " null" : "");
149 spin_unlock(&dentry->d_lock);
148 p = p->prev; 150 p = p->prev;
149 dentry = list_entry(p, struct dentry, d_u.d_child); 151 dentry = list_entry(p, struct dentry, d_u.d_child);
150 di = ceph_dentry(dentry); 152 di = ceph_dentry(dentry);
151 } 153 }
152 154
153 atomic_inc(&dentry->d_count); 155 dget_dlock(dentry);
154 spin_unlock(&dcache_lock); 156 spin_unlock(&dentry->d_lock);
157 spin_unlock(&parent->d_lock);
155 158
156 dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, filp->f_pos, 159 dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, filp->f_pos,
157 dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode); 160 dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
@@ -177,19 +180,19 @@ more:
177 180
178 filp->f_pos++; 181 filp->f_pos++;
179 182
180 /* make sure a dentry wasn't dropped while we didn't have dcache_lock */ 183 /* make sure a dentry wasn't dropped while we didn't have parent lock */
181 if (!ceph_i_test(dir, CEPH_I_COMPLETE)) { 184 if (!ceph_i_test(dir, CEPH_I_COMPLETE)) {
182 dout(" lost I_COMPLETE on %p; falling back to mds\n", dir); 185 dout(" lost I_COMPLETE on %p; falling back to mds\n", dir);
183 err = -EAGAIN; 186 err = -EAGAIN;
184 goto out; 187 goto out;
185 } 188 }
186 189
187 spin_lock(&dcache_lock); 190 spin_lock(&parent->d_lock);
188 p = p->prev; /* advance to next dentry */ 191 p = p->prev; /* advance to next dentry */
189 goto more; 192 goto more;
190 193
191out_unlock: 194out_unlock:
192 spin_unlock(&dcache_lock); 195 spin_unlock(&parent->d_lock);
193out: 196out:
194 if (last) 197 if (last)
195 dput(last); 198 dput(last);
@@ -987,7 +990,12 @@ static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
987 */ 990 */
988static int ceph_d_revalidate(struct dentry *dentry, struct nameidata *nd) 991static int ceph_d_revalidate(struct dentry *dentry, struct nameidata *nd)
989{ 992{
990 struct inode *dir = dentry->d_parent->d_inode; 993 struct inode *dir;
994
995 if (nd->flags & LOOKUP_RCU)
996 return -ECHILD;
997
998 dir = dentry->d_parent->d_inode;
991 999
992 dout("d_revalidate %p '%.*s' inode %p offset %lld\n", dentry, 1000 dout("d_revalidate %p '%.*s' inode %p offset %lld\n", dentry,
993 dentry->d_name.len, dentry->d_name.name, dentry->d_inode, 1001 dentry->d_name.len, dentry->d_name.name, dentry->d_inode,
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index bf1286588f26..e61de4f7b99d 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -368,6 +368,15 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
368 return &ci->vfs_inode; 368 return &ci->vfs_inode;
369} 369}
370 370
371static void ceph_i_callback(struct rcu_head *head)
372{
373 struct inode *inode = container_of(head, struct inode, i_rcu);
374 struct ceph_inode_info *ci = ceph_inode(inode);
375
376 INIT_LIST_HEAD(&inode->i_dentry);
377 kmem_cache_free(ceph_inode_cachep, ci);
378}
379
371void ceph_destroy_inode(struct inode *inode) 380void ceph_destroy_inode(struct inode *inode)
372{ 381{
373 struct ceph_inode_info *ci = ceph_inode(inode); 382 struct ceph_inode_info *ci = ceph_inode(inode);
@@ -407,7 +416,7 @@ void ceph_destroy_inode(struct inode *inode)
407 if (ci->i_xattrs.prealloc_blob) 416 if (ci->i_xattrs.prealloc_blob)
408 ceph_buffer_put(ci->i_xattrs.prealloc_blob); 417 ceph_buffer_put(ci->i_xattrs.prealloc_blob);
409 418
410 kmem_cache_free(ceph_inode_cachep, ci); 419 call_rcu(&inode->i_rcu, ceph_i_callback);
411} 420}
412 421
413 422
@@ -841,13 +850,13 @@ static void ceph_set_dentry_offset(struct dentry *dn)
841 di->offset = ceph_inode(inode)->i_max_offset++; 850 di->offset = ceph_inode(inode)->i_max_offset++;
842 spin_unlock(&inode->i_lock); 851 spin_unlock(&inode->i_lock);
843 852
844 spin_lock(&dcache_lock); 853 spin_lock(&dir->d_lock);
845 spin_lock(&dn->d_lock); 854 spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
846 list_move(&dn->d_u.d_child, &dir->d_subdirs); 855 list_move(&dn->d_u.d_child, &dir->d_subdirs);
847 dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset, 856 dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset,
848 dn->d_u.d_child.prev, dn->d_u.d_child.next); 857 dn->d_u.d_child.prev, dn->d_u.d_child.next);
849 spin_unlock(&dn->d_lock); 858 spin_unlock(&dn->d_lock);
850 spin_unlock(&dcache_lock); 859 spin_unlock(&dir->d_lock);
851} 860}
852 861
853/* 862/*
@@ -879,8 +888,8 @@ static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
879 } else if (realdn) { 888 } else if (realdn) {
880 dout("dn %p (%d) spliced with %p (%d) " 889 dout("dn %p (%d) spliced with %p (%d) "
881 "inode %p ino %llx.%llx\n", 890 "inode %p ino %llx.%llx\n",
882 dn, atomic_read(&dn->d_count), 891 dn, dn->d_count,
883 realdn, atomic_read(&realdn->d_count), 892 realdn, realdn->d_count,
884 realdn->d_inode, ceph_vinop(realdn->d_inode)); 893 realdn->d_inode, ceph_vinop(realdn->d_inode));
885 dput(dn); 894 dput(dn);
886 dn = realdn; 895 dn = realdn;
@@ -1231,11 +1240,11 @@ retry_lookup:
1231 goto retry_lookup; 1240 goto retry_lookup;
1232 } else { 1241 } else {
1233 /* reorder parent's d_subdirs */ 1242 /* reorder parent's d_subdirs */
1234 spin_lock(&dcache_lock); 1243 spin_lock(&parent->d_lock);
1235 spin_lock(&dn->d_lock); 1244 spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
1236 list_move(&dn->d_u.d_child, &parent->d_subdirs); 1245 list_move(&dn->d_u.d_child, &parent->d_subdirs);
1237 spin_unlock(&dn->d_lock); 1246 spin_unlock(&dn->d_lock);
1238 spin_unlock(&dcache_lock); 1247 spin_unlock(&parent->d_lock);
1239 } 1248 }
1240 1249
1241 di = dn->d_fsdata; 1250 di = dn->d_fsdata;
@@ -1772,12 +1781,17 @@ int ceph_do_getattr(struct inode *inode, int mask)
1772 * Check inode permissions. We verify we have a valid value for 1781 * Check inode permissions. We verify we have a valid value for
1773 * the AUTH cap, then call the generic handler. 1782 * the AUTH cap, then call the generic handler.
1774 */ 1783 */
1775int ceph_permission(struct inode *inode, int mask) 1784int ceph_permission(struct inode *inode, int mask, unsigned int flags)
1776{ 1785{
1777 int err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED); 1786 int err;
1787
1788 if (flags & IPERM_FLAG_RCU)
1789 return -ECHILD;
1790
1791 err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED);
1778 1792
1779 if (!err) 1793 if (!err)
1780 err = generic_permission(inode, mask, NULL); 1794 err = generic_permission(inode, mask, flags, NULL);
1781 return err; 1795 return err;
1782} 1796}
1783 1797
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 38800eaa81d0..a50fca1e03be 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -1486,7 +1486,7 @@ retry:
1486 *base = ceph_ino(temp->d_inode); 1486 *base = ceph_ino(temp->d_inode);
1487 *plen = len; 1487 *plen = len;
1488 dout("build_path on %p %d built %llx '%.*s'\n", 1488 dout("build_path on %p %d built %llx '%.*s'\n",
1489 dentry, atomic_read(&dentry->d_count), *base, len, path); 1489 dentry, dentry->d_count, *base, len, path);
1490 return path; 1490 return path;
1491} 1491}
1492 1492
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 7f01728a4657..4553d8829edb 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -665,7 +665,7 @@ extern void ceph_queue_invalidate(struct inode *inode);
665extern void ceph_queue_writeback(struct inode *inode); 665extern void ceph_queue_writeback(struct inode *inode);
666 666
667extern int ceph_do_getattr(struct inode *inode, int mask); 667extern int ceph_do_getattr(struct inode *inode, int mask);
668extern int ceph_permission(struct inode *inode, int mask); 668extern int ceph_permission(struct inode *inode, int mask, unsigned int flags);
669extern int ceph_setattr(struct dentry *dentry, struct iattr *attr); 669extern int ceph_setattr(struct dentry *dentry, struct iattr *attr);
670extern int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry, 670extern int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
671 struct kstat *stat); 671 struct kstat *stat);
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 3936aa7f2c22..8e21e0fe65d5 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -283,10 +283,13 @@ cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
283 return 0; 283 return 0;
284} 284}
285 285
286static int cifs_permission(struct inode *inode, int mask) 286static int cifs_permission(struct inode *inode, int mask, unsigned int flags)
287{ 287{
288 struct cifs_sb_info *cifs_sb; 288 struct cifs_sb_info *cifs_sb;
289 289
290 if (flags & IPERM_FLAG_RCU)
291 return -ECHILD;
292
290 cifs_sb = CIFS_SB(inode->i_sb); 293 cifs_sb = CIFS_SB(inode->i_sb);
291 294
292 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) { 295 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
@@ -298,7 +301,7 @@ static int cifs_permission(struct inode *inode, int mask)
298 on the client (above and beyond ACL on servers) for 301 on the client (above and beyond ACL on servers) for
299 servers which do not support setting and viewing mode bits, 302 servers which do not support setting and viewing mode bits,
300 so allowing client to check permissions is useful */ 303 so allowing client to check permissions is useful */
301 return generic_permission(inode, mask, NULL); 304 return generic_permission(inode, mask, flags, NULL);
302} 305}
303 306
304static struct kmem_cache *cifs_inode_cachep; 307static struct kmem_cache *cifs_inode_cachep;
@@ -334,10 +337,17 @@ cifs_alloc_inode(struct super_block *sb)
334 return &cifs_inode->vfs_inode; 337 return &cifs_inode->vfs_inode;
335} 338}
336 339
340static void cifs_i_callback(struct rcu_head *head)
341{
342 struct inode *inode = container_of(head, struct inode, i_rcu);
343 INIT_LIST_HEAD(&inode->i_dentry);
344 kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
345}
346
337static void 347static void
338cifs_destroy_inode(struct inode *inode) 348cifs_destroy_inode(struct inode *inode)
339{ 349{
340 kmem_cache_free(cifs_inode_cachep, CIFS_I(inode)); 350 call_rcu(&inode->i_rcu, cifs_i_callback);
341} 351}
342 352
343static void 353static void
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 3840eddbfb7a..db2a58c00f7b 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -135,9 +135,9 @@ static void setup_cifs_dentry(struct cifsTconInfo *tcon,
135 struct inode *newinode) 135 struct inode *newinode)
136{ 136{
137 if (tcon->nocase) 137 if (tcon->nocase)
138 direntry->d_op = &cifs_ci_dentry_ops; 138 d_set_d_op(direntry, &cifs_ci_dentry_ops);
139 else 139 else
140 direntry->d_op = &cifs_dentry_ops; 140 d_set_d_op(direntry, &cifs_dentry_ops);
141 d_instantiate(direntry, newinode); 141 d_instantiate(direntry, newinode);
142} 142}
143 143
@@ -421,9 +421,9 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
421 rc = cifs_get_inode_info_unix(&newinode, full_path, 421 rc = cifs_get_inode_info_unix(&newinode, full_path,
422 inode->i_sb, xid); 422 inode->i_sb, xid);
423 if (pTcon->nocase) 423 if (pTcon->nocase)
424 direntry->d_op = &cifs_ci_dentry_ops; 424 d_set_d_op(direntry, &cifs_ci_dentry_ops);
425 else 425 else
426 direntry->d_op = &cifs_dentry_ops; 426 d_set_d_op(direntry, &cifs_dentry_ops);
427 427
428 if (rc == 0) 428 if (rc == 0)
429 d_instantiate(direntry, newinode); 429 d_instantiate(direntry, newinode);
@@ -604,9 +604,9 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
604 604
605 if ((rc == 0) && (newInode != NULL)) { 605 if ((rc == 0) && (newInode != NULL)) {
606 if (pTcon->nocase) 606 if (pTcon->nocase)
607 direntry->d_op = &cifs_ci_dentry_ops; 607 d_set_d_op(direntry, &cifs_ci_dentry_ops);
608 else 608 else
609 direntry->d_op = &cifs_dentry_ops; 609 d_set_d_op(direntry, &cifs_dentry_ops);
610 d_add(direntry, newInode); 610 d_add(direntry, newInode);
611 if (posix_open) { 611 if (posix_open) {
612 filp = lookup_instantiate_filp(nd, direntry, 612 filp = lookup_instantiate_filp(nd, direntry,
@@ -634,9 +634,9 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
634 rc = 0; 634 rc = 0;
635 direntry->d_time = jiffies; 635 direntry->d_time = jiffies;
636 if (pTcon->nocase) 636 if (pTcon->nocase)
637 direntry->d_op = &cifs_ci_dentry_ops; 637 d_set_d_op(direntry, &cifs_ci_dentry_ops);
638 else 638 else
639 direntry->d_op = &cifs_dentry_ops; 639 d_set_d_op(direntry, &cifs_dentry_ops);
640 d_add(direntry, NULL); 640 d_add(direntry, NULL);
641 /* if it was once a directory (but how can we tell?) we could do 641 /* if it was once a directory (but how can we tell?) we could do
642 shrink_dcache_parent(direntry); */ 642 shrink_dcache_parent(direntry); */
@@ -656,22 +656,37 @@ lookup_out:
656static int 656static int
657cifs_d_revalidate(struct dentry *direntry, struct nameidata *nd) 657cifs_d_revalidate(struct dentry *direntry, struct nameidata *nd)
658{ 658{
659 int isValid = 1; 659 if (nd->flags & LOOKUP_RCU)
660 return -ECHILD;
660 661
661 if (direntry->d_inode) { 662 if (direntry->d_inode) {
662 if (cifs_revalidate_dentry(direntry)) 663 if (cifs_revalidate_dentry(direntry))
663 return 0; 664 return 0;
664 } else { 665 else
665 cFYI(1, "neg dentry 0x%p name = %s", 666 return 1;
666 direntry, direntry->d_name.name);
667 if (time_after(jiffies, direntry->d_time + HZ) ||
668 !lookupCacheEnabled) {
669 d_drop(direntry);
670 isValid = 0;
671 }
672 } 667 }
673 668
674 return isValid; 669 /*
670 * This may be nfsd (or something), anyway, we can't see the
671 * intent of this. So, since this can be for creation, drop it.
672 */
673 if (!nd)
674 return 0;
675
676 /*
677 * Drop the negative dentry, in order to make sure to use the
678 * case sensitive name which is specified by user if this is
679 * for creation.
680 */
681 if (!(nd->flags & (LOOKUP_CONTINUE | LOOKUP_PARENT))) {
682 if (nd->flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET))
683 return 0;
684 }
685
686 if (time_after(jiffies, direntry->d_time + HZ) || !lookupCacheEnabled)
687 return 0;
688
689 return 1;
675} 690}
676 691
677/* static int cifs_d_delete(struct dentry *direntry) 692/* static int cifs_d_delete(struct dentry *direntry)
@@ -688,9 +703,10 @@ const struct dentry_operations cifs_dentry_ops = {
688/* d_delete: cifs_d_delete, */ /* not needed except for debugging */ 703/* d_delete: cifs_d_delete, */ /* not needed except for debugging */
689}; 704};
690 705
691static int cifs_ci_hash(struct dentry *dentry, struct qstr *q) 706static int cifs_ci_hash(const struct dentry *dentry, const struct inode *inode,
707 struct qstr *q)
692{ 708{
693 struct nls_table *codepage = CIFS_SB(dentry->d_inode->i_sb)->local_nls; 709 struct nls_table *codepage = CIFS_SB(dentry->d_sb)->local_nls;
694 unsigned long hash; 710 unsigned long hash;
695 int i; 711 int i;
696 712
@@ -703,21 +719,16 @@ static int cifs_ci_hash(struct dentry *dentry, struct qstr *q)
703 return 0; 719 return 0;
704} 720}
705 721
706static int cifs_ci_compare(struct dentry *dentry, struct qstr *a, 722static int cifs_ci_compare(const struct dentry *parent,
707 struct qstr *b) 723 const struct inode *pinode,
724 const struct dentry *dentry, const struct inode *inode,
725 unsigned int len, const char *str, const struct qstr *name)
708{ 726{
709 struct nls_table *codepage = CIFS_SB(dentry->d_inode->i_sb)->local_nls; 727 struct nls_table *codepage = CIFS_SB(pinode->i_sb)->local_nls;
710 728
711 if ((a->len == b->len) && 729 if ((name->len == len) &&
712 (nls_strnicmp(codepage, a->name, b->name, a->len) == 0)) { 730 (nls_strnicmp(codepage, name->name, str, len) == 0))
713 /*
714 * To preserve case, don't let an existing negative dentry's
715 * case take precedence. If a is not a negative dentry, this
716 * should have no side effects
717 */
718 memcpy((void *)a->name, b->name, a->len);
719 return 0; 731 return 0;
720 }
721 return 1; 732 return 1;
722} 733}
723 734
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 589f3e3f6e00..a853a89857a5 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -809,14 +809,14 @@ inode_has_hashed_dentries(struct inode *inode)
809{ 809{
810 struct dentry *dentry; 810 struct dentry *dentry;
811 811
812 spin_lock(&dcache_lock); 812 spin_lock(&inode->i_lock);
813 list_for_each_entry(dentry, &inode->i_dentry, d_alias) { 813 list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
814 if (!d_unhashed(dentry) || IS_ROOT(dentry)) { 814 if (!d_unhashed(dentry) || IS_ROOT(dentry)) {
815 spin_unlock(&dcache_lock); 815 spin_unlock(&inode->i_lock);
816 return true; 816 return true;
817 } 817 }
818 } 818 }
819 spin_unlock(&dcache_lock); 819 spin_unlock(&inode->i_lock);
820 return false; 820 return false;
821} 821}
822 822
@@ -1319,9 +1319,9 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
1319 to set uid/gid */ 1319 to set uid/gid */
1320 inc_nlink(inode); 1320 inc_nlink(inode);
1321 if (pTcon->nocase) 1321 if (pTcon->nocase)
1322 direntry->d_op = &cifs_ci_dentry_ops; 1322 d_set_d_op(direntry, &cifs_ci_dentry_ops);
1323 else 1323 else
1324 direntry->d_op = &cifs_dentry_ops; 1324 d_set_d_op(direntry, &cifs_dentry_ops);
1325 1325
1326 cifs_unix_basic_to_fattr(&fattr, pInfo, cifs_sb); 1326 cifs_unix_basic_to_fattr(&fattr, pInfo, cifs_sb);
1327 cifs_fill_uniqueid(inode->i_sb, &fattr); 1327 cifs_fill_uniqueid(inode->i_sb, &fattr);
@@ -1363,9 +1363,9 @@ mkdir_get_info:
1363 inode->i_sb, xid, NULL); 1363 inode->i_sb, xid, NULL);
1364 1364
1365 if (pTcon->nocase) 1365 if (pTcon->nocase)
1366 direntry->d_op = &cifs_ci_dentry_ops; 1366 d_set_d_op(direntry, &cifs_ci_dentry_ops);
1367 else 1367 else
1368 direntry->d_op = &cifs_dentry_ops; 1368 d_set_d_op(direntry, &cifs_dentry_ops);
1369 d_instantiate(direntry, newinode); 1369 d_instantiate(direntry, newinode);
1370 /* setting nlink not necessary except in cases where we 1370 /* setting nlink not necessary except in cases where we
1371 * failed to get it from the server or was set bogus */ 1371 * failed to get it from the server or was set bogus */
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index 85cdbf831e7b..fe2f6a93c49e 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -525,9 +525,9 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
525 rc); 525 rc);
526 } else { 526 } else {
527 if (pTcon->nocase) 527 if (pTcon->nocase)
528 direntry->d_op = &cifs_ci_dentry_ops; 528 d_set_d_op(direntry, &cifs_ci_dentry_ops);
529 else 529 else
530 direntry->d_op = &cifs_dentry_ops; 530 d_set_d_op(direntry, &cifs_dentry_ops);
531 d_instantiate(direntry, newinode); 531 d_instantiate(direntry, newinode);
532 } 532 }
533 } 533 }
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index a73eb9f4bdaf..ec5b68e3b928 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -79,7 +79,7 @@ cifs_readdir_lookup(struct dentry *parent, struct qstr *name,
79 cFYI(1, "For %s", name->name); 79 cFYI(1, "For %s", name->name);
80 80
81 if (parent->d_op && parent->d_op->d_hash) 81 if (parent->d_op && parent->d_op->d_hash)
82 parent->d_op->d_hash(parent, name); 82 parent->d_op->d_hash(parent, parent->d_inode, name);
83 else 83 else
84 name->hash = full_name_hash(name->name, name->len); 84 name->hash = full_name_hash(name->name, name->len);
85 85
@@ -103,9 +103,9 @@ cifs_readdir_lookup(struct dentry *parent, struct qstr *name,
103 } 103 }
104 104
105 if (cifs_sb_master_tcon(CIFS_SB(sb))->nocase) 105 if (cifs_sb_master_tcon(CIFS_SB(sb))->nocase)
106 dentry->d_op = &cifs_ci_dentry_ops; 106 d_set_d_op(dentry, &cifs_ci_dentry_ops);
107 else 107 else
108 dentry->d_op = &cifs_dentry_ops; 108 d_set_d_op(dentry, &cifs_dentry_ops);
109 109
110 alias = d_materialise_unique(dentry, inode); 110 alias = d_materialise_unique(dentry, inode);
111 if (alias != NULL) { 111 if (alias != NULL) {
diff --git a/fs/coda/cache.c b/fs/coda/cache.c
index 9060f08e70cf..5525e1c660fd 100644
--- a/fs/coda/cache.c
+++ b/fs/coda/cache.c
@@ -93,7 +93,7 @@ static void coda_flag_children(struct dentry *parent, int flag)
93 struct list_head *child; 93 struct list_head *child;
94 struct dentry *de; 94 struct dentry *de;
95 95
96 spin_lock(&dcache_lock); 96 spin_lock(&parent->d_lock);
97 list_for_each(child, &parent->d_subdirs) 97 list_for_each(child, &parent->d_subdirs)
98 { 98 {
99 de = list_entry(child, struct dentry, d_u.d_child); 99 de = list_entry(child, struct dentry, d_u.d_child);
@@ -102,7 +102,7 @@ static void coda_flag_children(struct dentry *parent, int flag)
102 continue; 102 continue;
103 coda_flag_inode(de->d_inode, flag); 103 coda_flag_inode(de->d_inode, flag);
104 } 104 }
105 spin_unlock(&dcache_lock); 105 spin_unlock(&parent->d_lock);
106 return; 106 return;
107} 107}
108 108
diff --git a/fs/coda/dir.c b/fs/coda/dir.c
index 5d8b35539601..29badd91360f 100644
--- a/fs/coda/dir.c
+++ b/fs/coda/dir.c
@@ -18,6 +18,7 @@
18#include <linux/errno.h> 18#include <linux/errno.h>
19#include <linux/string.h> 19#include <linux/string.h>
20#include <linux/spinlock.h> 20#include <linux/spinlock.h>
21#include <linux/namei.h>
21 22
22#include <asm/uaccess.h> 23#include <asm/uaccess.h>
23 24
@@ -47,7 +48,7 @@ static int coda_readdir(struct file *file, void *buf, filldir_t filldir);
47 48
48/* dentry ops */ 49/* dentry ops */
49static int coda_dentry_revalidate(struct dentry *de, struct nameidata *nd); 50static int coda_dentry_revalidate(struct dentry *de, struct nameidata *nd);
50static int coda_dentry_delete(struct dentry *); 51static int coda_dentry_delete(const struct dentry *);
51 52
52/* support routines */ 53/* support routines */
53static int coda_venus_readdir(struct file *coda_file, void *buf, 54static int coda_venus_readdir(struct file *coda_file, void *buf,
@@ -125,7 +126,7 @@ static struct dentry *coda_lookup(struct inode *dir, struct dentry *entry, struc
125 return ERR_PTR(error); 126 return ERR_PTR(error);
126 127
127exit: 128exit:
128 entry->d_op = &coda_dentry_operations; 129 d_set_d_op(entry, &coda_dentry_operations);
129 130
130 if (inode && (type & CODA_NOCACHE)) 131 if (inode && (type & CODA_NOCACHE))
131 coda_flag_inode(inode, C_VATTR | C_PURGE); 132 coda_flag_inode(inode, C_VATTR | C_PURGE);
@@ -134,10 +135,13 @@ exit:
134} 135}
135 136
136 137
137int coda_permission(struct inode *inode, int mask) 138int coda_permission(struct inode *inode, int mask, unsigned int flags)
138{ 139{
139 int error; 140 int error;
140 141
142 if (flags & IPERM_FLAG_RCU)
143 return -ECHILD;
144
141 mask &= MAY_READ | MAY_WRITE | MAY_EXEC; 145 mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
142 146
143 if (!mask) 147 if (!mask)
@@ -541,9 +545,13 @@ out:
541/* called when a cache lookup succeeds */ 545/* called when a cache lookup succeeds */
542static int coda_dentry_revalidate(struct dentry *de, struct nameidata *nd) 546static int coda_dentry_revalidate(struct dentry *de, struct nameidata *nd)
543{ 547{
544 struct inode *inode = de->d_inode; 548 struct inode *inode;
545 struct coda_inode_info *cii; 549 struct coda_inode_info *cii;
546 550
551 if (nd->flags & LOOKUP_RCU)
552 return -ECHILD;
553
554 inode = de->d_inode;
547 if (!inode || coda_isroot(inode)) 555 if (!inode || coda_isroot(inode))
548 goto out; 556 goto out;
549 if (is_bad_inode(inode)) 557 if (is_bad_inode(inode))
@@ -559,7 +567,7 @@ static int coda_dentry_revalidate(struct dentry *de, struct nameidata *nd)
559 if (cii->c_flags & C_FLUSH) 567 if (cii->c_flags & C_FLUSH)
560 coda_flag_inode_children(inode, C_FLUSH); 568 coda_flag_inode_children(inode, C_FLUSH);
561 569
562 if (atomic_read(&de->d_count) > 1) 570 if (de->d_count > 1)
563 /* pretend it's valid, but don't change the flags */ 571 /* pretend it's valid, but don't change the flags */
564 goto out; 572 goto out;
565 573
@@ -577,7 +585,7 @@ out:
577 * This is the callback from dput() when d_count is going to 0. 585 * This is the callback from dput() when d_count is going to 0.
578 * We use this to unhash dentries with bad inodes. 586 * We use this to unhash dentries with bad inodes.
579 */ 587 */
580static int coda_dentry_delete(struct dentry * dentry) 588static int coda_dentry_delete(const struct dentry * dentry)
581{ 589{
582 int flags; 590 int flags;
583 591
diff --git a/fs/coda/inode.c b/fs/coda/inode.c
index 5ea57c8c7f97..50dc7d189f56 100644
--- a/fs/coda/inode.c
+++ b/fs/coda/inode.c
@@ -56,11 +56,18 @@ static struct inode *coda_alloc_inode(struct super_block *sb)
56 return &ei->vfs_inode; 56 return &ei->vfs_inode;
57} 57}
58 58
59static void coda_destroy_inode(struct inode *inode) 59static void coda_i_callback(struct rcu_head *head)
60{ 60{
61 struct inode *inode = container_of(head, struct inode, i_rcu);
62 INIT_LIST_HEAD(&inode->i_dentry);
61 kmem_cache_free(coda_inode_cachep, ITOC(inode)); 63 kmem_cache_free(coda_inode_cachep, ITOC(inode));
62} 64}
63 65
66static void coda_destroy_inode(struct inode *inode)
67{
68 call_rcu(&inode->i_rcu, coda_i_callback);
69}
70
64static void init_once(void *foo) 71static void init_once(void *foo)
65{ 72{
66 struct coda_inode_info *ei = (struct coda_inode_info *) foo; 73 struct coda_inode_info *ei = (struct coda_inode_info *) foo;
diff --git a/fs/coda/pioctl.c b/fs/coda/pioctl.c
index 2fd89b5c5c7b..741f0bd03918 100644
--- a/fs/coda/pioctl.c
+++ b/fs/coda/pioctl.c
@@ -24,7 +24,7 @@
24#include <linux/coda_psdev.h> 24#include <linux/coda_psdev.h>
25 25
26/* pioctl ops */ 26/* pioctl ops */
27static int coda_ioctl_permission(struct inode *inode, int mask); 27static int coda_ioctl_permission(struct inode *inode, int mask, unsigned int flags);
28static long coda_pioctl(struct file *filp, unsigned int cmd, 28static long coda_pioctl(struct file *filp, unsigned int cmd,
29 unsigned long user_data); 29 unsigned long user_data);
30 30
@@ -41,8 +41,10 @@ const struct file_operations coda_ioctl_operations = {
41}; 41};
42 42
43/* the coda pioctl inode ops */ 43/* the coda pioctl inode ops */
44static int coda_ioctl_permission(struct inode *inode, int mask) 44static int coda_ioctl_permission(struct inode *inode, int mask, unsigned int flags)
45{ 45{
46 if (flags & IPERM_FLAG_RCU)
47 return -ECHILD;
46 return (mask & MAY_EXEC) ? -EACCES : 0; 48 return (mask & MAY_EXEC) ? -EACCES : 0;
47} 49}
48 50
diff --git a/fs/configfs/configfs_internal.h b/fs/configfs/configfs_internal.h
index da6061a6df40..026cf68553a4 100644
--- a/fs/configfs/configfs_internal.h
+++ b/fs/configfs/configfs_internal.h
@@ -120,7 +120,7 @@ static inline struct config_item *configfs_get_config_item(struct dentry *dentry
120{ 120{
121 struct config_item * item = NULL; 121 struct config_item * item = NULL;
122 122
123 spin_lock(&dcache_lock); 123 spin_lock(&dentry->d_lock);
124 if (!d_unhashed(dentry)) { 124 if (!d_unhashed(dentry)) {
125 struct configfs_dirent * sd = dentry->d_fsdata; 125 struct configfs_dirent * sd = dentry->d_fsdata;
126 if (sd->s_type & CONFIGFS_ITEM_LINK) { 126 if (sd->s_type & CONFIGFS_ITEM_LINK) {
@@ -129,7 +129,7 @@ static inline struct config_item *configfs_get_config_item(struct dentry *dentry
129 } else 129 } else
130 item = config_item_get(sd->s_element); 130 item = config_item_get(sd->s_element);
131 } 131 }
132 spin_unlock(&dcache_lock); 132 spin_unlock(&dentry->d_lock);
133 133
134 return item; 134 return item;
135} 135}
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 0b502f80c691..36637a8c1ed3 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -67,7 +67,7 @@ static void configfs_d_iput(struct dentry * dentry,
67 * We _must_ delete our dentries on last dput, as the chain-to-parent 67 * We _must_ delete our dentries on last dput, as the chain-to-parent
68 * behavior is required to clear the parents of default_groups. 68 * behavior is required to clear the parents of default_groups.
69 */ 69 */
70static int configfs_d_delete(struct dentry *dentry) 70static int configfs_d_delete(const struct dentry *dentry)
71{ 71{
72 return 1; 72 return 1;
73} 73}
@@ -232,10 +232,8 @@ int configfs_make_dirent(struct configfs_dirent * parent_sd,
232 232
233 sd->s_mode = mode; 233 sd->s_mode = mode;
234 sd->s_dentry = dentry; 234 sd->s_dentry = dentry;
235 if (dentry) { 235 if (dentry)
236 dentry->d_fsdata = configfs_get(sd); 236 dentry->d_fsdata = configfs_get(sd);
237 dentry->d_op = &configfs_dentry_ops;
238 }
239 237
240 return 0; 238 return 0;
241} 239}
@@ -278,7 +276,6 @@ static int create_dir(struct config_item * k, struct dentry * p,
278 error = configfs_create(d, mode, init_dir); 276 error = configfs_create(d, mode, init_dir);
279 if (!error) { 277 if (!error) {
280 inc_nlink(p->d_inode); 278 inc_nlink(p->d_inode);
281 (d)->d_op = &configfs_dentry_ops;
282 } else { 279 } else {
283 struct configfs_dirent *sd = d->d_fsdata; 280 struct configfs_dirent *sd = d->d_fsdata;
284 if (sd) { 281 if (sd) {
@@ -371,9 +368,7 @@ int configfs_create_link(struct configfs_symlink *sl,
371 CONFIGFS_ITEM_LINK); 368 CONFIGFS_ITEM_LINK);
372 if (!err) { 369 if (!err) {
373 err = configfs_create(dentry, mode, init_symlink); 370 err = configfs_create(dentry, mode, init_symlink);
374 if (!err) 371 if (err) {
375 dentry->d_op = &configfs_dentry_ops;
376 else {
377 struct configfs_dirent *sd = dentry->d_fsdata; 372 struct configfs_dirent *sd = dentry->d_fsdata;
378 if (sd) { 373 if (sd) {
379 spin_lock(&configfs_dirent_lock); 374 spin_lock(&configfs_dirent_lock);
@@ -399,8 +394,7 @@ static void remove_dir(struct dentry * d)
399 if (d->d_inode) 394 if (d->d_inode)
400 simple_rmdir(parent->d_inode,d); 395 simple_rmdir(parent->d_inode,d);
401 396
402 pr_debug(" o %s removing done (%d)\n",d->d_name.name, 397 pr_debug(" o %s removing done (%d)\n",d->d_name.name, d->d_count);
403 atomic_read(&d->d_count));
404 398
405 dput(parent); 399 dput(parent);
406} 400}
@@ -448,7 +442,7 @@ static int configfs_attach_attr(struct configfs_dirent * sd, struct dentry * den
448 return error; 442 return error;
449 } 443 }
450 444
451 dentry->d_op = &configfs_dentry_ops; 445 d_set_d_op(dentry, &configfs_dentry_ops);
452 d_rehash(dentry); 446 d_rehash(dentry);
453 447
454 return 0; 448 return 0;
@@ -493,7 +487,11 @@ static struct dentry * configfs_lookup(struct inode *dir,
493 * If it doesn't exist and it isn't a NOT_PINNED item, 487 * If it doesn't exist and it isn't a NOT_PINNED item,
494 * it must be negative. 488 * it must be negative.
495 */ 489 */
496 return simple_lookup(dir, dentry, nd); 490 if (dentry->d_name.len > NAME_MAX)
491 return ERR_PTR(-ENAMETOOLONG);
492 d_set_d_op(dentry, &configfs_dentry_ops);
493 d_add(dentry, NULL);
494 return NULL;
497 } 495 }
498 496
499out: 497out:
@@ -685,6 +683,7 @@ static int create_default_group(struct config_group *parent_group,
685 ret = -ENOMEM; 683 ret = -ENOMEM;
686 child = d_alloc(parent, &name); 684 child = d_alloc(parent, &name);
687 if (child) { 685 if (child) {
686 d_set_d_op(child, &configfs_dentry_ops);
688 d_add(child, NULL); 687 d_add(child, NULL);
689 688
690 ret = configfs_attach_group(&parent_group->cg_item, 689 ret = configfs_attach_group(&parent_group->cg_item,
@@ -1682,6 +1681,7 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
1682 err = -ENOMEM; 1681 err = -ENOMEM;
1683 dentry = d_alloc(configfs_sb->s_root, &name); 1682 dentry = d_alloc(configfs_sb->s_root, &name);
1684 if (dentry) { 1683 if (dentry) {
1684 d_set_d_op(dentry, &configfs_dentry_ops);
1685 d_add(dentry, NULL); 1685 d_add(dentry, NULL);
1686 1686
1687 err = configfs_attach_group(sd->s_element, &group->cg_item, 1687 err = configfs_attach_group(sd->s_element, &group->cg_item,
diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c
index 253476d78ed8..c83f4768eeaa 100644
--- a/fs/configfs/inode.c
+++ b/fs/configfs/inode.c
@@ -250,18 +250,14 @@ void configfs_drop_dentry(struct configfs_dirent * sd, struct dentry * parent)
250 struct dentry * dentry = sd->s_dentry; 250 struct dentry * dentry = sd->s_dentry;
251 251
252 if (dentry) { 252 if (dentry) {
253 spin_lock(&dcache_lock);
254 spin_lock(&dentry->d_lock); 253 spin_lock(&dentry->d_lock);
255 if (!(d_unhashed(dentry) && dentry->d_inode)) { 254 if (!(d_unhashed(dentry) && dentry->d_inode)) {
256 dget_locked(dentry); 255 dget_dlock(dentry);
257 __d_drop(dentry); 256 __d_drop(dentry);
258 spin_unlock(&dentry->d_lock); 257 spin_unlock(&dentry->d_lock);
259 spin_unlock(&dcache_lock);
260 simple_unlink(parent->d_inode, dentry); 258 simple_unlink(parent->d_inode, dentry);
261 } else { 259 } else
262 spin_unlock(&dentry->d_lock); 260 spin_unlock(&dentry->d_lock);
263 spin_unlock(&dcache_lock);
264 }
265 } 261 }
266} 262}
267 263
diff --git a/fs/dcache.c b/fs/dcache.c
index 23702a9d4e6d..5699d4c027cb 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -33,20 +33,58 @@
33#include <linux/bootmem.h> 33#include <linux/bootmem.h>
34#include <linux/fs_struct.h> 34#include <linux/fs_struct.h>
35#include <linux/hardirq.h> 35#include <linux/hardirq.h>
36#include <linux/bit_spinlock.h>
37#include <linux/rculist_bl.h>
36#include "internal.h" 38#include "internal.h"
37 39
40/*
41 * Usage:
42 * dcache->d_inode->i_lock protects:
43 * - i_dentry, d_alias, d_inode of aliases
44 * dcache_hash_bucket lock protects:
45 * - the dcache hash table
46 * s_anon bl list spinlock protects:
47 * - the s_anon list (see __d_drop)
48 * dcache_lru_lock protects:
49 * - the dcache lru lists and counters
50 * d_lock protects:
51 * - d_flags
52 * - d_name
53 * - d_lru
54 * - d_count
55 * - d_unhashed()
56 * - d_parent and d_subdirs
57 * - childrens' d_child and d_parent
58 * - d_alias, d_inode
59 *
60 * Ordering:
61 * dentry->d_inode->i_lock
62 * dentry->d_lock
63 * dcache_lru_lock
64 * dcache_hash_bucket lock
65 * s_anon lock
66 *
67 * If there is an ancestor relationship:
68 * dentry->d_parent->...->d_parent->d_lock
69 * ...
70 * dentry->d_parent->d_lock
71 * dentry->d_lock
72 *
73 * If no ancestor relationship:
74 * if (dentry1 < dentry2)
75 * dentry1->d_lock
76 * dentry2->d_lock
77 */
38int sysctl_vfs_cache_pressure __read_mostly = 100; 78int sysctl_vfs_cache_pressure __read_mostly = 100;
39EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure); 79EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
40 80
41 __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock); 81static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lru_lock);
42__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock); 82__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
43 83
44EXPORT_SYMBOL(dcache_lock); 84EXPORT_SYMBOL(rename_lock);
45 85
46static struct kmem_cache *dentry_cache __read_mostly; 86static struct kmem_cache *dentry_cache __read_mostly;
47 87
48#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
49
50/* 88/*
51 * This is the single most critical data structure when it comes 89 * This is the single most critical data structure when it comes
52 * to the dcache: the hashtable for lookups. Somebody should try 90 * to the dcache: the hashtable for lookups. Somebody should try
@@ -60,22 +98,51 @@ static struct kmem_cache *dentry_cache __read_mostly;
60 98
61static unsigned int d_hash_mask __read_mostly; 99static unsigned int d_hash_mask __read_mostly;
62static unsigned int d_hash_shift __read_mostly; 100static unsigned int d_hash_shift __read_mostly;
63static struct hlist_head *dentry_hashtable __read_mostly; 101
102struct dcache_hash_bucket {
103 struct hlist_bl_head head;
104};
105static struct dcache_hash_bucket *dentry_hashtable __read_mostly;
106
107static inline struct dcache_hash_bucket *d_hash(struct dentry *parent,
108 unsigned long hash)
109{
110 hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
111 hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS);
112 return dentry_hashtable + (hash & D_HASHMASK);
113}
114
115static inline void spin_lock_bucket(struct dcache_hash_bucket *b)
116{
117 bit_spin_lock(0, (unsigned long *)&b->head.first);
118}
119
120static inline void spin_unlock_bucket(struct dcache_hash_bucket *b)
121{
122 __bit_spin_unlock(0, (unsigned long *)&b->head.first);
123}
64 124
65/* Statistics gathering. */ 125/* Statistics gathering. */
66struct dentry_stat_t dentry_stat = { 126struct dentry_stat_t dentry_stat = {
67 .age_limit = 45, 127 .age_limit = 45,
68}; 128};
69 129
70static struct percpu_counter nr_dentry __cacheline_aligned_in_smp; 130static DEFINE_PER_CPU(unsigned int, nr_dentry);
71static struct percpu_counter nr_dentry_unused __cacheline_aligned_in_smp;
72 131
73#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS) 132#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
133static int get_nr_dentry(void)
134{
135 int i;
136 int sum = 0;
137 for_each_possible_cpu(i)
138 sum += per_cpu(nr_dentry, i);
139 return sum < 0 ? 0 : sum;
140}
141
74int proc_nr_dentry(ctl_table *table, int write, void __user *buffer, 142int proc_nr_dentry(ctl_table *table, int write, void __user *buffer,
75 size_t *lenp, loff_t *ppos) 143 size_t *lenp, loff_t *ppos)
76{ 144{
77 dentry_stat.nr_dentry = percpu_counter_sum_positive(&nr_dentry); 145 dentry_stat.nr_dentry = get_nr_dentry();
78 dentry_stat.nr_unused = percpu_counter_sum_positive(&nr_dentry_unused);
79 return proc_dointvec(table, write, buffer, lenp, ppos); 146 return proc_dointvec(table, write, buffer, lenp, ppos);
80} 147}
81#endif 148#endif
@@ -91,35 +158,50 @@ static void __d_free(struct rcu_head *head)
91} 158}
92 159
93/* 160/*
94 * no dcache_lock, please. 161 * no locks, please.
95 */ 162 */
96static void d_free(struct dentry *dentry) 163static void d_free(struct dentry *dentry)
97{ 164{
98 percpu_counter_dec(&nr_dentry); 165 BUG_ON(dentry->d_count);
166 this_cpu_dec(nr_dentry);
99 if (dentry->d_op && dentry->d_op->d_release) 167 if (dentry->d_op && dentry->d_op->d_release)
100 dentry->d_op->d_release(dentry); 168 dentry->d_op->d_release(dentry);
101 169
102 /* if dentry was never inserted into hash, immediate free is OK */ 170 /* if dentry was never inserted into hash, immediate free is OK */
103 if (hlist_unhashed(&dentry->d_hash)) 171 if (hlist_bl_unhashed(&dentry->d_hash))
104 __d_free(&dentry->d_u.d_rcu); 172 __d_free(&dentry->d_u.d_rcu);
105 else 173 else
106 call_rcu(&dentry->d_u.d_rcu, __d_free); 174 call_rcu(&dentry->d_u.d_rcu, __d_free);
107} 175}
108 176
177/**
178 * dentry_rcuwalk_barrier - invalidate in-progress rcu-walk lookups
179 * After this call, in-progress rcu-walk path lookup will fail. This
180 * should be called after unhashing, and after changing d_inode (if
181 * the dentry has not already been unhashed).
182 */
183static inline void dentry_rcuwalk_barrier(struct dentry *dentry)
184{
185 assert_spin_locked(&dentry->d_lock);
186 /* Go through a barrier */
187 write_seqcount_barrier(&dentry->d_seq);
188}
189
109/* 190/*
110 * Release the dentry's inode, using the filesystem 191 * Release the dentry's inode, using the filesystem
111 * d_iput() operation if defined. 192 * d_iput() operation if defined. Dentry has no refcount
193 * and is unhashed.
112 */ 194 */
113static void dentry_iput(struct dentry * dentry) 195static void dentry_iput(struct dentry * dentry)
114 __releases(dentry->d_lock) 196 __releases(dentry->d_lock)
115 __releases(dcache_lock) 197 __releases(dentry->d_inode->i_lock)
116{ 198{
117 struct inode *inode = dentry->d_inode; 199 struct inode *inode = dentry->d_inode;
118 if (inode) { 200 if (inode) {
119 dentry->d_inode = NULL; 201 dentry->d_inode = NULL;
120 list_del_init(&dentry->d_alias); 202 list_del_init(&dentry->d_alias);
121 spin_unlock(&dentry->d_lock); 203 spin_unlock(&dentry->d_lock);
122 spin_unlock(&dcache_lock); 204 spin_unlock(&inode->i_lock);
123 if (!inode->i_nlink) 205 if (!inode->i_nlink)
124 fsnotify_inoderemove(inode); 206 fsnotify_inoderemove(inode);
125 if (dentry->d_op && dentry->d_op->d_iput) 207 if (dentry->d_op && dentry->d_op->d_iput)
@@ -128,40 +210,72 @@ static void dentry_iput(struct dentry * dentry)
128 iput(inode); 210 iput(inode);
129 } else { 211 } else {
130 spin_unlock(&dentry->d_lock); 212 spin_unlock(&dentry->d_lock);
131 spin_unlock(&dcache_lock);
132 } 213 }
133} 214}
134 215
135/* 216/*
136 * dentry_lru_(add|del|move_tail) must be called with dcache_lock held. 217 * Release the dentry's inode, using the filesystem
218 * d_iput() operation if defined. dentry remains in-use.
219 */
220static void dentry_unlink_inode(struct dentry * dentry)
221 __releases(dentry->d_lock)
222 __releases(dentry->d_inode->i_lock)
223{
224 struct inode *inode = dentry->d_inode;
225 dentry->d_inode = NULL;
226 list_del_init(&dentry->d_alias);
227 dentry_rcuwalk_barrier(dentry);
228 spin_unlock(&dentry->d_lock);
229 spin_unlock(&inode->i_lock);
230 if (!inode->i_nlink)
231 fsnotify_inoderemove(inode);
232 if (dentry->d_op && dentry->d_op->d_iput)
233 dentry->d_op->d_iput(dentry, inode);
234 else
235 iput(inode);
236}
237
238/*
239 * dentry_lru_(add|del|move_tail) must be called with d_lock held.
137 */ 240 */
138static void dentry_lru_add(struct dentry *dentry) 241static void dentry_lru_add(struct dentry *dentry)
139{ 242{
140 if (list_empty(&dentry->d_lru)) { 243 if (list_empty(&dentry->d_lru)) {
244 spin_lock(&dcache_lru_lock);
141 list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru); 245 list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
142 dentry->d_sb->s_nr_dentry_unused++; 246 dentry->d_sb->s_nr_dentry_unused++;
143 percpu_counter_inc(&nr_dentry_unused); 247 dentry_stat.nr_unused++;
248 spin_unlock(&dcache_lru_lock);
144 } 249 }
145} 250}
146 251
252static void __dentry_lru_del(struct dentry *dentry)
253{
254 list_del_init(&dentry->d_lru);
255 dentry->d_sb->s_nr_dentry_unused--;
256 dentry_stat.nr_unused--;
257}
258
147static void dentry_lru_del(struct dentry *dentry) 259static void dentry_lru_del(struct dentry *dentry)
148{ 260{
149 if (!list_empty(&dentry->d_lru)) { 261 if (!list_empty(&dentry->d_lru)) {
150 list_del_init(&dentry->d_lru); 262 spin_lock(&dcache_lru_lock);
151 dentry->d_sb->s_nr_dentry_unused--; 263 __dentry_lru_del(dentry);
152 percpu_counter_dec(&nr_dentry_unused); 264 spin_unlock(&dcache_lru_lock);
153 } 265 }
154} 266}
155 267
156static void dentry_lru_move_tail(struct dentry *dentry) 268static void dentry_lru_move_tail(struct dentry *dentry)
157{ 269{
270 spin_lock(&dcache_lru_lock);
158 if (list_empty(&dentry->d_lru)) { 271 if (list_empty(&dentry->d_lru)) {
159 list_add_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru); 272 list_add_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
160 dentry->d_sb->s_nr_dentry_unused++; 273 dentry->d_sb->s_nr_dentry_unused++;
161 percpu_counter_inc(&nr_dentry_unused); 274 dentry_stat.nr_unused++;
162 } else { 275 } else {
163 list_move_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru); 276 list_move_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
164 } 277 }
278 spin_unlock(&dcache_lru_lock);
165} 279}
166 280
167/** 281/**
@@ -171,22 +285,115 @@ static void dentry_lru_move_tail(struct dentry *dentry)
171 * The dentry must already be unhashed and removed from the LRU. 285 * The dentry must already be unhashed and removed from the LRU.
172 * 286 *
173 * If this is the root of the dentry tree, return NULL. 287 * If this is the root of the dentry tree, return NULL.
288 *
289 * dentry->d_lock and parent->d_lock must be held by caller, and are dropped by
290 * d_kill.
174 */ 291 */
175static struct dentry *d_kill(struct dentry *dentry) 292static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
176 __releases(dentry->d_lock) 293 __releases(dentry->d_lock)
177 __releases(dcache_lock) 294 __releases(parent->d_lock)
295 __releases(dentry->d_inode->i_lock)
178{ 296{
179 struct dentry *parent; 297 dentry->d_parent = NULL;
180
181 list_del(&dentry->d_u.d_child); 298 list_del(&dentry->d_u.d_child);
182 /*drops the locks, at that point nobody can reach this dentry */ 299 if (parent)
300 spin_unlock(&parent->d_lock);
183 dentry_iput(dentry); 301 dentry_iput(dentry);
302 /*
303 * dentry_iput drops the locks, at which point nobody (except
304 * transient RCU lookups) can reach this dentry.
305 */
306 d_free(dentry);
307 return parent;
308}
309
310/**
311 * d_drop - drop a dentry
312 * @dentry: dentry to drop
313 *
314 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
315 * be found through a VFS lookup any more. Note that this is different from
316 * deleting the dentry - d_delete will try to mark the dentry negative if
317 * possible, giving a successful _negative_ lookup, while d_drop will
318 * just make the cache lookup fail.
319 *
320 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
321 * reason (NFS timeouts or autofs deletes).
322 *
323 * __d_drop requires dentry->d_lock.
324 */
325void __d_drop(struct dentry *dentry)
326{
327 if (!(dentry->d_flags & DCACHE_UNHASHED)) {
328 if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED)) {
329 bit_spin_lock(0,
330 (unsigned long *)&dentry->d_sb->s_anon.first);
331 dentry->d_flags |= DCACHE_UNHASHED;
332 hlist_bl_del_init(&dentry->d_hash);
333 __bit_spin_unlock(0,
334 (unsigned long *)&dentry->d_sb->s_anon.first);
335 } else {
336 struct dcache_hash_bucket *b;
337 b = d_hash(dentry->d_parent, dentry->d_name.hash);
338 spin_lock_bucket(b);
339 /*
340 * We may not actually need to put DCACHE_UNHASHED
341 * manipulations under the hash lock, but follow
342 * the principle of least surprise.
343 */
344 dentry->d_flags |= DCACHE_UNHASHED;
345 hlist_bl_del_rcu(&dentry->d_hash);
346 spin_unlock_bucket(b);
347 dentry_rcuwalk_barrier(dentry);
348 }
349 }
350}
351EXPORT_SYMBOL(__d_drop);
352
353void d_drop(struct dentry *dentry)
354{
355 spin_lock(&dentry->d_lock);
356 __d_drop(dentry);
357 spin_unlock(&dentry->d_lock);
358}
359EXPORT_SYMBOL(d_drop);
360
361/*
362 * Finish off a dentry we've decided to kill.
363 * dentry->d_lock must be held, returns with it unlocked.
364 * If ref is non-zero, then decrement the refcount too.
365 * Returns dentry requiring refcount drop, or NULL if we're done.
366 */
367static inline struct dentry *dentry_kill(struct dentry *dentry, int ref)
368 __releases(dentry->d_lock)
369{
370 struct inode *inode;
371 struct dentry *parent;
372
373 inode = dentry->d_inode;
374 if (inode && !spin_trylock(&inode->i_lock)) {
375relock:
376 spin_unlock(&dentry->d_lock);
377 cpu_relax();
378 return dentry; /* try again with same dentry */
379 }
184 if (IS_ROOT(dentry)) 380 if (IS_ROOT(dentry))
185 parent = NULL; 381 parent = NULL;
186 else 382 else
187 parent = dentry->d_parent; 383 parent = dentry->d_parent;
188 d_free(dentry); 384 if (parent && !spin_trylock(&parent->d_lock)) {
189 return parent; 385 if (inode)
386 spin_unlock(&inode->i_lock);
387 goto relock;
388 }
389
390 if (ref)
391 dentry->d_count--;
392 /* if dentry was on the d_lru list delete it from there */
393 dentry_lru_del(dentry);
394 /* if it was on the hash then remove it */
395 __d_drop(dentry);
396 return d_kill(dentry, parent);
190} 397}
191 398
192/* 399/*
@@ -214,34 +421,26 @@ static struct dentry *d_kill(struct dentry *dentry)
214 * call the dentry unlink method as well as removing it from the queues and 421 * call the dentry unlink method as well as removing it from the queues and
215 * releasing its resources. If the parent dentries were scheduled for release 422 * releasing its resources. If the parent dentries were scheduled for release
216 * they too may now get deleted. 423 * they too may now get deleted.
217 *
218 * no dcache lock, please.
219 */ 424 */
220
221void dput(struct dentry *dentry) 425void dput(struct dentry *dentry)
222{ 426{
223 if (!dentry) 427 if (!dentry)
224 return; 428 return;
225 429
226repeat: 430repeat:
227 if (atomic_read(&dentry->d_count) == 1) 431 if (dentry->d_count == 1)
228 might_sleep(); 432 might_sleep();
229 if (!atomic_dec_and_lock(&dentry->d_count, &dcache_lock))
230 return;
231
232 spin_lock(&dentry->d_lock); 433 spin_lock(&dentry->d_lock);
233 if (atomic_read(&dentry->d_count)) { 434 BUG_ON(!dentry->d_count);
435 if (dentry->d_count > 1) {
436 dentry->d_count--;
234 spin_unlock(&dentry->d_lock); 437 spin_unlock(&dentry->d_lock);
235 spin_unlock(&dcache_lock);
236 return; 438 return;
237 } 439 }
238 440
239 /* 441 if (dentry->d_flags & DCACHE_OP_DELETE) {
240 * AV: ->d_delete() is _NOT_ allowed to block now.
241 */
242 if (dentry->d_op && dentry->d_op->d_delete) {
243 if (dentry->d_op->d_delete(dentry)) 442 if (dentry->d_op->d_delete(dentry))
244 goto unhash_it; 443 goto kill_it;
245 } 444 }
246 445
247 /* Unreachable? Get rid of it */ 446 /* Unreachable? Get rid of it */
@@ -252,16 +451,12 @@ repeat:
252 dentry->d_flags |= DCACHE_REFERENCED; 451 dentry->d_flags |= DCACHE_REFERENCED;
253 dentry_lru_add(dentry); 452 dentry_lru_add(dentry);
254 453
255 spin_unlock(&dentry->d_lock); 454 dentry->d_count--;
256 spin_unlock(&dcache_lock); 455 spin_unlock(&dentry->d_lock);
257 return; 456 return;
258 457
259unhash_it:
260 __d_drop(dentry);
261kill_it: 458kill_it:
262 /* if dentry was on the d_lru list delete it from there */ 459 dentry = dentry_kill(dentry, 1);
263 dentry_lru_del(dentry);
264 dentry = d_kill(dentry);
265 if (dentry) 460 if (dentry)
266 goto repeat; 461 goto repeat;
267} 462}
@@ -284,9 +479,9 @@ int d_invalidate(struct dentry * dentry)
284 /* 479 /*
285 * If it's already been dropped, return OK. 480 * If it's already been dropped, return OK.
286 */ 481 */
287 spin_lock(&dcache_lock); 482 spin_lock(&dentry->d_lock);
288 if (d_unhashed(dentry)) { 483 if (d_unhashed(dentry)) {
289 spin_unlock(&dcache_lock); 484 spin_unlock(&dentry->d_lock);
290 return 0; 485 return 0;
291 } 486 }
292 /* 487 /*
@@ -294,9 +489,9 @@ int d_invalidate(struct dentry * dentry)
294 * to get rid of unused child entries. 489 * to get rid of unused child entries.
295 */ 490 */
296 if (!list_empty(&dentry->d_subdirs)) { 491 if (!list_empty(&dentry->d_subdirs)) {
297 spin_unlock(&dcache_lock); 492 spin_unlock(&dentry->d_lock);
298 shrink_dcache_parent(dentry); 493 shrink_dcache_parent(dentry);
299 spin_lock(&dcache_lock); 494 spin_lock(&dentry->d_lock);
300 } 495 }
301 496
302 /* 497 /*
@@ -309,35 +504,61 @@ int d_invalidate(struct dentry * dentry)
309 * we might still populate it if it was a 504 * we might still populate it if it was a
310 * working directory or similar). 505 * working directory or similar).
311 */ 506 */
312 spin_lock(&dentry->d_lock); 507 if (dentry->d_count > 1) {
313 if (atomic_read(&dentry->d_count) > 1) {
314 if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) { 508 if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) {
315 spin_unlock(&dentry->d_lock); 509 spin_unlock(&dentry->d_lock);
316 spin_unlock(&dcache_lock);
317 return -EBUSY; 510 return -EBUSY;
318 } 511 }
319 } 512 }
320 513
321 __d_drop(dentry); 514 __d_drop(dentry);
322 spin_unlock(&dentry->d_lock); 515 spin_unlock(&dentry->d_lock);
323 spin_unlock(&dcache_lock);
324 return 0; 516 return 0;
325} 517}
326EXPORT_SYMBOL(d_invalidate); 518EXPORT_SYMBOL(d_invalidate);
327 519
328/* This should be called _only_ with dcache_lock held */ 520/* This must be called with d_lock held */
329static inline struct dentry * __dget_locked(struct dentry *dentry) 521static inline void __dget_dlock(struct dentry *dentry)
330{ 522{
331 atomic_inc(&dentry->d_count); 523 dentry->d_count++;
332 dentry_lru_del(dentry);
333 return dentry;
334} 524}
335 525
336struct dentry * dget_locked(struct dentry *dentry) 526static inline void __dget(struct dentry *dentry)
337{ 527{
338 return __dget_locked(dentry); 528 spin_lock(&dentry->d_lock);
529 __dget_dlock(dentry);
530 spin_unlock(&dentry->d_lock);
531}
532
533struct dentry *dget_parent(struct dentry *dentry)
534{
535 struct dentry *ret;
536
537repeat:
538 /*
539 * Don't need rcu_dereference because we re-check it was correct under
540 * the lock.
541 */
542 rcu_read_lock();
543 ret = dentry->d_parent;
544 if (!ret) {
545 rcu_read_unlock();
546 goto out;
547 }
548 spin_lock(&ret->d_lock);
549 if (unlikely(ret != dentry->d_parent)) {
550 spin_unlock(&ret->d_lock);
551 rcu_read_unlock();
552 goto repeat;
553 }
554 rcu_read_unlock();
555 BUG_ON(!ret->d_count);
556 ret->d_count++;
557 spin_unlock(&ret->d_lock);
558out:
559 return ret;
339} 560}
340EXPORT_SYMBOL(dget_locked); 561EXPORT_SYMBOL(dget_parent);
341 562
342/** 563/**
343 * d_find_alias - grab a hashed alias of inode 564 * d_find_alias - grab a hashed alias of inode
@@ -355,42 +576,51 @@ EXPORT_SYMBOL(dget_locked);
355 * any other hashed alias over that one unless @want_discon is set, 576 * any other hashed alias over that one unless @want_discon is set,
356 * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias. 577 * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias.
357 */ 578 */
358 579static struct dentry *__d_find_alias(struct inode *inode, int want_discon)
359static struct dentry * __d_find_alias(struct inode *inode, int want_discon)
360{ 580{
361 struct list_head *head, *next, *tmp; 581 struct dentry *alias, *discon_alias;
362 struct dentry *alias, *discon_alias=NULL;
363 582
364 head = &inode->i_dentry; 583again:
365 next = inode->i_dentry.next; 584 discon_alias = NULL;
366 while (next != head) { 585 list_for_each_entry(alias, &inode->i_dentry, d_alias) {
367 tmp = next; 586 spin_lock(&alias->d_lock);
368 next = tmp->next;
369 prefetch(next);
370 alias = list_entry(tmp, struct dentry, d_alias);
371 if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) { 587 if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
372 if (IS_ROOT(alias) && 588 if (IS_ROOT(alias) &&
373 (alias->d_flags & DCACHE_DISCONNECTED)) 589 (alias->d_flags & DCACHE_DISCONNECTED)) {
374 discon_alias = alias; 590 discon_alias = alias;
375 else if (!want_discon) { 591 } else if (!want_discon) {
376 __dget_locked(alias); 592 __dget_dlock(alias);
593 spin_unlock(&alias->d_lock);
594 return alias;
595 }
596 }
597 spin_unlock(&alias->d_lock);
598 }
599 if (discon_alias) {
600 alias = discon_alias;
601 spin_lock(&alias->d_lock);
602 if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
603 if (IS_ROOT(alias) &&
604 (alias->d_flags & DCACHE_DISCONNECTED)) {
605 __dget_dlock(alias);
606 spin_unlock(&alias->d_lock);
377 return alias; 607 return alias;
378 } 608 }
379 } 609 }
610 spin_unlock(&alias->d_lock);
611 goto again;
380 } 612 }
381 if (discon_alias) 613 return NULL;
382 __dget_locked(discon_alias);
383 return discon_alias;
384} 614}
385 615
386struct dentry * d_find_alias(struct inode *inode) 616struct dentry *d_find_alias(struct inode *inode)
387{ 617{
388 struct dentry *de = NULL; 618 struct dentry *de = NULL;
389 619
390 if (!list_empty(&inode->i_dentry)) { 620 if (!list_empty(&inode->i_dentry)) {
391 spin_lock(&dcache_lock); 621 spin_lock(&inode->i_lock);
392 de = __d_find_alias(inode, 0); 622 de = __d_find_alias(inode, 0);
393 spin_unlock(&dcache_lock); 623 spin_unlock(&inode->i_lock);
394 } 624 }
395 return de; 625 return de;
396} 626}
@@ -404,54 +634,61 @@ void d_prune_aliases(struct inode *inode)
404{ 634{
405 struct dentry *dentry; 635 struct dentry *dentry;
406restart: 636restart:
407 spin_lock(&dcache_lock); 637 spin_lock(&inode->i_lock);
408 list_for_each_entry(dentry, &inode->i_dentry, d_alias) { 638 list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
409 spin_lock(&dentry->d_lock); 639 spin_lock(&dentry->d_lock);
410 if (!atomic_read(&dentry->d_count)) { 640 if (!dentry->d_count) {
411 __dget_locked(dentry); 641 __dget_dlock(dentry);
412 __d_drop(dentry); 642 __d_drop(dentry);
413 spin_unlock(&dentry->d_lock); 643 spin_unlock(&dentry->d_lock);
414 spin_unlock(&dcache_lock); 644 spin_unlock(&inode->i_lock);
415 dput(dentry); 645 dput(dentry);
416 goto restart; 646 goto restart;
417 } 647 }
418 spin_unlock(&dentry->d_lock); 648 spin_unlock(&dentry->d_lock);
419 } 649 }
420 spin_unlock(&dcache_lock); 650 spin_unlock(&inode->i_lock);
421} 651}
422EXPORT_SYMBOL(d_prune_aliases); 652EXPORT_SYMBOL(d_prune_aliases);
423 653
424/* 654/*
425 * Throw away a dentry - free the inode, dput the parent. This requires that 655 * Try to throw away a dentry - free the inode, dput the parent.
426 * the LRU list has already been removed. 656 * Requires dentry->d_lock is held, and dentry->d_count == 0.
657 * Releases dentry->d_lock.
427 * 658 *
428 * Try to prune ancestors as well. This is necessary to prevent 659 * This may fail if locks cannot be acquired no problem, just try again.
429 * quadratic behavior of shrink_dcache_parent(), but is also expected
430 * to be beneficial in reducing dentry cache fragmentation.
431 */ 660 */
432static void prune_one_dentry(struct dentry * dentry) 661static void try_prune_one_dentry(struct dentry *dentry)
433 __releases(dentry->d_lock) 662 __releases(dentry->d_lock)
434 __releases(dcache_lock)
435 __acquires(dcache_lock)
436{ 663{
437 __d_drop(dentry); 664 struct dentry *parent;
438 dentry = d_kill(dentry);
439 665
666 parent = dentry_kill(dentry, 0);
440 /* 667 /*
441 * Prune ancestors. Locking is simpler than in dput(), 668 * If dentry_kill returns NULL, we have nothing more to do.
442 * because dcache_lock needs to be taken anyway. 669 * if it returns the same dentry, trylocks failed. In either
670 * case, just loop again.
671 *
672 * Otherwise, we need to prune ancestors too. This is necessary
673 * to prevent quadratic behavior of shrink_dcache_parent(), but
674 * is also expected to be beneficial in reducing dentry cache
675 * fragmentation.
443 */ 676 */
444 spin_lock(&dcache_lock); 677 if (!parent)
678 return;
679 if (parent == dentry)
680 return;
681
682 /* Prune ancestors. */
683 dentry = parent;
445 while (dentry) { 684 while (dentry) {
446 if (!atomic_dec_and_lock(&dentry->d_count, &dentry->d_lock)) 685 spin_lock(&dentry->d_lock);
686 if (dentry->d_count > 1) {
687 dentry->d_count--;
688 spin_unlock(&dentry->d_lock);
447 return; 689 return;
448 690 }
449 if (dentry->d_op && dentry->d_op->d_delete) 691 dentry = dentry_kill(dentry, 1);
450 dentry->d_op->d_delete(dentry);
451 dentry_lru_del(dentry);
452 __d_drop(dentry);
453 dentry = d_kill(dentry);
454 spin_lock(&dcache_lock);
455 } 692 }
456} 693}
457 694
@@ -459,24 +696,35 @@ static void shrink_dentry_list(struct list_head *list)
459{ 696{
460 struct dentry *dentry; 697 struct dentry *dentry;
461 698
462 while (!list_empty(list)) { 699 rcu_read_lock();
463 dentry = list_entry(list->prev, struct dentry, d_lru); 700 for (;;) {
464 dentry_lru_del(dentry); 701 dentry = list_entry_rcu(list->prev, struct dentry, d_lru);
702 if (&dentry->d_lru == list)
703 break; /* empty */
704 spin_lock(&dentry->d_lock);
705 if (dentry != list_entry(list->prev, struct dentry, d_lru)) {
706 spin_unlock(&dentry->d_lock);
707 continue;
708 }
465 709
466 /* 710 /*
467 * We found an inuse dentry which was not removed from 711 * We found an inuse dentry which was not removed from
468 * the LRU because of laziness during lookup. Do not free 712 * the LRU because of laziness during lookup. Do not free
469 * it - just keep it off the LRU list. 713 * it - just keep it off the LRU list.
470 */ 714 */
471 spin_lock(&dentry->d_lock); 715 if (dentry->d_count) {
472 if (atomic_read(&dentry->d_count)) { 716 dentry_lru_del(dentry);
473 spin_unlock(&dentry->d_lock); 717 spin_unlock(&dentry->d_lock);
474 continue; 718 continue;
475 } 719 }
476 prune_one_dentry(dentry); 720
477 /* dentry->d_lock was dropped in prune_one_dentry() */ 721 rcu_read_unlock();
478 cond_resched_lock(&dcache_lock); 722
723 try_prune_one_dentry(dentry);
724
725 rcu_read_lock();
479 } 726 }
727 rcu_read_unlock();
480} 728}
481 729
482/** 730/**
@@ -495,42 +743,44 @@ static void __shrink_dcache_sb(struct super_block *sb, int *count, int flags)
495 LIST_HEAD(tmp); 743 LIST_HEAD(tmp);
496 int cnt = *count; 744 int cnt = *count;
497 745
498 spin_lock(&dcache_lock); 746relock:
747 spin_lock(&dcache_lru_lock);
499 while (!list_empty(&sb->s_dentry_lru)) { 748 while (!list_empty(&sb->s_dentry_lru)) {
500 dentry = list_entry(sb->s_dentry_lru.prev, 749 dentry = list_entry(sb->s_dentry_lru.prev,
501 struct dentry, d_lru); 750 struct dentry, d_lru);
502 BUG_ON(dentry->d_sb != sb); 751 BUG_ON(dentry->d_sb != sb);
503 752
753 if (!spin_trylock(&dentry->d_lock)) {
754 spin_unlock(&dcache_lru_lock);
755 cpu_relax();
756 goto relock;
757 }
758
504 /* 759 /*
505 * If we are honouring the DCACHE_REFERENCED flag and the 760 * If we are honouring the DCACHE_REFERENCED flag and the
506 * dentry has this flag set, don't free it. Clear the flag 761 * dentry has this flag set, don't free it. Clear the flag
507 * and put it back on the LRU. 762 * and put it back on the LRU.
508 */ 763 */
509 if (flags & DCACHE_REFERENCED) { 764 if (flags & DCACHE_REFERENCED &&
510 spin_lock(&dentry->d_lock); 765 dentry->d_flags & DCACHE_REFERENCED) {
511 if (dentry->d_flags & DCACHE_REFERENCED) { 766 dentry->d_flags &= ~DCACHE_REFERENCED;
512 dentry->d_flags &= ~DCACHE_REFERENCED; 767 list_move(&dentry->d_lru, &referenced);
513 list_move(&dentry->d_lru, &referenced);
514 spin_unlock(&dentry->d_lock);
515 cond_resched_lock(&dcache_lock);
516 continue;
517 }
518 spin_unlock(&dentry->d_lock); 768 spin_unlock(&dentry->d_lock);
769 } else {
770 list_move_tail(&dentry->d_lru, &tmp);
771 spin_unlock(&dentry->d_lock);
772 if (!--cnt)
773 break;
519 } 774 }
520 775 cond_resched_lock(&dcache_lru_lock);
521 list_move_tail(&dentry->d_lru, &tmp);
522 if (!--cnt)
523 break;
524 cond_resched_lock(&dcache_lock);
525 } 776 }
526
527 *count = cnt;
528 shrink_dentry_list(&tmp);
529
530 if (!list_empty(&referenced)) 777 if (!list_empty(&referenced))
531 list_splice(&referenced, &sb->s_dentry_lru); 778 list_splice(&referenced, &sb->s_dentry_lru);
532 spin_unlock(&dcache_lock); 779 spin_unlock(&dcache_lru_lock);
533 780
781 shrink_dentry_list(&tmp);
782
783 *count = cnt;
534} 784}
535 785
536/** 786/**
@@ -546,13 +796,12 @@ static void prune_dcache(int count)
546{ 796{
547 struct super_block *sb, *p = NULL; 797 struct super_block *sb, *p = NULL;
548 int w_count; 798 int w_count;
549 int unused = percpu_counter_sum_positive(&nr_dentry_unused); 799 int unused = dentry_stat.nr_unused;
550 int prune_ratio; 800 int prune_ratio;
551 int pruned; 801 int pruned;
552 802
553 if (unused == 0 || count == 0) 803 if (unused == 0 || count == 0)
554 return; 804 return;
555 spin_lock(&dcache_lock);
556 if (count >= unused) 805 if (count >= unused)
557 prune_ratio = 1; 806 prune_ratio = 1;
558 else 807 else
@@ -589,11 +838,9 @@ static void prune_dcache(int count)
589 if (down_read_trylock(&sb->s_umount)) { 838 if (down_read_trylock(&sb->s_umount)) {
590 if ((sb->s_root != NULL) && 839 if ((sb->s_root != NULL) &&
591 (!list_empty(&sb->s_dentry_lru))) { 840 (!list_empty(&sb->s_dentry_lru))) {
592 spin_unlock(&dcache_lock);
593 __shrink_dcache_sb(sb, &w_count, 841 __shrink_dcache_sb(sb, &w_count,
594 DCACHE_REFERENCED); 842 DCACHE_REFERENCED);
595 pruned -= w_count; 843 pruned -= w_count;
596 spin_lock(&dcache_lock);
597 } 844 }
598 up_read(&sb->s_umount); 845 up_read(&sb->s_umount);
599 } 846 }
@@ -609,7 +856,6 @@ static void prune_dcache(int count)
609 if (p) 856 if (p)
610 __put_super(p); 857 __put_super(p);
611 spin_unlock(&sb_lock); 858 spin_unlock(&sb_lock);
612 spin_unlock(&dcache_lock);
613} 859}
614 860
615/** 861/**
@@ -623,12 +869,14 @@ void shrink_dcache_sb(struct super_block *sb)
623{ 869{
624 LIST_HEAD(tmp); 870 LIST_HEAD(tmp);
625 871
626 spin_lock(&dcache_lock); 872 spin_lock(&dcache_lru_lock);
627 while (!list_empty(&sb->s_dentry_lru)) { 873 while (!list_empty(&sb->s_dentry_lru)) {
628 list_splice_init(&sb->s_dentry_lru, &tmp); 874 list_splice_init(&sb->s_dentry_lru, &tmp);
875 spin_unlock(&dcache_lru_lock);
629 shrink_dentry_list(&tmp); 876 shrink_dentry_list(&tmp);
877 spin_lock(&dcache_lru_lock);
630 } 878 }
631 spin_unlock(&dcache_lock); 879 spin_unlock(&dcache_lru_lock);
632} 880}
633EXPORT_SYMBOL(shrink_dcache_sb); 881EXPORT_SYMBOL(shrink_dcache_sb);
634 882
@@ -645,10 +893,10 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
645 BUG_ON(!IS_ROOT(dentry)); 893 BUG_ON(!IS_ROOT(dentry));
646 894
647 /* detach this root from the system */ 895 /* detach this root from the system */
648 spin_lock(&dcache_lock); 896 spin_lock(&dentry->d_lock);
649 dentry_lru_del(dentry); 897 dentry_lru_del(dentry);
650 __d_drop(dentry); 898 __d_drop(dentry);
651 spin_unlock(&dcache_lock); 899 spin_unlock(&dentry->d_lock);
652 900
653 for (;;) { 901 for (;;) {
654 /* descend to the first leaf in the current subtree */ 902 /* descend to the first leaf in the current subtree */
@@ -657,14 +905,16 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
657 905
658 /* this is a branch with children - detach all of them 906 /* this is a branch with children - detach all of them
659 * from the system in one go */ 907 * from the system in one go */
660 spin_lock(&dcache_lock); 908 spin_lock(&dentry->d_lock);
661 list_for_each_entry(loop, &dentry->d_subdirs, 909 list_for_each_entry(loop, &dentry->d_subdirs,
662 d_u.d_child) { 910 d_u.d_child) {
911 spin_lock_nested(&loop->d_lock,
912 DENTRY_D_LOCK_NESTED);
663 dentry_lru_del(loop); 913 dentry_lru_del(loop);
664 __d_drop(loop); 914 __d_drop(loop);
665 cond_resched_lock(&dcache_lock); 915 spin_unlock(&loop->d_lock);
666 } 916 }
667 spin_unlock(&dcache_lock); 917 spin_unlock(&dentry->d_lock);
668 918
669 /* move to the first child */ 919 /* move to the first child */
670 dentry = list_entry(dentry->d_subdirs.next, 920 dentry = list_entry(dentry->d_subdirs.next,
@@ -676,7 +926,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
676 do { 926 do {
677 struct inode *inode; 927 struct inode *inode;
678 928
679 if (atomic_read(&dentry->d_count) != 0) { 929 if (dentry->d_count != 0) {
680 printk(KERN_ERR 930 printk(KERN_ERR
681 "BUG: Dentry %p{i=%lx,n=%s}" 931 "BUG: Dentry %p{i=%lx,n=%s}"
682 " still in use (%d)" 932 " still in use (%d)"
@@ -685,20 +935,23 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
685 dentry->d_inode ? 935 dentry->d_inode ?
686 dentry->d_inode->i_ino : 0UL, 936 dentry->d_inode->i_ino : 0UL,
687 dentry->d_name.name, 937 dentry->d_name.name,
688 atomic_read(&dentry->d_count), 938 dentry->d_count,
689 dentry->d_sb->s_type->name, 939 dentry->d_sb->s_type->name,
690 dentry->d_sb->s_id); 940 dentry->d_sb->s_id);
691 BUG(); 941 BUG();
692 } 942 }
693 943
694 if (IS_ROOT(dentry)) 944 if (IS_ROOT(dentry)) {
695 parent = NULL; 945 parent = NULL;
696 else { 946 list_del(&dentry->d_u.d_child);
947 } else {
697 parent = dentry->d_parent; 948 parent = dentry->d_parent;
698 atomic_dec(&parent->d_count); 949 spin_lock(&parent->d_lock);
950 parent->d_count--;
951 list_del(&dentry->d_u.d_child);
952 spin_unlock(&parent->d_lock);
699 } 953 }
700 954
701 list_del(&dentry->d_u.d_child);
702 detached++; 955 detached++;
703 956
704 inode = dentry->d_inode; 957 inode = dentry->d_inode;
@@ -728,8 +981,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
728 981
729/* 982/*
730 * destroy the dentries attached to a superblock on unmounting 983 * destroy the dentries attached to a superblock on unmounting
731 * - we don't need to use dentry->d_lock, and only need dcache_lock when 984 * - we don't need to use dentry->d_lock because:
732 * removing the dentry from the system lists and hashes because:
733 * - the superblock is detached from all mountings and open files, so the 985 * - the superblock is detached from all mountings and open files, so the
734 * dentry trees will not be rearranged by the VFS 986 * dentry trees will not be rearranged by the VFS
735 * - s_umount is write-locked, so the memory pressure shrinker will ignore 987 * - s_umount is write-locked, so the memory pressure shrinker will ignore
@@ -746,11 +998,13 @@ void shrink_dcache_for_umount(struct super_block *sb)
746 998
747 dentry = sb->s_root; 999 dentry = sb->s_root;
748 sb->s_root = NULL; 1000 sb->s_root = NULL;
749 atomic_dec(&dentry->d_count); 1001 spin_lock(&dentry->d_lock);
1002 dentry->d_count--;
1003 spin_unlock(&dentry->d_lock);
750 shrink_dcache_for_umount_subtree(dentry); 1004 shrink_dcache_for_umount_subtree(dentry);
751 1005
752 while (!hlist_empty(&sb->s_anon)) { 1006 while (!hlist_bl_empty(&sb->s_anon)) {
753 dentry = hlist_entry(sb->s_anon.first, struct dentry, d_hash); 1007 dentry = hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash);
754 shrink_dcache_for_umount_subtree(dentry); 1008 shrink_dcache_for_umount_subtree(dentry);
755 } 1009 }
756} 1010}
@@ -768,15 +1022,20 @@ void shrink_dcache_for_umount(struct super_block *sb)
768 * Return true if the parent or its subdirectories contain 1022 * Return true if the parent or its subdirectories contain
769 * a mount point 1023 * a mount point
770 */ 1024 */
771
772int have_submounts(struct dentry *parent) 1025int have_submounts(struct dentry *parent)
773{ 1026{
774 struct dentry *this_parent = parent; 1027 struct dentry *this_parent;
775 struct list_head *next; 1028 struct list_head *next;
1029 unsigned seq;
1030 int locked = 0;
1031
1032 seq = read_seqbegin(&rename_lock);
1033again:
1034 this_parent = parent;
776 1035
777 spin_lock(&dcache_lock);
778 if (d_mountpoint(parent)) 1036 if (d_mountpoint(parent))
779 goto positive; 1037 goto positive;
1038 spin_lock(&this_parent->d_lock);
780repeat: 1039repeat:
781 next = this_parent->d_subdirs.next; 1040 next = this_parent->d_subdirs.next;
782resume: 1041resume:
@@ -784,27 +1043,65 @@ resume:
784 struct list_head *tmp = next; 1043 struct list_head *tmp = next;
785 struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child); 1044 struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
786 next = tmp->next; 1045 next = tmp->next;
1046
1047 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
787 /* Have we found a mount point ? */ 1048 /* Have we found a mount point ? */
788 if (d_mountpoint(dentry)) 1049 if (d_mountpoint(dentry)) {
1050 spin_unlock(&dentry->d_lock);
1051 spin_unlock(&this_parent->d_lock);
789 goto positive; 1052 goto positive;
1053 }
790 if (!list_empty(&dentry->d_subdirs)) { 1054 if (!list_empty(&dentry->d_subdirs)) {
1055 spin_unlock(&this_parent->d_lock);
1056 spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
791 this_parent = dentry; 1057 this_parent = dentry;
1058 spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
792 goto repeat; 1059 goto repeat;
793 } 1060 }
1061 spin_unlock(&dentry->d_lock);
794 } 1062 }
795 /* 1063 /*
796 * All done at this level ... ascend and resume the search. 1064 * All done at this level ... ascend and resume the search.
797 */ 1065 */
798 if (this_parent != parent) { 1066 if (this_parent != parent) {
799 next = this_parent->d_u.d_child.next; 1067 struct dentry *tmp;
800 this_parent = this_parent->d_parent; 1068 struct dentry *child;
1069
1070 tmp = this_parent->d_parent;
1071 rcu_read_lock();
1072 spin_unlock(&this_parent->d_lock);
1073 child = this_parent;
1074 this_parent = tmp;
1075 spin_lock(&this_parent->d_lock);
1076 /* might go back up the wrong parent if we have had a rename
1077 * or deletion */
1078 if (this_parent != child->d_parent ||
1079 (!locked && read_seqretry(&rename_lock, seq))) {
1080 spin_unlock(&this_parent->d_lock);
1081 rcu_read_unlock();
1082 goto rename_retry;
1083 }
1084 rcu_read_unlock();
1085 next = child->d_u.d_child.next;
801 goto resume; 1086 goto resume;
802 } 1087 }
803 spin_unlock(&dcache_lock); 1088 spin_unlock(&this_parent->d_lock);
1089 if (!locked && read_seqretry(&rename_lock, seq))
1090 goto rename_retry;
1091 if (locked)
1092 write_sequnlock(&rename_lock);
804 return 0; /* No mount points found in tree */ 1093 return 0; /* No mount points found in tree */
805positive: 1094positive:
806 spin_unlock(&dcache_lock); 1095 if (!locked && read_seqretry(&rename_lock, seq))
1096 goto rename_retry;
1097 if (locked)
1098 write_sequnlock(&rename_lock);
807 return 1; 1099 return 1;
1100
1101rename_retry:
1102 locked = 1;
1103 write_seqlock(&rename_lock);
1104 goto again;
808} 1105}
809EXPORT_SYMBOL(have_submounts); 1106EXPORT_SYMBOL(have_submounts);
810 1107
@@ -824,11 +1121,16 @@ EXPORT_SYMBOL(have_submounts);
824 */ 1121 */
825static int select_parent(struct dentry * parent) 1122static int select_parent(struct dentry * parent)
826{ 1123{
827 struct dentry *this_parent = parent; 1124 struct dentry *this_parent;
828 struct list_head *next; 1125 struct list_head *next;
1126 unsigned seq;
829 int found = 0; 1127 int found = 0;
1128 int locked = 0;
830 1129
831 spin_lock(&dcache_lock); 1130 seq = read_seqbegin(&rename_lock);
1131again:
1132 this_parent = parent;
1133 spin_lock(&this_parent->d_lock);
832repeat: 1134repeat:
833 next = this_parent->d_subdirs.next; 1135 next = this_parent->d_subdirs.next;
834resume: 1136resume:
@@ -837,11 +1139,13 @@ resume:
837 struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child); 1139 struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
838 next = tmp->next; 1140 next = tmp->next;
839 1141
1142 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1143
840 /* 1144 /*
841 * move only zero ref count dentries to the end 1145 * move only zero ref count dentries to the end
842 * of the unused list for prune_dcache 1146 * of the unused list for prune_dcache
843 */ 1147 */
844 if (!atomic_read(&dentry->d_count)) { 1148 if (!dentry->d_count) {
845 dentry_lru_move_tail(dentry); 1149 dentry_lru_move_tail(dentry);
846 found++; 1150 found++;
847 } else { 1151 } else {
@@ -853,28 +1157,63 @@ resume:
853 * ensures forward progress). We'll be coming back to find 1157 * ensures forward progress). We'll be coming back to find
854 * the rest. 1158 * the rest.
855 */ 1159 */
856 if (found && need_resched()) 1160 if (found && need_resched()) {
1161 spin_unlock(&dentry->d_lock);
857 goto out; 1162 goto out;
1163 }
858 1164
859 /* 1165 /*
860 * Descend a level if the d_subdirs list is non-empty. 1166 * Descend a level if the d_subdirs list is non-empty.
861 */ 1167 */
862 if (!list_empty(&dentry->d_subdirs)) { 1168 if (!list_empty(&dentry->d_subdirs)) {
1169 spin_unlock(&this_parent->d_lock);
1170 spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
863 this_parent = dentry; 1171 this_parent = dentry;
1172 spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
864 goto repeat; 1173 goto repeat;
865 } 1174 }
1175
1176 spin_unlock(&dentry->d_lock);
866 } 1177 }
867 /* 1178 /*
868 * All done at this level ... ascend and resume the search. 1179 * All done at this level ... ascend and resume the search.
869 */ 1180 */
870 if (this_parent != parent) { 1181 if (this_parent != parent) {
871 next = this_parent->d_u.d_child.next; 1182 struct dentry *tmp;
872 this_parent = this_parent->d_parent; 1183 struct dentry *child;
1184
1185 tmp = this_parent->d_parent;
1186 rcu_read_lock();
1187 spin_unlock(&this_parent->d_lock);
1188 child = this_parent;
1189 this_parent = tmp;
1190 spin_lock(&this_parent->d_lock);
1191 /* might go back up the wrong parent if we have had a rename
1192 * or deletion */
1193 if (this_parent != child->d_parent ||
1194 (!locked && read_seqretry(&rename_lock, seq))) {
1195 spin_unlock(&this_parent->d_lock);
1196 rcu_read_unlock();
1197 goto rename_retry;
1198 }
1199 rcu_read_unlock();
1200 next = child->d_u.d_child.next;
873 goto resume; 1201 goto resume;
874 } 1202 }
875out: 1203out:
876 spin_unlock(&dcache_lock); 1204 spin_unlock(&this_parent->d_lock);
1205 if (!locked && read_seqretry(&rename_lock, seq))
1206 goto rename_retry;
1207 if (locked)
1208 write_sequnlock(&rename_lock);
877 return found; 1209 return found;
1210
1211rename_retry:
1212 if (found)
1213 return found;
1214 locked = 1;
1215 write_seqlock(&rename_lock);
1216 goto again;
878} 1217}
879 1218
880/** 1219/**
@@ -908,16 +1247,13 @@ EXPORT_SYMBOL(shrink_dcache_parent);
908 */ 1247 */
909static int shrink_dcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask) 1248static int shrink_dcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
910{ 1249{
911 int nr_unused;
912
913 if (nr) { 1250 if (nr) {
914 if (!(gfp_mask & __GFP_FS)) 1251 if (!(gfp_mask & __GFP_FS))
915 return -1; 1252 return -1;
916 prune_dcache(nr); 1253 prune_dcache(nr);
917 } 1254 }
918 1255
919 nr_unused = percpu_counter_sum_positive(&nr_dentry_unused); 1256 return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
920 return (nr_unused / 100) * sysctl_vfs_cache_pressure;
921} 1257}
922 1258
923static struct shrinker dcache_shrinker = { 1259static struct shrinker dcache_shrinker = {
@@ -960,38 +1296,52 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
960 memcpy(dname, name->name, name->len); 1296 memcpy(dname, name->name, name->len);
961 dname[name->len] = 0; 1297 dname[name->len] = 0;
962 1298
963 atomic_set(&dentry->d_count, 1); 1299 dentry->d_count = 1;
964 dentry->d_flags = DCACHE_UNHASHED; 1300 dentry->d_flags = DCACHE_UNHASHED;
965 spin_lock_init(&dentry->d_lock); 1301 spin_lock_init(&dentry->d_lock);
1302 seqcount_init(&dentry->d_seq);
966 dentry->d_inode = NULL; 1303 dentry->d_inode = NULL;
967 dentry->d_parent = NULL; 1304 dentry->d_parent = NULL;
968 dentry->d_sb = NULL; 1305 dentry->d_sb = NULL;
969 dentry->d_op = NULL; 1306 dentry->d_op = NULL;
970 dentry->d_fsdata = NULL; 1307 dentry->d_fsdata = NULL;
971 dentry->d_mounted = 0; 1308 INIT_HLIST_BL_NODE(&dentry->d_hash);
972 INIT_HLIST_NODE(&dentry->d_hash);
973 INIT_LIST_HEAD(&dentry->d_lru); 1309 INIT_LIST_HEAD(&dentry->d_lru);
974 INIT_LIST_HEAD(&dentry->d_subdirs); 1310 INIT_LIST_HEAD(&dentry->d_subdirs);
975 INIT_LIST_HEAD(&dentry->d_alias); 1311 INIT_LIST_HEAD(&dentry->d_alias);
1312 INIT_LIST_HEAD(&dentry->d_u.d_child);
976 1313
977 if (parent) { 1314 if (parent) {
978 dentry->d_parent = dget(parent); 1315 spin_lock(&parent->d_lock);
1316 /*
1317 * don't need child lock because it is not subject
1318 * to concurrency here
1319 */
1320 __dget_dlock(parent);
1321 dentry->d_parent = parent;
979 dentry->d_sb = parent->d_sb; 1322 dentry->d_sb = parent->d_sb;
980 } else {
981 INIT_LIST_HEAD(&dentry->d_u.d_child);
982 }
983
984 spin_lock(&dcache_lock);
985 if (parent)
986 list_add(&dentry->d_u.d_child, &parent->d_subdirs); 1323 list_add(&dentry->d_u.d_child, &parent->d_subdirs);
987 spin_unlock(&dcache_lock); 1324 spin_unlock(&parent->d_lock);
1325 }
988 1326
989 percpu_counter_inc(&nr_dentry); 1327 this_cpu_inc(nr_dentry);
990 1328
991 return dentry; 1329 return dentry;
992} 1330}
993EXPORT_SYMBOL(d_alloc); 1331EXPORT_SYMBOL(d_alloc);
994 1332
1333struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
1334{
1335 struct dentry *dentry = d_alloc(NULL, name);
1336 if (dentry) {
1337 dentry->d_sb = sb;
1338 dentry->d_parent = dentry;
1339 dentry->d_flags |= DCACHE_DISCONNECTED;
1340 }
1341 return dentry;
1342}
1343EXPORT_SYMBOL(d_alloc_pseudo);
1344
995struct dentry *d_alloc_name(struct dentry *parent, const char *name) 1345struct dentry *d_alloc_name(struct dentry *parent, const char *name)
996{ 1346{
997 struct qstr q; 1347 struct qstr q;
@@ -1003,12 +1353,36 @@ struct dentry *d_alloc_name(struct dentry *parent, const char *name)
1003} 1353}
1004EXPORT_SYMBOL(d_alloc_name); 1354EXPORT_SYMBOL(d_alloc_name);
1005 1355
1006/* the caller must hold dcache_lock */ 1356void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
1357{
1358 BUG_ON(dentry->d_op);
1359 BUG_ON(dentry->d_flags & (DCACHE_OP_HASH |
1360 DCACHE_OP_COMPARE |
1361 DCACHE_OP_REVALIDATE |
1362 DCACHE_OP_DELETE ));
1363 dentry->d_op = op;
1364 if (!op)
1365 return;
1366 if (op->d_hash)
1367 dentry->d_flags |= DCACHE_OP_HASH;
1368 if (op->d_compare)
1369 dentry->d_flags |= DCACHE_OP_COMPARE;
1370 if (op->d_revalidate)
1371 dentry->d_flags |= DCACHE_OP_REVALIDATE;
1372 if (op->d_delete)
1373 dentry->d_flags |= DCACHE_OP_DELETE;
1374
1375}
1376EXPORT_SYMBOL(d_set_d_op);
1377
1007static void __d_instantiate(struct dentry *dentry, struct inode *inode) 1378static void __d_instantiate(struct dentry *dentry, struct inode *inode)
1008{ 1379{
1380 spin_lock(&dentry->d_lock);
1009 if (inode) 1381 if (inode)
1010 list_add(&dentry->d_alias, &inode->i_dentry); 1382 list_add(&dentry->d_alias, &inode->i_dentry);
1011 dentry->d_inode = inode; 1383 dentry->d_inode = inode;
1384 dentry_rcuwalk_barrier(dentry);
1385 spin_unlock(&dentry->d_lock);
1012 fsnotify_d_instantiate(dentry, inode); 1386 fsnotify_d_instantiate(dentry, inode);
1013} 1387}
1014 1388
@@ -1030,9 +1404,11 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
1030void d_instantiate(struct dentry *entry, struct inode * inode) 1404void d_instantiate(struct dentry *entry, struct inode * inode)
1031{ 1405{
1032 BUG_ON(!list_empty(&entry->d_alias)); 1406 BUG_ON(!list_empty(&entry->d_alias));
1033 spin_lock(&dcache_lock); 1407 if (inode)
1408 spin_lock(&inode->i_lock);
1034 __d_instantiate(entry, inode); 1409 __d_instantiate(entry, inode);
1035 spin_unlock(&dcache_lock); 1410 if (inode)
1411 spin_unlock(&inode->i_lock);
1036 security_d_instantiate(entry, inode); 1412 security_d_instantiate(entry, inode);
1037} 1413}
1038EXPORT_SYMBOL(d_instantiate); 1414EXPORT_SYMBOL(d_instantiate);
@@ -1069,15 +1445,18 @@ static struct dentry *__d_instantiate_unique(struct dentry *entry,
1069 list_for_each_entry(alias, &inode->i_dentry, d_alias) { 1445 list_for_each_entry(alias, &inode->i_dentry, d_alias) {
1070 struct qstr *qstr = &alias->d_name; 1446 struct qstr *qstr = &alias->d_name;
1071 1447
1448 /*
1449 * Don't need alias->d_lock here, because aliases with
1450 * d_parent == entry->d_parent are not subject to name or
1451 * parent changes, because the parent inode i_mutex is held.
1452 */
1072 if (qstr->hash != hash) 1453 if (qstr->hash != hash)
1073 continue; 1454 continue;
1074 if (alias->d_parent != entry->d_parent) 1455 if (alias->d_parent != entry->d_parent)
1075 continue; 1456 continue;
1076 if (qstr->len != len) 1457 if (dentry_cmp(qstr->name, qstr->len, name, len))
1077 continue; 1458 continue;
1078 if (memcmp(qstr->name, name, len)) 1459 __dget(alias);
1079 continue;
1080 dget_locked(alias);
1081 return alias; 1460 return alias;
1082 } 1461 }
1083 1462
@@ -1091,9 +1470,11 @@ struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
1091 1470
1092 BUG_ON(!list_empty(&entry->d_alias)); 1471 BUG_ON(!list_empty(&entry->d_alias));
1093 1472
1094 spin_lock(&dcache_lock); 1473 if (inode)
1474 spin_lock(&inode->i_lock);
1095 result = __d_instantiate_unique(entry, inode); 1475 result = __d_instantiate_unique(entry, inode);
1096 spin_unlock(&dcache_lock); 1476 if (inode)
1477 spin_unlock(&inode->i_lock);
1097 1478
1098 if (!result) { 1479 if (!result) {
1099 security_d_instantiate(entry, inode); 1480 security_d_instantiate(entry, inode);
@@ -1134,14 +1515,6 @@ struct dentry * d_alloc_root(struct inode * root_inode)
1134} 1515}
1135EXPORT_SYMBOL(d_alloc_root); 1516EXPORT_SYMBOL(d_alloc_root);
1136 1517
1137static inline struct hlist_head *d_hash(struct dentry *parent,
1138 unsigned long hash)
1139{
1140 hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
1141 hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS);
1142 return dentry_hashtable + (hash & D_HASHMASK);
1143}
1144
1145/** 1518/**
1146 * d_obtain_alias - find or allocate a dentry for a given inode 1519 * d_obtain_alias - find or allocate a dentry for a given inode
1147 * @inode: inode to allocate the dentry for 1520 * @inode: inode to allocate the dentry for
@@ -1182,10 +1555,11 @@ struct dentry *d_obtain_alias(struct inode *inode)
1182 } 1555 }
1183 tmp->d_parent = tmp; /* make sure dput doesn't croak */ 1556 tmp->d_parent = tmp; /* make sure dput doesn't croak */
1184 1557
1185 spin_lock(&dcache_lock); 1558
1559 spin_lock(&inode->i_lock);
1186 res = __d_find_alias(inode, 0); 1560 res = __d_find_alias(inode, 0);
1187 if (res) { 1561 if (res) {
1188 spin_unlock(&dcache_lock); 1562 spin_unlock(&inode->i_lock);
1189 dput(tmp); 1563 dput(tmp);
1190 goto out_iput; 1564 goto out_iput;
1191 } 1565 }
@@ -1195,12 +1569,14 @@ struct dentry *d_obtain_alias(struct inode *inode)
1195 tmp->d_sb = inode->i_sb; 1569 tmp->d_sb = inode->i_sb;
1196 tmp->d_inode = inode; 1570 tmp->d_inode = inode;
1197 tmp->d_flags |= DCACHE_DISCONNECTED; 1571 tmp->d_flags |= DCACHE_DISCONNECTED;
1198 tmp->d_flags &= ~DCACHE_UNHASHED;
1199 list_add(&tmp->d_alias, &inode->i_dentry); 1572 list_add(&tmp->d_alias, &inode->i_dentry);
1200 hlist_add_head(&tmp->d_hash, &inode->i_sb->s_anon); 1573 bit_spin_lock(0, (unsigned long *)&tmp->d_sb->s_anon.first);
1574 tmp->d_flags &= ~DCACHE_UNHASHED;
1575 hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
1576 __bit_spin_unlock(0, (unsigned long *)&tmp->d_sb->s_anon.first);
1201 spin_unlock(&tmp->d_lock); 1577 spin_unlock(&tmp->d_lock);
1578 spin_unlock(&inode->i_lock);
1202 1579
1203 spin_unlock(&dcache_lock);
1204 return tmp; 1580 return tmp;
1205 1581
1206 out_iput: 1582 out_iput:
@@ -1230,18 +1606,18 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
1230 struct dentry *new = NULL; 1606 struct dentry *new = NULL;
1231 1607
1232 if (inode && S_ISDIR(inode->i_mode)) { 1608 if (inode && S_ISDIR(inode->i_mode)) {
1233 spin_lock(&dcache_lock); 1609 spin_lock(&inode->i_lock);
1234 new = __d_find_alias(inode, 1); 1610 new = __d_find_alias(inode, 1);
1235 if (new) { 1611 if (new) {
1236 BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED)); 1612 BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED));
1237 spin_unlock(&dcache_lock); 1613 spin_unlock(&inode->i_lock);
1238 security_d_instantiate(new, inode); 1614 security_d_instantiate(new, inode);
1239 d_move(new, dentry); 1615 d_move(new, dentry);
1240 iput(inode); 1616 iput(inode);
1241 } else { 1617 } else {
1242 /* already taking dcache_lock, so d_add() by hand */ 1618 /* already taking inode->i_lock, so d_add() by hand */
1243 __d_instantiate(dentry, inode); 1619 __d_instantiate(dentry, inode);
1244 spin_unlock(&dcache_lock); 1620 spin_unlock(&inode->i_lock);
1245 security_d_instantiate(dentry, inode); 1621 security_d_instantiate(dentry, inode);
1246 d_rehash(dentry); 1622 d_rehash(dentry);
1247 } 1623 }
@@ -1314,10 +1690,10 @@ struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
1314 * Negative dentry: instantiate it unless the inode is a directory and 1690 * Negative dentry: instantiate it unless the inode is a directory and
1315 * already has a dentry. 1691 * already has a dentry.
1316 */ 1692 */
1317 spin_lock(&dcache_lock); 1693 spin_lock(&inode->i_lock);
1318 if (!S_ISDIR(inode->i_mode) || list_empty(&inode->i_dentry)) { 1694 if (!S_ISDIR(inode->i_mode) || list_empty(&inode->i_dentry)) {
1319 __d_instantiate(found, inode); 1695 __d_instantiate(found, inode);
1320 spin_unlock(&dcache_lock); 1696 spin_unlock(&inode->i_lock);
1321 security_d_instantiate(found, inode); 1697 security_d_instantiate(found, inode);
1322 return found; 1698 return found;
1323 } 1699 }
@@ -1327,8 +1703,8 @@ struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
1327 * reference to it, move it in place and use it. 1703 * reference to it, move it in place and use it.
1328 */ 1704 */
1329 new = list_entry(inode->i_dentry.next, struct dentry, d_alias); 1705 new = list_entry(inode->i_dentry.next, struct dentry, d_alias);
1330 dget_locked(new); 1706 __dget(new);
1331 spin_unlock(&dcache_lock); 1707 spin_unlock(&inode->i_lock);
1332 security_d_instantiate(found, inode); 1708 security_d_instantiate(found, inode);
1333 d_move(new, found); 1709 d_move(new, found);
1334 iput(inode); 1710 iput(inode);
@@ -1342,6 +1718,112 @@ err_out:
1342EXPORT_SYMBOL(d_add_ci); 1718EXPORT_SYMBOL(d_add_ci);
1343 1719
1344/** 1720/**
1721 * __d_lookup_rcu - search for a dentry (racy, store-free)
1722 * @parent: parent dentry
1723 * @name: qstr of name we wish to find
1724 * @seq: returns d_seq value at the point where the dentry was found
1725 * @inode: returns dentry->d_inode when the inode was found valid.
1726 * Returns: dentry, or NULL
1727 *
1728 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
1729 * resolution (store-free path walking) design described in
1730 * Documentation/filesystems/path-lookup.txt.
1731 *
1732 * This is not to be used outside core vfs.
1733 *
1734 * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock
1735 * held, and rcu_read_lock held. The returned dentry must not be stored into
1736 * without taking d_lock and checking d_seq sequence count against @seq
1737 * returned here.
1738 *
1739 * A refcount may be taken on the found dentry with the __d_rcu_to_refcount
1740 * function.
1741 *
1742 * Alternatively, __d_lookup_rcu may be called again to look up the child of
1743 * the returned dentry, so long as its parent's seqlock is checked after the
1744 * child is looked up. Thus, an interlocking stepping of sequence lock checks
1745 * is formed, giving integrity down the path walk.
1746 */
1747struct dentry *__d_lookup_rcu(struct dentry *parent, struct qstr *name,
1748 unsigned *seq, struct inode **inode)
1749{
1750 unsigned int len = name->len;
1751 unsigned int hash = name->hash;
1752 const unsigned char *str = name->name;
1753 struct dcache_hash_bucket *b = d_hash(parent, hash);
1754 struct hlist_bl_node *node;
1755 struct dentry *dentry;
1756
1757 /*
1758 * Note: There is significant duplication with __d_lookup_rcu which is
1759 * required to prevent single threaded performance regressions
1760 * especially on architectures where smp_rmb (in seqcounts) are costly.
1761 * Keep the two functions in sync.
1762 */
1763
1764 /*
1765 * The hash list is protected using RCU.
1766 *
1767 * Carefully use d_seq when comparing a candidate dentry, to avoid
1768 * races with d_move().
1769 *
1770 * It is possible that concurrent renames can mess up our list
1771 * walk here and result in missing our dentry, resulting in the
1772 * false-negative result. d_lookup() protects against concurrent
1773 * renames using rename_lock seqlock.
1774 *
1775 * See Documentation/vfs/dcache-locking.txt for more details.
1776 */
1777 hlist_bl_for_each_entry_rcu(dentry, node, &b->head, d_hash) {
1778 struct inode *i;
1779 const char *tname;
1780 int tlen;
1781
1782 if (dentry->d_name.hash != hash)
1783 continue;
1784
1785seqretry:
1786 *seq = read_seqcount_begin(&dentry->d_seq);
1787 if (dentry->d_parent != parent)
1788 continue;
1789 if (d_unhashed(dentry))
1790 continue;
1791 tlen = dentry->d_name.len;
1792 tname = dentry->d_name.name;
1793 i = dentry->d_inode;
1794 prefetch(tname);
1795 if (i)
1796 prefetch(i);
1797 /*
1798 * This seqcount check is required to ensure name and
1799 * len are loaded atomically, so as not to walk off the
1800 * edge of memory when walking. If we could load this
1801 * atomically some other way, we could drop this check.
1802 */
1803 if (read_seqcount_retry(&dentry->d_seq, *seq))
1804 goto seqretry;
1805 if (parent->d_flags & DCACHE_OP_COMPARE) {
1806 if (parent->d_op->d_compare(parent, *inode,
1807 dentry, i,
1808 tlen, tname, name))
1809 continue;
1810 } else {
1811 if (dentry_cmp(tname, tlen, str, len))
1812 continue;
1813 }
1814 /*
1815 * No extra seqcount check is required after the name
1816 * compare. The caller must perform a seqcount check in
1817 * order to do anything useful with the returned dentry
1818 * anyway.
1819 */
1820 *inode = i;
1821 return dentry;
1822 }
1823 return NULL;
1824}
1825
1826/**
1345 * d_lookup - search for a dentry 1827 * d_lookup - search for a dentry
1346 * @parent: parent dentry 1828 * @parent: parent dentry
1347 * @name: qstr of name we wish to find 1829 * @name: qstr of name we wish to find
@@ -1352,10 +1834,10 @@ EXPORT_SYMBOL(d_add_ci);
1352 * dentry is returned. The caller must use dput to free the entry when it has 1834 * dentry is returned. The caller must use dput to free the entry when it has
1353 * finished using it. %NULL is returned if the dentry does not exist. 1835 * finished using it. %NULL is returned if the dentry does not exist.
1354 */ 1836 */
1355struct dentry * d_lookup(struct dentry * parent, struct qstr * name) 1837struct dentry *d_lookup(struct dentry *parent, struct qstr *name)
1356{ 1838{
1357 struct dentry * dentry = NULL; 1839 struct dentry *dentry;
1358 unsigned long seq; 1840 unsigned seq;
1359 1841
1360 do { 1842 do {
1361 seq = read_seqbegin(&rename_lock); 1843 seq = read_seqbegin(&rename_lock);
@@ -1367,7 +1849,7 @@ struct dentry * d_lookup(struct dentry * parent, struct qstr * name)
1367} 1849}
1368EXPORT_SYMBOL(d_lookup); 1850EXPORT_SYMBOL(d_lookup);
1369 1851
1370/* 1852/**
1371 * __d_lookup - search for a dentry (racy) 1853 * __d_lookup - search for a dentry (racy)
1372 * @parent: parent dentry 1854 * @parent: parent dentry
1373 * @name: qstr of name we wish to find 1855 * @name: qstr of name we wish to find
@@ -1382,17 +1864,24 @@ EXPORT_SYMBOL(d_lookup);
1382 * 1864 *
1383 * __d_lookup callers must be commented. 1865 * __d_lookup callers must be commented.
1384 */ 1866 */
1385struct dentry * __d_lookup(struct dentry * parent, struct qstr * name) 1867struct dentry *__d_lookup(struct dentry *parent, struct qstr *name)
1386{ 1868{
1387 unsigned int len = name->len; 1869 unsigned int len = name->len;
1388 unsigned int hash = name->hash; 1870 unsigned int hash = name->hash;
1389 const unsigned char *str = name->name; 1871 const unsigned char *str = name->name;
1390 struct hlist_head *head = d_hash(parent,hash); 1872 struct dcache_hash_bucket *b = d_hash(parent, hash);
1873 struct hlist_bl_node *node;
1391 struct dentry *found = NULL; 1874 struct dentry *found = NULL;
1392 struct hlist_node *node;
1393 struct dentry *dentry; 1875 struct dentry *dentry;
1394 1876
1395 /* 1877 /*
1878 * Note: There is significant duplication with __d_lookup_rcu which is
1879 * required to prevent single threaded performance regressions
1880 * especially on architectures where smp_rmb (in seqcounts) are costly.
1881 * Keep the two functions in sync.
1882 */
1883
1884 /*
1396 * The hash list is protected using RCU. 1885 * The hash list is protected using RCU.
1397 * 1886 *
1398 * Take d_lock when comparing a candidate dentry, to avoid races 1887 * Take d_lock when comparing a candidate dentry, to avoid races
@@ -1407,25 +1896,16 @@ struct dentry * __d_lookup(struct dentry * parent, struct qstr * name)
1407 */ 1896 */
1408 rcu_read_lock(); 1897 rcu_read_lock();
1409 1898
1410 hlist_for_each_entry_rcu(dentry, node, head, d_hash) { 1899 hlist_bl_for_each_entry_rcu(dentry, node, &b->head, d_hash) {
1411 struct qstr *qstr; 1900 const char *tname;
1901 int tlen;
1412 1902
1413 if (dentry->d_name.hash != hash) 1903 if (dentry->d_name.hash != hash)
1414 continue; 1904 continue;
1415 if (dentry->d_parent != parent)
1416 continue;
1417 1905
1418 spin_lock(&dentry->d_lock); 1906 spin_lock(&dentry->d_lock);
1419
1420 /*
1421 * Recheck the dentry after taking the lock - d_move may have
1422 * changed things. Don't bother checking the hash because
1423 * we're about to compare the whole name anyway.
1424 */
1425 if (dentry->d_parent != parent) 1907 if (dentry->d_parent != parent)
1426 goto next; 1908 goto next;
1427
1428 /* non-existing due to RCU? */
1429 if (d_unhashed(dentry)) 1909 if (d_unhashed(dentry))
1430 goto next; 1910 goto next;
1431 1911
@@ -1433,18 +1913,19 @@ struct dentry * __d_lookup(struct dentry * parent, struct qstr * name)
1433 * It is safe to compare names since d_move() cannot 1913 * It is safe to compare names since d_move() cannot
1434 * change the qstr (protected by d_lock). 1914 * change the qstr (protected by d_lock).
1435 */ 1915 */
1436 qstr = &dentry->d_name; 1916 tlen = dentry->d_name.len;
1437 if (parent->d_op && parent->d_op->d_compare) { 1917 tname = dentry->d_name.name;
1438 if (parent->d_op->d_compare(parent, qstr, name)) 1918 if (parent->d_flags & DCACHE_OP_COMPARE) {
1919 if (parent->d_op->d_compare(parent, parent->d_inode,
1920 dentry, dentry->d_inode,
1921 tlen, tname, name))
1439 goto next; 1922 goto next;
1440 } else { 1923 } else {
1441 if (qstr->len != len) 1924 if (dentry_cmp(tname, tlen, str, len))
1442 goto next;
1443 if (memcmp(qstr->name, str, len))
1444 goto next; 1925 goto next;
1445 } 1926 }
1446 1927
1447 atomic_inc(&dentry->d_count); 1928 dentry->d_count++;
1448 found = dentry; 1929 found = dentry;
1449 spin_unlock(&dentry->d_lock); 1930 spin_unlock(&dentry->d_lock);
1450 break; 1931 break;
@@ -1473,8 +1954,8 @@ struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
1473 * routine may choose to leave the hash value unchanged. 1954 * routine may choose to leave the hash value unchanged.
1474 */ 1955 */
1475 name->hash = full_name_hash(name->name, name->len); 1956 name->hash = full_name_hash(name->name, name->len);
1476 if (dir->d_op && dir->d_op->d_hash) { 1957 if (dir->d_flags & DCACHE_OP_HASH) {
1477 if (dir->d_op->d_hash(dir, name) < 0) 1958 if (dir->d_op->d_hash(dir, dir->d_inode, name) < 0)
1478 goto out; 1959 goto out;
1479 } 1960 }
1480 dentry = d_lookup(dir, name); 1961 dentry = d_lookup(dir, name);
@@ -1483,34 +1964,32 @@ out:
1483} 1964}
1484 1965
1485/** 1966/**
1486 * d_validate - verify dentry provided from insecure source 1967 * d_validate - verify dentry provided from insecure source (deprecated)
1487 * @dentry: The dentry alleged to be valid child of @dparent 1968 * @dentry: The dentry alleged to be valid child of @dparent
1488 * @dparent: The parent dentry (known to be valid) 1969 * @dparent: The parent dentry (known to be valid)
1489 * 1970 *
1490 * An insecure source has sent us a dentry, here we verify it and dget() it. 1971 * An insecure source has sent us a dentry, here we verify it and dget() it.
1491 * This is used by ncpfs in its readdir implementation. 1972 * This is used by ncpfs in its readdir implementation.
1492 * Zero is returned in the dentry is invalid. 1973 * Zero is returned in the dentry is invalid.
1974 *
1975 * This function is slow for big directories, and deprecated, do not use it.
1493 */ 1976 */
1494int d_validate(struct dentry *dentry, struct dentry *parent) 1977int d_validate(struct dentry *dentry, struct dentry *dparent)
1495{ 1978{
1496 struct hlist_head *head = d_hash(parent, dentry->d_name.hash); 1979 struct dentry *child;
1497 struct hlist_node *node;
1498 struct dentry *d;
1499
1500 /* Check whether the ptr might be valid at all.. */
1501 if (!kmem_ptr_validate(dentry_cache, dentry))
1502 return 0;
1503 if (dentry->d_parent != parent)
1504 return 0;
1505 1980
1506 rcu_read_lock(); 1981 spin_lock(&dparent->d_lock);
1507 hlist_for_each_entry_rcu(d, node, head, d_hash) { 1982 list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) {
1508 if (d == dentry) { 1983 if (dentry == child) {
1509 dget(dentry); 1984 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1985 __dget_dlock(dentry);
1986 spin_unlock(&dentry->d_lock);
1987 spin_unlock(&dparent->d_lock);
1510 return 1; 1988 return 1;
1511 } 1989 }
1512 } 1990 }
1513 rcu_read_unlock(); 1991 spin_unlock(&dparent->d_lock);
1992
1514 return 0; 1993 return 0;
1515} 1994}
1516EXPORT_SYMBOL(d_validate); 1995EXPORT_SYMBOL(d_validate);
@@ -1538,16 +2017,23 @@ EXPORT_SYMBOL(d_validate);
1538 2017
1539void d_delete(struct dentry * dentry) 2018void d_delete(struct dentry * dentry)
1540{ 2019{
2020 struct inode *inode;
1541 int isdir = 0; 2021 int isdir = 0;
1542 /* 2022 /*
1543 * Are we the only user? 2023 * Are we the only user?
1544 */ 2024 */
1545 spin_lock(&dcache_lock); 2025again:
1546 spin_lock(&dentry->d_lock); 2026 spin_lock(&dentry->d_lock);
1547 isdir = S_ISDIR(dentry->d_inode->i_mode); 2027 inode = dentry->d_inode;
1548 if (atomic_read(&dentry->d_count) == 1) { 2028 isdir = S_ISDIR(inode->i_mode);
2029 if (dentry->d_count == 1) {
2030 if (inode && !spin_trylock(&inode->i_lock)) {
2031 spin_unlock(&dentry->d_lock);
2032 cpu_relax();
2033 goto again;
2034 }
1549 dentry->d_flags &= ~DCACHE_CANT_MOUNT; 2035 dentry->d_flags &= ~DCACHE_CANT_MOUNT;
1550 dentry_iput(dentry); 2036 dentry_unlink_inode(dentry);
1551 fsnotify_nameremove(dentry, isdir); 2037 fsnotify_nameremove(dentry, isdir);
1552 return; 2038 return;
1553 } 2039 }
@@ -1556,17 +2042,18 @@ void d_delete(struct dentry * dentry)
1556 __d_drop(dentry); 2042 __d_drop(dentry);
1557 2043
1558 spin_unlock(&dentry->d_lock); 2044 spin_unlock(&dentry->d_lock);
1559 spin_unlock(&dcache_lock);
1560 2045
1561 fsnotify_nameremove(dentry, isdir); 2046 fsnotify_nameremove(dentry, isdir);
1562} 2047}
1563EXPORT_SYMBOL(d_delete); 2048EXPORT_SYMBOL(d_delete);
1564 2049
1565static void __d_rehash(struct dentry * entry, struct hlist_head *list) 2050static void __d_rehash(struct dentry * entry, struct dcache_hash_bucket *b)
1566{ 2051{
1567 2052 BUG_ON(!d_unhashed(entry));
2053 spin_lock_bucket(b);
1568 entry->d_flags &= ~DCACHE_UNHASHED; 2054 entry->d_flags &= ~DCACHE_UNHASHED;
1569 hlist_add_head_rcu(&entry->d_hash, list); 2055 hlist_bl_add_head_rcu(&entry->d_hash, &b->head);
2056 spin_unlock_bucket(b);
1570} 2057}
1571 2058
1572static void _d_rehash(struct dentry * entry) 2059static void _d_rehash(struct dentry * entry)
@@ -1583,25 +2070,39 @@ static void _d_rehash(struct dentry * entry)
1583 2070
1584void d_rehash(struct dentry * entry) 2071void d_rehash(struct dentry * entry)
1585{ 2072{
1586 spin_lock(&dcache_lock);
1587 spin_lock(&entry->d_lock); 2073 spin_lock(&entry->d_lock);
1588 _d_rehash(entry); 2074 _d_rehash(entry);
1589 spin_unlock(&entry->d_lock); 2075 spin_unlock(&entry->d_lock);
1590 spin_unlock(&dcache_lock);
1591} 2076}
1592EXPORT_SYMBOL(d_rehash); 2077EXPORT_SYMBOL(d_rehash);
1593 2078
1594/* 2079/**
1595 * When switching names, the actual string doesn't strictly have to 2080 * dentry_update_name_case - update case insensitive dentry with a new name
1596 * be preserved in the target - because we're dropping the target 2081 * @dentry: dentry to be updated
1597 * anyway. As such, we can just do a simple memcpy() to copy over 2082 * @name: new name
1598 * the new name before we switch.
1599 * 2083 *
1600 * Note that we have to be a lot more careful about getting the hash 2084 * Update a case insensitive dentry with new case of name.
1601 * switched - we have to switch the hash value properly even if it 2085 *
1602 * then no longer matches the actual (corrupted) string of the target. 2086 * dentry must have been returned by d_lookup with name @name. Old and new
1603 * The hash value has to match the hash queue that the dentry is on.. 2087 * name lengths must match (ie. no d_compare which allows mismatched name
2088 * lengths).
2089 *
2090 * Parent inode i_mutex must be held over d_lookup and into this call (to
2091 * keep renames and concurrent inserts, and readdir(2) away).
1604 */ 2092 */
2093void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
2094{
2095 BUG_ON(!mutex_is_locked(&dentry->d_inode->i_mutex));
2096 BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */
2097
2098 spin_lock(&dentry->d_lock);
2099 write_seqcount_begin(&dentry->d_seq);
2100 memcpy((unsigned char *)dentry->d_name.name, name->name, name->len);
2101 write_seqcount_end(&dentry->d_seq);
2102 spin_unlock(&dentry->d_lock);
2103}
2104EXPORT_SYMBOL(dentry_update_name_case);
2105
1605static void switch_names(struct dentry *dentry, struct dentry *target) 2106static void switch_names(struct dentry *dentry, struct dentry *target)
1606{ 2107{
1607 if (dname_external(target)) { 2108 if (dname_external(target)) {
@@ -1643,54 +2144,84 @@ static void switch_names(struct dentry *dentry, struct dentry *target)
1643 swap(dentry->d_name.len, target->d_name.len); 2144 swap(dentry->d_name.len, target->d_name.len);
1644} 2145}
1645 2146
2147static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)
2148{
2149 /*
2150 * XXXX: do we really need to take target->d_lock?
2151 */
2152 if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent)
2153 spin_lock(&target->d_parent->d_lock);
2154 else {
2155 if (d_ancestor(dentry->d_parent, target->d_parent)) {
2156 spin_lock(&dentry->d_parent->d_lock);
2157 spin_lock_nested(&target->d_parent->d_lock,
2158 DENTRY_D_LOCK_NESTED);
2159 } else {
2160 spin_lock(&target->d_parent->d_lock);
2161 spin_lock_nested(&dentry->d_parent->d_lock,
2162 DENTRY_D_LOCK_NESTED);
2163 }
2164 }
2165 if (target < dentry) {
2166 spin_lock_nested(&target->d_lock, 2);
2167 spin_lock_nested(&dentry->d_lock, 3);
2168 } else {
2169 spin_lock_nested(&dentry->d_lock, 2);
2170 spin_lock_nested(&target->d_lock, 3);
2171 }
2172}
2173
2174static void dentry_unlock_parents_for_move(struct dentry *dentry,
2175 struct dentry *target)
2176{
2177 if (target->d_parent != dentry->d_parent)
2178 spin_unlock(&dentry->d_parent->d_lock);
2179 if (target->d_parent != target)
2180 spin_unlock(&target->d_parent->d_lock);
2181}
2182
1646/* 2183/*
1647 * We cannibalize "target" when moving dentry on top of it, 2184 * When switching names, the actual string doesn't strictly have to
1648 * because it's going to be thrown away anyway. We could be more 2185 * be preserved in the target - because we're dropping the target
1649 * polite about it, though. 2186 * anyway. As such, we can just do a simple memcpy() to copy over
1650 * 2187 * the new name before we switch.
1651 * This forceful removal will result in ugly /proc output if 2188 *
1652 * somebody holds a file open that got deleted due to a rename. 2189 * Note that we have to be a lot more careful about getting the hash
1653 * We could be nicer about the deleted file, and let it show 2190 * switched - we have to switch the hash value properly even if it
1654 * up under the name it had before it was deleted rather than 2191 * then no longer matches the actual (corrupted) string of the target.
1655 * under the original name of the file that was moved on top of it. 2192 * The hash value has to match the hash queue that the dentry is on..
1656 */ 2193 */
1657
1658/* 2194/*
1659 * d_move_locked - move a dentry 2195 * d_move - move a dentry
1660 * @dentry: entry to move 2196 * @dentry: entry to move
1661 * @target: new dentry 2197 * @target: new dentry
1662 * 2198 *
1663 * Update the dcache to reflect the move of a file name. Negative 2199 * Update the dcache to reflect the move of a file name. Negative
1664 * dcache entries should not be moved in this way. 2200 * dcache entries should not be moved in this way.
1665 */ 2201 */
1666static void d_move_locked(struct dentry * dentry, struct dentry * target) 2202void d_move(struct dentry * dentry, struct dentry * target)
1667{ 2203{
1668 struct hlist_head *list;
1669
1670 if (!dentry->d_inode) 2204 if (!dentry->d_inode)
1671 printk(KERN_WARNING "VFS: moving negative dcache entry\n"); 2205 printk(KERN_WARNING "VFS: moving negative dcache entry\n");
1672 2206
2207 BUG_ON(d_ancestor(dentry, target));
2208 BUG_ON(d_ancestor(target, dentry));
2209
1673 write_seqlock(&rename_lock); 2210 write_seqlock(&rename_lock);
1674 /*
1675 * XXXX: do we really need to take target->d_lock?
1676 */
1677 if (target < dentry) {
1678 spin_lock(&target->d_lock);
1679 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1680 } else {
1681 spin_lock(&dentry->d_lock);
1682 spin_lock_nested(&target->d_lock, DENTRY_D_LOCK_NESTED);
1683 }
1684 2211
1685 /* Move the dentry to the target hash queue, if on different bucket */ 2212 dentry_lock_for_move(dentry, target);
1686 if (d_unhashed(dentry))
1687 goto already_unhashed;
1688 2213
1689 hlist_del_rcu(&dentry->d_hash); 2214 write_seqcount_begin(&dentry->d_seq);
2215 write_seqcount_begin(&target->d_seq);
1690 2216
1691already_unhashed: 2217 /* __d_drop does write_seqcount_barrier, but they're OK to nest. */
1692 list = d_hash(target->d_parent, target->d_name.hash); 2218
1693 __d_rehash(dentry, list); 2219 /*
2220 * Move the dentry to the target hash queue. Don't bother checking
2221 * for the same hash queue because of how unlikely it is.
2222 */
2223 __d_drop(dentry);
2224 __d_rehash(dentry, d_hash(target->d_parent, target->d_name.hash));
1694 2225
1695 /* Unhash the target: dput() will then get rid of it */ 2226 /* Unhash the target: dput() will then get rid of it */
1696 __d_drop(target); 2227 __d_drop(target);
@@ -1715,27 +2246,16 @@ already_unhashed:
1715 } 2246 }
1716 2247
1717 list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs); 2248 list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
2249
2250 write_seqcount_end(&target->d_seq);
2251 write_seqcount_end(&dentry->d_seq);
2252
2253 dentry_unlock_parents_for_move(dentry, target);
1718 spin_unlock(&target->d_lock); 2254 spin_unlock(&target->d_lock);
1719 fsnotify_d_move(dentry); 2255 fsnotify_d_move(dentry);
1720 spin_unlock(&dentry->d_lock); 2256 spin_unlock(&dentry->d_lock);
1721 write_sequnlock(&rename_lock); 2257 write_sequnlock(&rename_lock);
1722} 2258}
1723
1724/**
1725 * d_move - move a dentry
1726 * @dentry: entry to move
1727 * @target: new dentry
1728 *
1729 * Update the dcache to reflect the move of a file name. Negative
1730 * dcache entries should not be moved in this way.
1731 */
1732
1733void d_move(struct dentry * dentry, struct dentry * target)
1734{
1735 spin_lock(&dcache_lock);
1736 d_move_locked(dentry, target);
1737 spin_unlock(&dcache_lock);
1738}
1739EXPORT_SYMBOL(d_move); 2259EXPORT_SYMBOL(d_move);
1740 2260
1741/** 2261/**
@@ -1761,13 +2281,13 @@ struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
1761 * This helper attempts to cope with remotely renamed directories 2281 * This helper attempts to cope with remotely renamed directories
1762 * 2282 *
1763 * It assumes that the caller is already holding 2283 * It assumes that the caller is already holding
1764 * dentry->d_parent->d_inode->i_mutex and the dcache_lock 2284 * dentry->d_parent->d_inode->i_mutex and the inode->i_lock
1765 * 2285 *
1766 * Note: If ever the locking in lock_rename() changes, then please 2286 * Note: If ever the locking in lock_rename() changes, then please
1767 * remember to update this too... 2287 * remember to update this too...
1768 */ 2288 */
1769static struct dentry *__d_unalias(struct dentry *dentry, struct dentry *alias) 2289static struct dentry *__d_unalias(struct inode *inode,
1770 __releases(dcache_lock) 2290 struct dentry *dentry, struct dentry *alias)
1771{ 2291{
1772 struct mutex *m1 = NULL, *m2 = NULL; 2292 struct mutex *m1 = NULL, *m2 = NULL;
1773 struct dentry *ret; 2293 struct dentry *ret;
@@ -1790,10 +2310,10 @@ static struct dentry *__d_unalias(struct dentry *dentry, struct dentry *alias)
1790 goto out_err; 2310 goto out_err;
1791 m2 = &alias->d_parent->d_inode->i_mutex; 2311 m2 = &alias->d_parent->d_inode->i_mutex;
1792out_unalias: 2312out_unalias:
1793 d_move_locked(alias, dentry); 2313 d_move(alias, dentry);
1794 ret = alias; 2314 ret = alias;
1795out_err: 2315out_err:
1796 spin_unlock(&dcache_lock); 2316 spin_unlock(&inode->i_lock);
1797 if (m2) 2317 if (m2)
1798 mutex_unlock(m2); 2318 mutex_unlock(m2);
1799 if (m1) 2319 if (m1)
@@ -1804,17 +2324,23 @@ out_err:
1804/* 2324/*
1805 * Prepare an anonymous dentry for life in the superblock's dentry tree as a 2325 * Prepare an anonymous dentry for life in the superblock's dentry tree as a
1806 * named dentry in place of the dentry to be replaced. 2326 * named dentry in place of the dentry to be replaced.
2327 * returns with anon->d_lock held!
1807 */ 2328 */
1808static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon) 2329static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
1809{ 2330{
1810 struct dentry *dparent, *aparent; 2331 struct dentry *dparent, *aparent;
1811 2332
1812 switch_names(dentry, anon); 2333 dentry_lock_for_move(anon, dentry);
1813 swap(dentry->d_name.hash, anon->d_name.hash); 2334
2335 write_seqcount_begin(&dentry->d_seq);
2336 write_seqcount_begin(&anon->d_seq);
1814 2337
1815 dparent = dentry->d_parent; 2338 dparent = dentry->d_parent;
1816 aparent = anon->d_parent; 2339 aparent = anon->d_parent;
1817 2340
2341 switch_names(dentry, anon);
2342 swap(dentry->d_name.hash, anon->d_name.hash);
2343
1818 dentry->d_parent = (aparent == anon) ? dentry : aparent; 2344 dentry->d_parent = (aparent == anon) ? dentry : aparent;
1819 list_del(&dentry->d_u.d_child); 2345 list_del(&dentry->d_u.d_child);
1820 if (!IS_ROOT(dentry)) 2346 if (!IS_ROOT(dentry))
@@ -1829,6 +2355,13 @@ static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
1829 else 2355 else
1830 INIT_LIST_HEAD(&anon->d_u.d_child); 2356 INIT_LIST_HEAD(&anon->d_u.d_child);
1831 2357
2358 write_seqcount_end(&dentry->d_seq);
2359 write_seqcount_end(&anon->d_seq);
2360
2361 dentry_unlock_parents_for_move(anon, dentry);
2362 spin_unlock(&dentry->d_lock);
2363
2364 /* anon->d_lock still locked, returns locked */
1832 anon->d_flags &= ~DCACHE_DISCONNECTED; 2365 anon->d_flags &= ~DCACHE_DISCONNECTED;
1833} 2366}
1834 2367
@@ -1846,14 +2379,15 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
1846 2379
1847 BUG_ON(!d_unhashed(dentry)); 2380 BUG_ON(!d_unhashed(dentry));
1848 2381
1849 spin_lock(&dcache_lock);
1850
1851 if (!inode) { 2382 if (!inode) {
1852 actual = dentry; 2383 actual = dentry;
1853 __d_instantiate(dentry, NULL); 2384 __d_instantiate(dentry, NULL);
1854 goto found_lock; 2385 d_rehash(actual);
2386 goto out_nolock;
1855 } 2387 }
1856 2388
2389 spin_lock(&inode->i_lock);
2390
1857 if (S_ISDIR(inode->i_mode)) { 2391 if (S_ISDIR(inode->i_mode)) {
1858 struct dentry *alias; 2392 struct dentry *alias;
1859 2393
@@ -1864,13 +2398,12 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
1864 /* Is this an anonymous mountpoint that we could splice 2398 /* Is this an anonymous mountpoint that we could splice
1865 * into our tree? */ 2399 * into our tree? */
1866 if (IS_ROOT(alias)) { 2400 if (IS_ROOT(alias)) {
1867 spin_lock(&alias->d_lock);
1868 __d_materialise_dentry(dentry, alias); 2401 __d_materialise_dentry(dentry, alias);
1869 __d_drop(alias); 2402 __d_drop(alias);
1870 goto found; 2403 goto found;
1871 } 2404 }
1872 /* Nope, but we must(!) avoid directory aliasing */ 2405 /* Nope, but we must(!) avoid directory aliasing */
1873 actual = __d_unalias(dentry, alias); 2406 actual = __d_unalias(inode, dentry, alias);
1874 if (IS_ERR(actual)) 2407 if (IS_ERR(actual))
1875 dput(alias); 2408 dput(alias);
1876 goto out_nolock; 2409 goto out_nolock;
@@ -1881,15 +2414,14 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
1881 actual = __d_instantiate_unique(dentry, inode); 2414 actual = __d_instantiate_unique(dentry, inode);
1882 if (!actual) 2415 if (!actual)
1883 actual = dentry; 2416 actual = dentry;
1884 else if (unlikely(!d_unhashed(actual))) 2417 else
1885 goto shouldnt_be_hashed; 2418 BUG_ON(!d_unhashed(actual));
1886 2419
1887found_lock:
1888 spin_lock(&actual->d_lock); 2420 spin_lock(&actual->d_lock);
1889found: 2421found:
1890 _d_rehash(actual); 2422 _d_rehash(actual);
1891 spin_unlock(&actual->d_lock); 2423 spin_unlock(&actual->d_lock);
1892 spin_unlock(&dcache_lock); 2424 spin_unlock(&inode->i_lock);
1893out_nolock: 2425out_nolock:
1894 if (actual == dentry) { 2426 if (actual == dentry) {
1895 security_d_instantiate(dentry, inode); 2427 security_d_instantiate(dentry, inode);
@@ -1898,10 +2430,6 @@ out_nolock:
1898 2430
1899 iput(inode); 2431 iput(inode);
1900 return actual; 2432 return actual;
1901
1902shouldnt_be_hashed:
1903 spin_unlock(&dcache_lock);
1904 BUG();
1905} 2433}
1906EXPORT_SYMBOL_GPL(d_materialise_unique); 2434EXPORT_SYMBOL_GPL(d_materialise_unique);
1907 2435
@@ -1928,7 +2456,7 @@ static int prepend_name(char **buffer, int *buflen, struct qstr *name)
1928 * @buffer: pointer to the end of the buffer 2456 * @buffer: pointer to the end of the buffer
1929 * @buflen: pointer to buffer length 2457 * @buflen: pointer to buffer length
1930 * 2458 *
1931 * Caller holds the dcache_lock. 2459 * Caller holds the rename_lock.
1932 * 2460 *
1933 * If path is not reachable from the supplied root, then the value of 2461 * If path is not reachable from the supplied root, then the value of
1934 * root is changed (without modifying refcounts). 2462 * root is changed (without modifying refcounts).
@@ -1956,7 +2484,9 @@ static int prepend_path(const struct path *path, struct path *root,
1956 } 2484 }
1957 parent = dentry->d_parent; 2485 parent = dentry->d_parent;
1958 prefetch(parent); 2486 prefetch(parent);
2487 spin_lock(&dentry->d_lock);
1959 error = prepend_name(buffer, buflen, &dentry->d_name); 2488 error = prepend_name(buffer, buflen, &dentry->d_name);
2489 spin_unlock(&dentry->d_lock);
1960 if (!error) 2490 if (!error)
1961 error = prepend(buffer, buflen, "/", 1); 2491 error = prepend(buffer, buflen, "/", 1);
1962 if (error) 2492 if (error)
@@ -2012,9 +2542,9 @@ char *__d_path(const struct path *path, struct path *root,
2012 int error; 2542 int error;
2013 2543
2014 prepend(&res, &buflen, "\0", 1); 2544 prepend(&res, &buflen, "\0", 1);
2015 spin_lock(&dcache_lock); 2545 write_seqlock(&rename_lock);
2016 error = prepend_path(path, root, &res, &buflen); 2546 error = prepend_path(path, root, &res, &buflen);
2017 spin_unlock(&dcache_lock); 2547 write_sequnlock(&rename_lock);
2018 2548
2019 if (error) 2549 if (error)
2020 return ERR_PTR(error); 2550 return ERR_PTR(error);
@@ -2076,12 +2606,12 @@ char *d_path(const struct path *path, char *buf, int buflen)
2076 return path->dentry->d_op->d_dname(path->dentry, buf, buflen); 2606 return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
2077 2607
2078 get_fs_root(current->fs, &root); 2608 get_fs_root(current->fs, &root);
2079 spin_lock(&dcache_lock); 2609 write_seqlock(&rename_lock);
2080 tmp = root; 2610 tmp = root;
2081 error = path_with_deleted(path, &tmp, &res, &buflen); 2611 error = path_with_deleted(path, &tmp, &res, &buflen);
2082 if (error) 2612 if (error)
2083 res = ERR_PTR(error); 2613 res = ERR_PTR(error);
2084 spin_unlock(&dcache_lock); 2614 write_sequnlock(&rename_lock);
2085 path_put(&root); 2615 path_put(&root);
2086 return res; 2616 return res;
2087} 2617}
@@ -2107,12 +2637,12 @@ char *d_path_with_unreachable(const struct path *path, char *buf, int buflen)
2107 return path->dentry->d_op->d_dname(path->dentry, buf, buflen); 2637 return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
2108 2638
2109 get_fs_root(current->fs, &root); 2639 get_fs_root(current->fs, &root);
2110 spin_lock(&dcache_lock); 2640 write_seqlock(&rename_lock);
2111 tmp = root; 2641 tmp = root;
2112 error = path_with_deleted(path, &tmp, &res, &buflen); 2642 error = path_with_deleted(path, &tmp, &res, &buflen);
2113 if (!error && !path_equal(&tmp, &root)) 2643 if (!error && !path_equal(&tmp, &root))
2114 error = prepend_unreachable(&res, &buflen); 2644 error = prepend_unreachable(&res, &buflen);
2115 spin_unlock(&dcache_lock); 2645 write_sequnlock(&rename_lock);
2116 path_put(&root); 2646 path_put(&root);
2117 if (error) 2647 if (error)
2118 res = ERR_PTR(error); 2648 res = ERR_PTR(error);
@@ -2144,7 +2674,7 @@ char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
2144/* 2674/*
2145 * Write full pathname from the root of the filesystem into the buffer. 2675 * Write full pathname from the root of the filesystem into the buffer.
2146 */ 2676 */
2147char *__dentry_path(struct dentry *dentry, char *buf, int buflen) 2677static char *__dentry_path(struct dentry *dentry, char *buf, int buflen)
2148{ 2678{
2149 char *end = buf + buflen; 2679 char *end = buf + buflen;
2150 char *retval; 2680 char *retval;
@@ -2158,10 +2688,13 @@ char *__dentry_path(struct dentry *dentry, char *buf, int buflen)
2158 2688
2159 while (!IS_ROOT(dentry)) { 2689 while (!IS_ROOT(dentry)) {
2160 struct dentry *parent = dentry->d_parent; 2690 struct dentry *parent = dentry->d_parent;
2691 int error;
2161 2692
2162 prefetch(parent); 2693 prefetch(parent);
2163 if ((prepend_name(&end, &buflen, &dentry->d_name) != 0) || 2694 spin_lock(&dentry->d_lock);
2164 (prepend(&end, &buflen, "/", 1) != 0)) 2695 error = prepend_name(&end, &buflen, &dentry->d_name);
2696 spin_unlock(&dentry->d_lock);
2697 if (error != 0 || prepend(&end, &buflen, "/", 1) != 0)
2165 goto Elong; 2698 goto Elong;
2166 2699
2167 retval = end; 2700 retval = end;
@@ -2171,14 +2704,25 @@ char *__dentry_path(struct dentry *dentry, char *buf, int buflen)
2171Elong: 2704Elong:
2172 return ERR_PTR(-ENAMETOOLONG); 2705 return ERR_PTR(-ENAMETOOLONG);
2173} 2706}
2174EXPORT_SYMBOL(__dentry_path); 2707
2708char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen)
2709{
2710 char *retval;
2711
2712 write_seqlock(&rename_lock);
2713 retval = __dentry_path(dentry, buf, buflen);
2714 write_sequnlock(&rename_lock);
2715
2716 return retval;
2717}
2718EXPORT_SYMBOL(dentry_path_raw);
2175 2719
2176char *dentry_path(struct dentry *dentry, char *buf, int buflen) 2720char *dentry_path(struct dentry *dentry, char *buf, int buflen)
2177{ 2721{
2178 char *p = NULL; 2722 char *p = NULL;
2179 char *retval; 2723 char *retval;
2180 2724
2181 spin_lock(&dcache_lock); 2725 write_seqlock(&rename_lock);
2182 if (d_unlinked(dentry)) { 2726 if (d_unlinked(dentry)) {
2183 p = buf + buflen; 2727 p = buf + buflen;
2184 if (prepend(&p, &buflen, "//deleted", 10) != 0) 2728 if (prepend(&p, &buflen, "//deleted", 10) != 0)
@@ -2186,12 +2730,11 @@ char *dentry_path(struct dentry *dentry, char *buf, int buflen)
2186 buflen++; 2730 buflen++;
2187 } 2731 }
2188 retval = __dentry_path(dentry, buf, buflen); 2732 retval = __dentry_path(dentry, buf, buflen);
2189 spin_unlock(&dcache_lock); 2733 write_sequnlock(&rename_lock);
2190 if (!IS_ERR(retval) && p) 2734 if (!IS_ERR(retval) && p)
2191 *p = '/'; /* restore '/' overriden with '\0' */ 2735 *p = '/'; /* restore '/' overriden with '\0' */
2192 return retval; 2736 return retval;
2193Elong: 2737Elong:
2194 spin_unlock(&dcache_lock);
2195 return ERR_PTR(-ENAMETOOLONG); 2738 return ERR_PTR(-ENAMETOOLONG);
2196} 2739}
2197 2740
@@ -2225,7 +2768,7 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
2225 get_fs_root_and_pwd(current->fs, &root, &pwd); 2768 get_fs_root_and_pwd(current->fs, &root, &pwd);
2226 2769
2227 error = -ENOENT; 2770 error = -ENOENT;
2228 spin_lock(&dcache_lock); 2771 write_seqlock(&rename_lock);
2229 if (!d_unlinked(pwd.dentry)) { 2772 if (!d_unlinked(pwd.dentry)) {
2230 unsigned long len; 2773 unsigned long len;
2231 struct path tmp = root; 2774 struct path tmp = root;
@@ -2234,7 +2777,7 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
2234 2777
2235 prepend(&cwd, &buflen, "\0", 1); 2778 prepend(&cwd, &buflen, "\0", 1);
2236 error = prepend_path(&pwd, &tmp, &cwd, &buflen); 2779 error = prepend_path(&pwd, &tmp, &cwd, &buflen);
2237 spin_unlock(&dcache_lock); 2780 write_sequnlock(&rename_lock);
2238 2781
2239 if (error) 2782 if (error)
2240 goto out; 2783 goto out;
@@ -2253,8 +2796,9 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
2253 if (copy_to_user(buf, cwd, len)) 2796 if (copy_to_user(buf, cwd, len))
2254 error = -EFAULT; 2797 error = -EFAULT;
2255 } 2798 }
2256 } else 2799 } else {
2257 spin_unlock(&dcache_lock); 2800 write_sequnlock(&rename_lock);
2801 }
2258 2802
2259out: 2803out:
2260 path_put(&pwd); 2804 path_put(&pwd);
@@ -2282,25 +2826,25 @@ out:
2282int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry) 2826int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
2283{ 2827{
2284 int result; 2828 int result;
2285 unsigned long seq; 2829 unsigned seq;
2286 2830
2287 if (new_dentry == old_dentry) 2831 if (new_dentry == old_dentry)
2288 return 1; 2832 return 1;
2289 2833
2290 /*
2291 * Need rcu_readlock to protect against the d_parent trashing
2292 * due to d_move
2293 */
2294 rcu_read_lock();
2295 do { 2834 do {
2296 /* for restarting inner loop in case of seq retry */ 2835 /* for restarting inner loop in case of seq retry */
2297 seq = read_seqbegin(&rename_lock); 2836 seq = read_seqbegin(&rename_lock);
2837 /*
2838 * Need rcu_readlock to protect against the d_parent trashing
2839 * due to d_move
2840 */
2841 rcu_read_lock();
2298 if (d_ancestor(old_dentry, new_dentry)) 2842 if (d_ancestor(old_dentry, new_dentry))
2299 result = 1; 2843 result = 1;
2300 else 2844 else
2301 result = 0; 2845 result = 0;
2846 rcu_read_unlock();
2302 } while (read_seqretry(&rename_lock, seq)); 2847 } while (read_seqretry(&rename_lock, seq));
2303 rcu_read_unlock();
2304 2848
2305 return result; 2849 return result;
2306} 2850}
@@ -2332,10 +2876,15 @@ EXPORT_SYMBOL(path_is_under);
2332 2876
2333void d_genocide(struct dentry *root) 2877void d_genocide(struct dentry *root)
2334{ 2878{
2335 struct dentry *this_parent = root; 2879 struct dentry *this_parent;
2336 struct list_head *next; 2880 struct list_head *next;
2881 unsigned seq;
2882 int locked = 0;
2337 2883
2338 spin_lock(&dcache_lock); 2884 seq = read_seqbegin(&rename_lock);
2885again:
2886 this_parent = root;
2887 spin_lock(&this_parent->d_lock);
2339repeat: 2888repeat:
2340 next = this_parent->d_subdirs.next; 2889 next = this_parent->d_subdirs.next;
2341resume: 2890resume:
@@ -2343,21 +2892,62 @@ resume:
2343 struct list_head *tmp = next; 2892 struct list_head *tmp = next;
2344 struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child); 2893 struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
2345 next = tmp->next; 2894 next = tmp->next;
2346 if (d_unhashed(dentry)||!dentry->d_inode) 2895
2896 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
2897 if (d_unhashed(dentry) || !dentry->d_inode) {
2898 spin_unlock(&dentry->d_lock);
2347 continue; 2899 continue;
2900 }
2348 if (!list_empty(&dentry->d_subdirs)) { 2901 if (!list_empty(&dentry->d_subdirs)) {
2902 spin_unlock(&this_parent->d_lock);
2903 spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
2349 this_parent = dentry; 2904 this_parent = dentry;
2905 spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
2350 goto repeat; 2906 goto repeat;
2351 } 2907 }
2352 atomic_dec(&dentry->d_count); 2908 if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
2909 dentry->d_flags |= DCACHE_GENOCIDE;
2910 dentry->d_count--;
2911 }
2912 spin_unlock(&dentry->d_lock);
2353 } 2913 }
2354 if (this_parent != root) { 2914 if (this_parent != root) {
2355 next = this_parent->d_u.d_child.next; 2915 struct dentry *tmp;
2356 atomic_dec(&this_parent->d_count); 2916 struct dentry *child;
2357 this_parent = this_parent->d_parent; 2917
2918 tmp = this_parent->d_parent;
2919 if (!(this_parent->d_flags & DCACHE_GENOCIDE)) {
2920 this_parent->d_flags |= DCACHE_GENOCIDE;
2921 this_parent->d_count--;
2922 }
2923 rcu_read_lock();
2924 spin_unlock(&this_parent->d_lock);
2925 child = this_parent;
2926 this_parent = tmp;
2927 spin_lock(&this_parent->d_lock);
2928 /* might go back up the wrong parent if we have had a rename
2929 * or deletion */
2930 if (this_parent != child->d_parent ||
2931 (!locked && read_seqretry(&rename_lock, seq))) {
2932 spin_unlock(&this_parent->d_lock);
2933 rcu_read_unlock();
2934 goto rename_retry;
2935 }
2936 rcu_read_unlock();
2937 next = child->d_u.d_child.next;
2358 goto resume; 2938 goto resume;
2359 } 2939 }
2360 spin_unlock(&dcache_lock); 2940 spin_unlock(&this_parent->d_lock);
2941 if (!locked && read_seqretry(&rename_lock, seq))
2942 goto rename_retry;
2943 if (locked)
2944 write_sequnlock(&rename_lock);
2945 return;
2946
2947rename_retry:
2948 locked = 1;
2949 write_seqlock(&rename_lock);
2950 goto again;
2361} 2951}
2362 2952
2363/** 2953/**
@@ -2411,7 +3001,7 @@ static void __init dcache_init_early(void)
2411 3001
2412 dentry_hashtable = 3002 dentry_hashtable =
2413 alloc_large_system_hash("Dentry cache", 3003 alloc_large_system_hash("Dentry cache",
2414 sizeof(struct hlist_head), 3004 sizeof(struct dcache_hash_bucket),
2415 dhash_entries, 3005 dhash_entries,
2416 13, 3006 13,
2417 HASH_EARLY, 3007 HASH_EARLY,
@@ -2420,16 +3010,13 @@ static void __init dcache_init_early(void)
2420 0); 3010 0);
2421 3011
2422 for (loop = 0; loop < (1 << d_hash_shift); loop++) 3012 for (loop = 0; loop < (1 << d_hash_shift); loop++)
2423 INIT_HLIST_HEAD(&dentry_hashtable[loop]); 3013 INIT_HLIST_BL_HEAD(&dentry_hashtable[loop].head);
2424} 3014}
2425 3015
2426static void __init dcache_init(void) 3016static void __init dcache_init(void)
2427{ 3017{
2428 int loop; 3018 int loop;
2429 3019
2430 percpu_counter_init(&nr_dentry, 0);
2431 percpu_counter_init(&nr_dentry_unused, 0);
2432
2433 /* 3020 /*
2434 * A constructor could be added for stable state like the lists, 3021 * A constructor could be added for stable state like the lists,
2435 * but it is probably not worth it because of the cache nature 3022 * but it is probably not worth it because of the cache nature
@@ -2446,7 +3033,7 @@ static void __init dcache_init(void)
2446 3033
2447 dentry_hashtable = 3034 dentry_hashtable =
2448 alloc_large_system_hash("Dentry cache", 3035 alloc_large_system_hash("Dentry cache",
2449 sizeof(struct hlist_head), 3036 sizeof(struct dcache_hash_bucket),
2450 dhash_entries, 3037 dhash_entries,
2451 13, 3038 13,
2452 0, 3039 0,
@@ -2455,7 +3042,7 @@ static void __init dcache_init(void)
2455 0); 3042 0);
2456 3043
2457 for (loop = 0; loop < (1 << d_hash_shift); loop++) 3044 for (loop = 0; loop < (1 << d_hash_shift); loop++)
2458 INIT_HLIST_HEAD(&dentry_hashtable[loop]); 3045 INIT_HLIST_BL_HEAD(&dentry_hashtable[loop].head);
2459} 3046}
2460 3047
2461/* SLAB cache for __getname() consumers */ 3048/* SLAB cache for __getname() consumers */
diff --git a/fs/ecryptfs/dentry.c b/fs/ecryptfs/dentry.c
index 906e803f7f79..6fc4f319b550 100644
--- a/fs/ecryptfs/dentry.c
+++ b/fs/ecryptfs/dentry.c
@@ -44,12 +44,17 @@
44 */ 44 */
45static int ecryptfs_d_revalidate(struct dentry *dentry, struct nameidata *nd) 45static int ecryptfs_d_revalidate(struct dentry *dentry, struct nameidata *nd)
46{ 46{
47 struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry); 47 struct dentry *lower_dentry;
48 struct vfsmount *lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry); 48 struct vfsmount *lower_mnt;
49 struct dentry *dentry_save; 49 struct dentry *dentry_save;
50 struct vfsmount *vfsmount_save; 50 struct vfsmount *vfsmount_save;
51 int rc = 1; 51 int rc = 1;
52 52
53 if (nd->flags & LOOKUP_RCU)
54 return -ECHILD;
55
56 lower_dentry = ecryptfs_dentry_to_lower(dentry);
57 lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry);
53 if (!lower_dentry->d_op || !lower_dentry->d_op->d_revalidate) 58 if (!lower_dentry->d_op || !lower_dentry->d_op->d_revalidate)
54 goto out; 59 goto out;
55 dentry_save = nd->path.dentry; 60 dentry_save = nd->path.dentry;
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 9d1a22d62765..337352a94751 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -260,7 +260,7 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
260 ecryptfs_dentry->d_parent)); 260 ecryptfs_dentry->d_parent));
261 lower_inode = lower_dentry->d_inode; 261 lower_inode = lower_dentry->d_inode;
262 fsstack_copy_attr_atime(ecryptfs_dir_inode, lower_dir_dentry->d_inode); 262 fsstack_copy_attr_atime(ecryptfs_dir_inode, lower_dir_dentry->d_inode);
263 BUG_ON(!atomic_read(&lower_dentry->d_count)); 263 BUG_ON(!lower_dentry->d_count);
264 ecryptfs_set_dentry_private(ecryptfs_dentry, 264 ecryptfs_set_dentry_private(ecryptfs_dentry,
265 kmem_cache_alloc(ecryptfs_dentry_info_cache, 265 kmem_cache_alloc(ecryptfs_dentry_info_cache,
266 GFP_KERNEL)); 266 GFP_KERNEL));
@@ -441,7 +441,7 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
441 struct qstr lower_name; 441 struct qstr lower_name;
442 int rc = 0; 442 int rc = 0;
443 443
444 ecryptfs_dentry->d_op = &ecryptfs_dops; 444 d_set_d_op(ecryptfs_dentry, &ecryptfs_dops);
445 if ((ecryptfs_dentry->d_name.len == 1 445 if ((ecryptfs_dentry->d_name.len == 1
446 && !strcmp(ecryptfs_dentry->d_name.name, ".")) 446 && !strcmp(ecryptfs_dentry->d_name.name, "."))
447 || (ecryptfs_dentry->d_name.len == 2 447 || (ecryptfs_dentry->d_name.len == 2
@@ -454,7 +454,7 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
454 lower_name.hash = ecryptfs_dentry->d_name.hash; 454 lower_name.hash = ecryptfs_dentry->d_name.hash;
455 if (lower_dir_dentry->d_op && lower_dir_dentry->d_op->d_hash) { 455 if (lower_dir_dentry->d_op && lower_dir_dentry->d_op->d_hash) {
456 rc = lower_dir_dentry->d_op->d_hash(lower_dir_dentry, 456 rc = lower_dir_dentry->d_op->d_hash(lower_dir_dentry,
457 &lower_name); 457 lower_dir_dentry->d_inode, &lower_name);
458 if (rc < 0) 458 if (rc < 0)
459 goto out_d_drop; 459 goto out_d_drop;
460 } 460 }
@@ -489,7 +489,7 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
489 lower_name.hash = full_name_hash(lower_name.name, lower_name.len); 489 lower_name.hash = full_name_hash(lower_name.name, lower_name.len);
490 if (lower_dir_dentry->d_op && lower_dir_dentry->d_op->d_hash) { 490 if (lower_dir_dentry->d_op && lower_dir_dentry->d_op->d_hash) {
491 rc = lower_dir_dentry->d_op->d_hash(lower_dir_dentry, 491 rc = lower_dir_dentry->d_op->d_hash(lower_dir_dentry,
492 &lower_name); 492 lower_dir_dentry->d_inode, &lower_name);
493 if (rc < 0) 493 if (rc < 0)
494 goto out_d_drop; 494 goto out_d_drop;
495 } 495 }
@@ -980,8 +980,10 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
980} 980}
981 981
982static int 982static int
983ecryptfs_permission(struct inode *inode, int mask) 983ecryptfs_permission(struct inode *inode, int mask, unsigned int flags)
984{ 984{
985 if (flags & IPERM_FLAG_RCU)
986 return -ECHILD;
985 return inode_permission(ecryptfs_inode_to_lower(inode), mask); 987 return inode_permission(ecryptfs_inode_to_lower(inode), mask);
986} 988}
987 989
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index a9dbd62518e6..351038675376 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -189,7 +189,7 @@ int ecryptfs_interpose(struct dentry *lower_dentry, struct dentry *dentry,
189 if (special_file(lower_inode->i_mode)) 189 if (special_file(lower_inode->i_mode))
190 init_special_inode(inode, lower_inode->i_mode, 190 init_special_inode(inode, lower_inode->i_mode,
191 lower_inode->i_rdev); 191 lower_inode->i_rdev);
192 dentry->d_op = &ecryptfs_dops; 192 d_set_d_op(dentry, &ecryptfs_dops);
193 fsstack_copy_attr_all(inode, lower_inode); 193 fsstack_copy_attr_all(inode, lower_inode);
194 /* This size will be overwritten for real files w/ headers and 194 /* This size will be overwritten for real files w/ headers and
195 * other metadata */ 195 * other metadata */
@@ -594,7 +594,7 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
594 deactivate_locked_super(s); 594 deactivate_locked_super(s);
595 goto out; 595 goto out;
596 } 596 }
597 s->s_root->d_op = &ecryptfs_dops; 597 d_set_d_op(s->s_root, &ecryptfs_dops);
598 s->s_root->d_sb = s; 598 s->s_root->d_sb = s;
599 s->s_root->d_parent = s->s_root; 599 s->s_root->d_parent = s->s_root;
600 600
diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c
index 2720178b7718..3042fe123a34 100644
--- a/fs/ecryptfs/super.c
+++ b/fs/ecryptfs/super.c
@@ -62,6 +62,16 @@ out:
62 return inode; 62 return inode;
63} 63}
64 64
65static void ecryptfs_i_callback(struct rcu_head *head)
66{
67 struct inode *inode = container_of(head, struct inode, i_rcu);
68 struct ecryptfs_inode_info *inode_info;
69 inode_info = ecryptfs_inode_to_private(inode);
70
71 INIT_LIST_HEAD(&inode->i_dentry);
72 kmem_cache_free(ecryptfs_inode_info_cache, inode_info);
73}
74
65/** 75/**
66 * ecryptfs_destroy_inode 76 * ecryptfs_destroy_inode
67 * @inode: The ecryptfs inode 77 * @inode: The ecryptfs inode
@@ -88,7 +98,7 @@ static void ecryptfs_destroy_inode(struct inode *inode)
88 } 98 }
89 } 99 }
90 ecryptfs_destroy_crypt_stat(&inode_info->crypt_stat); 100 ecryptfs_destroy_crypt_stat(&inode_info->crypt_stat);
91 kmem_cache_free(ecryptfs_inode_info_cache, inode_info); 101 call_rcu(&inode->i_rcu, ecryptfs_i_callback);
92} 102}
93 103
94/** 104/**
diff --git a/fs/efs/super.c b/fs/efs/super.c
index 5073a07652cc..0f31acb0131c 100644
--- a/fs/efs/super.c
+++ b/fs/efs/super.c
@@ -65,11 +65,18 @@ static struct inode *efs_alloc_inode(struct super_block *sb)
65 return &ei->vfs_inode; 65 return &ei->vfs_inode;
66} 66}
67 67
68static void efs_destroy_inode(struct inode *inode) 68static void efs_i_callback(struct rcu_head *head)
69{ 69{
70 struct inode *inode = container_of(head, struct inode, i_rcu);
71 INIT_LIST_HEAD(&inode->i_dentry);
70 kmem_cache_free(efs_inode_cachep, INODE_INFO(inode)); 72 kmem_cache_free(efs_inode_cachep, INODE_INFO(inode));
71} 73}
72 74
75static void efs_destroy_inode(struct inode *inode)
76{
77 call_rcu(&inode->i_rcu, efs_i_callback);
78}
79
73static void init_once(void *foo) 80static void init_once(void *foo)
74{ 81{
75 struct efs_inode_info *ei = (struct efs_inode_info *) foo; 82 struct efs_inode_info *ei = (struct efs_inode_info *) foo;
diff --git a/fs/exofs/super.c b/fs/exofs/super.c
index 79c3ae6e0456..8c6c4669b381 100644
--- a/fs/exofs/super.c
+++ b/fs/exofs/super.c
@@ -150,12 +150,19 @@ static struct inode *exofs_alloc_inode(struct super_block *sb)
150 return &oi->vfs_inode; 150 return &oi->vfs_inode;
151} 151}
152 152
153static void exofs_i_callback(struct rcu_head *head)
154{
155 struct inode *inode = container_of(head, struct inode, i_rcu);
156 INIT_LIST_HEAD(&inode->i_dentry);
157 kmem_cache_free(exofs_inode_cachep, exofs_i(inode));
158}
159
153/* 160/*
154 * Remove an inode from the cache 161 * Remove an inode from the cache
155 */ 162 */
156static void exofs_destroy_inode(struct inode *inode) 163static void exofs_destroy_inode(struct inode *inode)
157{ 164{
158 kmem_cache_free(exofs_inode_cachep, exofs_i(inode)); 165 call_rcu(&inode->i_rcu, exofs_i_callback);
159} 166}
160 167
161/* 168/*
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index 51b304056f10..4b6825740dd5 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -43,24 +43,26 @@ find_acceptable_alias(struct dentry *result,
43 void *context) 43 void *context)
44{ 44{
45 struct dentry *dentry, *toput = NULL; 45 struct dentry *dentry, *toput = NULL;
46 struct inode *inode;
46 47
47 if (acceptable(context, result)) 48 if (acceptable(context, result))
48 return result; 49 return result;
49 50
50 spin_lock(&dcache_lock); 51 inode = result->d_inode;
51 list_for_each_entry(dentry, &result->d_inode->i_dentry, d_alias) { 52 spin_lock(&inode->i_lock);
52 dget_locked(dentry); 53 list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
53 spin_unlock(&dcache_lock); 54 dget(dentry);
55 spin_unlock(&inode->i_lock);
54 if (toput) 56 if (toput)
55 dput(toput); 57 dput(toput);
56 if (dentry != result && acceptable(context, dentry)) { 58 if (dentry != result && acceptable(context, dentry)) {
57 dput(result); 59 dput(result);
58 return dentry; 60 return dentry;
59 } 61 }
60 spin_lock(&dcache_lock); 62 spin_lock(&inode->i_lock);
61 toput = dentry; 63 toput = dentry;
62 } 64 }
63 spin_unlock(&dcache_lock); 65 spin_unlock(&inode->i_lock);
64 66
65 if (toput) 67 if (toput)
66 dput(toput); 68 dput(toput);
diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c
index 2bcc0431bada..7b4180554a62 100644
--- a/fs/ext2/acl.c
+++ b/fs/ext2/acl.c
@@ -232,10 +232,17 @@ ext2_set_acl(struct inode *inode, int type, struct posix_acl *acl)
232} 232}
233 233
234int 234int
235ext2_check_acl(struct inode *inode, int mask) 235ext2_check_acl(struct inode *inode, int mask, unsigned int flags)
236{ 236{
237 struct posix_acl *acl = ext2_get_acl(inode, ACL_TYPE_ACCESS); 237 struct posix_acl *acl;
238
239 if (flags & IPERM_FLAG_RCU) {
240 if (!negative_cached_acl(inode, ACL_TYPE_ACCESS))
241 return -ECHILD;
242 return -EAGAIN;
243 }
238 244
245 acl = ext2_get_acl(inode, ACL_TYPE_ACCESS);
239 if (IS_ERR(acl)) 246 if (IS_ERR(acl))
240 return PTR_ERR(acl); 247 return PTR_ERR(acl);
241 if (acl) { 248 if (acl) {
diff --git a/fs/ext2/acl.h b/fs/ext2/acl.h
index 3ff6cbb9ac44..c939b7b12099 100644
--- a/fs/ext2/acl.h
+++ b/fs/ext2/acl.h
@@ -54,7 +54,7 @@ static inline int ext2_acl_count(size_t size)
54#ifdef CONFIG_EXT2_FS_POSIX_ACL 54#ifdef CONFIG_EXT2_FS_POSIX_ACL
55 55
56/* acl.c */ 56/* acl.c */
57extern int ext2_check_acl (struct inode *, int); 57extern int ext2_check_acl (struct inode *, int, unsigned int);
58extern int ext2_acl_chmod (struct inode *); 58extern int ext2_acl_chmod (struct inode *);
59extern int ext2_init_acl (struct inode *, struct inode *); 59extern int ext2_init_acl (struct inode *, struct inode *);
60 60
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index d89e0b6a2d78..e0c6380ff992 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -161,11 +161,18 @@ static struct inode *ext2_alloc_inode(struct super_block *sb)
161 return &ei->vfs_inode; 161 return &ei->vfs_inode;
162} 162}
163 163
164static void ext2_destroy_inode(struct inode *inode) 164static void ext2_i_callback(struct rcu_head *head)
165{ 165{
166 struct inode *inode = container_of(head, struct inode, i_rcu);
167 INIT_LIST_HEAD(&inode->i_dentry);
166 kmem_cache_free(ext2_inode_cachep, EXT2_I(inode)); 168 kmem_cache_free(ext2_inode_cachep, EXT2_I(inode));
167} 169}
168 170
171static void ext2_destroy_inode(struct inode *inode)
172{
173 call_rcu(&inode->i_rcu, ext2_i_callback);
174}
175
169static void init_once(void *foo) 176static void init_once(void *foo)
170{ 177{
171 struct ext2_inode_info *ei = (struct ext2_inode_info *) foo; 178 struct ext2_inode_info *ei = (struct ext2_inode_info *) foo;
diff --git a/fs/ext3/acl.c b/fs/ext3/acl.c
index 8a11fe212183..e4fa49e6c539 100644
--- a/fs/ext3/acl.c
+++ b/fs/ext3/acl.c
@@ -240,10 +240,17 @@ ext3_set_acl(handle_t *handle, struct inode *inode, int type,
240} 240}
241 241
242int 242int
243ext3_check_acl(struct inode *inode, int mask) 243ext3_check_acl(struct inode *inode, int mask, unsigned int flags)
244{ 244{
245 struct posix_acl *acl = ext3_get_acl(inode, ACL_TYPE_ACCESS); 245 struct posix_acl *acl;
246
247 if (flags & IPERM_FLAG_RCU) {
248 if (!negative_cached_acl(inode, ACL_TYPE_ACCESS))
249 return -ECHILD;
250 return -EAGAIN;
251 }
246 252
253 acl = ext3_get_acl(inode, ACL_TYPE_ACCESS);
247 if (IS_ERR(acl)) 254 if (IS_ERR(acl))
248 return PTR_ERR(acl); 255 return PTR_ERR(acl);
249 if (acl) { 256 if (acl) {
diff --git a/fs/ext3/acl.h b/fs/ext3/acl.h
index 597334626de9..5faf8048e906 100644
--- a/fs/ext3/acl.h
+++ b/fs/ext3/acl.h
@@ -54,7 +54,7 @@ static inline int ext3_acl_count(size_t size)
54#ifdef CONFIG_EXT3_FS_POSIX_ACL 54#ifdef CONFIG_EXT3_FS_POSIX_ACL
55 55
56/* acl.c */ 56/* acl.c */
57extern int ext3_check_acl (struct inode *, int); 57extern int ext3_check_acl (struct inode *, int, unsigned int);
58extern int ext3_acl_chmod (struct inode *); 58extern int ext3_acl_chmod (struct inode *);
59extern int ext3_init_acl (handle_t *, struct inode *, struct inode *); 59extern int ext3_init_acl (handle_t *, struct inode *, struct inode *);
60 60
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index acf8695fa8f0..77ce1616f725 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -479,6 +479,13 @@ static struct inode *ext3_alloc_inode(struct super_block *sb)
479 return &ei->vfs_inode; 479 return &ei->vfs_inode;
480} 480}
481 481
482static void ext3_i_callback(struct rcu_head *head)
483{
484 struct inode *inode = container_of(head, struct inode, i_rcu);
485 INIT_LIST_HEAD(&inode->i_dentry);
486 kmem_cache_free(ext3_inode_cachep, EXT3_I(inode));
487}
488
482static void ext3_destroy_inode(struct inode *inode) 489static void ext3_destroy_inode(struct inode *inode)
483{ 490{
484 if (!list_empty(&(EXT3_I(inode)->i_orphan))) { 491 if (!list_empty(&(EXT3_I(inode)->i_orphan))) {
@@ -489,7 +496,7 @@ static void ext3_destroy_inode(struct inode *inode)
489 false); 496 false);
490 dump_stack(); 497 dump_stack();
491 } 498 }
492 kmem_cache_free(ext3_inode_cachep, EXT3_I(inode)); 499 call_rcu(&inode->i_rcu, ext3_i_callback);
493} 500}
494 501
495static void init_once(void *foo) 502static void init_once(void *foo)
diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
index 5e2ed4504ead..e0270d1f8d82 100644
--- a/fs/ext4/acl.c
+++ b/fs/ext4/acl.c
@@ -238,10 +238,17 @@ ext4_set_acl(handle_t *handle, struct inode *inode, int type,
238} 238}
239 239
240int 240int
241ext4_check_acl(struct inode *inode, int mask) 241ext4_check_acl(struct inode *inode, int mask, unsigned int flags)
242{ 242{
243 struct posix_acl *acl = ext4_get_acl(inode, ACL_TYPE_ACCESS); 243 struct posix_acl *acl;
244
245 if (flags & IPERM_FLAG_RCU) {
246 if (!negative_cached_acl(inode, ACL_TYPE_ACCESS))
247 return -ECHILD;
248 return -EAGAIN;
249 }
244 250
251 acl = ext4_get_acl(inode, ACL_TYPE_ACCESS);
245 if (IS_ERR(acl)) 252 if (IS_ERR(acl))
246 return PTR_ERR(acl); 253 return PTR_ERR(acl);
247 if (acl) { 254 if (acl) {
diff --git a/fs/ext4/acl.h b/fs/ext4/acl.h
index 9d843d5deac4..dec821168fd4 100644
--- a/fs/ext4/acl.h
+++ b/fs/ext4/acl.h
@@ -54,7 +54,7 @@ static inline int ext4_acl_count(size_t size)
54#ifdef CONFIG_EXT4_FS_POSIX_ACL 54#ifdef CONFIG_EXT4_FS_POSIX_ACL
55 55
56/* acl.c */ 56/* acl.c */
57extern int ext4_check_acl(struct inode *, int); 57extern int ext4_check_acl(struct inode *, int, unsigned int);
58extern int ext4_acl_chmod(struct inode *); 58extern int ext4_acl_chmod(struct inode *);
59extern int ext4_init_acl(handle_t *, struct inode *, struct inode *); 59extern int ext4_init_acl(handle_t *, struct inode *, struct inode *);
60 60
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index fb15c9c0be74..cd37f9d5e447 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -841,6 +841,13 @@ static int ext4_drop_inode(struct inode *inode)
841 return drop; 841 return drop;
842} 842}
843 843
844static void ext4_i_callback(struct rcu_head *head)
845{
846 struct inode *inode = container_of(head, struct inode, i_rcu);
847 INIT_LIST_HEAD(&inode->i_dentry);
848 kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
849}
850
844static void ext4_destroy_inode(struct inode *inode) 851static void ext4_destroy_inode(struct inode *inode)
845{ 852{
846 ext4_ioend_wait(inode); 853 ext4_ioend_wait(inode);
@@ -853,7 +860,7 @@ static void ext4_destroy_inode(struct inode *inode)
853 true); 860 true);
854 dump_stack(); 861 dump_stack();
855 } 862 }
856 kmem_cache_free(ext4_inode_cachep, EXT4_I(inode)); 863 call_rcu(&inode->i_rcu, ext4_i_callback);
857} 864}
858 865
859static void init_once(void *foo) 866static void init_once(void *foo)
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index ad6998a92c30..206351af7c58 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -514,11 +514,18 @@ static struct inode *fat_alloc_inode(struct super_block *sb)
514 return &ei->vfs_inode; 514 return &ei->vfs_inode;
515} 515}
516 516
517static void fat_destroy_inode(struct inode *inode) 517static void fat_i_callback(struct rcu_head *head)
518{ 518{
519 struct inode *inode = container_of(head, struct inode, i_rcu);
520 INIT_LIST_HEAD(&inode->i_dentry);
519 kmem_cache_free(fat_inode_cachep, MSDOS_I(inode)); 521 kmem_cache_free(fat_inode_cachep, MSDOS_I(inode));
520} 522}
521 523
524static void fat_destroy_inode(struct inode *inode)
525{
526 call_rcu(&inode->i_rcu, fat_i_callback);
527}
528
522static void init_once(void *foo) 529static void init_once(void *foo)
523{ 530{
524 struct msdos_inode_info *ei = (struct msdos_inode_info *)foo; 531 struct msdos_inode_info *ei = (struct msdos_inode_info *)foo;
@@ -743,7 +750,7 @@ static struct dentry *fat_fh_to_dentry(struct super_block *sb,
743 */ 750 */
744 result = d_obtain_alias(inode); 751 result = d_obtain_alias(inode);
745 if (!IS_ERR(result)) 752 if (!IS_ERR(result))
746 result->d_op = sb->s_root->d_op; 753 d_set_d_op(result, sb->s_root->d_op);
747 return result; 754 return result;
748} 755}
749 756
@@ -793,7 +800,7 @@ static struct dentry *fat_get_parent(struct dentry *child)
793 800
794 parent = d_obtain_alias(inode); 801 parent = d_obtain_alias(inode);
795 if (!IS_ERR(parent)) 802 if (!IS_ERR(parent))
796 parent->d_op = sb->s_root->d_op; 803 d_set_d_op(parent, sb->s_root->d_op);
797out: 804out:
798 unlock_super(sb); 805 unlock_super(sb);
799 806
diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c
index 3345aabd1dd7..35ffe43afa4b 100644
--- a/fs/fat/namei_msdos.c
+++ b/fs/fat/namei_msdos.c
@@ -148,7 +148,8 @@ static int msdos_find(struct inode *dir, const unsigned char *name, int len,
148 * that the existing dentry can be used. The msdos fs routines will 148 * that the existing dentry can be used. The msdos fs routines will
149 * return ENOENT or EINVAL as appropriate. 149 * return ENOENT or EINVAL as appropriate.
150 */ 150 */
151static int msdos_hash(struct dentry *dentry, struct qstr *qstr) 151static int msdos_hash(const struct dentry *dentry, const struct inode *inode,
152 struct qstr *qstr)
152{ 153{
153 struct fat_mount_options *options = &MSDOS_SB(dentry->d_sb)->options; 154 struct fat_mount_options *options = &MSDOS_SB(dentry->d_sb)->options;
154 unsigned char msdos_name[MSDOS_NAME]; 155 unsigned char msdos_name[MSDOS_NAME];
@@ -164,16 +165,18 @@ static int msdos_hash(struct dentry *dentry, struct qstr *qstr)
164 * Compare two msdos names. If either of the names are invalid, 165 * Compare two msdos names. If either of the names are invalid,
165 * we fall back to doing the standard name comparison. 166 * we fall back to doing the standard name comparison.
166 */ 167 */
167static int msdos_cmp(struct dentry *dentry, struct qstr *a, struct qstr *b) 168static int msdos_cmp(const struct dentry *parent, const struct inode *pinode,
169 const struct dentry *dentry, const struct inode *inode,
170 unsigned int len, const char *str, const struct qstr *name)
168{ 171{
169 struct fat_mount_options *options = &MSDOS_SB(dentry->d_sb)->options; 172 struct fat_mount_options *options = &MSDOS_SB(parent->d_sb)->options;
170 unsigned char a_msdos_name[MSDOS_NAME], b_msdos_name[MSDOS_NAME]; 173 unsigned char a_msdos_name[MSDOS_NAME], b_msdos_name[MSDOS_NAME];
171 int error; 174 int error;
172 175
173 error = msdos_format_name(a->name, a->len, a_msdos_name, options); 176 error = msdos_format_name(name->name, name->len, a_msdos_name, options);
174 if (error) 177 if (error)
175 goto old_compare; 178 goto old_compare;
176 error = msdos_format_name(b->name, b->len, b_msdos_name, options); 179 error = msdos_format_name(str, len, b_msdos_name, options);
177 if (error) 180 if (error)
178 goto old_compare; 181 goto old_compare;
179 error = memcmp(a_msdos_name, b_msdos_name, MSDOS_NAME); 182 error = memcmp(a_msdos_name, b_msdos_name, MSDOS_NAME);
@@ -182,8 +185,8 @@ out:
182 185
183old_compare: 186old_compare:
184 error = 1; 187 error = 1;
185 if (a->len == b->len) 188 if (name->len == len)
186 error = memcmp(a->name, b->name, a->len); 189 error = memcmp(name->name, str, len);
187 goto out; 190 goto out;
188} 191}
189 192
@@ -224,10 +227,10 @@ static struct dentry *msdos_lookup(struct inode *dir, struct dentry *dentry,
224 } 227 }
225out: 228out:
226 unlock_super(sb); 229 unlock_super(sb);
227 dentry->d_op = &msdos_dentry_operations; 230 d_set_d_op(dentry, &msdos_dentry_operations);
228 dentry = d_splice_alias(inode, dentry); 231 dentry = d_splice_alias(inode, dentry);
229 if (dentry) 232 if (dentry)
230 dentry->d_op = &msdos_dentry_operations; 233 d_set_d_op(dentry, &msdos_dentry_operations);
231 return dentry; 234 return dentry;
232 235
233error: 236error:
@@ -670,7 +673,7 @@ static int msdos_fill_super(struct super_block *sb, void *data, int silent)
670 } 673 }
671 674
672 sb->s_flags |= MS_NOATIME; 675 sb->s_flags |= MS_NOATIME;
673 sb->s_root->d_op = &msdos_dentry_operations; 676 d_set_d_op(sb->s_root, &msdos_dentry_operations);
674 unlock_super(sb); 677 unlock_super(sb);
675 return 0; 678 return 0;
676} 679}
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index b936703b8924..e3ffc5e12332 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -43,6 +43,9 @@ static int vfat_revalidate_shortname(struct dentry *dentry)
43 43
44static int vfat_revalidate(struct dentry *dentry, struct nameidata *nd) 44static int vfat_revalidate(struct dentry *dentry, struct nameidata *nd)
45{ 45{
46 if (nd->flags & LOOKUP_RCU)
47 return -ECHILD;
48
46 /* This is not negative dentry. Always valid. */ 49 /* This is not negative dentry. Always valid. */
47 if (dentry->d_inode) 50 if (dentry->d_inode)
48 return 1; 51 return 1;
@@ -51,6 +54,9 @@ static int vfat_revalidate(struct dentry *dentry, struct nameidata *nd)
51 54
52static int vfat_revalidate_ci(struct dentry *dentry, struct nameidata *nd) 55static int vfat_revalidate_ci(struct dentry *dentry, struct nameidata *nd)
53{ 56{
57 if (nd->flags & LOOKUP_RCU)
58 return -ECHILD;
59
54 /* 60 /*
55 * This is not negative dentry. Always valid. 61 * This is not negative dentry. Always valid.
56 * 62 *
@@ -85,22 +91,26 @@ static int vfat_revalidate_ci(struct dentry *dentry, struct nameidata *nd)
85} 91}
86 92
87/* returns the length of a struct qstr, ignoring trailing dots */ 93/* returns the length of a struct qstr, ignoring trailing dots */
88static unsigned int vfat_striptail_len(struct qstr *qstr) 94static unsigned int __vfat_striptail_len(unsigned int len, const char *name)
89{ 95{
90 unsigned int len = qstr->len; 96 while (len && name[len - 1] == '.')
91
92 while (len && qstr->name[len - 1] == '.')
93 len--; 97 len--;
94 return len; 98 return len;
95} 99}
96 100
101static unsigned int vfat_striptail_len(const struct qstr *qstr)
102{
103 return __vfat_striptail_len(qstr->len, qstr->name);
104}
105
97/* 106/*
98 * Compute the hash for the vfat name corresponding to the dentry. 107 * Compute the hash for the vfat name corresponding to the dentry.
99 * Note: if the name is invalid, we leave the hash code unchanged so 108 * Note: if the name is invalid, we leave the hash code unchanged so
100 * that the existing dentry can be used. The vfat fs routines will 109 * that the existing dentry can be used. The vfat fs routines will
101 * return ENOENT or EINVAL as appropriate. 110 * return ENOENT or EINVAL as appropriate.
102 */ 111 */
103static int vfat_hash(struct dentry *dentry, struct qstr *qstr) 112static int vfat_hash(const struct dentry *dentry, const struct inode *inode,
113 struct qstr *qstr)
104{ 114{
105 qstr->hash = full_name_hash(qstr->name, vfat_striptail_len(qstr)); 115 qstr->hash = full_name_hash(qstr->name, vfat_striptail_len(qstr));
106 return 0; 116 return 0;
@@ -112,9 +122,10 @@ static int vfat_hash(struct dentry *dentry, struct qstr *qstr)
112 * that the existing dentry can be used. The vfat fs routines will 122 * that the existing dentry can be used. The vfat fs routines will
113 * return ENOENT or EINVAL as appropriate. 123 * return ENOENT or EINVAL as appropriate.
114 */ 124 */
115static int vfat_hashi(struct dentry *dentry, struct qstr *qstr) 125static int vfat_hashi(const struct dentry *dentry, const struct inode *inode,
126 struct qstr *qstr)
116{ 127{
117 struct nls_table *t = MSDOS_SB(dentry->d_inode->i_sb)->nls_io; 128 struct nls_table *t = MSDOS_SB(dentry->d_sb)->nls_io;
118 const unsigned char *name; 129 const unsigned char *name;
119 unsigned int len; 130 unsigned int len;
120 unsigned long hash; 131 unsigned long hash;
@@ -133,16 +144,18 @@ static int vfat_hashi(struct dentry *dentry, struct qstr *qstr)
133/* 144/*
134 * Case insensitive compare of two vfat names. 145 * Case insensitive compare of two vfat names.
135 */ 146 */
136static int vfat_cmpi(struct dentry *dentry, struct qstr *a, struct qstr *b) 147static int vfat_cmpi(const struct dentry *parent, const struct inode *pinode,
148 const struct dentry *dentry, const struct inode *inode,
149 unsigned int len, const char *str, const struct qstr *name)
137{ 150{
138 struct nls_table *t = MSDOS_SB(dentry->d_inode->i_sb)->nls_io; 151 struct nls_table *t = MSDOS_SB(parent->d_sb)->nls_io;
139 unsigned int alen, blen; 152 unsigned int alen, blen;
140 153
141 /* A filename cannot end in '.' or we treat it like it has none */ 154 /* A filename cannot end in '.' or we treat it like it has none */
142 alen = vfat_striptail_len(a); 155 alen = vfat_striptail_len(name);
143 blen = vfat_striptail_len(b); 156 blen = __vfat_striptail_len(len, str);
144 if (alen == blen) { 157 if (alen == blen) {
145 if (nls_strnicmp(t, a->name, b->name, alen) == 0) 158 if (nls_strnicmp(t, name->name, str, alen) == 0)
146 return 0; 159 return 0;
147 } 160 }
148 return 1; 161 return 1;
@@ -151,15 +164,17 @@ static int vfat_cmpi(struct dentry *dentry, struct qstr *a, struct qstr *b)
151/* 164/*
152 * Case sensitive compare of two vfat names. 165 * Case sensitive compare of two vfat names.
153 */ 166 */
154static int vfat_cmp(struct dentry *dentry, struct qstr *a, struct qstr *b) 167static int vfat_cmp(const struct dentry *parent, const struct inode *pinode,
168 const struct dentry *dentry, const struct inode *inode,
169 unsigned int len, const char *str, const struct qstr *name)
155{ 170{
156 unsigned int alen, blen; 171 unsigned int alen, blen;
157 172
158 /* A filename cannot end in '.' or we treat it like it has none */ 173 /* A filename cannot end in '.' or we treat it like it has none */
159 alen = vfat_striptail_len(a); 174 alen = vfat_striptail_len(name);
160 blen = vfat_striptail_len(b); 175 blen = __vfat_striptail_len(len, str);
161 if (alen == blen) { 176 if (alen == blen) {
162 if (strncmp(a->name, b->name, alen) == 0) 177 if (strncmp(name->name, str, alen) == 0)
163 return 0; 178 return 0;
164 } 179 }
165 return 1; 180 return 1;
@@ -757,11 +772,11 @@ static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry,
757 772
758out: 773out:
759 unlock_super(sb); 774 unlock_super(sb);
760 dentry->d_op = sb->s_root->d_op; 775 d_set_d_op(dentry, sb->s_root->d_op);
761 dentry->d_time = dentry->d_parent->d_inode->i_version; 776 dentry->d_time = dentry->d_parent->d_inode->i_version;
762 dentry = d_splice_alias(inode, dentry); 777 dentry = d_splice_alias(inode, dentry);
763 if (dentry) { 778 if (dentry) {
764 dentry->d_op = sb->s_root->d_op; 779 d_set_d_op(dentry, sb->s_root->d_op);
765 dentry->d_time = dentry->d_parent->d_inode->i_version; 780 dentry->d_time = dentry->d_parent->d_inode->i_version;
766 } 781 }
767 return dentry; 782 return dentry;
@@ -1063,9 +1078,9 @@ static int vfat_fill_super(struct super_block *sb, void *data, int silent)
1063 } 1078 }
1064 1079
1065 if (MSDOS_SB(sb)->options.name_check != 's') 1080 if (MSDOS_SB(sb)->options.name_check != 's')
1066 sb->s_root->d_op = &vfat_ci_dentry_ops; 1081 d_set_d_op(sb->s_root, &vfat_ci_dentry_ops);
1067 else 1082 else
1068 sb->s_root->d_op = &vfat_dentry_ops; 1083 d_set_d_op(sb->s_root, &vfat_dentry_ops);
1069 1084
1070 unlock_super(sb); 1085 unlock_super(sb);
1071 return 0; 1086 return 0;
diff --git a/fs/filesystems.c b/fs/filesystems.c
index 68ba492d8eef..751d6b255a12 100644
--- a/fs/filesystems.c
+++ b/fs/filesystems.c
@@ -115,6 +115,9 @@ int unregister_filesystem(struct file_system_type * fs)
115 tmp = &(*tmp)->next; 115 tmp = &(*tmp)->next;
116 } 116 }
117 write_unlock(&file_systems_lock); 117 write_unlock(&file_systems_lock);
118
119 synchronize_rcu();
120
118 return -EINVAL; 121 return -EINVAL;
119} 122}
120 123
diff --git a/fs/freevxfs/vxfs_inode.c b/fs/freevxfs/vxfs_inode.c
index 8c04eac5079d..2ba6719ac612 100644
--- a/fs/freevxfs/vxfs_inode.c
+++ b/fs/freevxfs/vxfs_inode.c
@@ -337,6 +337,13 @@ vxfs_iget(struct super_block *sbp, ino_t ino)
337 return ip; 337 return ip;
338} 338}
339 339
340static void vxfs_i_callback(struct rcu_head *head)
341{
342 struct inode *inode = container_of(head, struct inode, i_rcu);
343 INIT_LIST_HEAD(&inode->i_dentry);
344 kmem_cache_free(vxfs_inode_cachep, inode->i_private);
345}
346
340/** 347/**
341 * vxfs_evict_inode - remove inode from main memory 348 * vxfs_evict_inode - remove inode from main memory
342 * @ip: inode to discard. 349 * @ip: inode to discard.
@@ -350,5 +357,5 @@ vxfs_evict_inode(struct inode *ip)
350{ 357{
351 truncate_inode_pages(&ip->i_data, 0); 358 truncate_inode_pages(&ip->i_data, 0);
352 end_writeback(ip); 359 end_writeback(ip);
353 kmem_cache_free(vxfs_inode_cachep, ip->i_private); 360 call_rcu(&ip->i_rcu, vxfs_i_callback);
354} 361}
diff --git a/fs/fs_struct.c b/fs/fs_struct.c
index ed45a9cf5f3d..68ca487bedb1 100644
--- a/fs/fs_struct.c
+++ b/fs/fs_struct.c
@@ -14,12 +14,14 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
14 struct path old_root; 14 struct path old_root;
15 15
16 spin_lock(&fs->lock); 16 spin_lock(&fs->lock);
17 write_seqcount_begin(&fs->seq);
17 old_root = fs->root; 18 old_root = fs->root;
18 fs->root = *path; 19 fs->root = *path;
19 path_get(path); 20 path_get_long(path);
21 write_seqcount_end(&fs->seq);
20 spin_unlock(&fs->lock); 22 spin_unlock(&fs->lock);
21 if (old_root.dentry) 23 if (old_root.dentry)
22 path_put(&old_root); 24 path_put_long(&old_root);
23} 25}
24 26
25/* 27/*
@@ -31,13 +33,15 @@ void set_fs_pwd(struct fs_struct *fs, struct path *path)
31 struct path old_pwd; 33 struct path old_pwd;
32 34
33 spin_lock(&fs->lock); 35 spin_lock(&fs->lock);
36 write_seqcount_begin(&fs->seq);
34 old_pwd = fs->pwd; 37 old_pwd = fs->pwd;
35 fs->pwd = *path; 38 fs->pwd = *path;
36 path_get(path); 39 path_get_long(path);
40 write_seqcount_end(&fs->seq);
37 spin_unlock(&fs->lock); 41 spin_unlock(&fs->lock);
38 42
39 if (old_pwd.dentry) 43 if (old_pwd.dentry)
40 path_put(&old_pwd); 44 path_put_long(&old_pwd);
41} 45}
42 46
43void chroot_fs_refs(struct path *old_root, struct path *new_root) 47void chroot_fs_refs(struct path *old_root, struct path *new_root)
@@ -52,31 +56,33 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
52 fs = p->fs; 56 fs = p->fs;
53 if (fs) { 57 if (fs) {
54 spin_lock(&fs->lock); 58 spin_lock(&fs->lock);
59 write_seqcount_begin(&fs->seq);
55 if (fs->root.dentry == old_root->dentry 60 if (fs->root.dentry == old_root->dentry
56 && fs->root.mnt == old_root->mnt) { 61 && fs->root.mnt == old_root->mnt) {
57 path_get(new_root); 62 path_get_long(new_root);
58 fs->root = *new_root; 63 fs->root = *new_root;
59 count++; 64 count++;
60 } 65 }
61 if (fs->pwd.dentry == old_root->dentry 66 if (fs->pwd.dentry == old_root->dentry
62 && fs->pwd.mnt == old_root->mnt) { 67 && fs->pwd.mnt == old_root->mnt) {
63 path_get(new_root); 68 path_get_long(new_root);
64 fs->pwd = *new_root; 69 fs->pwd = *new_root;
65 count++; 70 count++;
66 } 71 }
72 write_seqcount_end(&fs->seq);
67 spin_unlock(&fs->lock); 73 spin_unlock(&fs->lock);
68 } 74 }
69 task_unlock(p); 75 task_unlock(p);
70 } while_each_thread(g, p); 76 } while_each_thread(g, p);
71 read_unlock(&tasklist_lock); 77 read_unlock(&tasklist_lock);
72 while (count--) 78 while (count--)
73 path_put(old_root); 79 path_put_long(old_root);
74} 80}
75 81
76void free_fs_struct(struct fs_struct *fs) 82void free_fs_struct(struct fs_struct *fs)
77{ 83{
78 path_put(&fs->root); 84 path_put_long(&fs->root);
79 path_put(&fs->pwd); 85 path_put_long(&fs->pwd);
80 kmem_cache_free(fs_cachep, fs); 86 kmem_cache_free(fs_cachep, fs);
81} 87}
82 88
@@ -88,8 +94,10 @@ void exit_fs(struct task_struct *tsk)
88 int kill; 94 int kill;
89 task_lock(tsk); 95 task_lock(tsk);
90 spin_lock(&fs->lock); 96 spin_lock(&fs->lock);
97 write_seqcount_begin(&fs->seq);
91 tsk->fs = NULL; 98 tsk->fs = NULL;
92 kill = !--fs->users; 99 kill = !--fs->users;
100 write_seqcount_end(&fs->seq);
93 spin_unlock(&fs->lock); 101 spin_unlock(&fs->lock);
94 task_unlock(tsk); 102 task_unlock(tsk);
95 if (kill) 103 if (kill)
@@ -105,8 +113,15 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
105 fs->users = 1; 113 fs->users = 1;
106 fs->in_exec = 0; 114 fs->in_exec = 0;
107 spin_lock_init(&fs->lock); 115 spin_lock_init(&fs->lock);
116 seqcount_init(&fs->seq);
108 fs->umask = old->umask; 117 fs->umask = old->umask;
109 get_fs_root_and_pwd(old, &fs->root, &fs->pwd); 118
119 spin_lock(&old->lock);
120 fs->root = old->root;
121 path_get_long(&fs->root);
122 fs->pwd = old->pwd;
123 path_get_long(&fs->pwd);
124 spin_unlock(&old->lock);
110 } 125 }
111 return fs; 126 return fs;
112} 127}
@@ -144,6 +159,7 @@ EXPORT_SYMBOL(current_umask);
144struct fs_struct init_fs = { 159struct fs_struct init_fs = {
145 .users = 1, 160 .users = 1,
146 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock), 161 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
162 .seq = SEQCNT_ZERO,
147 .umask = 0022, 163 .umask = 0022,
148}; 164};
149 165
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index c9627c95482d..f738599fd8cd 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -156,8 +156,12 @@ u64 fuse_get_attr_version(struct fuse_conn *fc)
156 */ 156 */
157static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd) 157static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd)
158{ 158{
159 struct inode *inode = entry->d_inode; 159 struct inode *inode;
160 160
161 if (nd->flags & LOOKUP_RCU)
162 return -ECHILD;
163
164 inode = entry->d_inode;
161 if (inode && is_bad_inode(inode)) 165 if (inode && is_bad_inode(inode))
162 return 0; 166 return 0;
163 else if (fuse_dentry_time(entry) < get_jiffies_64()) { 167 else if (fuse_dentry_time(entry) < get_jiffies_64()) {
@@ -347,7 +351,7 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
347 } 351 }
348 352
349 entry = newent ? newent : entry; 353 entry = newent ? newent : entry;
350 entry->d_op = &fuse_dentry_operations; 354 d_set_d_op(entry, &fuse_dentry_operations);
351 if (outarg_valid) 355 if (outarg_valid)
352 fuse_change_entry_timeout(entry, &outarg); 356 fuse_change_entry_timeout(entry, &outarg);
353 else 357 else
@@ -981,12 +985,15 @@ static int fuse_access(struct inode *inode, int mask)
981 * access request is sent. Execute permission is still checked 985 * access request is sent. Execute permission is still checked
982 * locally based on file mode. 986 * locally based on file mode.
983 */ 987 */
984static int fuse_permission(struct inode *inode, int mask) 988static int fuse_permission(struct inode *inode, int mask, unsigned int flags)
985{ 989{
986 struct fuse_conn *fc = get_fuse_conn(inode); 990 struct fuse_conn *fc = get_fuse_conn(inode);
987 bool refreshed = false; 991 bool refreshed = false;
988 int err = 0; 992 int err = 0;
989 993
994 if (flags & IPERM_FLAG_RCU)
995 return -ECHILD;
996
990 if (!fuse_allow_task(fc, current)) 997 if (!fuse_allow_task(fc, current))
991 return -EACCES; 998 return -EACCES;
992 999
@@ -1001,7 +1008,7 @@ static int fuse_permission(struct inode *inode, int mask)
1001 } 1008 }
1002 1009
1003 if (fc->flags & FUSE_DEFAULT_PERMISSIONS) { 1010 if (fc->flags & FUSE_DEFAULT_PERMISSIONS) {
1004 err = generic_permission(inode, mask, NULL); 1011 err = generic_permission(inode, mask, flags, NULL);
1005 1012
1006 /* If permission is denied, try to refresh file 1013 /* If permission is denied, try to refresh file
1007 attributes. This is also needed, because the root 1014 attributes. This is also needed, because the root
@@ -1009,7 +1016,8 @@ static int fuse_permission(struct inode *inode, int mask)
1009 if (err == -EACCES && !refreshed) { 1016 if (err == -EACCES && !refreshed) {
1010 err = fuse_do_getattr(inode, NULL, NULL); 1017 err = fuse_do_getattr(inode, NULL, NULL);
1011 if (!err) 1018 if (!err)
1012 err = generic_permission(inode, mask, NULL); 1019 err = generic_permission(inode, mask,
1020 flags, NULL);
1013 } 1021 }
1014 1022
1015 /* Note: the opposite of the above test does not 1023 /* Note: the opposite of the above test does not
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index cfce3ad86a92..a8b31da19b93 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -99,6 +99,13 @@ static struct inode *fuse_alloc_inode(struct super_block *sb)
99 return inode; 99 return inode;
100} 100}
101 101
102static void fuse_i_callback(struct rcu_head *head)
103{
104 struct inode *inode = container_of(head, struct inode, i_rcu);
105 INIT_LIST_HEAD(&inode->i_dentry);
106 kmem_cache_free(fuse_inode_cachep, inode);
107}
108
102static void fuse_destroy_inode(struct inode *inode) 109static void fuse_destroy_inode(struct inode *inode)
103{ 110{
104 struct fuse_inode *fi = get_fuse_inode(inode); 111 struct fuse_inode *fi = get_fuse_inode(inode);
@@ -106,7 +113,7 @@ static void fuse_destroy_inode(struct inode *inode)
106 BUG_ON(!list_empty(&fi->queued_writes)); 113 BUG_ON(!list_empty(&fi->queued_writes));
107 if (fi->forget_req) 114 if (fi->forget_req)
108 fuse_request_free(fi->forget_req); 115 fuse_request_free(fi->forget_req);
109 kmem_cache_free(fuse_inode_cachep, inode); 116 call_rcu(&inode->i_rcu, fuse_i_callback);
110} 117}
111 118
112void fuse_send_forget(struct fuse_conn *fc, struct fuse_req *req, 119void fuse_send_forget(struct fuse_conn *fc, struct fuse_req *req,
@@ -619,7 +626,7 @@ static struct dentry *fuse_get_dentry(struct super_block *sb,
619 626
620 entry = d_obtain_alias(inode); 627 entry = d_obtain_alias(inode);
621 if (!IS_ERR(entry) && get_node_id(inode) != FUSE_ROOT_ID) { 628 if (!IS_ERR(entry) && get_node_id(inode) != FUSE_ROOT_ID) {
622 entry->d_op = &fuse_dentry_operations; 629 d_set_d_op(entry, &fuse_dentry_operations);
623 fuse_invalidate_entry_cache(entry); 630 fuse_invalidate_entry_cache(entry);
624 } 631 }
625 632
@@ -721,7 +728,7 @@ static struct dentry *fuse_get_parent(struct dentry *child)
721 728
722 parent = d_obtain_alias(inode); 729 parent = d_obtain_alias(inode);
723 if (!IS_ERR(parent) && get_node_id(inode) != FUSE_ROOT_ID) { 730 if (!IS_ERR(parent) && get_node_id(inode) != FUSE_ROOT_ID) {
724 parent->d_op = &fuse_dentry_operations; 731 d_set_d_op(parent, &fuse_dentry_operations);
725 fuse_invalidate_entry_cache(parent); 732 fuse_invalidate_entry_cache(parent);
726 } 733 }
727 734
diff --git a/fs/generic_acl.c b/fs/generic_acl.c
index 6bc9e3a5a693..06c48a891832 100644
--- a/fs/generic_acl.c
+++ b/fs/generic_acl.c
@@ -190,14 +190,20 @@ generic_acl_chmod(struct inode *inode)
190} 190}
191 191
192int 192int
193generic_check_acl(struct inode *inode, int mask) 193generic_check_acl(struct inode *inode, int mask, unsigned int flags)
194{ 194{
195 struct posix_acl *acl = get_cached_acl(inode, ACL_TYPE_ACCESS); 195 if (flags & IPERM_FLAG_RCU) {
196 196 if (!negative_cached_acl(inode, ACL_TYPE_ACCESS))
197 if (acl) { 197 return -ECHILD;
198 int error = posix_acl_permission(inode, acl, mask); 198 } else {
199 posix_acl_release(acl); 199 struct posix_acl *acl;
200 return error; 200
201 acl = get_cached_acl(inode, ACL_TYPE_ACCESS);
202 if (acl) {
203 int error = posix_acl_permission(inode, acl, mask);
204 posix_acl_release(acl);
205 return error;
206 }
201 } 207 }
202 return -EAGAIN; 208 return -EAGAIN;
203} 209}
diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
index 48171f4c943d..7118f1a780a9 100644
--- a/fs/gfs2/acl.c
+++ b/fs/gfs2/acl.c
@@ -75,11 +75,14 @@ static struct posix_acl *gfs2_acl_get(struct gfs2_inode *ip, int type)
75 * Returns: errno 75 * Returns: errno
76 */ 76 */
77 77
78int gfs2_check_acl(struct inode *inode, int mask) 78int gfs2_check_acl(struct inode *inode, int mask, unsigned int flags)
79{ 79{
80 struct posix_acl *acl; 80 struct posix_acl *acl;
81 int error; 81 int error;
82 82
83 if (flags & IPERM_FLAG_RCU)
84 return -ECHILD;
85
83 acl = gfs2_acl_get(GFS2_I(inode), ACL_TYPE_ACCESS); 86 acl = gfs2_acl_get(GFS2_I(inode), ACL_TYPE_ACCESS);
84 if (IS_ERR(acl)) 87 if (IS_ERR(acl))
85 return PTR_ERR(acl); 88 return PTR_ERR(acl);
diff --git a/fs/gfs2/acl.h b/fs/gfs2/acl.h
index b522b0cb39ea..a93907c8159b 100644
--- a/fs/gfs2/acl.h
+++ b/fs/gfs2/acl.h
@@ -16,7 +16,7 @@
16#define GFS2_POSIX_ACL_DEFAULT "posix_acl_default" 16#define GFS2_POSIX_ACL_DEFAULT "posix_acl_default"
17#define GFS2_ACL_MAX_ENTRIES 25 17#define GFS2_ACL_MAX_ENTRIES 25
18 18
19extern int gfs2_check_acl(struct inode *inode, int mask); 19extern int gfs2_check_acl(struct inode *inode, int mask, unsigned int);
20extern int gfs2_acl_create(struct gfs2_inode *dip, struct inode *inode); 20extern int gfs2_acl_create(struct gfs2_inode *dip, struct inode *inode);
21extern int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr); 21extern int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr);
22extern const struct xattr_handler gfs2_xattr_system_handler; 22extern const struct xattr_handler gfs2_xattr_system_handler;
diff --git a/fs/gfs2/dentry.c b/fs/gfs2/dentry.c
index 6798755b3858..4a456338b873 100644
--- a/fs/gfs2/dentry.c
+++ b/fs/gfs2/dentry.c
@@ -11,6 +11,7 @@
11#include <linux/completion.h> 11#include <linux/completion.h>
12#include <linux/buffer_head.h> 12#include <linux/buffer_head.h>
13#include <linux/gfs2_ondisk.h> 13#include <linux/gfs2_ondisk.h>
14#include <linux/namei.h>
14#include <linux/crc32.h> 15#include <linux/crc32.h>
15 16
16#include "gfs2.h" 17#include "gfs2.h"
@@ -34,15 +35,23 @@
34 35
35static int gfs2_drevalidate(struct dentry *dentry, struct nameidata *nd) 36static int gfs2_drevalidate(struct dentry *dentry, struct nameidata *nd)
36{ 37{
37 struct dentry *parent = dget_parent(dentry); 38 struct dentry *parent;
38 struct gfs2_sbd *sdp = GFS2_SB(parent->d_inode); 39 struct gfs2_sbd *sdp;
39 struct gfs2_inode *dip = GFS2_I(parent->d_inode); 40 struct gfs2_inode *dip;
40 struct inode *inode = dentry->d_inode; 41 struct inode *inode;
41 struct gfs2_holder d_gh; 42 struct gfs2_holder d_gh;
42 struct gfs2_inode *ip = NULL; 43 struct gfs2_inode *ip = NULL;
43 int error; 44 int error;
44 int had_lock = 0; 45 int had_lock = 0;
45 46
47 if (nd->flags & LOOKUP_RCU)
48 return -ECHILD;
49
50 parent = dget_parent(dentry);
51 sdp = GFS2_SB(parent->d_inode);
52 dip = GFS2_I(parent->d_inode);
53 inode = dentry->d_inode;
54
46 if (inode) { 55 if (inode) {
47 if (is_bad_inode(inode)) 56 if (is_bad_inode(inode))
48 goto invalid; 57 goto invalid;
@@ -100,13 +109,14 @@ fail:
100 return 0; 109 return 0;
101} 110}
102 111
103static int gfs2_dhash(struct dentry *dentry, struct qstr *str) 112static int gfs2_dhash(const struct dentry *dentry, const struct inode *inode,
113 struct qstr *str)
104{ 114{
105 str->hash = gfs2_disk_hash(str->name, str->len); 115 str->hash = gfs2_disk_hash(str->name, str->len);
106 return 0; 116 return 0;
107} 117}
108 118
109static int gfs2_dentry_delete(struct dentry *dentry) 119static int gfs2_dentry_delete(const struct dentry *dentry)
110{ 120{
111 struct gfs2_inode *ginode; 121 struct gfs2_inode *ginode;
112 122
diff --git a/fs/gfs2/export.c b/fs/gfs2/export.c
index 5ab3839dfcb9..97012ecff560 100644
--- a/fs/gfs2/export.c
+++ b/fs/gfs2/export.c
@@ -130,7 +130,7 @@ static struct dentry *gfs2_get_parent(struct dentry *child)
130 130
131 dentry = d_obtain_alias(gfs2_lookupi(child->d_inode, &gfs2_qdotdot, 1)); 131 dentry = d_obtain_alias(gfs2_lookupi(child->d_inode, &gfs2_qdotdot, 1));
132 if (!IS_ERR(dentry)) 132 if (!IS_ERR(dentry))
133 dentry->d_op = &gfs2_dops; 133 d_set_d_op(dentry, &gfs2_dops);
134 return dentry; 134 return dentry;
135} 135}
136 136
@@ -158,7 +158,7 @@ static struct dentry *gfs2_get_dentry(struct super_block *sb,
158out_inode: 158out_inode:
159 dentry = d_obtain_alias(inode); 159 dentry = d_obtain_alias(inode);
160 if (!IS_ERR(dentry)) 160 if (!IS_ERR(dentry))
161 dentry->d_op = &gfs2_dops; 161 d_set_d_op(dentry, &gfs2_dops);
162 return dentry; 162 return dentry;
163} 163}
164 164
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index aa996471ec5c..fca6689e12e6 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -241,7 +241,7 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
241 !capable(CAP_LINUX_IMMUTABLE)) 241 !capable(CAP_LINUX_IMMUTABLE))
242 goto out; 242 goto out;
243 if (!IS_IMMUTABLE(inode)) { 243 if (!IS_IMMUTABLE(inode)) {
244 error = gfs2_permission(inode, MAY_WRITE); 244 error = gfs2_permission(inode, MAY_WRITE, 0);
245 if (error) 245 if (error)
246 goto out; 246 goto out;
247 } 247 }
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 14e682dbe8bf..2232b3c780bd 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -509,7 +509,7 @@ struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
509 } 509 }
510 510
511 if (!is_root) { 511 if (!is_root) {
512 error = gfs2_permission(dir, MAY_EXEC); 512 error = gfs2_permission(dir, MAY_EXEC, 0);
513 if (error) 513 if (error)
514 goto out; 514 goto out;
515 } 515 }
@@ -539,7 +539,7 @@ static int create_ok(struct gfs2_inode *dip, const struct qstr *name,
539{ 539{
540 int error; 540 int error;
541 541
542 error = gfs2_permission(&dip->i_inode, MAY_WRITE | MAY_EXEC); 542 error = gfs2_permission(&dip->i_inode, MAY_WRITE | MAY_EXEC, 0);
543 if (error) 543 if (error)
544 return error; 544 return error;
545 545
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
index d8499fadcc53..732a183efdb3 100644
--- a/fs/gfs2/inode.h
+++ b/fs/gfs2/inode.h
@@ -113,7 +113,7 @@ extern struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
113extern struct inode *gfs2_createi(struct gfs2_holder *ghs, 113extern struct inode *gfs2_createi(struct gfs2_holder *ghs,
114 const struct qstr *name, 114 const struct qstr *name,
115 unsigned int mode, dev_t dev); 115 unsigned int mode, dev_t dev);
116extern int gfs2_permission(struct inode *inode, int mask); 116extern int gfs2_permission(struct inode *inode, int mask, unsigned int flags);
117extern int gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr); 117extern int gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr);
118extern struct inode *gfs2_lookup_simple(struct inode *dip, const char *name); 118extern struct inode *gfs2_lookup_simple(struct inode *dip, const char *name);
119extern void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf); 119extern void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf);
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 3eb1393f7b81..2aeabd4218cc 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -440,7 +440,7 @@ static int gfs2_lookup_root(struct super_block *sb, struct dentry **dptr,
440 iput(inode); 440 iput(inode);
441 return -ENOMEM; 441 return -ENOMEM;
442 } 442 }
443 dentry->d_op = &gfs2_dops; 443 d_set_d_op(dentry, &gfs2_dops);
444 *dptr = dentry; 444 *dptr = dentry;
445 return 0; 445 return 0;
446} 446}
diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
index 1db6b7343229..1501db4f0e6d 100644
--- a/fs/gfs2/ops_inode.c
+++ b/fs/gfs2/ops_inode.c
@@ -106,7 +106,7 @@ static struct dentry *gfs2_lookup(struct inode *dir, struct dentry *dentry,
106{ 106{
107 struct inode *inode = NULL; 107 struct inode *inode = NULL;
108 108
109 dentry->d_op = &gfs2_dops; 109 d_set_d_op(dentry, &gfs2_dops);
110 110
111 inode = gfs2_lookupi(dir, &dentry->d_name, 0); 111 inode = gfs2_lookupi(dir, &dentry->d_name, 0);
112 if (inode && IS_ERR(inode)) 112 if (inode && IS_ERR(inode))
@@ -166,7 +166,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
166 if (error) 166 if (error)
167 goto out_child; 167 goto out_child;
168 168
169 error = gfs2_permission(dir, MAY_WRITE | MAY_EXEC); 169 error = gfs2_permission(dir, MAY_WRITE | MAY_EXEC, 0);
170 if (error) 170 if (error)
171 goto out_gunlock; 171 goto out_gunlock;
172 172
@@ -289,7 +289,7 @@ static int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
289 if (IS_APPEND(&dip->i_inode)) 289 if (IS_APPEND(&dip->i_inode))
290 return -EPERM; 290 return -EPERM;
291 291
292 error = gfs2_permission(&dip->i_inode, MAY_WRITE | MAY_EXEC); 292 error = gfs2_permission(&dip->i_inode, MAY_WRITE | MAY_EXEC, 0);
293 if (error) 293 if (error)
294 return error; 294 return error;
295 295
@@ -822,7 +822,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
822 } 822 }
823 } 823 }
824 } else { 824 } else {
825 error = gfs2_permission(ndir, MAY_WRITE | MAY_EXEC); 825 error = gfs2_permission(ndir, MAY_WRITE | MAY_EXEC, 0);
826 if (error) 826 if (error)
827 goto out_gunlock; 827 goto out_gunlock;
828 828
@@ -857,7 +857,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
857 /* Check out the dir to be renamed */ 857 /* Check out the dir to be renamed */
858 858
859 if (dir_rename) { 859 if (dir_rename) {
860 error = gfs2_permission(odentry->d_inode, MAY_WRITE); 860 error = gfs2_permission(odentry->d_inode, MAY_WRITE, 0);
861 if (error) 861 if (error)
862 goto out_gunlock; 862 goto out_gunlock;
863 } 863 }
@@ -1041,13 +1041,17 @@ static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
1041 * Returns: errno 1041 * Returns: errno
1042 */ 1042 */
1043 1043
1044int gfs2_permission(struct inode *inode, int mask) 1044int gfs2_permission(struct inode *inode, int mask, unsigned int flags)
1045{ 1045{
1046 struct gfs2_inode *ip = GFS2_I(inode); 1046 struct gfs2_inode *ip;
1047 struct gfs2_holder i_gh; 1047 struct gfs2_holder i_gh;
1048 int error; 1048 int error;
1049 int unlock = 0; 1049 int unlock = 0;
1050 1050
1051 if (flags & IPERM_FLAG_RCU)
1052 return -ECHILD;
1053
1054 ip = GFS2_I(inode);
1051 if (gfs2_glock_is_locked_by_me(ip->i_gl) == NULL) { 1055 if (gfs2_glock_is_locked_by_me(ip->i_gl) == NULL) {
1052 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh); 1056 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
1053 if (error) 1057 if (error)
@@ -1058,7 +1062,7 @@ int gfs2_permission(struct inode *inode, int mask)
1058 if ((mask & MAY_WRITE) && IS_IMMUTABLE(inode)) 1062 if ((mask & MAY_WRITE) && IS_IMMUTABLE(inode))
1059 error = -EACCES; 1063 error = -EACCES;
1060 else 1064 else
1061 error = generic_permission(inode, mask, gfs2_check_acl); 1065 error = generic_permission(inode, mask, flags, gfs2_check_acl);
1062 if (unlock) 1066 if (unlock)
1063 gfs2_glock_dq_uninit(&i_gh); 1067 gfs2_glock_dq_uninit(&i_gh);
1064 1068
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 2b2c4997430b..16c2ecac7eb7 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1405,11 +1405,18 @@ static struct inode *gfs2_alloc_inode(struct super_block *sb)
1405 return &ip->i_inode; 1405 return &ip->i_inode;
1406} 1406}
1407 1407
1408static void gfs2_destroy_inode(struct inode *inode) 1408static void gfs2_i_callback(struct rcu_head *head)
1409{ 1409{
1410 struct inode *inode = container_of(head, struct inode, i_rcu);
1411 INIT_LIST_HEAD(&inode->i_dentry);
1410 kmem_cache_free(gfs2_inode_cachep, inode); 1412 kmem_cache_free(gfs2_inode_cachep, inode);
1411} 1413}
1412 1414
1415static void gfs2_destroy_inode(struct inode *inode)
1416{
1417 call_rcu(&inode->i_rcu, gfs2_i_callback);
1418}
1419
1413const struct super_operations gfs2_super_ops = { 1420const struct super_operations gfs2_super_ops = {
1414 .alloc_inode = gfs2_alloc_inode, 1421 .alloc_inode = gfs2_alloc_inode,
1415 .destroy_inode = gfs2_destroy_inode, 1422 .destroy_inode = gfs2_destroy_inode,
diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c
index 2b3b8611b41b..ea4aefe7c652 100644
--- a/fs/hfs/dir.c
+++ b/fs/hfs/dir.c
@@ -25,7 +25,7 @@ static struct dentry *hfs_lookup(struct inode *dir, struct dentry *dentry,
25 struct inode *inode = NULL; 25 struct inode *inode = NULL;
26 int res; 26 int res;
27 27
28 dentry->d_op = &hfs_dentry_operations; 28 d_set_d_op(dentry, &hfs_dentry_operations);
29 29
30 hfs_find_init(HFS_SB(dir->i_sb)->cat_tree, &fd); 30 hfs_find_init(HFS_SB(dir->i_sb)->cat_tree, &fd);
31 hfs_cat_build_key(dir->i_sb, fd.search_key, dir->i_ino, &dentry->d_name); 31 hfs_cat_build_key(dir->i_sb, fd.search_key, dir->i_ino, &dentry->d_name);
diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h
index c8cffb81e849..ad97c2d58287 100644
--- a/fs/hfs/hfs_fs.h
+++ b/fs/hfs/hfs_fs.h
@@ -213,10 +213,14 @@ extern int hfs_part_find(struct super_block *, sector_t *, sector_t *);
213/* string.c */ 213/* string.c */
214extern const struct dentry_operations hfs_dentry_operations; 214extern const struct dentry_operations hfs_dentry_operations;
215 215
216extern int hfs_hash_dentry(struct dentry *, struct qstr *); 216extern int hfs_hash_dentry(const struct dentry *, const struct inode *,
217 struct qstr *);
217extern int hfs_strcmp(const unsigned char *, unsigned int, 218extern int hfs_strcmp(const unsigned char *, unsigned int,
218 const unsigned char *, unsigned int); 219 const unsigned char *, unsigned int);
219extern int hfs_compare_dentry(struct dentry *, struct qstr *, struct qstr *); 220extern int hfs_compare_dentry(const struct dentry *parent,
221 const struct inode *pinode,
222 const struct dentry *dentry, const struct inode *inode,
223 unsigned int len, const char *str, const struct qstr *name);
220 224
221/* trans.c */ 225/* trans.c */
222extern void hfs_asc2mac(struct super_block *, struct hfs_name *, struct qstr *); 226extern void hfs_asc2mac(struct super_block *, struct hfs_name *, struct qstr *);
diff --git a/fs/hfs/string.c b/fs/hfs/string.c
index 927a5af79428..495a976a3cc9 100644
--- a/fs/hfs/string.c
+++ b/fs/hfs/string.c
@@ -51,7 +51,8 @@ static unsigned char caseorder[256] = {
51/* 51/*
52 * Hash a string to an integer in a case-independent way 52 * Hash a string to an integer in a case-independent way
53 */ 53 */
54int hfs_hash_dentry(struct dentry *dentry, struct qstr *this) 54int hfs_hash_dentry(const struct dentry *dentry, const struct inode *inode,
55 struct qstr *this)
55{ 56{
56 const unsigned char *name = this->name; 57 const unsigned char *name = this->name;
57 unsigned int hash, len = this->len; 58 unsigned int hash, len = this->len;
@@ -92,21 +93,21 @@ int hfs_strcmp(const unsigned char *s1, unsigned int len1,
92 * Test for equality of two strings in the HFS filename character ordering. 93 * Test for equality of two strings in the HFS filename character ordering.
93 * return 1 on failure and 0 on success 94 * return 1 on failure and 0 on success
94 */ 95 */
95int hfs_compare_dentry(struct dentry *dentry, struct qstr *s1, struct qstr *s2) 96int hfs_compare_dentry(const struct dentry *parent, const struct inode *pinode,
97 const struct dentry *dentry, const struct inode *inode,
98 unsigned int len, const char *str, const struct qstr *name)
96{ 99{
97 const unsigned char *n1, *n2; 100 const unsigned char *n1, *n2;
98 int len;
99 101
100 len = s1->len;
101 if (len >= HFS_NAMELEN) { 102 if (len >= HFS_NAMELEN) {
102 if (s2->len < HFS_NAMELEN) 103 if (name->len < HFS_NAMELEN)
103 return 1; 104 return 1;
104 len = HFS_NAMELEN; 105 len = HFS_NAMELEN;
105 } else if (len != s2->len) 106 } else if (len != name->len)
106 return 1; 107 return 1;
107 108
108 n1 = s1->name; 109 n1 = str;
109 n2 = s2->name; 110 n2 = name->name;
110 while (len--) { 111 while (len--) {
111 if (caseorder[*n1++] != caseorder[*n2++]) 112 if (caseorder[*n1++] != caseorder[*n2++])
112 return 1; 113 return 1;
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index 4824c27cebb8..0bef62aa4f42 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -167,11 +167,18 @@ static struct inode *hfs_alloc_inode(struct super_block *sb)
167 return i ? &i->vfs_inode : NULL; 167 return i ? &i->vfs_inode : NULL;
168} 168}
169 169
170static void hfs_destroy_inode(struct inode *inode) 170static void hfs_i_callback(struct rcu_head *head)
171{ 171{
172 struct inode *inode = container_of(head, struct inode, i_rcu);
173 INIT_LIST_HEAD(&inode->i_dentry);
172 kmem_cache_free(hfs_inode_cachep, HFS_I(inode)); 174 kmem_cache_free(hfs_inode_cachep, HFS_I(inode));
173} 175}
174 176
177static void hfs_destroy_inode(struct inode *inode)
178{
179 call_rcu(&inode->i_rcu, hfs_i_callback);
180}
181
175static const struct super_operations hfs_super_operations = { 182static const struct super_operations hfs_super_operations = {
176 .alloc_inode = hfs_alloc_inode, 183 .alloc_inode = hfs_alloc_inode,
177 .destroy_inode = hfs_destroy_inode, 184 .destroy_inode = hfs_destroy_inode,
@@ -427,7 +434,7 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
427 if (!sb->s_root) 434 if (!sb->s_root)
428 goto bail_iput; 435 goto bail_iput;
429 436
430 sb->s_root->d_op = &hfs_dentry_operations; 437 d_set_d_op(sb->s_root, &hfs_dentry_operations);
431 438
432 /* everything's okay */ 439 /* everything's okay */
433 return 0; 440 return 0;
diff --git a/fs/hfs/sysdep.c b/fs/hfs/sysdep.c
index 7478f5c219aa..19cf291eb91f 100644
--- a/fs/hfs/sysdep.c
+++ b/fs/hfs/sysdep.c
@@ -8,15 +8,20 @@
8 * This file contains the code to do various system dependent things. 8 * This file contains the code to do various system dependent things.
9 */ 9 */
10 10
11#include <linux/namei.h>
11#include "hfs_fs.h" 12#include "hfs_fs.h"
12 13
13/* dentry case-handling: just lowercase everything */ 14/* dentry case-handling: just lowercase everything */
14 15
15static int hfs_revalidate_dentry(struct dentry *dentry, struct nameidata *nd) 16static int hfs_revalidate_dentry(struct dentry *dentry, struct nameidata *nd)
16{ 17{
17 struct inode *inode = dentry->d_inode; 18 struct inode *inode;
18 int diff; 19 int diff;
19 20
21 if (nd->flags & LOOKUP_RCU)
22 return -ECHILD;
23
24 inode = dentry->d_inode;
20 if(!inode) 25 if(!inode)
21 return 1; 26 return 1;
22 27
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
index 9d59c0571f59..ccab87145f7a 100644
--- a/fs/hfsplus/dir.c
+++ b/fs/hfsplus/dir.c
@@ -37,7 +37,7 @@ static struct dentry *hfsplus_lookup(struct inode *dir, struct dentry *dentry,
37 37
38 sb = dir->i_sb; 38 sb = dir->i_sb;
39 39
40 dentry->d_op = &hfsplus_dentry_operations; 40 d_set_d_op(dentry, &hfsplus_dentry_operations);
41 dentry->d_fsdata = NULL; 41 dentry->d_fsdata = NULL;
42 hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd); 42 hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
43 hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, &dentry->d_name); 43 hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, &dentry->d_name);
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index cb3653efb57a..a5308f491e3e 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -379,8 +379,12 @@ int hfsplus_strcasecmp(const struct hfsplus_unistr *, const struct hfsplus_unist
379int hfsplus_strcmp(const struct hfsplus_unistr *, const struct hfsplus_unistr *); 379int hfsplus_strcmp(const struct hfsplus_unistr *, const struct hfsplus_unistr *);
380int hfsplus_uni2asc(struct super_block *, const struct hfsplus_unistr *, char *, int *); 380int hfsplus_uni2asc(struct super_block *, const struct hfsplus_unistr *, char *, int *);
381int hfsplus_asc2uni(struct super_block *, struct hfsplus_unistr *, const char *, int); 381int hfsplus_asc2uni(struct super_block *, struct hfsplus_unistr *, const char *, int);
382int hfsplus_hash_dentry(struct dentry *dentry, struct qstr *str); 382int hfsplus_hash_dentry(const struct dentry *dentry, const struct inode *inode,
383int hfsplus_compare_dentry(struct dentry *dentry, struct qstr *s1, struct qstr *s2); 383 struct qstr *str);
384int hfsplus_compare_dentry(const struct dentry *parent,
385 const struct inode *pinode,
386 const struct dentry *dentry, const struct inode *inode,
387 unsigned int len, const char *str, const struct qstr *name);
384 388
385/* wrapper.c */ 389/* wrapper.c */
386int hfsplus_read_wrapper(struct super_block *); 390int hfsplus_read_wrapper(struct super_block *);
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 52cc746d3ba3..ddf712e4700e 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -419,7 +419,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
419 err = -ENOMEM; 419 err = -ENOMEM;
420 goto cleanup; 420 goto cleanup;
421 } 421 }
422 sb->s_root->d_op = &hfsplus_dentry_operations; 422 d_set_d_op(sb->s_root, &hfsplus_dentry_operations);
423 423
424 str.len = sizeof(HFSP_HIDDENDIR_NAME) - 1; 424 str.len = sizeof(HFSP_HIDDENDIR_NAME) - 1;
425 str.name = HFSP_HIDDENDIR_NAME; 425 str.name = HFSP_HIDDENDIR_NAME;
@@ -488,11 +488,19 @@ static struct inode *hfsplus_alloc_inode(struct super_block *sb)
488 return i ? &i->vfs_inode : NULL; 488 return i ? &i->vfs_inode : NULL;
489} 489}
490 490
491static void hfsplus_destroy_inode(struct inode *inode) 491static void hfsplus_i_callback(struct rcu_head *head)
492{ 492{
493 struct inode *inode = container_of(head, struct inode, i_rcu);
494
495 INIT_LIST_HEAD(&inode->i_dentry);
493 kmem_cache_free(hfsplus_inode_cachep, HFSPLUS_I(inode)); 496 kmem_cache_free(hfsplus_inode_cachep, HFSPLUS_I(inode));
494} 497}
495 498
499static void hfsplus_destroy_inode(struct inode *inode)
500{
501 call_rcu(&inode->i_rcu, hfsplus_i_callback);
502}
503
496#define HFSPLUS_INODE_SIZE sizeof(struct hfsplus_inode_info) 504#define HFSPLUS_INODE_SIZE sizeof(struct hfsplus_inode_info)
497 505
498static struct dentry *hfsplus_mount(struct file_system_type *fs_type, 506static struct dentry *hfsplus_mount(struct file_system_type *fs_type,
diff --git a/fs/hfsplus/unicode.c b/fs/hfsplus/unicode.c
index b66d67de882c..d800aa0f2c80 100644
--- a/fs/hfsplus/unicode.c
+++ b/fs/hfsplus/unicode.c
@@ -320,7 +320,8 @@ int hfsplus_asc2uni(struct super_block *sb, struct hfsplus_unistr *ustr,
320 * Composed unicode characters are decomposed and case-folding is performed 320 * Composed unicode characters are decomposed and case-folding is performed
321 * if the appropriate bits are (un)set on the superblock. 321 * if the appropriate bits are (un)set on the superblock.
322 */ 322 */
323int hfsplus_hash_dentry(struct dentry *dentry, struct qstr *str) 323int hfsplus_hash_dentry(const struct dentry *dentry, const struct inode *inode,
324 struct qstr *str)
324{ 325{
325 struct super_block *sb = dentry->d_sb; 326 struct super_block *sb = dentry->d_sb;
326 const char *astr; 327 const char *astr;
@@ -363,9 +364,12 @@ int hfsplus_hash_dentry(struct dentry *dentry, struct qstr *str)
363 * Composed unicode characters are decomposed and case-folding is performed 364 * Composed unicode characters are decomposed and case-folding is performed
364 * if the appropriate bits are (un)set on the superblock. 365 * if the appropriate bits are (un)set on the superblock.
365 */ 366 */
366int hfsplus_compare_dentry(struct dentry *dentry, struct qstr *s1, struct qstr *s2) 367int hfsplus_compare_dentry(const struct dentry *parent,
368 const struct inode *pinode,
369 const struct dentry *dentry, const struct inode *inode,
370 unsigned int len, const char *str, const struct qstr *name)
367{ 371{
368 struct super_block *sb = dentry->d_sb; 372 struct super_block *sb = parent->d_sb;
369 int casefold, decompose, size; 373 int casefold, decompose, size;
370 int dsize1, dsize2, len1, len2; 374 int dsize1, dsize2, len1, len2;
371 const u16 *dstr1, *dstr2; 375 const u16 *dstr1, *dstr2;
@@ -375,10 +379,10 @@ int hfsplus_compare_dentry(struct dentry *dentry, struct qstr *s1, struct qstr *
375 379
376 casefold = test_bit(HFSPLUS_SB_CASEFOLD, &HFSPLUS_SB(sb)->flags); 380 casefold = test_bit(HFSPLUS_SB_CASEFOLD, &HFSPLUS_SB(sb)->flags);
377 decompose = !test_bit(HFSPLUS_SB_NODECOMPOSE, &HFSPLUS_SB(sb)->flags); 381 decompose = !test_bit(HFSPLUS_SB_NODECOMPOSE, &HFSPLUS_SB(sb)->flags);
378 astr1 = s1->name; 382 astr1 = str;
379 len1 = s1->len; 383 len1 = len;
380 astr2 = s2->name; 384 astr2 = name->name;
381 len2 = s2->len; 385 len2 = name->len;
382 dsize1 = dsize2 = 0; 386 dsize1 = dsize2 = 0;
383 dstr1 = dstr2 = NULL; 387 dstr1 = dstr2 = NULL;
384 388
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 2c0f148a49e6..d3244d949a4e 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -32,7 +32,7 @@ static inline struct hostfs_inode_info *HOSTFS_I(struct inode *inode)
32 32
33#define FILE_HOSTFS_I(file) HOSTFS_I((file)->f_path.dentry->d_inode) 33#define FILE_HOSTFS_I(file) HOSTFS_I((file)->f_path.dentry->d_inode)
34 34
35static int hostfs_d_delete(struct dentry *dentry) 35static int hostfs_d_delete(const struct dentry *dentry)
36{ 36{
37 return 1; 37 return 1;
38} 38}
@@ -92,12 +92,10 @@ __uml_setup("hostfs=", hostfs_args,
92 92
93static char *__dentry_name(struct dentry *dentry, char *name) 93static char *__dentry_name(struct dentry *dentry, char *name)
94{ 94{
95 char *p = __dentry_path(dentry, name, PATH_MAX); 95 char *p = dentry_path_raw(dentry, name, PATH_MAX);
96 char *root; 96 char *root;
97 size_t len; 97 size_t len;
98 98
99 spin_unlock(&dcache_lock);
100
101 root = dentry->d_sb->s_fs_info; 99 root = dentry->d_sb->s_fs_info;
102 len = strlen(root); 100 len = strlen(root);
103 if (IS_ERR(p)) { 101 if (IS_ERR(p)) {
@@ -123,25 +121,23 @@ static char *dentry_name(struct dentry *dentry)
123 if (!name) 121 if (!name)
124 return NULL; 122 return NULL;
125 123
126 spin_lock(&dcache_lock);
127 return __dentry_name(dentry, name); /* will unlock */ 124 return __dentry_name(dentry, name); /* will unlock */
128} 125}
129 126
130static char *inode_name(struct inode *ino) 127static char *inode_name(struct inode *ino)
131{ 128{
132 struct dentry *dentry; 129 struct dentry *dentry;
133 char *name = __getname(); 130 char *name;
134 if (!name)
135 return NULL;
136 131
137 spin_lock(&dcache_lock); 132 dentry = d_find_alias(ino);
138 if (list_empty(&ino->i_dentry)) { 133 if (!dentry)
139 spin_unlock(&dcache_lock);
140 __putname(name);
141 return NULL; 134 return NULL;
142 } 135
143 dentry = list_first_entry(&ino->i_dentry, struct dentry, d_alias); 136 name = dentry_name(dentry);
144 return __dentry_name(dentry, name); /* will unlock */ 137
138 dput(dentry);
139
140 return name;
145} 141}
146 142
147static char *follow_link(char *link) 143static char *follow_link(char *link)
@@ -251,11 +247,18 @@ static void hostfs_evict_inode(struct inode *inode)
251 } 247 }
252} 248}
253 249
254static void hostfs_destroy_inode(struct inode *inode) 250static void hostfs_i_callback(struct rcu_head *head)
255{ 251{
252 struct inode *inode = container_of(head, struct inode, i_rcu);
253 INIT_LIST_HEAD(&inode->i_dentry);
256 kfree(HOSTFS_I(inode)); 254 kfree(HOSTFS_I(inode));
257} 255}
258 256
257static void hostfs_destroy_inode(struct inode *inode)
258{
259 call_rcu(&inode->i_rcu, hostfs_i_callback);
260}
261
259static int hostfs_show_options(struct seq_file *seq, struct vfsmount *vfs) 262static int hostfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
260{ 263{
261 const char *root_path = vfs->mnt_sb->s_fs_info; 264 const char *root_path = vfs->mnt_sb->s_fs_info;
@@ -609,7 +612,7 @@ struct dentry *hostfs_lookup(struct inode *ino, struct dentry *dentry,
609 goto out_put; 612 goto out_put;
610 613
611 d_add(dentry, inode); 614 d_add(dentry, inode);
612 dentry->d_op = &hostfs_dentry_ops; 615 d_set_d_op(dentry, &hostfs_dentry_ops);
613 return NULL; 616 return NULL;
614 617
615 out_put: 618 out_put:
@@ -746,11 +749,14 @@ int hostfs_rename(struct inode *from_ino, struct dentry *from,
746 return err; 749 return err;
747} 750}
748 751
749int hostfs_permission(struct inode *ino, int desired) 752int hostfs_permission(struct inode *ino, int desired, unsigned int flags)
750{ 753{
751 char *name; 754 char *name;
752 int r = 0, w = 0, x = 0, err; 755 int r = 0, w = 0, x = 0, err;
753 756
757 if (flags & IPERM_FLAG_RCU)
758 return -ECHILD;
759
754 if (desired & MAY_READ) r = 1; 760 if (desired & MAY_READ) r = 1;
755 if (desired & MAY_WRITE) w = 1; 761 if (desired & MAY_WRITE) w = 1;
756 if (desired & MAY_EXEC) x = 1; 762 if (desired & MAY_EXEC) x = 1;
@@ -765,7 +771,7 @@ int hostfs_permission(struct inode *ino, int desired)
765 err = access_file(name, r, w, x); 771 err = access_file(name, r, w, x);
766 __putname(name); 772 __putname(name);
767 if (!err) 773 if (!err)
768 err = generic_permission(ino, desired, NULL); 774 err = generic_permission(ino, desired, flags, NULL);
769 return err; 775 return err;
770} 776}
771 777
diff --git a/fs/hpfs/dentry.c b/fs/hpfs/dentry.c
index 67d9d36b3d5f..32c13a94e1e9 100644
--- a/fs/hpfs/dentry.c
+++ b/fs/hpfs/dentry.c
@@ -12,7 +12,8 @@
12 * Note: the dentry argument is the parent dentry. 12 * Note: the dentry argument is the parent dentry.
13 */ 13 */
14 14
15static int hpfs_hash_dentry(struct dentry *dentry, struct qstr *qstr) 15static int hpfs_hash_dentry(const struct dentry *dentry, const struct inode *inode,
16 struct qstr *qstr)
16{ 17{
17 unsigned long hash; 18 unsigned long hash;
18 int i; 19 int i;
@@ -34,19 +35,25 @@ static int hpfs_hash_dentry(struct dentry *dentry, struct qstr *qstr)
34 return 0; 35 return 0;
35} 36}
36 37
37static int hpfs_compare_dentry(struct dentry *dentry, struct qstr *a, struct qstr *b) 38static int hpfs_compare_dentry(const struct dentry *parent,
39 const struct inode *pinode,
40 const struct dentry *dentry, const struct inode *inode,
41 unsigned int len, const char *str, const struct qstr *name)
38{ 42{
39 unsigned al=a->len; 43 unsigned al = len;
40 unsigned bl=b->len; 44 unsigned bl = name->len;
41 hpfs_adjust_length(a->name, &al); 45
46 hpfs_adjust_length(str, &al);
42 /*hpfs_adjust_length(b->name, &bl);*/ 47 /*hpfs_adjust_length(b->name, &bl);*/
43 /* 'a' is the qstr of an already existing dentry, so the name 48
44 * must be valid. 'b' must be validated first. 49 /*
50 * 'str' is the nane of an already existing dentry, so the name
51 * must be valid. 'name' must be validated first.
45 */ 52 */
46 53
47 if (hpfs_chk_name(b->name, &bl)) 54 if (hpfs_chk_name(name->name, &bl))
48 return 1; 55 return 1;
49 if (hpfs_compare_names(dentry->d_sb, a->name, al, b->name, bl, 0)) 56 if (hpfs_compare_names(parent->d_sb, str, al, name->name, bl, 0))
50 return 1; 57 return 1;
51 return 0; 58 return 0;
52} 59}
@@ -58,5 +65,5 @@ static const struct dentry_operations hpfs_dentry_operations = {
58 65
59void hpfs_set_dentry_operations(struct dentry *dentry) 66void hpfs_set_dentry_operations(struct dentry *dentry)
60{ 67{
61 dentry->d_op = &hpfs_dentry_operations; 68 d_set_d_op(dentry, &hpfs_dentry_operations);
62} 69}
diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
index 11c2b4080f65..f4ad9e31ddc4 100644
--- a/fs/hpfs/namei.c
+++ b/fs/hpfs/namei.c
@@ -419,7 +419,7 @@ again:
419 unlock_kernel(); 419 unlock_kernel();
420 return -ENOSPC; 420 return -ENOSPC;
421 } 421 }
422 if (generic_permission(inode, MAY_WRITE, NULL) || 422 if (generic_permission(inode, MAY_WRITE, 0, NULL) ||
423 !S_ISREG(inode->i_mode) || 423 !S_ISREG(inode->i_mode) ||
424 get_write_access(inode)) { 424 get_write_access(inode)) {
425 d_rehash(dentry); 425 d_rehash(dentry);
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index 6c5f01597c3a..49935ba78db8 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -177,11 +177,18 @@ static struct inode *hpfs_alloc_inode(struct super_block *sb)
177 return &ei->vfs_inode; 177 return &ei->vfs_inode;
178} 178}
179 179
180static void hpfs_destroy_inode(struct inode *inode) 180static void hpfs_i_callback(struct rcu_head *head)
181{ 181{
182 struct inode *inode = container_of(head, struct inode, i_rcu);
183 INIT_LIST_HEAD(&inode->i_dentry);
182 kmem_cache_free(hpfs_inode_cachep, hpfs_i(inode)); 184 kmem_cache_free(hpfs_inode_cachep, hpfs_i(inode));
183} 185}
184 186
187static void hpfs_destroy_inode(struct inode *inode)
188{
189 call_rcu(&inode->i_rcu, hpfs_i_callback);
190}
191
185static void init_once(void *foo) 192static void init_once(void *foo)
186{ 193{
187 struct hpfs_inode_info *ei = (struct hpfs_inode_info *) foo; 194 struct hpfs_inode_info *ei = (struct hpfs_inode_info *) foo;
diff --git a/fs/hppfs/hppfs.c b/fs/hppfs/hppfs.c
index f702b5f713fc..87ed48e0343d 100644
--- a/fs/hppfs/hppfs.c
+++ b/fs/hppfs/hppfs.c
@@ -632,11 +632,18 @@ void hppfs_evict_inode(struct inode *ino)
632 mntput(ino->i_sb->s_fs_info); 632 mntput(ino->i_sb->s_fs_info);
633} 633}
634 634
635static void hppfs_destroy_inode(struct inode *inode) 635static void hppfs_i_callback(struct rcu_head *head)
636{ 636{
637 struct inode *inode = container_of(head, struct inode, i_rcu);
638 INIT_LIST_HEAD(&inode->i_dentry);
637 kfree(HPPFS_I(inode)); 639 kfree(HPPFS_I(inode));
638} 640}
639 641
642static void hppfs_destroy_inode(struct inode *inode)
643{
644 call_rcu(&inode->i_rcu, hppfs_i_callback);
645}
646
640static const struct super_operations hppfs_sbops = { 647static const struct super_operations hppfs_sbops = {
641 .alloc_inode = hppfs_alloc_inode, 648 .alloc_inode = hppfs_alloc_inode,
642 .destroy_inode = hppfs_destroy_inode, 649 .destroy_inode = hppfs_destroy_inode,
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index a5fe68189eed..9885082b470f 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -663,11 +663,18 @@ static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
663 return &p->vfs_inode; 663 return &p->vfs_inode;
664} 664}
665 665
666static void hugetlbfs_i_callback(struct rcu_head *head)
667{
668 struct inode *inode = container_of(head, struct inode, i_rcu);
669 INIT_LIST_HEAD(&inode->i_dentry);
670 kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
671}
672
666static void hugetlbfs_destroy_inode(struct inode *inode) 673static void hugetlbfs_destroy_inode(struct inode *inode)
667{ 674{
668 hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb)); 675 hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
669 mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy); 676 mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
670 kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode)); 677 call_rcu(&inode->i_rcu, hugetlbfs_i_callback);
671} 678}
672 679
673static const struct address_space_operations hugetlbfs_aops = { 680static const struct address_space_operations hugetlbfs_aops = {
diff --git a/fs/inode.c b/fs/inode.c
index ae2727ab0c3a..da85e56378f3 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -102,26 +102,29 @@ static DECLARE_RWSEM(iprune_sem);
102 */ 102 */
103struct inodes_stat_t inodes_stat; 103struct inodes_stat_t inodes_stat;
104 104
105static struct percpu_counter nr_inodes __cacheline_aligned_in_smp; 105static DEFINE_PER_CPU(unsigned int, nr_inodes);
106static struct percpu_counter nr_inodes_unused __cacheline_aligned_in_smp;
107 106
108static struct kmem_cache *inode_cachep __read_mostly; 107static struct kmem_cache *inode_cachep __read_mostly;
109 108
110static inline int get_nr_inodes(void) 109static int get_nr_inodes(void)
111{ 110{
112 return percpu_counter_sum_positive(&nr_inodes); 111 int i;
112 int sum = 0;
113 for_each_possible_cpu(i)
114 sum += per_cpu(nr_inodes, i);
115 return sum < 0 ? 0 : sum;
113} 116}
114 117
115static inline int get_nr_inodes_unused(void) 118static inline int get_nr_inodes_unused(void)
116{ 119{
117 return percpu_counter_sum_positive(&nr_inodes_unused); 120 return inodes_stat.nr_unused;
118} 121}
119 122
120int get_nr_dirty_inodes(void) 123int get_nr_dirty_inodes(void)
121{ 124{
125 /* not actually dirty inodes, but a wild approximation */
122 int nr_dirty = get_nr_inodes() - get_nr_inodes_unused(); 126 int nr_dirty = get_nr_inodes() - get_nr_inodes_unused();
123 return nr_dirty > 0 ? nr_dirty : 0; 127 return nr_dirty > 0 ? nr_dirty : 0;
124
125} 128}
126 129
127/* 130/*
@@ -132,7 +135,6 @@ int proc_nr_inodes(ctl_table *table, int write,
132 void __user *buffer, size_t *lenp, loff_t *ppos) 135 void __user *buffer, size_t *lenp, loff_t *ppos)
133{ 136{
134 inodes_stat.nr_inodes = get_nr_inodes(); 137 inodes_stat.nr_inodes = get_nr_inodes();
135 inodes_stat.nr_unused = get_nr_inodes_unused();
136 return proc_dointvec(table, write, buffer, lenp, ppos); 138 return proc_dointvec(table, write, buffer, lenp, ppos);
137} 139}
138#endif 140#endif
@@ -224,7 +226,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
224 inode->i_fsnotify_mask = 0; 226 inode->i_fsnotify_mask = 0;
225#endif 227#endif
226 228
227 percpu_counter_inc(&nr_inodes); 229 this_cpu_inc(nr_inodes);
228 230
229 return 0; 231 return 0;
230out: 232out:
@@ -255,6 +257,12 @@ static struct inode *alloc_inode(struct super_block *sb)
255 return inode; 257 return inode;
256} 258}
257 259
260void free_inode_nonrcu(struct inode *inode)
261{
262 kmem_cache_free(inode_cachep, inode);
263}
264EXPORT_SYMBOL(free_inode_nonrcu);
265
258void __destroy_inode(struct inode *inode) 266void __destroy_inode(struct inode *inode)
259{ 267{
260 BUG_ON(inode_has_buffers(inode)); 268 BUG_ON(inode_has_buffers(inode));
@@ -266,10 +274,17 @@ void __destroy_inode(struct inode *inode)
266 if (inode->i_default_acl && inode->i_default_acl != ACL_NOT_CACHED) 274 if (inode->i_default_acl && inode->i_default_acl != ACL_NOT_CACHED)
267 posix_acl_release(inode->i_default_acl); 275 posix_acl_release(inode->i_default_acl);
268#endif 276#endif
269 percpu_counter_dec(&nr_inodes); 277 this_cpu_dec(nr_inodes);
270} 278}
271EXPORT_SYMBOL(__destroy_inode); 279EXPORT_SYMBOL(__destroy_inode);
272 280
281static void i_callback(struct rcu_head *head)
282{
283 struct inode *inode = container_of(head, struct inode, i_rcu);
284 INIT_LIST_HEAD(&inode->i_dentry);
285 kmem_cache_free(inode_cachep, inode);
286}
287
273static void destroy_inode(struct inode *inode) 288static void destroy_inode(struct inode *inode)
274{ 289{
275 BUG_ON(!list_empty(&inode->i_lru)); 290 BUG_ON(!list_empty(&inode->i_lru));
@@ -277,7 +292,7 @@ static void destroy_inode(struct inode *inode)
277 if (inode->i_sb->s_op->destroy_inode) 292 if (inode->i_sb->s_op->destroy_inode)
278 inode->i_sb->s_op->destroy_inode(inode); 293 inode->i_sb->s_op->destroy_inode(inode);
279 else 294 else
280 kmem_cache_free(inode_cachep, (inode)); 295 call_rcu(&inode->i_rcu, i_callback);
281} 296}
282 297
283/* 298/*
@@ -335,7 +350,7 @@ static void inode_lru_list_add(struct inode *inode)
335{ 350{
336 if (list_empty(&inode->i_lru)) { 351 if (list_empty(&inode->i_lru)) {
337 list_add(&inode->i_lru, &inode_lru); 352 list_add(&inode->i_lru, &inode_lru);
338 percpu_counter_inc(&nr_inodes_unused); 353 inodes_stat.nr_unused++;
339 } 354 }
340} 355}
341 356
@@ -343,7 +358,7 @@ static void inode_lru_list_del(struct inode *inode)
343{ 358{
344 if (!list_empty(&inode->i_lru)) { 359 if (!list_empty(&inode->i_lru)) {
345 list_del_init(&inode->i_lru); 360 list_del_init(&inode->i_lru);
346 percpu_counter_dec(&nr_inodes_unused); 361 inodes_stat.nr_unused--;
347 } 362 }
348} 363}
349 364
@@ -430,6 +445,7 @@ void end_writeback(struct inode *inode)
430 BUG_ON(!(inode->i_state & I_FREEING)); 445 BUG_ON(!(inode->i_state & I_FREEING));
431 BUG_ON(inode->i_state & I_CLEAR); 446 BUG_ON(inode->i_state & I_CLEAR);
432 inode_sync_wait(inode); 447 inode_sync_wait(inode);
448 /* don't need i_lock here, no concurrent mods to i_state */
433 inode->i_state = I_FREEING | I_CLEAR; 449 inode->i_state = I_FREEING | I_CLEAR;
434} 450}
435EXPORT_SYMBOL(end_writeback); 451EXPORT_SYMBOL(end_writeback);
@@ -513,7 +529,7 @@ void evict_inodes(struct super_block *sb)
513 list_move(&inode->i_lru, &dispose); 529 list_move(&inode->i_lru, &dispose);
514 list_del_init(&inode->i_wb_list); 530 list_del_init(&inode->i_wb_list);
515 if (!(inode->i_state & (I_DIRTY | I_SYNC))) 531 if (!(inode->i_state & (I_DIRTY | I_SYNC)))
516 percpu_counter_dec(&nr_inodes_unused); 532 inodes_stat.nr_unused--;
517 } 533 }
518 spin_unlock(&inode_lock); 534 spin_unlock(&inode_lock);
519 535
@@ -554,7 +570,7 @@ int invalidate_inodes(struct super_block *sb)
554 list_move(&inode->i_lru, &dispose); 570 list_move(&inode->i_lru, &dispose);
555 list_del_init(&inode->i_wb_list); 571 list_del_init(&inode->i_wb_list);
556 if (!(inode->i_state & (I_DIRTY | I_SYNC))) 572 if (!(inode->i_state & (I_DIRTY | I_SYNC)))
557 percpu_counter_dec(&nr_inodes_unused); 573 inodes_stat.nr_unused--;
558 } 574 }
559 spin_unlock(&inode_lock); 575 spin_unlock(&inode_lock);
560 576
@@ -616,7 +632,7 @@ static void prune_icache(int nr_to_scan)
616 if (atomic_read(&inode->i_count) || 632 if (atomic_read(&inode->i_count) ||
617 (inode->i_state & ~I_REFERENCED)) { 633 (inode->i_state & ~I_REFERENCED)) {
618 list_del_init(&inode->i_lru); 634 list_del_init(&inode->i_lru);
619 percpu_counter_dec(&nr_inodes_unused); 635 inodes_stat.nr_unused--;
620 continue; 636 continue;
621 } 637 }
622 638
@@ -650,7 +666,7 @@ static void prune_icache(int nr_to_scan)
650 */ 666 */
651 list_move(&inode->i_lru, &freeable); 667 list_move(&inode->i_lru, &freeable);
652 list_del_init(&inode->i_wb_list); 668 list_del_init(&inode->i_wb_list);
653 percpu_counter_dec(&nr_inodes_unused); 669 inodes_stat.nr_unused--;
654 } 670 }
655 if (current_is_kswapd()) 671 if (current_is_kswapd())
656 __count_vm_events(KSWAPD_INODESTEAL, reap); 672 __count_vm_events(KSWAPD_INODESTEAL, reap);
@@ -1648,8 +1664,6 @@ void __init inode_init(void)
1648 SLAB_MEM_SPREAD), 1664 SLAB_MEM_SPREAD),
1649 init_once); 1665 init_once);
1650 register_shrinker(&icache_shrinker); 1666 register_shrinker(&icache_shrinker);
1651 percpu_counter_init(&nr_inodes, 0);
1652 percpu_counter_init(&nr_inodes_unused, 0);
1653 1667
1654 /* Hash may have been set up in inode_init_early */ 1668 /* Hash may have been set up in inode_init_early */
1655 if (!hashdist) 1669 if (!hashdist)
diff --git a/fs/internal.h b/fs/internal.h
index e43b9a4dbf4e..9687c2ee2735 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -63,6 +63,7 @@ extern int copy_mount_string(const void __user *, char **);
63 63
64extern void free_vfsmnt(struct vfsmount *); 64extern void free_vfsmnt(struct vfsmount *);
65extern struct vfsmount *alloc_vfsmnt(const char *); 65extern struct vfsmount *alloc_vfsmnt(const char *);
66extern unsigned int mnt_get_count(struct vfsmount *mnt);
66extern struct vfsmount *__lookup_mnt(struct vfsmount *, struct dentry *, int); 67extern struct vfsmount *__lookup_mnt(struct vfsmount *, struct dentry *, int);
67extern void mnt_set_mountpoint(struct vfsmount *, struct dentry *, 68extern void mnt_set_mountpoint(struct vfsmount *, struct dentry *,
68 struct vfsmount *); 69 struct vfsmount *);
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index bfdeb82a53be..844a7903c72f 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -26,16 +26,32 @@
26 26
27#define BEQUIET 27#define BEQUIET
28 28
29static int isofs_hashi(struct dentry *parent, struct qstr *qstr); 29static int isofs_hashi(const struct dentry *parent, const struct inode *inode,
30static int isofs_hash(struct dentry *parent, struct qstr *qstr); 30 struct qstr *qstr);
31static int isofs_dentry_cmpi(struct dentry *dentry, struct qstr *a, struct qstr *b); 31static int isofs_hash(const struct dentry *parent, const struct inode *inode,
32static int isofs_dentry_cmp(struct dentry *dentry, struct qstr *a, struct qstr *b); 32 struct qstr *qstr);
33static int isofs_dentry_cmpi(const struct dentry *parent,
34 const struct inode *pinode,
35 const struct dentry *dentry, const struct inode *inode,
36 unsigned int len, const char *str, const struct qstr *name);
37static int isofs_dentry_cmp(const struct dentry *parent,
38 const struct inode *pinode,
39 const struct dentry *dentry, const struct inode *inode,
40 unsigned int len, const char *str, const struct qstr *name);
33 41
34#ifdef CONFIG_JOLIET 42#ifdef CONFIG_JOLIET
35static int isofs_hashi_ms(struct dentry *parent, struct qstr *qstr); 43static int isofs_hashi_ms(const struct dentry *parent, const struct inode *inode,
36static int isofs_hash_ms(struct dentry *parent, struct qstr *qstr); 44 struct qstr *qstr);
37static int isofs_dentry_cmpi_ms(struct dentry *dentry, struct qstr *a, struct qstr *b); 45static int isofs_hash_ms(const struct dentry *parent, const struct inode *inode,
38static int isofs_dentry_cmp_ms(struct dentry *dentry, struct qstr *a, struct qstr *b); 46 struct qstr *qstr);
47static int isofs_dentry_cmpi_ms(const struct dentry *parent,
48 const struct inode *pinode,
49 const struct dentry *dentry, const struct inode *inode,
50 unsigned int len, const char *str, const struct qstr *name);
51static int isofs_dentry_cmp_ms(const struct dentry *parent,
52 const struct inode *pinode,
53 const struct dentry *dentry, const struct inode *inode,
54 unsigned int len, const char *str, const struct qstr *name);
39#endif 55#endif
40 56
41static void isofs_put_super(struct super_block *sb) 57static void isofs_put_super(struct super_block *sb)
@@ -65,11 +81,18 @@ static struct inode *isofs_alloc_inode(struct super_block *sb)
65 return &ei->vfs_inode; 81 return &ei->vfs_inode;
66} 82}
67 83
68static void isofs_destroy_inode(struct inode *inode) 84static void isofs_i_callback(struct rcu_head *head)
69{ 85{
86 struct inode *inode = container_of(head, struct inode, i_rcu);
87 INIT_LIST_HEAD(&inode->i_dentry);
70 kmem_cache_free(isofs_inode_cachep, ISOFS_I(inode)); 88 kmem_cache_free(isofs_inode_cachep, ISOFS_I(inode));
71} 89}
72 90
91static void isofs_destroy_inode(struct inode *inode)
92{
93 call_rcu(&inode->i_rcu, isofs_i_callback);
94}
95
73static void init_once(void *foo) 96static void init_once(void *foo)
74{ 97{
75 struct iso_inode_info *ei = foo; 98 struct iso_inode_info *ei = foo;
@@ -160,7 +183,7 @@ struct iso9660_options{
160 * Compute the hash for the isofs name corresponding to the dentry. 183 * Compute the hash for the isofs name corresponding to the dentry.
161 */ 184 */
162static int 185static int
163isofs_hash_common(struct dentry *dentry, struct qstr *qstr, int ms) 186isofs_hash_common(const struct dentry *dentry, struct qstr *qstr, int ms)
164{ 187{
165 const char *name; 188 const char *name;
166 int len; 189 int len;
@@ -181,7 +204,7 @@ isofs_hash_common(struct dentry *dentry, struct qstr *qstr, int ms)
181 * Compute the hash for the isofs name corresponding to the dentry. 204 * Compute the hash for the isofs name corresponding to the dentry.
182 */ 205 */
183static int 206static int
184isofs_hashi_common(struct dentry *dentry, struct qstr *qstr, int ms) 207isofs_hashi_common(const struct dentry *dentry, struct qstr *qstr, int ms)
185{ 208{
186 const char *name; 209 const char *name;
187 int len; 210 int len;
@@ -206,100 +229,94 @@ isofs_hashi_common(struct dentry *dentry, struct qstr *qstr, int ms)
206} 229}
207 230
208/* 231/*
209 * Case insensitive compare of two isofs names. 232 * Compare of two isofs names.
210 */
211static int isofs_dentry_cmpi_common(struct dentry *dentry, struct qstr *a,
212 struct qstr *b, int ms)
213{
214 int alen, blen;
215
216 /* A filename cannot end in '.' or we treat it like it has none */
217 alen = a->len;
218 blen = b->len;
219 if (ms) {
220 while (alen && a->name[alen-1] == '.')
221 alen--;
222 while (blen && b->name[blen-1] == '.')
223 blen--;
224 }
225 if (alen == blen) {
226 if (strnicmp(a->name, b->name, alen) == 0)
227 return 0;
228 }
229 return 1;
230}
231
232/*
233 * Case sensitive compare of two isofs names.
234 */ 233 */
235static int isofs_dentry_cmp_common(struct dentry *dentry, struct qstr *a, 234static int isofs_dentry_cmp_common(
236 struct qstr *b, int ms) 235 unsigned int len, const char *str,
236 const struct qstr *name, int ms, int ci)
237{ 237{
238 int alen, blen; 238 int alen, blen;
239 239
240 /* A filename cannot end in '.' or we treat it like it has none */ 240 /* A filename cannot end in '.' or we treat it like it has none */
241 alen = a->len; 241 alen = name->len;
242 blen = b->len; 242 blen = len;
243 if (ms) { 243 if (ms) {
244 while (alen && a->name[alen-1] == '.') 244 while (alen && name->name[alen-1] == '.')
245 alen--; 245 alen--;
246 while (blen && b->name[blen-1] == '.') 246 while (blen && str[blen-1] == '.')
247 blen--; 247 blen--;
248 } 248 }
249 if (alen == blen) { 249 if (alen == blen) {
250 if (strncmp(a->name, b->name, alen) == 0) 250 if (ci) {
251 return 0; 251 if (strnicmp(name->name, str, alen) == 0)
252 return 0;
253 } else {
254 if (strncmp(name->name, str, alen) == 0)
255 return 0;
256 }
252 } 257 }
253 return 1; 258 return 1;
254} 259}
255 260
256static int 261static int
257isofs_hash(struct dentry *dentry, struct qstr *qstr) 262isofs_hash(const struct dentry *dentry, const struct inode *inode,
263 struct qstr *qstr)
258{ 264{
259 return isofs_hash_common(dentry, qstr, 0); 265 return isofs_hash_common(dentry, qstr, 0);
260} 266}
261 267
262static int 268static int
263isofs_hashi(struct dentry *dentry, struct qstr *qstr) 269isofs_hashi(const struct dentry *dentry, const struct inode *inode,
270 struct qstr *qstr)
264{ 271{
265 return isofs_hashi_common(dentry, qstr, 0); 272 return isofs_hashi_common(dentry, qstr, 0);
266} 273}
267 274
268static int 275static int
269isofs_dentry_cmp(struct dentry *dentry,struct qstr *a,struct qstr *b) 276isofs_dentry_cmp(const struct dentry *parent, const struct inode *pinode,
277 const struct dentry *dentry, const struct inode *inode,
278 unsigned int len, const char *str, const struct qstr *name)
270{ 279{
271 return isofs_dentry_cmp_common(dentry, a, b, 0); 280 return isofs_dentry_cmp_common(len, str, name, 0, 0);
272} 281}
273 282
274static int 283static int
275isofs_dentry_cmpi(struct dentry *dentry,struct qstr *a,struct qstr *b) 284isofs_dentry_cmpi(const struct dentry *parent, const struct inode *pinode,
285 const struct dentry *dentry, const struct inode *inode,
286 unsigned int len, const char *str, const struct qstr *name)
276{ 287{
277 return isofs_dentry_cmpi_common(dentry, a, b, 0); 288 return isofs_dentry_cmp_common(len, str, name, 0, 1);
278} 289}
279 290
280#ifdef CONFIG_JOLIET 291#ifdef CONFIG_JOLIET
281static int 292static int
282isofs_hash_ms(struct dentry *dentry, struct qstr *qstr) 293isofs_hash_ms(const struct dentry *dentry, const struct inode *inode,
294 struct qstr *qstr)
283{ 295{
284 return isofs_hash_common(dentry, qstr, 1); 296 return isofs_hash_common(dentry, qstr, 1);
285} 297}
286 298
287static int 299static int
288isofs_hashi_ms(struct dentry *dentry, struct qstr *qstr) 300isofs_hashi_ms(const struct dentry *dentry, const struct inode *inode,
301 struct qstr *qstr)
289{ 302{
290 return isofs_hashi_common(dentry, qstr, 1); 303 return isofs_hashi_common(dentry, qstr, 1);
291} 304}
292 305
293static int 306static int
294isofs_dentry_cmp_ms(struct dentry *dentry,struct qstr *a,struct qstr *b) 307isofs_dentry_cmp_ms(const struct dentry *parent, const struct inode *pinode,
308 const struct dentry *dentry, const struct inode *inode,
309 unsigned int len, const char *str, const struct qstr *name)
295{ 310{
296 return isofs_dentry_cmp_common(dentry, a, b, 1); 311 return isofs_dentry_cmp_common(len, str, name, 1, 0);
297} 312}
298 313
299static int 314static int
300isofs_dentry_cmpi_ms(struct dentry *dentry,struct qstr *a,struct qstr *b) 315isofs_dentry_cmpi_ms(const struct dentry *parent, const struct inode *pinode,
316 const struct dentry *dentry, const struct inode *inode,
317 unsigned int len, const char *str, const struct qstr *name)
301{ 318{
302 return isofs_dentry_cmpi_common(dentry, a, b, 1); 319 return isofs_dentry_cmp_common(len, str, name, 1, 1);
303} 320}
304#endif 321#endif
305 322
@@ -932,7 +949,7 @@ root_found:
932 table += 2; 949 table += 2;
933 if (opt.check == 'r') 950 if (opt.check == 'r')
934 table++; 951 table++;
935 s->s_root->d_op = &isofs_dentry_ops[table]; 952 d_set_d_op(s->s_root, &isofs_dentry_ops[table]);
936 953
937 kfree(opt.iocharset); 954 kfree(opt.iocharset);
938 955
diff --git a/fs/isofs/namei.c b/fs/isofs/namei.c
index 0d23abfd4280..679a849c3b27 100644
--- a/fs/isofs/namei.c
+++ b/fs/isofs/namei.c
@@ -37,7 +37,8 @@ isofs_cmp(struct dentry *dentry, const char *compare, int dlen)
37 37
38 qstr.name = compare; 38 qstr.name = compare;
39 qstr.len = dlen; 39 qstr.len = dlen;
40 return dentry->d_op->d_compare(dentry, &dentry->d_name, &qstr); 40 return dentry->d_op->d_compare(NULL, NULL, NULL, NULL,
41 dentry->d_name.len, dentry->d_name.name, &qstr);
41} 42}
42 43
43/* 44/*
@@ -171,7 +172,7 @@ struct dentry *isofs_lookup(struct inode *dir, struct dentry *dentry, struct nam
171 struct inode *inode; 172 struct inode *inode;
172 struct page *page; 173 struct page *page;
173 174
174 dentry->d_op = dir->i_sb->s_root->d_op; 175 d_set_d_op(dentry, dir->i_sb->s_root->d_op);
175 176
176 page = alloc_page(GFP_USER); 177 page = alloc_page(GFP_USER);
177 if (!page) 178 if (!page)
diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c
index 54a92fd02bbd..95b79672150a 100644
--- a/fs/jffs2/acl.c
+++ b/fs/jffs2/acl.c
@@ -259,11 +259,14 @@ static int jffs2_set_acl(struct inode *inode, int type, struct posix_acl *acl)
259 return rc; 259 return rc;
260} 260}
261 261
262int jffs2_check_acl(struct inode *inode, int mask) 262int jffs2_check_acl(struct inode *inode, int mask, unsigned int flags)
263{ 263{
264 struct posix_acl *acl; 264 struct posix_acl *acl;
265 int rc; 265 int rc;
266 266
267 if (flags & IPERM_FLAG_RCU)
268 return -ECHILD;
269
267 acl = jffs2_get_acl(inode, ACL_TYPE_ACCESS); 270 acl = jffs2_get_acl(inode, ACL_TYPE_ACCESS);
268 if (IS_ERR(acl)) 271 if (IS_ERR(acl))
269 return PTR_ERR(acl); 272 return PTR_ERR(acl);
diff --git a/fs/jffs2/acl.h b/fs/jffs2/acl.h
index 5e42de8d9541..3119f59253d3 100644
--- a/fs/jffs2/acl.h
+++ b/fs/jffs2/acl.h
@@ -26,7 +26,7 @@ struct jffs2_acl_header {
26 26
27#ifdef CONFIG_JFFS2_FS_POSIX_ACL 27#ifdef CONFIG_JFFS2_FS_POSIX_ACL
28 28
29extern int jffs2_check_acl(struct inode *, int); 29extern int jffs2_check_acl(struct inode *, int, unsigned int);
30extern int jffs2_acl_chmod(struct inode *); 30extern int jffs2_acl_chmod(struct inode *);
31extern int jffs2_init_acl_pre(struct inode *, struct inode *, int *); 31extern int jffs2_init_acl_pre(struct inode *, struct inode *, int *);
32extern int jffs2_init_acl_post(struct inode *); 32extern int jffs2_init_acl_post(struct inode *);
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index c86041b866a4..853b8e300084 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -40,11 +40,18 @@ static struct inode *jffs2_alloc_inode(struct super_block *sb)
40 return &f->vfs_inode; 40 return &f->vfs_inode;
41} 41}
42 42
43static void jffs2_destroy_inode(struct inode *inode) 43static void jffs2_i_callback(struct rcu_head *head)
44{ 44{
45 struct inode *inode = container_of(head, struct inode, i_rcu);
46 INIT_LIST_HEAD(&inode->i_dentry);
45 kmem_cache_free(jffs2_inode_cachep, JFFS2_INODE_INFO(inode)); 47 kmem_cache_free(jffs2_inode_cachep, JFFS2_INODE_INFO(inode));
46} 48}
47 49
50static void jffs2_destroy_inode(struct inode *inode)
51{
52 call_rcu(&inode->i_rcu, jffs2_i_callback);
53}
54
48static void jffs2_i_init_once(void *foo) 55static void jffs2_i_init_once(void *foo)
49{ 56{
50 struct jffs2_inode_info *f = foo; 57 struct jffs2_inode_info *f = foo;
diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c
index 1057a4998e4e..e5de9422fa32 100644
--- a/fs/jfs/acl.c
+++ b/fs/jfs/acl.c
@@ -114,10 +114,14 @@ out:
114 return rc; 114 return rc;
115} 115}
116 116
117int jfs_check_acl(struct inode *inode, int mask) 117int jfs_check_acl(struct inode *inode, int mask, unsigned int flags)
118{ 118{
119 struct posix_acl *acl = jfs_get_acl(inode, ACL_TYPE_ACCESS); 119 struct posix_acl *acl;
120
121 if (flags & IPERM_FLAG_RCU)
122 return -ECHILD;
120 123
124 acl = jfs_get_acl(inode, ACL_TYPE_ACCESS);
121 if (IS_ERR(acl)) 125 if (IS_ERR(acl))
122 return PTR_ERR(acl); 126 return PTR_ERR(acl);
123 if (acl) { 127 if (acl) {
diff --git a/fs/jfs/jfs_acl.h b/fs/jfs/jfs_acl.h
index 54e07559878d..f9285c4900fa 100644
--- a/fs/jfs/jfs_acl.h
+++ b/fs/jfs/jfs_acl.h
@@ -20,7 +20,7 @@
20 20
21#ifdef CONFIG_JFS_POSIX_ACL 21#ifdef CONFIG_JFS_POSIX_ACL
22 22
23int jfs_check_acl(struct inode *, int); 23int jfs_check_acl(struct inode *, int, unsigned int flags);
24int jfs_init_acl(tid_t, struct inode *, struct inode *); 24int jfs_init_acl(tid_t, struct inode *, struct inode *);
25int jfs_acl_chmod(struct inode *inode); 25int jfs_acl_chmod(struct inode *inode);
26 26
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index 231ca4af9bce..4414e3a42264 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -18,6 +18,7 @@
18 */ 18 */
19 19
20#include <linux/fs.h> 20#include <linux/fs.h>
21#include <linux/namei.h>
21#include <linux/ctype.h> 22#include <linux/ctype.h>
22#include <linux/quotaops.h> 23#include <linux/quotaops.h>
23#include <linux/exportfs.h> 24#include <linux/exportfs.h>
@@ -1465,7 +1466,7 @@ static struct dentry *jfs_lookup(struct inode *dip, struct dentry *dentry, struc
1465 jfs_info("jfs_lookup: name = %s", name); 1466 jfs_info("jfs_lookup: name = %s", name);
1466 1467
1467 if (JFS_SBI(dip->i_sb)->mntflag & JFS_OS2) 1468 if (JFS_SBI(dip->i_sb)->mntflag & JFS_OS2)
1468 dentry->d_op = &jfs_ci_dentry_operations; 1469 d_set_d_op(dentry, &jfs_ci_dentry_operations);
1469 1470
1470 if ((name[0] == '.') && (len == 1)) 1471 if ((name[0] == '.') && (len == 1))
1471 inum = dip->i_ino; 1472 inum = dip->i_ino;
@@ -1494,7 +1495,7 @@ static struct dentry *jfs_lookup(struct inode *dip, struct dentry *dentry, struc
1494 dentry = d_splice_alias(ip, dentry); 1495 dentry = d_splice_alias(ip, dentry);
1495 1496
1496 if (dentry && (JFS_SBI(dip->i_sb)->mntflag & JFS_OS2)) 1497 if (dentry && (JFS_SBI(dip->i_sb)->mntflag & JFS_OS2))
1497 dentry->d_op = &jfs_ci_dentry_operations; 1498 d_set_d_op(dentry, &jfs_ci_dentry_operations);
1498 1499
1499 return dentry; 1500 return dentry;
1500} 1501}
@@ -1573,7 +1574,8 @@ const struct file_operations jfs_dir_operations = {
1573 .llseek = generic_file_llseek, 1574 .llseek = generic_file_llseek,
1574}; 1575};
1575 1576
1576static int jfs_ci_hash(struct dentry *dir, struct qstr *this) 1577static int jfs_ci_hash(const struct dentry *dir, const struct inode *inode,
1578 struct qstr *this)
1577{ 1579{
1578 unsigned long hash; 1580 unsigned long hash;
1579 int i; 1581 int i;
@@ -1586,32 +1588,63 @@ static int jfs_ci_hash(struct dentry *dir, struct qstr *this)
1586 return 0; 1588 return 0;
1587} 1589}
1588 1590
1589static int jfs_ci_compare(struct dentry *dir, struct qstr *a, struct qstr *b) 1591static int jfs_ci_compare(const struct dentry *parent,
1592 const struct inode *pinode,
1593 const struct dentry *dentry, const struct inode *inode,
1594 unsigned int len, const char *str, const struct qstr *name)
1590{ 1595{
1591 int i, result = 1; 1596 int i, result = 1;
1592 1597
1593 if (a->len != b->len) 1598 if (len != name->len)
1594 goto out; 1599 goto out;
1595 for (i=0; i < a->len; i++) { 1600 for (i=0; i < len; i++) {
1596 if (tolower(a->name[i]) != tolower(b->name[i])) 1601 if (tolower(str[i]) != tolower(name->name[i]))
1597 goto out; 1602 goto out;
1598 } 1603 }
1599 result = 0; 1604 result = 0;
1605out:
1606 return result;
1607}
1600 1608
1609static int jfs_ci_revalidate(struct dentry *dentry, struct nameidata *nd)
1610{
1611 if (nd->flags & LOOKUP_RCU)
1612 return -ECHILD;
1601 /* 1613 /*
1602 * We want creates to preserve case. A negative dentry, a, that 1614 * This is not negative dentry. Always valid.
1603 * has a different case than b may cause a new entry to be created 1615 *
1604 * with the wrong case. Since we can't tell if a comes from a negative 1616 * Note, rename() to existing directory entry will have ->d_inode,
1605 * dentry, we blindly replace it with b. This should be harmless if 1617 * and will use existing name which isn't specified name by user.
1606 * a is not a negative dentry. 1618 *
1619 * We may be able to drop this positive dentry here. But dropping
1620 * positive dentry isn't good idea. So it's unsupported like
1621 * rename("filename", "FILENAME") for now.
1607 */ 1622 */
1608 memcpy((unsigned char *)a->name, b->name, a->len); 1623 if (dentry->d_inode)
1609out: 1624 return 1;
1610 return result; 1625
1626 /*
1627 * This may be nfsd (or something), anyway, we can't see the
1628 * intent of this. So, since this can be for creation, drop it.
1629 */
1630 if (!nd)
1631 return 0;
1632
1633 /*
1634 * Drop the negative dentry, in order to make sure to use the
1635 * case sensitive name which is specified by user if this is
1636 * for creation.
1637 */
1638 if (!(nd->flags & (LOOKUP_CONTINUE | LOOKUP_PARENT))) {
1639 if (nd->flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET))
1640 return 0;
1641 }
1642 return 1;
1611} 1643}
1612 1644
1613const struct dentry_operations jfs_ci_dentry_operations = 1645const struct dentry_operations jfs_ci_dentry_operations =
1614{ 1646{
1615 .d_hash = jfs_ci_hash, 1647 .d_hash = jfs_ci_hash,
1616 .d_compare = jfs_ci_compare, 1648 .d_compare = jfs_ci_compare,
1649 .d_revalidate = jfs_ci_revalidate,
1617}; 1650};
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 0669fc1cc3bf..3150d766e0d4 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -115,6 +115,14 @@ static struct inode *jfs_alloc_inode(struct super_block *sb)
115 return &jfs_inode->vfs_inode; 115 return &jfs_inode->vfs_inode;
116} 116}
117 117
118static void jfs_i_callback(struct rcu_head *head)
119{
120 struct inode *inode = container_of(head, struct inode, i_rcu);
121 struct jfs_inode_info *ji = JFS_IP(inode);
122 INIT_LIST_HEAD(&inode->i_dentry);
123 kmem_cache_free(jfs_inode_cachep, ji);
124}
125
118static void jfs_destroy_inode(struct inode *inode) 126static void jfs_destroy_inode(struct inode *inode)
119{ 127{
120 struct jfs_inode_info *ji = JFS_IP(inode); 128 struct jfs_inode_info *ji = JFS_IP(inode);
@@ -128,7 +136,7 @@ static void jfs_destroy_inode(struct inode *inode)
128 ji->active_ag = -1; 136 ji->active_ag = -1;
129 } 137 }
130 spin_unlock_irq(&ji->ag_lock); 138 spin_unlock_irq(&ji->ag_lock);
131 kmem_cache_free(jfs_inode_cachep, ji); 139 call_rcu(&inode->i_rcu, jfs_i_callback);
132} 140}
133 141
134static int jfs_statfs(struct dentry *dentry, struct kstatfs *buf) 142static int jfs_statfs(struct dentry *dentry, struct kstatfs *buf)
@@ -517,7 +525,7 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent)
517 goto out_no_root; 525 goto out_no_root;
518 526
519 if (sbi->mntflag & JFS_OS2) 527 if (sbi->mntflag & JFS_OS2)
520 sb->s_root->d_op = &jfs_ci_dentry_operations; 528 d_set_d_op(sb->s_root, &jfs_ci_dentry_operations);
521 529
522 /* logical blocks are represented by 40 bits in pxd_t, etc. */ 530 /* logical blocks are represented by 40 bits in pxd_t, etc. */
523 sb->s_maxbytes = ((u64) sb->s_blocksize) << 40; 531 sb->s_maxbytes = ((u64) sb->s_blocksize) << 40;
diff --git a/fs/libfs.c b/fs/libfs.c
index a3accdf528ad..889311e3d06b 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -16,6 +16,11 @@
16 16
17#include <asm/uaccess.h> 17#include <asm/uaccess.h>
18 18
19static inline int simple_positive(struct dentry *dentry)
20{
21 return dentry->d_inode && !d_unhashed(dentry);
22}
23
19int simple_getattr(struct vfsmount *mnt, struct dentry *dentry, 24int simple_getattr(struct vfsmount *mnt, struct dentry *dentry,
20 struct kstat *stat) 25 struct kstat *stat)
21{ 26{
@@ -37,7 +42,7 @@ int simple_statfs(struct dentry *dentry, struct kstatfs *buf)
37 * Retaining negative dentries for an in-memory filesystem just wastes 42 * Retaining negative dentries for an in-memory filesystem just wastes
38 * memory and lookup time: arrange for them to be deleted immediately. 43 * memory and lookup time: arrange for them to be deleted immediately.
39 */ 44 */
40static int simple_delete_dentry(struct dentry *dentry) 45static int simple_delete_dentry(const struct dentry *dentry)
41{ 46{
42 return 1; 47 return 1;
43} 48}
@@ -54,7 +59,7 @@ struct dentry *simple_lookup(struct inode *dir, struct dentry *dentry, struct na
54 59
55 if (dentry->d_name.len > NAME_MAX) 60 if (dentry->d_name.len > NAME_MAX)
56 return ERR_PTR(-ENAMETOOLONG); 61 return ERR_PTR(-ENAMETOOLONG);
57 dentry->d_op = &simple_dentry_operations; 62 d_set_d_op(dentry, &simple_dentry_operations);
58 d_add(dentry, NULL); 63 d_add(dentry, NULL);
59 return NULL; 64 return NULL;
60} 65}
@@ -76,7 +81,8 @@ int dcache_dir_close(struct inode *inode, struct file *file)
76 81
77loff_t dcache_dir_lseek(struct file *file, loff_t offset, int origin) 82loff_t dcache_dir_lseek(struct file *file, loff_t offset, int origin)
78{ 83{
79 mutex_lock(&file->f_path.dentry->d_inode->i_mutex); 84 struct dentry *dentry = file->f_path.dentry;
85 mutex_lock(&dentry->d_inode->i_mutex);
80 switch (origin) { 86 switch (origin) {
81 case 1: 87 case 1:
82 offset += file->f_pos; 88 offset += file->f_pos;
@@ -84,7 +90,7 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int origin)
84 if (offset >= 0) 90 if (offset >= 0)
85 break; 91 break;
86 default: 92 default:
87 mutex_unlock(&file->f_path.dentry->d_inode->i_mutex); 93 mutex_unlock(&dentry->d_inode->i_mutex);
88 return -EINVAL; 94 return -EINVAL;
89 } 95 }
90 if (offset != file->f_pos) { 96 if (offset != file->f_pos) {
@@ -94,21 +100,24 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int origin)
94 struct dentry *cursor = file->private_data; 100 struct dentry *cursor = file->private_data;
95 loff_t n = file->f_pos - 2; 101 loff_t n = file->f_pos - 2;
96 102
97 spin_lock(&dcache_lock); 103 spin_lock(&dentry->d_lock);
104 /* d_lock not required for cursor */
98 list_del(&cursor->d_u.d_child); 105 list_del(&cursor->d_u.d_child);
99 p = file->f_path.dentry->d_subdirs.next; 106 p = dentry->d_subdirs.next;
100 while (n && p != &file->f_path.dentry->d_subdirs) { 107 while (n && p != &dentry->d_subdirs) {
101 struct dentry *next; 108 struct dentry *next;
102 next = list_entry(p, struct dentry, d_u.d_child); 109 next = list_entry(p, struct dentry, d_u.d_child);
103 if (!d_unhashed(next) && next->d_inode) 110 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
111 if (simple_positive(next))
104 n--; 112 n--;
113 spin_unlock(&next->d_lock);
105 p = p->next; 114 p = p->next;
106 } 115 }
107 list_add_tail(&cursor->d_u.d_child, p); 116 list_add_tail(&cursor->d_u.d_child, p);
108 spin_unlock(&dcache_lock); 117 spin_unlock(&dentry->d_lock);
109 } 118 }
110 } 119 }
111 mutex_unlock(&file->f_path.dentry->d_inode->i_mutex); 120 mutex_unlock(&dentry->d_inode->i_mutex);
112 return offset; 121 return offset;
113} 122}
114 123
@@ -148,29 +157,35 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
148 i++; 157 i++;
149 /* fallthrough */ 158 /* fallthrough */
150 default: 159 default:
151 spin_lock(&dcache_lock); 160 spin_lock(&dentry->d_lock);
152 if (filp->f_pos == 2) 161 if (filp->f_pos == 2)
153 list_move(q, &dentry->d_subdirs); 162 list_move(q, &dentry->d_subdirs);
154 163
155 for (p=q->next; p != &dentry->d_subdirs; p=p->next) { 164 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
156 struct dentry *next; 165 struct dentry *next;
157 next = list_entry(p, struct dentry, d_u.d_child); 166 next = list_entry(p, struct dentry, d_u.d_child);
158 if (d_unhashed(next) || !next->d_inode) 167 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
168 if (!simple_positive(next)) {
169 spin_unlock(&next->d_lock);
159 continue; 170 continue;
171 }
160 172
161 spin_unlock(&dcache_lock); 173 spin_unlock(&next->d_lock);
174 spin_unlock(&dentry->d_lock);
162 if (filldir(dirent, next->d_name.name, 175 if (filldir(dirent, next->d_name.name,
163 next->d_name.len, filp->f_pos, 176 next->d_name.len, filp->f_pos,
164 next->d_inode->i_ino, 177 next->d_inode->i_ino,
165 dt_type(next->d_inode)) < 0) 178 dt_type(next->d_inode)) < 0)
166 return 0; 179 return 0;
167 spin_lock(&dcache_lock); 180 spin_lock(&dentry->d_lock);
181 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
168 /* next is still alive */ 182 /* next is still alive */
169 list_move(q, p); 183 list_move(q, p);
184 spin_unlock(&next->d_lock);
170 p = q; 185 p = q;
171 filp->f_pos++; 186 filp->f_pos++;
172 } 187 }
173 spin_unlock(&dcache_lock); 188 spin_unlock(&dentry->d_lock);
174 } 189 }
175 return 0; 190 return 0;
176} 191}
@@ -259,23 +274,23 @@ int simple_link(struct dentry *old_dentry, struct inode *dir, struct dentry *den
259 return 0; 274 return 0;
260} 275}
261 276
262static inline int simple_positive(struct dentry *dentry)
263{
264 return dentry->d_inode && !d_unhashed(dentry);
265}
266
267int simple_empty(struct dentry *dentry) 277int simple_empty(struct dentry *dentry)
268{ 278{
269 struct dentry *child; 279 struct dentry *child;
270 int ret = 0; 280 int ret = 0;
271 281
272 spin_lock(&dcache_lock); 282 spin_lock(&dentry->d_lock);
273 list_for_each_entry(child, &dentry->d_subdirs, d_u.d_child) 283 list_for_each_entry(child, &dentry->d_subdirs, d_u.d_child) {
274 if (simple_positive(child)) 284 spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
285 if (simple_positive(child)) {
286 spin_unlock(&child->d_lock);
275 goto out; 287 goto out;
288 }
289 spin_unlock(&child->d_lock);
290 }
276 ret = 1; 291 ret = 1;
277out: 292out:
278 spin_unlock(&dcache_lock); 293 spin_unlock(&dentry->d_lock);
279 return ret; 294 return ret;
280} 295}
281 296
diff --git a/fs/locks.c b/fs/locks.c
index 8729347bcd1a..08415b2a6d36 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1389,7 +1389,7 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
1389 if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0)) 1389 if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
1390 goto out; 1390 goto out;
1391 if ((arg == F_WRLCK) 1391 if ((arg == F_WRLCK)
1392 && ((atomic_read(&dentry->d_count) > 1) 1392 && ((dentry->d_count > 1)
1393 || (atomic_read(&inode->i_count) > 1))) 1393 || (atomic_read(&inode->i_count) > 1)))
1394 goto out; 1394 goto out;
1395 } 1395 }
diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c
index 409dfd65e9a1..f9ddf0c388c8 100644
--- a/fs/logfs/dir.c
+++ b/fs/logfs/dir.c
@@ -555,9 +555,11 @@ static int logfs_symlink(struct inode *dir, struct dentry *dentry,
555 return __logfs_create(dir, dentry, inode, target, destlen); 555 return __logfs_create(dir, dentry, inode, target, destlen);
556} 556}
557 557
558static int logfs_permission(struct inode *inode, int mask) 558static int logfs_permission(struct inode *inode, int mask, unsigned int flags)
559{ 559{
560 return generic_permission(inode, mask, NULL); 560 if (flags & IPERM_FLAG_RCU)
561 return -ECHILD;
562 return generic_permission(inode, mask, flags, NULL);
561} 563}
562 564
563static int logfs_link(struct dentry *old_dentry, struct inode *dir, 565static int logfs_link(struct dentry *old_dentry, struct inode *dir,
diff --git a/fs/logfs/inode.c b/fs/logfs/inode.c
index d8c71ece098f..03b8c240aeda 100644
--- a/fs/logfs/inode.c
+++ b/fs/logfs/inode.c
@@ -141,13 +141,20 @@ struct inode *logfs_safe_iget(struct super_block *sb, ino_t ino, int *is_cached)
141 return __logfs_iget(sb, ino); 141 return __logfs_iget(sb, ino);
142} 142}
143 143
144static void logfs_i_callback(struct rcu_head *head)
145{
146 struct inode *inode = container_of(head, struct inode, i_rcu);
147 INIT_LIST_HEAD(&inode->i_dentry);
148 kmem_cache_free(logfs_inode_cache, logfs_inode(inode));
149}
150
144static void __logfs_destroy_inode(struct inode *inode) 151static void __logfs_destroy_inode(struct inode *inode)
145{ 152{
146 struct logfs_inode *li = logfs_inode(inode); 153 struct logfs_inode *li = logfs_inode(inode);
147 154
148 BUG_ON(li->li_block); 155 BUG_ON(li->li_block);
149 list_del(&li->li_freeing_list); 156 list_del(&li->li_freeing_list);
150 kmem_cache_free(logfs_inode_cache, li); 157 call_rcu(&inode->i_rcu, logfs_i_callback);
151} 158}
152 159
153static void logfs_destroy_inode(struct inode *inode) 160static void logfs_destroy_inode(struct inode *inode)
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index fb2020858a34..ae0b83f476a6 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -68,11 +68,18 @@ static struct inode *minix_alloc_inode(struct super_block *sb)
68 return &ei->vfs_inode; 68 return &ei->vfs_inode;
69} 69}
70 70
71static void minix_destroy_inode(struct inode *inode) 71static void minix_i_callback(struct rcu_head *head)
72{ 72{
73 struct inode *inode = container_of(head, struct inode, i_rcu);
74 INIT_LIST_HEAD(&inode->i_dentry);
73 kmem_cache_free(minix_inode_cachep, minix_i(inode)); 75 kmem_cache_free(minix_inode_cachep, minix_i(inode));
74} 76}
75 77
78static void minix_destroy_inode(struct inode *inode)
79{
80 call_rcu(&inode->i_rcu, minix_i_callback);
81}
82
76static void init_once(void *foo) 83static void init_once(void *foo)
77{ 84{
78 struct minix_inode_info *ei = (struct minix_inode_info *) foo; 85 struct minix_inode_info *ei = (struct minix_inode_info *) foo;
diff --git a/fs/minix/namei.c b/fs/minix/namei.c
index c0d35a3accef..1b9e07728a9f 100644
--- a/fs/minix/namei.c
+++ b/fs/minix/namei.c
@@ -23,7 +23,7 @@ static struct dentry *minix_lookup(struct inode * dir, struct dentry *dentry, st
23 struct inode * inode = NULL; 23 struct inode * inode = NULL;
24 ino_t ino; 24 ino_t ino;
25 25
26 dentry->d_op = dir->i_sb->s_root->d_op; 26 d_set_d_op(dentry, dir->i_sb->s_root->d_op);
27 27
28 if (dentry->d_name.len > minix_sb(dir->i_sb)->s_namelen) 28 if (dentry->d_name.len > minix_sb(dir->i_sb)->s_namelen)
29 return ERR_PTR(-ENAMETOOLONG); 29 return ERR_PTR(-ENAMETOOLONG);
diff --git a/fs/namei.c b/fs/namei.c
index 4ff7ca530533..19433cdba011 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -169,8 +169,8 @@ EXPORT_SYMBOL(putname);
169/* 169/*
170 * This does basic POSIX ACL permission checking 170 * This does basic POSIX ACL permission checking
171 */ 171 */
172static int acl_permission_check(struct inode *inode, int mask, 172static int acl_permission_check(struct inode *inode, int mask, unsigned int flags,
173 int (*check_acl)(struct inode *inode, int mask)) 173 int (*check_acl)(struct inode *inode, int mask, unsigned int flags))
174{ 174{
175 umode_t mode = inode->i_mode; 175 umode_t mode = inode->i_mode;
176 176
@@ -180,7 +180,7 @@ static int acl_permission_check(struct inode *inode, int mask,
180 mode >>= 6; 180 mode >>= 6;
181 else { 181 else {
182 if (IS_POSIXACL(inode) && (mode & S_IRWXG) && check_acl) { 182 if (IS_POSIXACL(inode) && (mode & S_IRWXG) && check_acl) {
183 int error = check_acl(inode, mask); 183 int error = check_acl(inode, mask, flags);
184 if (error != -EAGAIN) 184 if (error != -EAGAIN)
185 return error; 185 return error;
186 } 186 }
@@ -198,25 +198,30 @@ static int acl_permission_check(struct inode *inode, int mask,
198} 198}
199 199
200/** 200/**
201 * generic_permission - check for access rights on a Posix-like filesystem 201 * generic_permission - check for access rights on a Posix-like filesystem
202 * @inode: inode to check access rights for 202 * @inode: inode to check access rights for
203 * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC) 203 * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
204 * @check_acl: optional callback to check for Posix ACLs 204 * @check_acl: optional callback to check for Posix ACLs
205 * @flags IPERM_FLAG_ flags.
205 * 206 *
206 * Used to check for read/write/execute permissions on a file. 207 * Used to check for read/write/execute permissions on a file.
207 * We use "fsuid" for this, letting us set arbitrary permissions 208 * We use "fsuid" for this, letting us set arbitrary permissions
208 * for filesystem access without changing the "normal" uids which 209 * for filesystem access without changing the "normal" uids which
209 * are used for other things.. 210 * are used for other things.
211 *
212 * generic_permission is rcu-walk aware. It returns -ECHILD in case an rcu-walk
213 * request cannot be satisfied (eg. requires blocking or too much complexity).
214 * It would then be called again in ref-walk mode.
210 */ 215 */
211int generic_permission(struct inode *inode, int mask, 216int generic_permission(struct inode *inode, int mask, unsigned int flags,
212 int (*check_acl)(struct inode *inode, int mask)) 217 int (*check_acl)(struct inode *inode, int mask, unsigned int flags))
213{ 218{
214 int ret; 219 int ret;
215 220
216 /* 221 /*
217 * Do the basic POSIX ACL permission checks. 222 * Do the basic POSIX ACL permission checks.
218 */ 223 */
219 ret = acl_permission_check(inode, mask, check_acl); 224 ret = acl_permission_check(inode, mask, flags, check_acl);
220 if (ret != -EACCES) 225 if (ret != -EACCES)
221 return ret; 226 return ret;
222 227
@@ -271,9 +276,10 @@ int inode_permission(struct inode *inode, int mask)
271 } 276 }
272 277
273 if (inode->i_op->permission) 278 if (inode->i_op->permission)
274 retval = inode->i_op->permission(inode, mask); 279 retval = inode->i_op->permission(inode, mask, 0);
275 else 280 else
276 retval = generic_permission(inode, mask, inode->i_op->check_acl); 281 retval = generic_permission(inode, mask, 0,
282 inode->i_op->check_acl);
277 283
278 if (retval) 284 if (retval)
279 return retval; 285 return retval;
@@ -362,6 +368,18 @@ void path_get(struct path *path)
362EXPORT_SYMBOL(path_get); 368EXPORT_SYMBOL(path_get);
363 369
364/** 370/**
371 * path_get_long - get a long reference to a path
372 * @path: path to get the reference to
373 *
374 * Given a path increment the reference count to the dentry and the vfsmount.
375 */
376void path_get_long(struct path *path)
377{
378 mntget_long(path->mnt);
379 dget(path->dentry);
380}
381
382/**
365 * path_put - put a reference to a path 383 * path_put - put a reference to a path
366 * @path: path to put the reference to 384 * @path: path to put the reference to
367 * 385 *
@@ -375,6 +393,185 @@ void path_put(struct path *path)
375EXPORT_SYMBOL(path_put); 393EXPORT_SYMBOL(path_put);
376 394
377/** 395/**
396 * path_put_long - put a long reference to a path
397 * @path: path to put the reference to
398 *
399 * Given a path decrement the reference count to the dentry and the vfsmount.
400 */
401void path_put_long(struct path *path)
402{
403 dput(path->dentry);
404 mntput_long(path->mnt);
405}
406
407/**
408 * nameidata_drop_rcu - drop this nameidata out of rcu-walk
409 * @nd: nameidata pathwalk data to drop
410 * @Returns: 0 on success, -ECHLID on failure
411 *
412 * Path walking has 2 modes, rcu-walk and ref-walk (see
413 * Documentation/filesystems/path-lookup.txt). __drop_rcu* functions attempt
414 * to drop out of rcu-walk mode and take normal reference counts on dentries
415 * and vfsmounts to transition to rcu-walk mode. __drop_rcu* functions take
416 * refcounts at the last known good point before rcu-walk got stuck, so
417 * ref-walk may continue from there. If this is not successful (eg. a seqcount
418 * has changed), then failure is returned and path walk restarts from the
419 * beginning in ref-walk mode.
420 *
421 * nameidata_drop_rcu attempts to drop the current nd->path and nd->root into
422 * ref-walk. Must be called from rcu-walk context.
423 */
424static int nameidata_drop_rcu(struct nameidata *nd)
425{
426 struct fs_struct *fs = current->fs;
427 struct dentry *dentry = nd->path.dentry;
428
429 BUG_ON(!(nd->flags & LOOKUP_RCU));
430 if (nd->root.mnt) {
431 spin_lock(&fs->lock);
432 if (nd->root.mnt != fs->root.mnt ||
433 nd->root.dentry != fs->root.dentry)
434 goto err_root;
435 }
436 spin_lock(&dentry->d_lock);
437 if (!__d_rcu_to_refcount(dentry, nd->seq))
438 goto err;
439 BUG_ON(nd->inode != dentry->d_inode);
440 spin_unlock(&dentry->d_lock);
441 if (nd->root.mnt) {
442 path_get(&nd->root);
443 spin_unlock(&fs->lock);
444 }
445 mntget(nd->path.mnt);
446
447 rcu_read_unlock();
448 br_read_unlock(vfsmount_lock);
449 nd->flags &= ~LOOKUP_RCU;
450 return 0;
451err:
452 spin_unlock(&dentry->d_lock);
453err_root:
454 if (nd->root.mnt)
455 spin_unlock(&fs->lock);
456 return -ECHILD;
457}
458
459/* Try to drop out of rcu-walk mode if we were in it, otherwise do nothing. */
460static inline int nameidata_drop_rcu_maybe(struct nameidata *nd)
461{
462 if (nd->flags & LOOKUP_RCU)
463 return nameidata_drop_rcu(nd);
464 return 0;
465}
466
467/**
468 * nameidata_dentry_drop_rcu - drop nameidata and dentry out of rcu-walk
469 * @nd: nameidata pathwalk data to drop
470 * @dentry: dentry to drop
471 * @Returns: 0 on success, -ECHLID on failure
472 *
473 * nameidata_dentry_drop_rcu attempts to drop the current nd->path and nd->root,
474 * and dentry into ref-walk. @dentry must be a path found by a do_lookup call on
475 * @nd. Must be called from rcu-walk context.
476 */
477static int nameidata_dentry_drop_rcu(struct nameidata *nd, struct dentry *dentry)
478{
479 struct fs_struct *fs = current->fs;
480 struct dentry *parent = nd->path.dentry;
481
482 BUG_ON(!(nd->flags & LOOKUP_RCU));
483 if (nd->root.mnt) {
484 spin_lock(&fs->lock);
485 if (nd->root.mnt != fs->root.mnt ||
486 nd->root.dentry != fs->root.dentry)
487 goto err_root;
488 }
489 spin_lock(&parent->d_lock);
490 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
491 if (!__d_rcu_to_refcount(dentry, nd->seq))
492 goto err;
493 /*
494 * If the sequence check on the child dentry passed, then the child has
495 * not been removed from its parent. This means the parent dentry must
496 * be valid and able to take a reference at this point.
497 */
498 BUG_ON(!IS_ROOT(dentry) && dentry->d_parent != parent);
499 BUG_ON(!parent->d_count);
500 parent->d_count++;
501 spin_unlock(&dentry->d_lock);
502 spin_unlock(&parent->d_lock);
503 if (nd->root.mnt) {
504 path_get(&nd->root);
505 spin_unlock(&fs->lock);
506 }
507 mntget(nd->path.mnt);
508
509 rcu_read_unlock();
510 br_read_unlock(vfsmount_lock);
511 nd->flags &= ~LOOKUP_RCU;
512 return 0;
513err:
514 spin_unlock(&dentry->d_lock);
515 spin_unlock(&parent->d_lock);
516err_root:
517 if (nd->root.mnt)
518 spin_unlock(&fs->lock);
519 return -ECHILD;
520}
521
522/* Try to drop out of rcu-walk mode if we were in it, otherwise do nothing. */
523static inline int nameidata_dentry_drop_rcu_maybe(struct nameidata *nd, struct dentry *dentry)
524{
525 if (nd->flags & LOOKUP_RCU)
526 return nameidata_dentry_drop_rcu(nd, dentry);
527 return 0;
528}
529
530/**
531 * nameidata_drop_rcu_last - drop nameidata ending path walk out of rcu-walk
532 * @nd: nameidata pathwalk data to drop
533 * @Returns: 0 on success, -ECHLID on failure
534 *
535 * nameidata_drop_rcu_last attempts to drop the current nd->path into ref-walk.
536 * nd->path should be the final element of the lookup, so nd->root is discarded.
537 * Must be called from rcu-walk context.
538 */
539static int nameidata_drop_rcu_last(struct nameidata *nd)
540{
541 struct dentry *dentry = nd->path.dentry;
542
543 BUG_ON(!(nd->flags & LOOKUP_RCU));
544 nd->flags &= ~LOOKUP_RCU;
545 nd->root.mnt = NULL;
546 spin_lock(&dentry->d_lock);
547 if (!__d_rcu_to_refcount(dentry, nd->seq))
548 goto err_unlock;
549 BUG_ON(nd->inode != dentry->d_inode);
550 spin_unlock(&dentry->d_lock);
551
552 mntget(nd->path.mnt);
553
554 rcu_read_unlock();
555 br_read_unlock(vfsmount_lock);
556
557 return 0;
558
559err_unlock:
560 spin_unlock(&dentry->d_lock);
561 rcu_read_unlock();
562 br_read_unlock(vfsmount_lock);
563 return -ECHILD;
564}
565
566/* Try to drop out of rcu-walk mode if we were in it, otherwise do nothing. */
567static inline int nameidata_drop_rcu_last_maybe(struct nameidata *nd)
568{
569 if (likely(nd->flags & LOOKUP_RCU))
570 return nameidata_drop_rcu_last(nd);
571 return 0;
572}
573
574/**
378 * release_open_intent - free up open intent resources 575 * release_open_intent - free up open intent resources
379 * @nd: pointer to nameidata 576 * @nd: pointer to nameidata
380 */ 577 */
@@ -386,10 +583,26 @@ void release_open_intent(struct nameidata *nd)
386 fput(nd->intent.open.file); 583 fput(nd->intent.open.file);
387} 584}
388 585
586static int d_revalidate(struct dentry *dentry, struct nameidata *nd)
587{
588 int status;
589
590 status = dentry->d_op->d_revalidate(dentry, nd);
591 if (status == -ECHILD) {
592 if (nameidata_dentry_drop_rcu(nd, dentry))
593 return status;
594 status = dentry->d_op->d_revalidate(dentry, nd);
595 }
596
597 return status;
598}
599
389static inline struct dentry * 600static inline struct dentry *
390do_revalidate(struct dentry *dentry, struct nameidata *nd) 601do_revalidate(struct dentry *dentry, struct nameidata *nd)
391{ 602{
392 int status = dentry->d_op->d_revalidate(dentry, nd); 603 int status;
604
605 status = d_revalidate(dentry, nd);
393 if (unlikely(status <= 0)) { 606 if (unlikely(status <= 0)) {
394 /* 607 /*
395 * The dentry failed validation. 608 * The dentry failed validation.
@@ -397,19 +610,36 @@ do_revalidate(struct dentry *dentry, struct nameidata *nd)
397 * the dentry otherwise d_revalidate is asking us 610 * the dentry otherwise d_revalidate is asking us
398 * to return a fail status. 611 * to return a fail status.
399 */ 612 */
400 if (!status) { 613 if (status < 0) {
614 /* If we're in rcu-walk, we don't have a ref */
615 if (!(nd->flags & LOOKUP_RCU))
616 dput(dentry);
617 dentry = ERR_PTR(status);
618
619 } else {
620 /* Don't d_invalidate in rcu-walk mode */
621 if (nameidata_dentry_drop_rcu_maybe(nd, dentry))
622 return ERR_PTR(-ECHILD);
401 if (!d_invalidate(dentry)) { 623 if (!d_invalidate(dentry)) {
402 dput(dentry); 624 dput(dentry);
403 dentry = NULL; 625 dentry = NULL;
404 } 626 }
405 } else {
406 dput(dentry);
407 dentry = ERR_PTR(status);
408 } 627 }
409 } 628 }
410 return dentry; 629 return dentry;
411} 630}
412 631
632static inline int need_reval_dot(struct dentry *dentry)
633{
634 if (likely(!(dentry->d_flags & DCACHE_OP_REVALIDATE)))
635 return 0;
636
637 if (likely(!(dentry->d_sb->s_type->fs_flags & FS_REVAL_DOT)))
638 return 0;
639
640 return 1;
641}
642
413/* 643/*
414 * force_reval_path - force revalidation of a dentry 644 * force_reval_path - force revalidation of a dentry
415 * 645 *
@@ -433,13 +663,12 @@ force_reval_path(struct path *path, struct nameidata *nd)
433 663
434 /* 664 /*
435 * only check on filesystems where it's possible for the dentry to 665 * only check on filesystems where it's possible for the dentry to
436 * become stale. It's assumed that if this flag is set then the 666 * become stale.
437 * d_revalidate op will also be defined.
438 */ 667 */
439 if (!(dentry->d_sb->s_type->fs_flags & FS_REVAL_DOT)) 668 if (!need_reval_dot(dentry))
440 return 0; 669 return 0;
441 670
442 status = dentry->d_op->d_revalidate(dentry, nd); 671 status = d_revalidate(dentry, nd);
443 if (status > 0) 672 if (status > 0)
444 return 0; 673 return 0;
445 674
@@ -459,26 +688,27 @@ force_reval_path(struct path *path, struct nameidata *nd)
459 * short-cut DAC fails, then call ->permission() to do more 688 * short-cut DAC fails, then call ->permission() to do more
460 * complete permission check. 689 * complete permission check.
461 */ 690 */
462static int exec_permission(struct inode *inode) 691static inline int exec_permission(struct inode *inode, unsigned int flags)
463{ 692{
464 int ret; 693 int ret;
465 694
466 if (inode->i_op->permission) { 695 if (inode->i_op->permission) {
467 ret = inode->i_op->permission(inode, MAY_EXEC); 696 ret = inode->i_op->permission(inode, MAY_EXEC, flags);
468 if (!ret) 697 } else {
469 goto ok; 698 ret = acl_permission_check(inode, MAY_EXEC, flags,
470 return ret; 699 inode->i_op->check_acl);
471 } 700 }
472 ret = acl_permission_check(inode, MAY_EXEC, inode->i_op->check_acl); 701 if (likely(!ret))
473 if (!ret)
474 goto ok; 702 goto ok;
703 if (ret == -ECHILD)
704 return ret;
475 705
476 if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH)) 706 if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
477 goto ok; 707 goto ok;
478 708
479 return ret; 709 return ret;
480ok: 710ok:
481 return security_inode_permission(inode, MAY_EXEC); 711 return security_inode_exec_permission(inode, flags);
482} 712}
483 713
484static __always_inline void set_root(struct nameidata *nd) 714static __always_inline void set_root(struct nameidata *nd)
@@ -489,8 +719,23 @@ static __always_inline void set_root(struct nameidata *nd)
489 719
490static int link_path_walk(const char *, struct nameidata *); 720static int link_path_walk(const char *, struct nameidata *);
491 721
722static __always_inline void set_root_rcu(struct nameidata *nd)
723{
724 if (!nd->root.mnt) {
725 struct fs_struct *fs = current->fs;
726 unsigned seq;
727
728 do {
729 seq = read_seqcount_begin(&fs->seq);
730 nd->root = fs->root;
731 } while (read_seqcount_retry(&fs->seq, seq));
732 }
733}
734
492static __always_inline int __vfs_follow_link(struct nameidata *nd, const char *link) 735static __always_inline int __vfs_follow_link(struct nameidata *nd, const char *link)
493{ 736{
737 int ret;
738
494 if (IS_ERR(link)) 739 if (IS_ERR(link))
495 goto fail; 740 goto fail;
496 741
@@ -500,8 +745,10 @@ static __always_inline int __vfs_follow_link(struct nameidata *nd, const char *l
500 nd->path = nd->root; 745 nd->path = nd->root;
501 path_get(&nd->root); 746 path_get(&nd->root);
502 } 747 }
748 nd->inode = nd->path.dentry->d_inode;
503 749
504 return link_path_walk(link, nd); 750 ret = link_path_walk(link, nd);
751 return ret;
505fail: 752fail:
506 path_put(&nd->path); 753 path_put(&nd->path);
507 return PTR_ERR(link); 754 return PTR_ERR(link);
@@ -516,11 +763,12 @@ static void path_put_conditional(struct path *path, struct nameidata *nd)
516 763
517static inline void path_to_nameidata(struct path *path, struct nameidata *nd) 764static inline void path_to_nameidata(struct path *path, struct nameidata *nd)
518{ 765{
519 dput(nd->path.dentry); 766 if (!(nd->flags & LOOKUP_RCU)) {
520 if (nd->path.mnt != path->mnt) { 767 dput(nd->path.dentry);
521 mntput(nd->path.mnt); 768 if (nd->path.mnt != path->mnt)
522 nd->path.mnt = path->mnt; 769 mntput(nd->path.mnt);
523 } 770 }
771 nd->path.mnt = path->mnt;
524 nd->path.dentry = path->dentry; 772 nd->path.dentry = path->dentry;
525} 773}
526 774
@@ -535,9 +783,11 @@ __do_follow_link(struct path *path, struct nameidata *nd, void **p)
535 783
536 if (path->mnt != nd->path.mnt) { 784 if (path->mnt != nd->path.mnt) {
537 path_to_nameidata(path, nd); 785 path_to_nameidata(path, nd);
786 nd->inode = nd->path.dentry->d_inode;
538 dget(dentry); 787 dget(dentry);
539 } 788 }
540 mntget(path->mnt); 789 mntget(path->mnt);
790
541 nd->last_type = LAST_BIND; 791 nd->last_type = LAST_BIND;
542 *p = dentry->d_inode->i_op->follow_link(dentry, nd); 792 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
543 error = PTR_ERR(*p); 793 error = PTR_ERR(*p);
@@ -591,6 +841,20 @@ loop:
591 return err; 841 return err;
592} 842}
593 843
844static int follow_up_rcu(struct path *path)
845{
846 struct vfsmount *parent;
847 struct dentry *mountpoint;
848
849 parent = path->mnt->mnt_parent;
850 if (parent == path->mnt)
851 return 0;
852 mountpoint = path->mnt->mnt_mountpoint;
853 path->dentry = mountpoint;
854 path->mnt = parent;
855 return 1;
856}
857
594int follow_up(struct path *path) 858int follow_up(struct path *path)
595{ 859{
596 struct vfsmount *parent; 860 struct vfsmount *parent;
@@ -612,9 +876,24 @@ int follow_up(struct path *path)
612 return 1; 876 return 1;
613} 877}
614 878
615/* no need for dcache_lock, as serialization is taken care in 879/*
616 * namespace.c 880 * serialization is taken care of in namespace.c
617 */ 881 */
882static void __follow_mount_rcu(struct nameidata *nd, struct path *path,
883 struct inode **inode)
884{
885 while (d_mountpoint(path->dentry)) {
886 struct vfsmount *mounted;
887 mounted = __lookup_mnt(path->mnt, path->dentry, 1);
888 if (!mounted)
889 return;
890 path->mnt = mounted;
891 path->dentry = mounted->mnt_root;
892 nd->seq = read_seqcount_begin(&path->dentry->d_seq);
893 *inode = path->dentry->d_inode;
894 }
895}
896
618static int __follow_mount(struct path *path) 897static int __follow_mount(struct path *path)
619{ 898{
620 int res = 0; 899 int res = 0;
@@ -645,9 +924,6 @@ static void follow_mount(struct path *path)
645 } 924 }
646} 925}
647 926
648/* no need for dcache_lock, as serialization is taken care in
649 * namespace.c
650 */
651int follow_down(struct path *path) 927int follow_down(struct path *path)
652{ 928{
653 struct vfsmount *mounted; 929 struct vfsmount *mounted;
@@ -663,7 +939,42 @@ int follow_down(struct path *path)
663 return 0; 939 return 0;
664} 940}
665 941
666static __always_inline void follow_dotdot(struct nameidata *nd) 942static int follow_dotdot_rcu(struct nameidata *nd)
943{
944 struct inode *inode = nd->inode;
945
946 set_root_rcu(nd);
947
948 while(1) {
949 if (nd->path.dentry == nd->root.dentry &&
950 nd->path.mnt == nd->root.mnt) {
951 break;
952 }
953 if (nd->path.dentry != nd->path.mnt->mnt_root) {
954 struct dentry *old = nd->path.dentry;
955 struct dentry *parent = old->d_parent;
956 unsigned seq;
957
958 seq = read_seqcount_begin(&parent->d_seq);
959 if (read_seqcount_retry(&old->d_seq, nd->seq))
960 return -ECHILD;
961 inode = parent->d_inode;
962 nd->path.dentry = parent;
963 nd->seq = seq;
964 break;
965 }
966 if (!follow_up_rcu(&nd->path))
967 break;
968 nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
969 inode = nd->path.dentry->d_inode;
970 }
971 __follow_mount_rcu(nd, &nd->path, &inode);
972 nd->inode = inode;
973
974 return 0;
975}
976
977static void follow_dotdot(struct nameidata *nd)
667{ 978{
668 set_root(nd); 979 set_root(nd);
669 980
@@ -684,6 +995,7 @@ static __always_inline void follow_dotdot(struct nameidata *nd)
684 break; 995 break;
685 } 996 }
686 follow_mount(&nd->path); 997 follow_mount(&nd->path);
998 nd->inode = nd->path.dentry->d_inode;
687} 999}
688 1000
689/* 1001/*
@@ -721,17 +1033,17 @@ static struct dentry *d_alloc_and_lookup(struct dentry *parent,
721 * It _is_ time-critical. 1033 * It _is_ time-critical.
722 */ 1034 */
723static int do_lookup(struct nameidata *nd, struct qstr *name, 1035static int do_lookup(struct nameidata *nd, struct qstr *name,
724 struct path *path) 1036 struct path *path, struct inode **inode)
725{ 1037{
726 struct vfsmount *mnt = nd->path.mnt; 1038 struct vfsmount *mnt = nd->path.mnt;
727 struct dentry *dentry, *parent; 1039 struct dentry *dentry, *parent = nd->path.dentry;
728 struct inode *dir; 1040 struct inode *dir;
729 /* 1041 /*
730 * See if the low-level filesystem might want 1042 * See if the low-level filesystem might want
731 * to use its own hash.. 1043 * to use its own hash..
732 */ 1044 */
733 if (nd->path.dentry->d_op && nd->path.dentry->d_op->d_hash) { 1045 if (unlikely(parent->d_flags & DCACHE_OP_HASH)) {
734 int err = nd->path.dentry->d_op->d_hash(nd->path.dentry, name); 1046 int err = parent->d_op->d_hash(parent, nd->inode, name);
735 if (err < 0) 1047 if (err < 0)
736 return err; 1048 return err;
737 } 1049 }
@@ -741,21 +1053,44 @@ static int do_lookup(struct nameidata *nd, struct qstr *name,
741 * of a false negative due to a concurrent rename, we're going to 1053 * of a false negative due to a concurrent rename, we're going to
742 * do the non-racy lookup, below. 1054 * do the non-racy lookup, below.
743 */ 1055 */
744 dentry = __d_lookup(nd->path.dentry, name); 1056 if (nd->flags & LOOKUP_RCU) {
745 if (!dentry) 1057 unsigned seq;
746 goto need_lookup; 1058
1059 *inode = nd->inode;
1060 dentry = __d_lookup_rcu(parent, name, &seq, inode);
1061 if (!dentry) {
1062 if (nameidata_drop_rcu(nd))
1063 return -ECHILD;
1064 goto need_lookup;
1065 }
1066 /* Memory barrier in read_seqcount_begin of child is enough */
1067 if (__read_seqcount_retry(&parent->d_seq, nd->seq))
1068 return -ECHILD;
1069
1070 nd->seq = seq;
1071 if (dentry->d_flags & DCACHE_OP_REVALIDATE)
1072 goto need_revalidate;
1073 path->mnt = mnt;
1074 path->dentry = dentry;
1075 __follow_mount_rcu(nd, path, inode);
1076 } else {
1077 dentry = __d_lookup(parent, name);
1078 if (!dentry)
1079 goto need_lookup;
747found: 1080found:
748 if (dentry->d_op && dentry->d_op->d_revalidate) 1081 if (dentry->d_flags & DCACHE_OP_REVALIDATE)
749 goto need_revalidate; 1082 goto need_revalidate;
750done: 1083done:
751 path->mnt = mnt; 1084 path->mnt = mnt;
752 path->dentry = dentry; 1085 path->dentry = dentry;
753 __follow_mount(path); 1086 __follow_mount(path);
1087 *inode = path->dentry->d_inode;
1088 }
754 return 0; 1089 return 0;
755 1090
756need_lookup: 1091need_lookup:
757 parent = nd->path.dentry;
758 dir = parent->d_inode; 1092 dir = parent->d_inode;
1093 BUG_ON(nd->inode != dir);
759 1094
760 mutex_lock(&dir->i_mutex); 1095 mutex_lock(&dir->i_mutex);
761 /* 1096 /*
@@ -817,7 +1152,6 @@ static inline int follow_on_final(struct inode *inode, unsigned lookup_flags)
817static int link_path_walk(const char *name, struct nameidata *nd) 1152static int link_path_walk(const char *name, struct nameidata *nd)
818{ 1153{
819 struct path next; 1154 struct path next;
820 struct inode *inode;
821 int err; 1155 int err;
822 unsigned int lookup_flags = nd->flags; 1156 unsigned int lookup_flags = nd->flags;
823 1157
@@ -826,18 +1160,28 @@ static int link_path_walk(const char *name, struct nameidata *nd)
826 if (!*name) 1160 if (!*name)
827 goto return_reval; 1161 goto return_reval;
828 1162
829 inode = nd->path.dentry->d_inode;
830 if (nd->depth) 1163 if (nd->depth)
831 lookup_flags = LOOKUP_FOLLOW | (nd->flags & LOOKUP_CONTINUE); 1164 lookup_flags = LOOKUP_FOLLOW | (nd->flags & LOOKUP_CONTINUE);
832 1165
833 /* At this point we know we have a real path component. */ 1166 /* At this point we know we have a real path component. */
834 for(;;) { 1167 for(;;) {
1168 struct inode *inode;
835 unsigned long hash; 1169 unsigned long hash;
836 struct qstr this; 1170 struct qstr this;
837 unsigned int c; 1171 unsigned int c;
838 1172
839 nd->flags |= LOOKUP_CONTINUE; 1173 nd->flags |= LOOKUP_CONTINUE;
840 err = exec_permission(inode); 1174 if (nd->flags & LOOKUP_RCU) {
1175 err = exec_permission(nd->inode, IPERM_FLAG_RCU);
1176 if (err == -ECHILD) {
1177 if (nameidata_drop_rcu(nd))
1178 return -ECHILD;
1179 goto exec_again;
1180 }
1181 } else {
1182exec_again:
1183 err = exec_permission(nd->inode, 0);
1184 }
841 if (err) 1185 if (err)
842 break; 1186 break;
843 1187
@@ -868,37 +1212,44 @@ static int link_path_walk(const char *name, struct nameidata *nd)
868 if (this.name[0] == '.') switch (this.len) { 1212 if (this.name[0] == '.') switch (this.len) {
869 default: 1213 default:
870 break; 1214 break;
871 case 2: 1215 case 2:
872 if (this.name[1] != '.') 1216 if (this.name[1] != '.')
873 break; 1217 break;
874 follow_dotdot(nd); 1218 if (nd->flags & LOOKUP_RCU) {
875 inode = nd->path.dentry->d_inode; 1219 if (follow_dotdot_rcu(nd))
1220 return -ECHILD;
1221 } else
1222 follow_dotdot(nd);
876 /* fallthrough */ 1223 /* fallthrough */
877 case 1: 1224 case 1:
878 continue; 1225 continue;
879 } 1226 }
880 /* This does the actual lookups.. */ 1227 /* This does the actual lookups.. */
881 err = do_lookup(nd, &this, &next); 1228 err = do_lookup(nd, &this, &next, &inode);
882 if (err) 1229 if (err)
883 break; 1230 break;
884
885 err = -ENOENT; 1231 err = -ENOENT;
886 inode = next.dentry->d_inode;
887 if (!inode) 1232 if (!inode)
888 goto out_dput; 1233 goto out_dput;
889 1234
890 if (inode->i_op->follow_link) { 1235 if (inode->i_op->follow_link) {
1236 /* We commonly drop rcu-walk here */
1237 if (nameidata_dentry_drop_rcu_maybe(nd, next.dentry))
1238 return -ECHILD;
1239 BUG_ON(inode != next.dentry->d_inode);
891 err = do_follow_link(&next, nd); 1240 err = do_follow_link(&next, nd);
892 if (err) 1241 if (err)
893 goto return_err; 1242 goto return_err;
1243 nd->inode = nd->path.dentry->d_inode;
894 err = -ENOENT; 1244 err = -ENOENT;
895 inode = nd->path.dentry->d_inode; 1245 if (!nd->inode)
896 if (!inode)
897 break; 1246 break;
898 } else 1247 } else {
899 path_to_nameidata(&next, nd); 1248 path_to_nameidata(&next, nd);
1249 nd->inode = inode;
1250 }
900 err = -ENOTDIR; 1251 err = -ENOTDIR;
901 if (!inode->i_op->lookup) 1252 if (!nd->inode->i_op->lookup)
902 break; 1253 break;
903 continue; 1254 continue;
904 /* here ends the main loop */ 1255 /* here ends the main loop */
@@ -913,32 +1264,39 @@ last_component:
913 if (this.name[0] == '.') switch (this.len) { 1264 if (this.name[0] == '.') switch (this.len) {
914 default: 1265 default:
915 break; 1266 break;
916 case 2: 1267 case 2:
917 if (this.name[1] != '.') 1268 if (this.name[1] != '.')
918 break; 1269 break;
919 follow_dotdot(nd); 1270 if (nd->flags & LOOKUP_RCU) {
920 inode = nd->path.dentry->d_inode; 1271 if (follow_dotdot_rcu(nd))
1272 return -ECHILD;
1273 } else
1274 follow_dotdot(nd);
921 /* fallthrough */ 1275 /* fallthrough */
922 case 1: 1276 case 1:
923 goto return_reval; 1277 goto return_reval;
924 } 1278 }
925 err = do_lookup(nd, &this, &next); 1279 err = do_lookup(nd, &this, &next, &inode);
926 if (err) 1280 if (err)
927 break; 1281 break;
928 inode = next.dentry->d_inode;
929 if (follow_on_final(inode, lookup_flags)) { 1282 if (follow_on_final(inode, lookup_flags)) {
1283 if (nameidata_dentry_drop_rcu_maybe(nd, next.dentry))
1284 return -ECHILD;
1285 BUG_ON(inode != next.dentry->d_inode);
930 err = do_follow_link(&next, nd); 1286 err = do_follow_link(&next, nd);
931 if (err) 1287 if (err)
932 goto return_err; 1288 goto return_err;
933 inode = nd->path.dentry->d_inode; 1289 nd->inode = nd->path.dentry->d_inode;
934 } else 1290 } else {
935 path_to_nameidata(&next, nd); 1291 path_to_nameidata(&next, nd);
1292 nd->inode = inode;
1293 }
936 err = -ENOENT; 1294 err = -ENOENT;
937 if (!inode) 1295 if (!nd->inode)
938 break; 1296 break;
939 if (lookup_flags & LOOKUP_DIRECTORY) { 1297 if (lookup_flags & LOOKUP_DIRECTORY) {
940 err = -ENOTDIR; 1298 err = -ENOTDIR;
941 if (!inode->i_op->lookup) 1299 if (!nd->inode->i_op->lookup)
942 break; 1300 break;
943 } 1301 }
944 goto return_base; 1302 goto return_base;
@@ -958,25 +1316,43 @@ return_reval:
958 * We bypassed the ordinary revalidation routines. 1316 * We bypassed the ordinary revalidation routines.
959 * We may need to check the cached dentry for staleness. 1317 * We may need to check the cached dentry for staleness.
960 */ 1318 */
961 if (nd->path.dentry && nd->path.dentry->d_sb && 1319 if (need_reval_dot(nd->path.dentry)) {
962 (nd->path.dentry->d_sb->s_type->fs_flags & FS_REVAL_DOT)) {
963 err = -ESTALE;
964 /* Note: we do not d_invalidate() */ 1320 /* Note: we do not d_invalidate() */
965 if (!nd->path.dentry->d_op->d_revalidate( 1321 err = d_revalidate(nd->path.dentry, nd);
966 nd->path.dentry, nd)) 1322 if (!err)
1323 err = -ESTALE;
1324 if (err < 0)
967 break; 1325 break;
968 } 1326 }
969return_base: 1327return_base:
1328 if (nameidata_drop_rcu_last_maybe(nd))
1329 return -ECHILD;
970 return 0; 1330 return 0;
971out_dput: 1331out_dput:
972 path_put_conditional(&next, nd); 1332 if (!(nd->flags & LOOKUP_RCU))
1333 path_put_conditional(&next, nd);
973 break; 1334 break;
974 } 1335 }
975 path_put(&nd->path); 1336 if (!(nd->flags & LOOKUP_RCU))
1337 path_put(&nd->path);
976return_err: 1338return_err:
977 return err; 1339 return err;
978} 1340}
979 1341
1342static inline int path_walk_rcu(const char *name, struct nameidata *nd)
1343{
1344 current->total_link_count = 0;
1345
1346 return link_path_walk(name, nd);
1347}
1348
1349static inline int path_walk_simple(const char *name, struct nameidata *nd)
1350{
1351 current->total_link_count = 0;
1352
1353 return link_path_walk(name, nd);
1354}
1355
980static int path_walk(const char *name, struct nameidata *nd) 1356static int path_walk(const char *name, struct nameidata *nd)
981{ 1357{
982 struct path save = nd->path; 1358 struct path save = nd->path;
@@ -1002,6 +1378,93 @@ static int path_walk(const char *name, struct nameidata *nd)
1002 return result; 1378 return result;
1003} 1379}
1004 1380
1381static void path_finish_rcu(struct nameidata *nd)
1382{
1383 if (nd->flags & LOOKUP_RCU) {
1384 /* RCU dangling. Cancel it. */
1385 nd->flags &= ~LOOKUP_RCU;
1386 nd->root.mnt = NULL;
1387 rcu_read_unlock();
1388 br_read_unlock(vfsmount_lock);
1389 }
1390 if (nd->file)
1391 fput(nd->file);
1392}
1393
1394static int path_init_rcu(int dfd, const char *name, unsigned int flags, struct nameidata *nd)
1395{
1396 int retval = 0;
1397 int fput_needed;
1398 struct file *file;
1399
1400 nd->last_type = LAST_ROOT; /* if there are only slashes... */
1401 nd->flags = flags | LOOKUP_RCU;
1402 nd->depth = 0;
1403 nd->root.mnt = NULL;
1404 nd->file = NULL;
1405
1406 if (*name=='/') {
1407 struct fs_struct *fs = current->fs;
1408 unsigned seq;
1409
1410 br_read_lock(vfsmount_lock);
1411 rcu_read_lock();
1412
1413 do {
1414 seq = read_seqcount_begin(&fs->seq);
1415 nd->root = fs->root;
1416 nd->path = nd->root;
1417 nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
1418 } while (read_seqcount_retry(&fs->seq, seq));
1419
1420 } else if (dfd == AT_FDCWD) {
1421 struct fs_struct *fs = current->fs;
1422 unsigned seq;
1423
1424 br_read_lock(vfsmount_lock);
1425 rcu_read_lock();
1426
1427 do {
1428 seq = read_seqcount_begin(&fs->seq);
1429 nd->path = fs->pwd;
1430 nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
1431 } while (read_seqcount_retry(&fs->seq, seq));
1432
1433 } else {
1434 struct dentry *dentry;
1435
1436 file = fget_light(dfd, &fput_needed);
1437 retval = -EBADF;
1438 if (!file)
1439 goto out_fail;
1440
1441 dentry = file->f_path.dentry;
1442
1443 retval = -ENOTDIR;
1444 if (!S_ISDIR(dentry->d_inode->i_mode))
1445 goto fput_fail;
1446
1447 retval = file_permission(file, MAY_EXEC);
1448 if (retval)
1449 goto fput_fail;
1450
1451 nd->path = file->f_path;
1452 if (fput_needed)
1453 nd->file = file;
1454
1455 nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
1456 br_read_lock(vfsmount_lock);
1457 rcu_read_lock();
1458 }
1459 nd->inode = nd->path.dentry->d_inode;
1460 return 0;
1461
1462fput_fail:
1463 fput_light(file, fput_needed);
1464out_fail:
1465 return retval;
1466}
1467
1005static int path_init(int dfd, const char *name, unsigned int flags, struct nameidata *nd) 1468static int path_init(int dfd, const char *name, unsigned int flags, struct nameidata *nd)
1006{ 1469{
1007 int retval = 0; 1470 int retval = 0;
@@ -1042,6 +1505,7 @@ static int path_init(int dfd, const char *name, unsigned int flags, struct namei
1042 1505
1043 fput_light(file, fput_needed); 1506 fput_light(file, fput_needed);
1044 } 1507 }
1508 nd->inode = nd->path.dentry->d_inode;
1045 return 0; 1509 return 0;
1046 1510
1047fput_fail: 1511fput_fail:
@@ -1054,16 +1518,53 @@ out_fail:
1054static int do_path_lookup(int dfd, const char *name, 1518static int do_path_lookup(int dfd, const char *name,
1055 unsigned int flags, struct nameidata *nd) 1519 unsigned int flags, struct nameidata *nd)
1056{ 1520{
1057 int retval = path_init(dfd, name, flags, nd); 1521 int retval;
1058 if (!retval) 1522
1059 retval = path_walk(name, nd); 1523 /*
1060 if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry && 1524 * Path walking is largely split up into 2 different synchronisation
1061 nd->path.dentry->d_inode)) 1525 * schemes, rcu-walk and ref-walk (explained in
1062 audit_inode(name, nd->path.dentry); 1526 * Documentation/filesystems/path-lookup.txt). These share much of the
1527 * path walk code, but some things particularly setup, cleanup, and
1528 * following mounts are sufficiently divergent that functions are
1529 * duplicated. Typically there is a function foo(), and its RCU
1530 * analogue, foo_rcu().
1531 *
1532 * -ECHILD is the error number of choice (just to avoid clashes) that
1533 * is returned if some aspect of an rcu-walk fails. Such an error must
1534 * be handled by restarting a traditional ref-walk (which will always
1535 * be able to complete).
1536 */
1537 retval = path_init_rcu(dfd, name, flags, nd);
1538 if (unlikely(retval))
1539 return retval;
1540 retval = path_walk_rcu(name, nd);
1541 path_finish_rcu(nd);
1063 if (nd->root.mnt) { 1542 if (nd->root.mnt) {
1064 path_put(&nd->root); 1543 path_put(&nd->root);
1065 nd->root.mnt = NULL; 1544 nd->root.mnt = NULL;
1066 } 1545 }
1546
1547 if (unlikely(retval == -ECHILD || retval == -ESTALE)) {
1548 /* slower, locked walk */
1549 if (retval == -ESTALE)
1550 flags |= LOOKUP_REVAL;
1551 retval = path_init(dfd, name, flags, nd);
1552 if (unlikely(retval))
1553 return retval;
1554 retval = path_walk(name, nd);
1555 if (nd->root.mnt) {
1556 path_put(&nd->root);
1557 nd->root.mnt = NULL;
1558 }
1559 }
1560
1561 if (likely(!retval)) {
1562 if (unlikely(!audit_dummy_context())) {
1563 if (nd->path.dentry && nd->inode)
1564 audit_inode(name, nd->path.dentry);
1565 }
1566 }
1567
1067 return retval; 1568 return retval;
1068} 1569}
1069 1570
@@ -1106,10 +1607,11 @@ int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
1106 path_get(&nd->path); 1607 path_get(&nd->path);
1107 nd->root = nd->path; 1608 nd->root = nd->path;
1108 path_get(&nd->root); 1609 path_get(&nd->root);
1610 nd->inode = nd->path.dentry->d_inode;
1109 1611
1110 retval = path_walk(name, nd); 1612 retval = path_walk(name, nd);
1111 if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry && 1613 if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
1112 nd->path.dentry->d_inode)) 1614 nd->inode))
1113 audit_inode(name, nd->path.dentry); 1615 audit_inode(name, nd->path.dentry);
1114 1616
1115 path_put(&nd->root); 1617 path_put(&nd->root);
@@ -1125,7 +1627,7 @@ static struct dentry *__lookup_hash(struct qstr *name,
1125 struct dentry *dentry; 1627 struct dentry *dentry;
1126 int err; 1628 int err;
1127 1629
1128 err = exec_permission(inode); 1630 err = exec_permission(inode, 0);
1129 if (err) 1631 if (err)
1130 return ERR_PTR(err); 1632 return ERR_PTR(err);
1131 1633
@@ -1133,8 +1635,8 @@ static struct dentry *__lookup_hash(struct qstr *name,
1133 * See if the low-level filesystem might want 1635 * See if the low-level filesystem might want
1134 * to use its own hash.. 1636 * to use its own hash..
1135 */ 1637 */
1136 if (base->d_op && base->d_op->d_hash) { 1638 if (base->d_flags & DCACHE_OP_HASH) {
1137 err = base->d_op->d_hash(base, name); 1639 err = base->d_op->d_hash(base, inode, name);
1138 dentry = ERR_PTR(err); 1640 dentry = ERR_PTR(err);
1139 if (err < 0) 1641 if (err < 0)
1140 goto out; 1642 goto out;
@@ -1147,7 +1649,7 @@ static struct dentry *__lookup_hash(struct qstr *name,
1147 */ 1649 */
1148 dentry = d_lookup(base, name); 1650 dentry = d_lookup(base, name);
1149 1651
1150 if (dentry && dentry->d_op && dentry->d_op->d_revalidate) 1652 if (dentry && (dentry->d_flags & DCACHE_OP_REVALIDATE))
1151 dentry = do_revalidate(dentry, nd); 1653 dentry = do_revalidate(dentry, nd);
1152 1654
1153 if (!dentry) 1655 if (!dentry)
@@ -1490,6 +1992,7 @@ out_unlock:
1490 mutex_unlock(&dir->d_inode->i_mutex); 1992 mutex_unlock(&dir->d_inode->i_mutex);
1491 dput(nd->path.dentry); 1993 dput(nd->path.dentry);
1492 nd->path.dentry = path->dentry; 1994 nd->path.dentry = path->dentry;
1995
1493 if (error) 1996 if (error)
1494 return error; 1997 return error;
1495 /* Don't check for write permission, don't truncate */ 1998 /* Don't check for write permission, don't truncate */
@@ -1584,6 +2087,9 @@ exit:
1584 return ERR_PTR(error); 2087 return ERR_PTR(error);
1585} 2088}
1586 2089
2090/*
2091 * Handle O_CREAT case for do_filp_open
2092 */
1587static struct file *do_last(struct nameidata *nd, struct path *path, 2093static struct file *do_last(struct nameidata *nd, struct path *path,
1588 int open_flag, int acc_mode, 2094 int open_flag, int acc_mode,
1589 int mode, const char *pathname) 2095 int mode, const char *pathname)
@@ -1597,50 +2103,25 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
1597 follow_dotdot(nd); 2103 follow_dotdot(nd);
1598 dir = nd->path.dentry; 2104 dir = nd->path.dentry;
1599 case LAST_DOT: 2105 case LAST_DOT:
1600 if (nd->path.mnt->mnt_sb->s_type->fs_flags & FS_REVAL_DOT) { 2106 if (need_reval_dot(dir)) {
1601 if (!dir->d_op->d_revalidate(dir, nd)) { 2107 error = d_revalidate(nd->path.dentry, nd);
2108 if (!error)
1602 error = -ESTALE; 2109 error = -ESTALE;
2110 if (error < 0)
1603 goto exit; 2111 goto exit;
1604 }
1605 } 2112 }
1606 /* fallthrough */ 2113 /* fallthrough */
1607 case LAST_ROOT: 2114 case LAST_ROOT:
1608 if (open_flag & O_CREAT) 2115 goto exit;
1609 goto exit;
1610 /* fallthrough */
1611 case LAST_BIND: 2116 case LAST_BIND:
1612 audit_inode(pathname, dir); 2117 audit_inode(pathname, dir);
1613 goto ok; 2118 goto ok;
1614 } 2119 }
1615 2120
1616 /* trailing slashes? */ 2121 /* trailing slashes? */
1617 if (nd->last.name[nd->last.len]) { 2122 if (nd->last.name[nd->last.len])
1618 if (open_flag & O_CREAT) 2123 goto exit;
1619 goto exit;
1620 nd->flags |= LOOKUP_DIRECTORY | LOOKUP_FOLLOW;
1621 }
1622
1623 /* just plain open? */
1624 if (!(open_flag & O_CREAT)) {
1625 error = do_lookup(nd, &nd->last, path);
1626 if (error)
1627 goto exit;
1628 error = -ENOENT;
1629 if (!path->dentry->d_inode)
1630 goto exit_dput;
1631 if (path->dentry->d_inode->i_op->follow_link)
1632 return NULL;
1633 error = -ENOTDIR;
1634 if (nd->flags & LOOKUP_DIRECTORY) {
1635 if (!path->dentry->d_inode->i_op->lookup)
1636 goto exit_dput;
1637 }
1638 path_to_nameidata(path, nd);
1639 audit_inode(pathname, nd->path.dentry);
1640 goto ok;
1641 }
1642 2124
1643 /* OK, it's O_CREAT */
1644 mutex_lock(&dir->d_inode->i_mutex); 2125 mutex_lock(&dir->d_inode->i_mutex);
1645 2126
1646 path->dentry = lookup_hash(nd); 2127 path->dentry = lookup_hash(nd);
@@ -1711,8 +2192,9 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
1711 return NULL; 2192 return NULL;
1712 2193
1713 path_to_nameidata(path, nd); 2194 path_to_nameidata(path, nd);
2195 nd->inode = path->dentry->d_inode;
1714 error = -EISDIR; 2196 error = -EISDIR;
1715 if (S_ISDIR(path->dentry->d_inode->i_mode)) 2197 if (S_ISDIR(nd->inode->i_mode))
1716 goto exit; 2198 goto exit;
1717ok: 2199ok:
1718 filp = finish_open(nd, open_flag, acc_mode); 2200 filp = finish_open(nd, open_flag, acc_mode);
@@ -1743,7 +2225,7 @@ struct file *do_filp_open(int dfd, const char *pathname,
1743 struct path path; 2225 struct path path;
1744 int count = 0; 2226 int count = 0;
1745 int flag = open_to_namei_flags(open_flag); 2227 int flag = open_to_namei_flags(open_flag);
1746 int force_reval = 0; 2228 int flags;
1747 2229
1748 if (!(open_flag & O_CREAT)) 2230 if (!(open_flag & O_CREAT))
1749 mode = 0; 2231 mode = 0;
@@ -1772,54 +2254,84 @@ struct file *do_filp_open(int dfd, const char *pathname,
1772 if (open_flag & O_APPEND) 2254 if (open_flag & O_APPEND)
1773 acc_mode |= MAY_APPEND; 2255 acc_mode |= MAY_APPEND;
1774 2256
1775 /* find the parent */ 2257 flags = LOOKUP_OPEN;
1776reval: 2258 if (open_flag & O_CREAT) {
1777 error = path_init(dfd, pathname, LOOKUP_PARENT, &nd); 2259 flags |= LOOKUP_CREATE;
2260 if (open_flag & O_EXCL)
2261 flags |= LOOKUP_EXCL;
2262 }
2263 if (open_flag & O_DIRECTORY)
2264 flags |= LOOKUP_DIRECTORY;
2265 if (!(open_flag & O_NOFOLLOW))
2266 flags |= LOOKUP_FOLLOW;
2267
2268 filp = get_empty_filp();
2269 if (!filp)
2270 return ERR_PTR(-ENFILE);
2271
2272 filp->f_flags = open_flag;
2273 nd.intent.open.file = filp;
2274 nd.intent.open.flags = flag;
2275 nd.intent.open.create_mode = mode;
2276
2277 if (open_flag & O_CREAT)
2278 goto creat;
2279
2280 /* !O_CREAT, simple open */
2281 error = do_path_lookup(dfd, pathname, flags, &nd);
2282 if (unlikely(error))
2283 goto out_filp;
2284 error = -ELOOP;
2285 if (!(nd.flags & LOOKUP_FOLLOW)) {
2286 if (nd.inode->i_op->follow_link)
2287 goto out_path;
2288 }
2289 error = -ENOTDIR;
2290 if (nd.flags & LOOKUP_DIRECTORY) {
2291 if (!nd.inode->i_op->lookup)
2292 goto out_path;
2293 }
2294 audit_inode(pathname, nd.path.dentry);
2295 filp = finish_open(&nd, open_flag, acc_mode);
2296 return filp;
2297
2298creat:
2299 /* OK, have to create the file. Find the parent. */
2300 error = path_init_rcu(dfd, pathname,
2301 LOOKUP_PARENT | (flags & LOOKUP_REVAL), &nd);
1778 if (error) 2302 if (error)
1779 return ERR_PTR(error); 2303 goto out_filp;
1780 if (force_reval) 2304 error = path_walk_rcu(pathname, &nd);
1781 nd.flags |= LOOKUP_REVAL; 2305 path_finish_rcu(&nd);
2306 if (unlikely(error == -ECHILD || error == -ESTALE)) {
2307 /* slower, locked walk */
2308 if (error == -ESTALE) {
2309reval:
2310 flags |= LOOKUP_REVAL;
2311 }
2312 error = path_init(dfd, pathname,
2313 LOOKUP_PARENT | (flags & LOOKUP_REVAL), &nd);
2314 if (error)
2315 goto out_filp;
1782 2316
1783 current->total_link_count = 0; 2317 error = path_walk_simple(pathname, &nd);
1784 error = link_path_walk(pathname, &nd);
1785 if (error) {
1786 filp = ERR_PTR(error);
1787 goto out;
1788 } 2318 }
1789 if (unlikely(!audit_dummy_context()) && (open_flag & O_CREAT)) 2319 if (unlikely(error))
2320 goto out_filp;
2321 if (unlikely(!audit_dummy_context()))
1790 audit_inode(pathname, nd.path.dentry); 2322 audit_inode(pathname, nd.path.dentry);
1791 2323
1792 /* 2324 /*
1793 * We have the parent and last component. 2325 * We have the parent and last component.
1794 */ 2326 */
1795 2327 nd.flags = flags;
1796 error = -ENFILE;
1797 filp = get_empty_filp();
1798 if (filp == NULL)
1799 goto exit_parent;
1800 nd.intent.open.file = filp;
1801 filp->f_flags = open_flag;
1802 nd.intent.open.flags = flag;
1803 nd.intent.open.create_mode = mode;
1804 nd.flags &= ~LOOKUP_PARENT;
1805 nd.flags |= LOOKUP_OPEN;
1806 if (open_flag & O_CREAT) {
1807 nd.flags |= LOOKUP_CREATE;
1808 if (open_flag & O_EXCL)
1809 nd.flags |= LOOKUP_EXCL;
1810 }
1811 if (open_flag & O_DIRECTORY)
1812 nd.flags |= LOOKUP_DIRECTORY;
1813 if (!(open_flag & O_NOFOLLOW))
1814 nd.flags |= LOOKUP_FOLLOW;
1815 filp = do_last(&nd, &path, open_flag, acc_mode, mode, pathname); 2328 filp = do_last(&nd, &path, open_flag, acc_mode, mode, pathname);
1816 while (unlikely(!filp)) { /* trailing symlink */ 2329 while (unlikely(!filp)) { /* trailing symlink */
1817 struct path holder; 2330 struct path holder;
1818 struct inode *inode = path.dentry->d_inode;
1819 void *cookie; 2331 void *cookie;
1820 error = -ELOOP; 2332 error = -ELOOP;
1821 /* S_ISDIR part is a temporary automount kludge */ 2333 /* S_ISDIR part is a temporary automount kludge */
1822 if (!(nd.flags & LOOKUP_FOLLOW) && !S_ISDIR(inode->i_mode)) 2334 if (!(nd.flags & LOOKUP_FOLLOW) && !S_ISDIR(nd.inode->i_mode))
1823 goto exit_dput; 2335 goto exit_dput;
1824 if (count++ == 32) 2336 if (count++ == 32)
1825 goto exit_dput; 2337 goto exit_dput;
@@ -1840,36 +2352,33 @@ reval:
1840 goto exit_dput; 2352 goto exit_dput;
1841 error = __do_follow_link(&path, &nd, &cookie); 2353 error = __do_follow_link(&path, &nd, &cookie);
1842 if (unlikely(error)) { 2354 if (unlikely(error)) {
2355 if (!IS_ERR(cookie) && nd.inode->i_op->put_link)
2356 nd.inode->i_op->put_link(path.dentry, &nd, cookie);
1843 /* nd.path had been dropped */ 2357 /* nd.path had been dropped */
1844 if (!IS_ERR(cookie) && inode->i_op->put_link) 2358 nd.path = path;
1845 inode->i_op->put_link(path.dentry, &nd, cookie); 2359 goto out_path;
1846 path_put(&path);
1847 release_open_intent(&nd);
1848 filp = ERR_PTR(error);
1849 goto out;
1850 } 2360 }
1851 holder = path; 2361 holder = path;
1852 nd.flags &= ~LOOKUP_PARENT; 2362 nd.flags &= ~LOOKUP_PARENT;
1853 filp = do_last(&nd, &path, open_flag, acc_mode, mode, pathname); 2363 filp = do_last(&nd, &path, open_flag, acc_mode, mode, pathname);
1854 if (inode->i_op->put_link) 2364 if (nd.inode->i_op->put_link)
1855 inode->i_op->put_link(holder.dentry, &nd, cookie); 2365 nd.inode->i_op->put_link(holder.dentry, &nd, cookie);
1856 path_put(&holder); 2366 path_put(&holder);
1857 } 2367 }
1858out: 2368out:
1859 if (nd.root.mnt) 2369 if (nd.root.mnt)
1860 path_put(&nd.root); 2370 path_put(&nd.root);
1861 if (filp == ERR_PTR(-ESTALE) && !force_reval) { 2371 if (filp == ERR_PTR(-ESTALE) && !(flags & LOOKUP_REVAL))
1862 force_reval = 1;
1863 goto reval; 2372 goto reval;
1864 }
1865 return filp; 2373 return filp;
1866 2374
1867exit_dput: 2375exit_dput:
1868 path_put_conditional(&path, &nd); 2376 path_put_conditional(&path, &nd);
2377out_path:
2378 path_put(&nd.path);
2379out_filp:
1869 if (!IS_ERR(nd.intent.open.file)) 2380 if (!IS_ERR(nd.intent.open.file))
1870 release_open_intent(&nd); 2381 release_open_intent(&nd);
1871exit_parent:
1872 path_put(&nd.path);
1873 filp = ERR_PTR(error); 2382 filp = ERR_PTR(error);
1874 goto out; 2383 goto out;
1875} 2384}
@@ -2130,12 +2639,10 @@ void dentry_unhash(struct dentry *dentry)
2130{ 2639{
2131 dget(dentry); 2640 dget(dentry);
2132 shrink_dcache_parent(dentry); 2641 shrink_dcache_parent(dentry);
2133 spin_lock(&dcache_lock);
2134 spin_lock(&dentry->d_lock); 2642 spin_lock(&dentry->d_lock);
2135 if (atomic_read(&dentry->d_count) == 2) 2643 if (dentry->d_count == 2)
2136 __d_drop(dentry); 2644 __d_drop(dentry);
2137 spin_unlock(&dentry->d_lock); 2645 spin_unlock(&dentry->d_lock);
2138 spin_unlock(&dcache_lock);
2139} 2646}
2140 2647
2141int vfs_rmdir(struct inode *dir, struct dentry *dentry) 2648int vfs_rmdir(struct inode *dir, struct dentry *dentry)
diff --git a/fs/namespace.c b/fs/namespace.c
index 3dbfc072ec70..3ddfd9046c44 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -138,6 +138,64 @@ void mnt_release_group_id(struct vfsmount *mnt)
138 mnt->mnt_group_id = 0; 138 mnt->mnt_group_id = 0;
139} 139}
140 140
141/*
142 * vfsmount lock must be held for read
143 */
144static inline void mnt_add_count(struct vfsmount *mnt, int n)
145{
146#ifdef CONFIG_SMP
147 this_cpu_add(mnt->mnt_pcp->mnt_count, n);
148#else
149 preempt_disable();
150 mnt->mnt_count += n;
151 preempt_enable();
152#endif
153}
154
155static inline void mnt_set_count(struct vfsmount *mnt, int n)
156{
157#ifdef CONFIG_SMP
158 this_cpu_write(mnt->mnt_pcp->mnt_count, n);
159#else
160 mnt->mnt_count = n;
161#endif
162}
163
164/*
165 * vfsmount lock must be held for read
166 */
167static inline void mnt_inc_count(struct vfsmount *mnt)
168{
169 mnt_add_count(mnt, 1);
170}
171
172/*
173 * vfsmount lock must be held for read
174 */
175static inline void mnt_dec_count(struct vfsmount *mnt)
176{
177 mnt_add_count(mnt, -1);
178}
179
180/*
181 * vfsmount lock must be held for write
182 */
183unsigned int mnt_get_count(struct vfsmount *mnt)
184{
185#ifdef CONFIG_SMP
186 unsigned int count = atomic_read(&mnt->mnt_longrefs);
187 int cpu;
188
189 for_each_possible_cpu(cpu) {
190 count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
191 }
192
193 return count;
194#else
195 return mnt->mnt_count;
196#endif
197}
198
141struct vfsmount *alloc_vfsmnt(const char *name) 199struct vfsmount *alloc_vfsmnt(const char *name)
142{ 200{
143 struct vfsmount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL); 201 struct vfsmount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
@@ -154,7 +212,17 @@ struct vfsmount *alloc_vfsmnt(const char *name)
154 goto out_free_id; 212 goto out_free_id;
155 } 213 }
156 214
157 atomic_set(&mnt->mnt_count, 1); 215#ifdef CONFIG_SMP
216 mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
217 if (!mnt->mnt_pcp)
218 goto out_free_devname;
219
220 atomic_set(&mnt->mnt_longrefs, 1);
221#else
222 mnt->mnt_count = 1;
223 mnt->mnt_writers = 0;
224#endif
225
158 INIT_LIST_HEAD(&mnt->mnt_hash); 226 INIT_LIST_HEAD(&mnt->mnt_hash);
159 INIT_LIST_HEAD(&mnt->mnt_child); 227 INIT_LIST_HEAD(&mnt->mnt_child);
160 INIT_LIST_HEAD(&mnt->mnt_mounts); 228 INIT_LIST_HEAD(&mnt->mnt_mounts);
@@ -166,13 +234,6 @@ struct vfsmount *alloc_vfsmnt(const char *name)
166#ifdef CONFIG_FSNOTIFY 234#ifdef CONFIG_FSNOTIFY
167 INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks); 235 INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks);
168#endif 236#endif
169#ifdef CONFIG_SMP
170 mnt->mnt_writers = alloc_percpu(int);
171 if (!mnt->mnt_writers)
172 goto out_free_devname;
173#else
174 mnt->mnt_writers = 0;
175#endif
176 } 237 }
177 return mnt; 238 return mnt;
178 239
@@ -216,32 +277,32 @@ int __mnt_is_readonly(struct vfsmount *mnt)
216} 277}
217EXPORT_SYMBOL_GPL(__mnt_is_readonly); 278EXPORT_SYMBOL_GPL(__mnt_is_readonly);
218 279
219static inline void inc_mnt_writers(struct vfsmount *mnt) 280static inline void mnt_inc_writers(struct vfsmount *mnt)
220{ 281{
221#ifdef CONFIG_SMP 282#ifdef CONFIG_SMP
222 (*per_cpu_ptr(mnt->mnt_writers, smp_processor_id()))++; 283 this_cpu_inc(mnt->mnt_pcp->mnt_writers);
223#else 284#else
224 mnt->mnt_writers++; 285 mnt->mnt_writers++;
225#endif 286#endif
226} 287}
227 288
228static inline void dec_mnt_writers(struct vfsmount *mnt) 289static inline void mnt_dec_writers(struct vfsmount *mnt)
229{ 290{
230#ifdef CONFIG_SMP 291#ifdef CONFIG_SMP
231 (*per_cpu_ptr(mnt->mnt_writers, smp_processor_id()))--; 292 this_cpu_dec(mnt->mnt_pcp->mnt_writers);
232#else 293#else
233 mnt->mnt_writers--; 294 mnt->mnt_writers--;
234#endif 295#endif
235} 296}
236 297
237static unsigned int count_mnt_writers(struct vfsmount *mnt) 298static unsigned int mnt_get_writers(struct vfsmount *mnt)
238{ 299{
239#ifdef CONFIG_SMP 300#ifdef CONFIG_SMP
240 unsigned int count = 0; 301 unsigned int count = 0;
241 int cpu; 302 int cpu;
242 303
243 for_each_possible_cpu(cpu) { 304 for_each_possible_cpu(cpu) {
244 count += *per_cpu_ptr(mnt->mnt_writers, cpu); 305 count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;
245 } 306 }
246 307
247 return count; 308 return count;
@@ -273,9 +334,9 @@ int mnt_want_write(struct vfsmount *mnt)
273 int ret = 0; 334 int ret = 0;
274 335
275 preempt_disable(); 336 preempt_disable();
276 inc_mnt_writers(mnt); 337 mnt_inc_writers(mnt);
277 /* 338 /*
278 * The store to inc_mnt_writers must be visible before we pass 339 * The store to mnt_inc_writers must be visible before we pass
279 * MNT_WRITE_HOLD loop below, so that the slowpath can see our 340 * MNT_WRITE_HOLD loop below, so that the slowpath can see our
280 * incremented count after it has set MNT_WRITE_HOLD. 341 * incremented count after it has set MNT_WRITE_HOLD.
281 */ 342 */
@@ -289,7 +350,7 @@ int mnt_want_write(struct vfsmount *mnt)
289 */ 350 */
290 smp_rmb(); 351 smp_rmb();
291 if (__mnt_is_readonly(mnt)) { 352 if (__mnt_is_readonly(mnt)) {
292 dec_mnt_writers(mnt); 353 mnt_dec_writers(mnt);
293 ret = -EROFS; 354 ret = -EROFS;
294 goto out; 355 goto out;
295 } 356 }
@@ -317,7 +378,7 @@ int mnt_clone_write(struct vfsmount *mnt)
317 if (__mnt_is_readonly(mnt)) 378 if (__mnt_is_readonly(mnt))
318 return -EROFS; 379 return -EROFS;
319 preempt_disable(); 380 preempt_disable();
320 inc_mnt_writers(mnt); 381 mnt_inc_writers(mnt);
321 preempt_enable(); 382 preempt_enable();
322 return 0; 383 return 0;
323} 384}
@@ -351,7 +412,7 @@ EXPORT_SYMBOL_GPL(mnt_want_write_file);
351void mnt_drop_write(struct vfsmount *mnt) 412void mnt_drop_write(struct vfsmount *mnt)
352{ 413{
353 preempt_disable(); 414 preempt_disable();
354 dec_mnt_writers(mnt); 415 mnt_dec_writers(mnt);
355 preempt_enable(); 416 preempt_enable();
356} 417}
357EXPORT_SYMBOL_GPL(mnt_drop_write); 418EXPORT_SYMBOL_GPL(mnt_drop_write);
@@ -384,7 +445,7 @@ static int mnt_make_readonly(struct vfsmount *mnt)
384 * MNT_WRITE_HOLD, so it can't be decremented by another CPU while 445 * MNT_WRITE_HOLD, so it can't be decremented by another CPU while
385 * we're counting up here. 446 * we're counting up here.
386 */ 447 */
387 if (count_mnt_writers(mnt) > 0) 448 if (mnt_get_writers(mnt) > 0)
388 ret = -EBUSY; 449 ret = -EBUSY;
389 else 450 else
390 mnt->mnt_flags |= MNT_READONLY; 451 mnt->mnt_flags |= MNT_READONLY;
@@ -418,7 +479,7 @@ void free_vfsmnt(struct vfsmount *mnt)
418 kfree(mnt->mnt_devname); 479 kfree(mnt->mnt_devname);
419 mnt_free_id(mnt); 480 mnt_free_id(mnt);
420#ifdef CONFIG_SMP 481#ifdef CONFIG_SMP
421 free_percpu(mnt->mnt_writers); 482 free_percpu(mnt->mnt_pcp);
422#endif 483#endif
423 kmem_cache_free(mnt_cache, mnt); 484 kmem_cache_free(mnt_cache, mnt);
424} 485}
@@ -492,6 +553,27 @@ static void __touch_mnt_namespace(struct mnt_namespace *ns)
492} 553}
493 554
494/* 555/*
556 * Clear dentry's mounted state if it has no remaining mounts.
557 * vfsmount_lock must be held for write.
558 */
559static void dentry_reset_mounted(struct vfsmount *mnt, struct dentry *dentry)
560{
561 unsigned u;
562
563 for (u = 0; u < HASH_SIZE; u++) {
564 struct vfsmount *p;
565
566 list_for_each_entry(p, &mount_hashtable[u], mnt_hash) {
567 if (p->mnt_mountpoint == dentry)
568 return;
569 }
570 }
571 spin_lock(&dentry->d_lock);
572 dentry->d_flags &= ~DCACHE_MOUNTED;
573 spin_unlock(&dentry->d_lock);
574}
575
576/*
495 * vfsmount lock must be held for write 577 * vfsmount lock must be held for write
496 */ 578 */
497static void detach_mnt(struct vfsmount *mnt, struct path *old_path) 579static void detach_mnt(struct vfsmount *mnt, struct path *old_path)
@@ -502,7 +584,7 @@ static void detach_mnt(struct vfsmount *mnt, struct path *old_path)
502 mnt->mnt_mountpoint = mnt->mnt_root; 584 mnt->mnt_mountpoint = mnt->mnt_root;
503 list_del_init(&mnt->mnt_child); 585 list_del_init(&mnt->mnt_child);
504 list_del_init(&mnt->mnt_hash); 586 list_del_init(&mnt->mnt_hash);
505 old_path->dentry->d_mounted--; 587 dentry_reset_mounted(old_path->mnt, old_path->dentry);
506} 588}
507 589
508/* 590/*
@@ -513,7 +595,9 @@ void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry,
513{ 595{
514 child_mnt->mnt_parent = mntget(mnt); 596 child_mnt->mnt_parent = mntget(mnt);
515 child_mnt->mnt_mountpoint = dget(dentry); 597 child_mnt->mnt_mountpoint = dget(dentry);
516 dentry->d_mounted++; 598 spin_lock(&dentry->d_lock);
599 dentry->d_flags |= DCACHE_MOUNTED;
600 spin_unlock(&dentry->d_lock);
517} 601}
518 602
519/* 603/*
@@ -629,9 +713,10 @@ static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root,
629 return NULL; 713 return NULL;
630} 714}
631 715
632static inline void __mntput(struct vfsmount *mnt) 716static inline void mntfree(struct vfsmount *mnt)
633{ 717{
634 struct super_block *sb = mnt->mnt_sb; 718 struct super_block *sb = mnt->mnt_sb;
719
635 /* 720 /*
636 * This probably indicates that somebody messed 721 * This probably indicates that somebody messed
637 * up a mnt_want/drop_write() pair. If this 722 * up a mnt_want/drop_write() pair. If this
@@ -639,38 +724,123 @@ static inline void __mntput(struct vfsmount *mnt)
639 * to make r/w->r/o transitions. 724 * to make r/w->r/o transitions.
640 */ 725 */
641 /* 726 /*
642 * atomic_dec_and_lock() used to deal with ->mnt_count decrements 727 * The locking used to deal with mnt_count decrement provides barriers,
643 * provides barriers, so count_mnt_writers() below is safe. AV 728 * so mnt_get_writers() below is safe.
644 */ 729 */
645 WARN_ON(count_mnt_writers(mnt)); 730 WARN_ON(mnt_get_writers(mnt));
646 fsnotify_vfsmount_delete(mnt); 731 fsnotify_vfsmount_delete(mnt);
647 dput(mnt->mnt_root); 732 dput(mnt->mnt_root);
648 free_vfsmnt(mnt); 733 free_vfsmnt(mnt);
649 deactivate_super(sb); 734 deactivate_super(sb);
650} 735}
651 736
652void mntput_no_expire(struct vfsmount *mnt) 737#ifdef CONFIG_SMP
653{ 738static inline void __mntput(struct vfsmount *mnt, int longrefs)
654repeat: 739{
655 if (atomic_add_unless(&mnt->mnt_count, -1, 1)) 740 if (!longrefs) {
656 return; 741put_again:
742 br_read_lock(vfsmount_lock);
743 if (likely(atomic_read(&mnt->mnt_longrefs))) {
744 mnt_dec_count(mnt);
745 br_read_unlock(vfsmount_lock);
746 return;
747 }
748 br_read_unlock(vfsmount_lock);
749 } else {
750 BUG_ON(!atomic_read(&mnt->mnt_longrefs));
751 if (atomic_add_unless(&mnt->mnt_longrefs, -1, 1))
752 return;
753 }
754
657 br_write_lock(vfsmount_lock); 755 br_write_lock(vfsmount_lock);
658 if (!atomic_dec_and_test(&mnt->mnt_count)) { 756 if (!longrefs)
757 mnt_dec_count(mnt);
758 else
759 atomic_dec(&mnt->mnt_longrefs);
760 if (mnt_get_count(mnt)) {
659 br_write_unlock(vfsmount_lock); 761 br_write_unlock(vfsmount_lock);
660 return; 762 return;
661 } 763 }
662 if (likely(!mnt->mnt_pinned)) { 764 if (unlikely(mnt->mnt_pinned)) {
765 mnt_add_count(mnt, mnt->mnt_pinned + 1);
766 mnt->mnt_pinned = 0;
663 br_write_unlock(vfsmount_lock); 767 br_write_unlock(vfsmount_lock);
664 __mntput(mnt); 768 acct_auto_close_mnt(mnt);
769 goto put_again;
770 }
771 br_write_unlock(vfsmount_lock);
772 mntfree(mnt);
773}
774#else
775static inline void __mntput(struct vfsmount *mnt, int longrefs)
776{
777put_again:
778 mnt_dec_count(mnt);
779 if (likely(mnt_get_count(mnt)))
665 return; 780 return;
781 br_write_lock(vfsmount_lock);
782 if (unlikely(mnt->mnt_pinned)) {
783 mnt_add_count(mnt, mnt->mnt_pinned + 1);
784 mnt->mnt_pinned = 0;
785 br_write_unlock(vfsmount_lock);
786 acct_auto_close_mnt(mnt);
787 goto put_again;
666 } 788 }
667 atomic_add(mnt->mnt_pinned + 1, &mnt->mnt_count);
668 mnt->mnt_pinned = 0;
669 br_write_unlock(vfsmount_lock); 789 br_write_unlock(vfsmount_lock);
670 acct_auto_close_mnt(mnt); 790 mntfree(mnt);
671 goto repeat; 791}
792#endif
793
794static void mntput_no_expire(struct vfsmount *mnt)
795{
796 __mntput(mnt, 0);
797}
798
799void mntput(struct vfsmount *mnt)
800{
801 if (mnt) {
802 /* avoid cacheline pingpong, hope gcc doesn't get "smart" */
803 if (unlikely(mnt->mnt_expiry_mark))
804 mnt->mnt_expiry_mark = 0;
805 __mntput(mnt, 0);
806 }
807}
808EXPORT_SYMBOL(mntput);
809
810struct vfsmount *mntget(struct vfsmount *mnt)
811{
812 if (mnt)
813 mnt_inc_count(mnt);
814 return mnt;
672} 815}
673EXPORT_SYMBOL(mntput_no_expire); 816EXPORT_SYMBOL(mntget);
817
818void mntput_long(struct vfsmount *mnt)
819{
820#ifdef CONFIG_SMP
821 if (mnt) {
822 /* avoid cacheline pingpong, hope gcc doesn't get "smart" */
823 if (unlikely(mnt->mnt_expiry_mark))
824 mnt->mnt_expiry_mark = 0;
825 __mntput(mnt, 1);
826 }
827#else
828 mntput(mnt);
829#endif
830}
831EXPORT_SYMBOL(mntput_long);
832
833struct vfsmount *mntget_long(struct vfsmount *mnt)
834{
835#ifdef CONFIG_SMP
836 if (mnt)
837 atomic_inc(&mnt->mnt_longrefs);
838 return mnt;
839#else
840 return mntget(mnt);
841#endif
842}
843EXPORT_SYMBOL(mntget_long);
674 844
675void mnt_pin(struct vfsmount *mnt) 845void mnt_pin(struct vfsmount *mnt)
676{ 846{
@@ -678,19 +848,17 @@ void mnt_pin(struct vfsmount *mnt)
678 mnt->mnt_pinned++; 848 mnt->mnt_pinned++;
679 br_write_unlock(vfsmount_lock); 849 br_write_unlock(vfsmount_lock);
680} 850}
681
682EXPORT_SYMBOL(mnt_pin); 851EXPORT_SYMBOL(mnt_pin);
683 852
684void mnt_unpin(struct vfsmount *mnt) 853void mnt_unpin(struct vfsmount *mnt)
685{ 854{
686 br_write_lock(vfsmount_lock); 855 br_write_lock(vfsmount_lock);
687 if (mnt->mnt_pinned) { 856 if (mnt->mnt_pinned) {
688 atomic_inc(&mnt->mnt_count); 857 mnt_inc_count(mnt);
689 mnt->mnt_pinned--; 858 mnt->mnt_pinned--;
690 } 859 }
691 br_write_unlock(vfsmount_lock); 860 br_write_unlock(vfsmount_lock);
692} 861}
693
694EXPORT_SYMBOL(mnt_unpin); 862EXPORT_SYMBOL(mnt_unpin);
695 863
696static inline void mangle(struct seq_file *m, const char *s) 864static inline void mangle(struct seq_file *m, const char *s)
@@ -985,12 +1153,13 @@ int may_umount_tree(struct vfsmount *mnt)
985 int minimum_refs = 0; 1153 int minimum_refs = 0;
986 struct vfsmount *p; 1154 struct vfsmount *p;
987 1155
988 br_read_lock(vfsmount_lock); 1156 /* write lock needed for mnt_get_count */
1157 br_write_lock(vfsmount_lock);
989 for (p = mnt; p; p = next_mnt(p, mnt)) { 1158 for (p = mnt; p; p = next_mnt(p, mnt)) {
990 actual_refs += atomic_read(&p->mnt_count); 1159 actual_refs += mnt_get_count(p);
991 minimum_refs += 2; 1160 minimum_refs += 2;
992 } 1161 }
993 br_read_unlock(vfsmount_lock); 1162 br_write_unlock(vfsmount_lock);
994 1163
995 if (actual_refs > minimum_refs) 1164 if (actual_refs > minimum_refs)
996 return 0; 1165 return 0;
@@ -1017,10 +1186,10 @@ int may_umount(struct vfsmount *mnt)
1017{ 1186{
1018 int ret = 1; 1187 int ret = 1;
1019 down_read(&namespace_sem); 1188 down_read(&namespace_sem);
1020 br_read_lock(vfsmount_lock); 1189 br_write_lock(vfsmount_lock);
1021 if (propagate_mount_busy(mnt, 2)) 1190 if (propagate_mount_busy(mnt, 2))
1022 ret = 0; 1191 ret = 0;
1023 br_read_unlock(vfsmount_lock); 1192 br_write_unlock(vfsmount_lock);
1024 up_read(&namespace_sem); 1193 up_read(&namespace_sem);
1025 return ret; 1194 return ret;
1026} 1195}
@@ -1047,7 +1216,7 @@ void release_mounts(struct list_head *head)
1047 dput(dentry); 1216 dput(dentry);
1048 mntput(m); 1217 mntput(m);
1049 } 1218 }
1050 mntput(mnt); 1219 mntput_long(mnt);
1051 } 1220 }
1052} 1221}
1053 1222
@@ -1073,7 +1242,7 @@ void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill)
1073 list_del_init(&p->mnt_child); 1242 list_del_init(&p->mnt_child);
1074 if (p->mnt_parent != p) { 1243 if (p->mnt_parent != p) {
1075 p->mnt_parent->mnt_ghosts++; 1244 p->mnt_parent->mnt_ghosts++;
1076 p->mnt_mountpoint->d_mounted--; 1245 dentry_reset_mounted(p->mnt_parent, p->mnt_mountpoint);
1077 } 1246 }
1078 change_mnt_propagation(p, MS_PRIVATE); 1247 change_mnt_propagation(p, MS_PRIVATE);
1079 } 1248 }
@@ -1102,8 +1271,16 @@ static int do_umount(struct vfsmount *mnt, int flags)
1102 flags & (MNT_FORCE | MNT_DETACH)) 1271 flags & (MNT_FORCE | MNT_DETACH))
1103 return -EINVAL; 1272 return -EINVAL;
1104 1273
1105 if (atomic_read(&mnt->mnt_count) != 2) 1274 /*
1275 * probably don't strictly need the lock here if we examined
1276 * all race cases, but it's a slowpath.
1277 */
1278 br_write_lock(vfsmount_lock);
1279 if (mnt_get_count(mnt) != 2) {
1280 br_write_lock(vfsmount_lock);
1106 return -EBUSY; 1281 return -EBUSY;
1282 }
1283 br_write_unlock(vfsmount_lock);
1107 1284
1108 if (!xchg(&mnt->mnt_expiry_mark, 1)) 1285 if (!xchg(&mnt->mnt_expiry_mark, 1))
1109 return -EAGAIN; 1286 return -EAGAIN;
@@ -1792,7 +1969,7 @@ int do_add_mount(struct vfsmount *newmnt, struct path *path,
1792 1969
1793unlock: 1970unlock:
1794 up_write(&namespace_sem); 1971 up_write(&namespace_sem);
1795 mntput(newmnt); 1972 mntput_long(newmnt);
1796 return err; 1973 return err;
1797} 1974}
1798 1975
@@ -2125,11 +2302,11 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
2125 if (fs) { 2302 if (fs) {
2126 if (p == fs->root.mnt) { 2303 if (p == fs->root.mnt) {
2127 rootmnt = p; 2304 rootmnt = p;
2128 fs->root.mnt = mntget(q); 2305 fs->root.mnt = mntget_long(q);
2129 } 2306 }
2130 if (p == fs->pwd.mnt) { 2307 if (p == fs->pwd.mnt) {
2131 pwdmnt = p; 2308 pwdmnt = p;
2132 fs->pwd.mnt = mntget(q); 2309 fs->pwd.mnt = mntget_long(q);
2133 } 2310 }
2134 } 2311 }
2135 p = next_mnt(p, mnt_ns->root); 2312 p = next_mnt(p, mnt_ns->root);
@@ -2138,9 +2315,9 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
2138 up_write(&namespace_sem); 2315 up_write(&namespace_sem);
2139 2316
2140 if (rootmnt) 2317 if (rootmnt)
2141 mntput(rootmnt); 2318 mntput_long(rootmnt);
2142 if (pwdmnt) 2319 if (pwdmnt)
2143 mntput(pwdmnt); 2320 mntput_long(pwdmnt);
2144 2321
2145 return new_ns; 2322 return new_ns;
2146} 2323}
@@ -2327,6 +2504,7 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
2327 touch_mnt_namespace(current->nsproxy->mnt_ns); 2504 touch_mnt_namespace(current->nsproxy->mnt_ns);
2328 br_write_unlock(vfsmount_lock); 2505 br_write_unlock(vfsmount_lock);
2329 chroot_fs_refs(&root, &new); 2506 chroot_fs_refs(&root, &new);
2507
2330 error = 0; 2508 error = 0;
2331 path_put(&root_parent); 2509 path_put(&root_parent);
2332 path_put(&parent_path); 2510 path_put(&parent_path);
@@ -2353,6 +2531,7 @@ static void __init init_mount_tree(void)
2353 mnt = do_kern_mount("rootfs", 0, "rootfs", NULL); 2531 mnt = do_kern_mount("rootfs", 0, "rootfs", NULL);
2354 if (IS_ERR(mnt)) 2532 if (IS_ERR(mnt))
2355 panic("Can't create rootfs"); 2533 panic("Can't create rootfs");
2534
2356 ns = create_mnt_ns(mnt); 2535 ns = create_mnt_ns(mnt);
2357 if (IS_ERR(ns)) 2536 if (IS_ERR(ns))
2358 panic("Can't allocate initial namespace"); 2537 panic("Can't allocate initial namespace");
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
index f22b12e7d337..28f136d4aaec 100644
--- a/fs/ncpfs/dir.c
+++ b/fs/ncpfs/dir.c
@@ -17,6 +17,7 @@
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/vmalloc.h> 18#include <linux/vmalloc.h>
19#include <linux/mm.h> 19#include <linux/mm.h>
20#include <linux/namei.h>
20#include <asm/uaccess.h> 21#include <asm/uaccess.h>
21#include <asm/byteorder.h> 22#include <asm/byteorder.h>
22 23
@@ -74,9 +75,12 @@ const struct inode_operations ncp_dir_inode_operations =
74 * Dentry operations routines 75 * Dentry operations routines
75 */ 76 */
76static int ncp_lookup_validate(struct dentry *, struct nameidata *); 77static int ncp_lookup_validate(struct dentry *, struct nameidata *);
77static int ncp_hash_dentry(struct dentry *, struct qstr *); 78static int ncp_hash_dentry(const struct dentry *, const struct inode *,
78static int ncp_compare_dentry (struct dentry *, struct qstr *, struct qstr *); 79 struct qstr *);
79static int ncp_delete_dentry(struct dentry *); 80static int ncp_compare_dentry(const struct dentry *, const struct inode *,
81 const struct dentry *, const struct inode *,
82 unsigned int, const char *, const struct qstr *);
83static int ncp_delete_dentry(const struct dentry *);
80 84
81static const struct dentry_operations ncp_dentry_operations = 85static const struct dentry_operations ncp_dentry_operations =
82{ 86{
@@ -113,10 +117,10 @@ static inline int ncp_preserve_entry_case(struct inode *i, __u32 nscreator)
113 117
114#define ncp_preserve_case(i) (ncp_namespace(i) != NW_NS_DOS) 118#define ncp_preserve_case(i) (ncp_namespace(i) != NW_NS_DOS)
115 119
116static inline int ncp_case_sensitive(struct dentry *dentry) 120static inline int ncp_case_sensitive(const struct inode *i)
117{ 121{
118#ifdef CONFIG_NCPFS_NFS_NS 122#ifdef CONFIG_NCPFS_NFS_NS
119 return ncp_namespace(dentry->d_inode) == NW_NS_NFS; 123 return ncp_namespace(i) == NW_NS_NFS;
120#else 124#else
121 return 0; 125 return 0;
122#endif /* CONFIG_NCPFS_NFS_NS */ 126#endif /* CONFIG_NCPFS_NFS_NS */
@@ -127,14 +131,16 @@ static inline int ncp_case_sensitive(struct dentry *dentry)
127 * is case-sensitive. 131 * is case-sensitive.
128 */ 132 */
129static int 133static int
130ncp_hash_dentry(struct dentry *dentry, struct qstr *this) 134ncp_hash_dentry(const struct dentry *dentry, const struct inode *inode,
135 struct qstr *this)
131{ 136{
132 if (!ncp_case_sensitive(dentry)) { 137 if (!ncp_case_sensitive(inode)) {
138 struct super_block *sb = dentry->d_sb;
133 struct nls_table *t; 139 struct nls_table *t;
134 unsigned long hash; 140 unsigned long hash;
135 int i; 141 int i;
136 142
137 t = NCP_IO_TABLE(dentry); 143 t = NCP_IO_TABLE(sb);
138 hash = init_name_hash(); 144 hash = init_name_hash();
139 for (i=0; i<this->len ; i++) 145 for (i=0; i<this->len ; i++)
140 hash = partial_name_hash(ncp_tolower(t, this->name[i]), 146 hash = partial_name_hash(ncp_tolower(t, this->name[i]),
@@ -145,15 +151,17 @@ ncp_hash_dentry(struct dentry *dentry, struct qstr *this)
145} 151}
146 152
147static int 153static int
148ncp_compare_dentry(struct dentry *dentry, struct qstr *a, struct qstr *b) 154ncp_compare_dentry(const struct dentry *parent, const struct inode *pinode,
155 const struct dentry *dentry, const struct inode *inode,
156 unsigned int len, const char *str, const struct qstr *name)
149{ 157{
150 if (a->len != b->len) 158 if (len != name->len)
151 return 1; 159 return 1;
152 160
153 if (ncp_case_sensitive(dentry)) 161 if (ncp_case_sensitive(pinode))
154 return strncmp(a->name, b->name, a->len); 162 return strncmp(str, name->name, len);
155 163
156 return ncp_strnicmp(NCP_IO_TABLE(dentry), a->name, b->name, a->len); 164 return ncp_strnicmp(NCP_IO_TABLE(pinode->i_sb), str, name->name, len);
157} 165}
158 166
159/* 167/*
@@ -162,7 +170,7 @@ ncp_compare_dentry(struct dentry *dentry, struct qstr *a, struct qstr *b)
162 * Closing files can be safely postponed until iput() - it's done there anyway. 170 * Closing files can be safely postponed until iput() - it's done there anyway.
163 */ 171 */
164static int 172static int
165ncp_delete_dentry(struct dentry * dentry) 173ncp_delete_dentry(const struct dentry * dentry)
166{ 174{
167 struct inode *inode = dentry->d_inode; 175 struct inode *inode = dentry->d_inode;
168 176
@@ -301,6 +309,9 @@ ncp_lookup_validate(struct dentry *dentry, struct nameidata *nd)
301 int res, val = 0, len; 309 int res, val = 0, len;
302 __u8 __name[NCP_MAXPATHLEN + 1]; 310 __u8 __name[NCP_MAXPATHLEN + 1];
303 311
312 if (nd->flags & LOOKUP_RCU)
313 return -ECHILD;
314
304 parent = dget_parent(dentry); 315 parent = dget_parent(dentry);
305 dir = parent->d_inode; 316 dir = parent->d_inode;
306 317
@@ -384,21 +395,21 @@ ncp_dget_fpos(struct dentry *dentry, struct dentry *parent, unsigned long fpos)
384 } 395 }
385 396
386 /* If a pointer is invalid, we search the dentry. */ 397 /* If a pointer is invalid, we search the dentry. */
387 spin_lock(&dcache_lock); 398 spin_lock(&parent->d_lock);
388 next = parent->d_subdirs.next; 399 next = parent->d_subdirs.next;
389 while (next != &parent->d_subdirs) { 400 while (next != &parent->d_subdirs) {
390 dent = list_entry(next, struct dentry, d_u.d_child); 401 dent = list_entry(next, struct dentry, d_u.d_child);
391 if ((unsigned long)dent->d_fsdata == fpos) { 402 if ((unsigned long)dent->d_fsdata == fpos) {
392 if (dent->d_inode) 403 if (dent->d_inode)
393 dget_locked(dent); 404 dget(dent);
394 else 405 else
395 dent = NULL; 406 dent = NULL;
396 spin_unlock(&dcache_lock); 407 spin_unlock(&parent->d_lock);
397 goto out; 408 goto out;
398 } 409 }
399 next = next->next; 410 next = next->next;
400 } 411 }
401 spin_unlock(&dcache_lock); 412 spin_unlock(&parent->d_lock);
402 return NULL; 413 return NULL;
403 414
404out: 415out:
@@ -592,7 +603,7 @@ ncp_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
592 qname.hash = full_name_hash(qname.name, qname.len); 603 qname.hash = full_name_hash(qname.name, qname.len);
593 604
594 if (dentry->d_op && dentry->d_op->d_hash) 605 if (dentry->d_op && dentry->d_op->d_hash)
595 if (dentry->d_op->d_hash(dentry, &qname) != 0) 606 if (dentry->d_op->d_hash(dentry, dentry->d_inode, &qname) != 0)
596 goto end_advance; 607 goto end_advance;
597 608
598 newdent = d_lookup(dentry, &qname); 609 newdent = d_lookup(dentry, &qname);
@@ -611,35 +622,12 @@ ncp_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
611 shrink_dcache_parent(newdent); 622 shrink_dcache_parent(newdent);
612 623
613 /* 624 /*
614 * It is not as dangerous as it looks. NetWare's OS2 namespace is 625 * NetWare's OS2 namespace is case preserving yet case
615 * case preserving yet case insensitive. So we update dentry's name 626 * insensitive. So we update dentry's name as received from
616 * as received from server. We found dentry via d_lookup with our 627 * server. Parent dir's i_mutex is locked because we're in
617 * hash, so we know that hash does not change, and so replacing name 628 * readdir.
618 * should be reasonably safe.
619 */ 629 */
620 if (qname.len == newdent->d_name.len && 630 dentry_update_name_case(newdent, &qname);
621 memcmp(newdent->d_name.name, qname.name, newdent->d_name.len)) {
622 struct inode *inode = newdent->d_inode;
623
624 /*
625 * Inside ncpfs all uses of d_name are either for debugging,
626 * or on functions which acquire inode mutex (mknod, creat,
627 * lookup). So grab i_mutex here, to be sure. d_path
628 * uses dcache_lock when generating path, so we should too.
629 * And finally d_compare is protected by dentry's d_lock, so
630 * here we go.
631 */
632 if (inode)
633 mutex_lock(&inode->i_mutex);
634 spin_lock(&dcache_lock);
635 spin_lock(&newdent->d_lock);
636 memcpy((char *) newdent->d_name.name, qname.name,
637 newdent->d_name.len);
638 spin_unlock(&newdent->d_lock);
639 spin_unlock(&dcache_lock);
640 if (inode)
641 mutex_unlock(&inode->i_mutex);
642 }
643 } 631 }
644 632
645 if (!newdent->d_inode) { 633 if (!newdent->d_inode) {
@@ -649,7 +637,7 @@ ncp_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
649 entry->ino = iunique(dir->i_sb, 2); 637 entry->ino = iunique(dir->i_sb, 2);
650 inode = ncp_iget(dir->i_sb, entry); 638 inode = ncp_iget(dir->i_sb, entry);
651 if (inode) { 639 if (inode) {
652 newdent->d_op = &ncp_dentry_operations; 640 d_set_d_op(newdent, &ncp_dentry_operations);
653 d_instantiate(newdent, inode); 641 d_instantiate(newdent, inode);
654 if (!hashed) 642 if (!hashed)
655 d_rehash(newdent); 643 d_rehash(newdent);
@@ -657,7 +645,7 @@ ncp_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
657 } else { 645 } else {
658 struct inode *inode = newdent->d_inode; 646 struct inode *inode = newdent->d_inode;
659 647
660 mutex_lock(&inode->i_mutex); 648 mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
661 ncp_update_inode2(inode, entry); 649 ncp_update_inode2(inode, entry);
662 mutex_unlock(&inode->i_mutex); 650 mutex_unlock(&inode->i_mutex);
663 } 651 }
@@ -905,7 +893,7 @@ static struct dentry *ncp_lookup(struct inode *dir, struct dentry *dentry, struc
905 if (inode) { 893 if (inode) {
906 ncp_new_dentry(dentry); 894 ncp_new_dentry(dentry);
907add_entry: 895add_entry:
908 dentry->d_op = &ncp_dentry_operations; 896 d_set_d_op(dentry, &ncp_dentry_operations);
909 d_add(dentry, inode); 897 d_add(dentry, inode);
910 error = 0; 898 error = 0;
911 } 899 }
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index 8fb93b604e73..9531c052d7a4 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -29,6 +29,7 @@
29#include <linux/vfs.h> 29#include <linux/vfs.h>
30#include <linux/mount.h> 30#include <linux/mount.h>
31#include <linux/seq_file.h> 31#include <linux/seq_file.h>
32#include <linux/namei.h>
32 33
33#include <linux/ncp_fs.h> 34#include <linux/ncp_fs.h>
34 35
@@ -58,11 +59,18 @@ static struct inode *ncp_alloc_inode(struct super_block *sb)
58 return &ei->vfs_inode; 59 return &ei->vfs_inode;
59} 60}
60 61
61static void ncp_destroy_inode(struct inode *inode) 62static void ncp_i_callback(struct rcu_head *head)
62{ 63{
64 struct inode *inode = container_of(head, struct inode, i_rcu);
65 INIT_LIST_HEAD(&inode->i_dentry);
63 kmem_cache_free(ncp_inode_cachep, NCP_FINFO(inode)); 66 kmem_cache_free(ncp_inode_cachep, NCP_FINFO(inode));
64} 67}
65 68
69static void ncp_destroy_inode(struct inode *inode)
70{
71 call_rcu(&inode->i_rcu, ncp_i_callback);
72}
73
66static void init_once(void *foo) 74static void init_once(void *foo)
67{ 75{
68 struct ncp_inode_info *ei = (struct ncp_inode_info *) foo; 76 struct ncp_inode_info *ei = (struct ncp_inode_info *) foo;
@@ -710,7 +718,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
710 sb->s_root = d_alloc_root(root_inode); 718 sb->s_root = d_alloc_root(root_inode);
711 if (!sb->s_root) 719 if (!sb->s_root)
712 goto out_no_root; 720 goto out_no_root;
713 sb->s_root->d_op = &ncp_root_dentry_operations; 721 d_set_d_op(sb->s_root, &ncp_root_dentry_operations);
714 return 0; 722 return 0;
715 723
716out_no_root: 724out_no_root:
diff --git a/fs/ncpfs/ncplib_kernel.h b/fs/ncpfs/ncplib_kernel.h
index 3c57eca634ce..1220df75ff22 100644
--- a/fs/ncpfs/ncplib_kernel.h
+++ b/fs/ncpfs/ncplib_kernel.h
@@ -135,7 +135,7 @@ int ncp__vol2io(struct ncp_server *, unsigned char *, unsigned int *,
135 const unsigned char *, unsigned int, int); 135 const unsigned char *, unsigned int, int);
136 136
137#define NCP_ESC ':' 137#define NCP_ESC ':'
138#define NCP_IO_TABLE(dentry) (NCP_SERVER((dentry)->d_inode)->nls_io) 138#define NCP_IO_TABLE(sb) (NCP_SBP(sb)->nls_io)
139#define ncp_tolower(t, c) nls_tolower(t, c) 139#define ncp_tolower(t, c) nls_tolower(t, c)
140#define ncp_toupper(t, c) nls_toupper(t, c) 140#define ncp_toupper(t, c) nls_toupper(t, c)
141#define ncp_strnicmp(t, s1, s2, len) \ 141#define ncp_strnicmp(t, s1, s2, len) \
@@ -150,15 +150,15 @@ int ncp__io2vol(unsigned char *, unsigned int *,
150int ncp__vol2io(unsigned char *, unsigned int *, 150int ncp__vol2io(unsigned char *, unsigned int *,
151 const unsigned char *, unsigned int, int); 151 const unsigned char *, unsigned int, int);
152 152
153#define NCP_IO_TABLE(dentry) NULL 153#define NCP_IO_TABLE(sb) NULL
154#define ncp_tolower(t, c) tolower(c) 154#define ncp_tolower(t, c) tolower(c)
155#define ncp_toupper(t, c) toupper(c) 155#define ncp_toupper(t, c) toupper(c)
156#define ncp_io2vol(S,m,i,n,k,U) ncp__io2vol(m,i,n,k,U) 156#define ncp_io2vol(S,m,i,n,k,U) ncp__io2vol(m,i,n,k,U)
157#define ncp_vol2io(S,m,i,n,k,U) ncp__vol2io(m,i,n,k,U) 157#define ncp_vol2io(S,m,i,n,k,U) ncp__vol2io(m,i,n,k,U)
158 158
159 159
160static inline int ncp_strnicmp(struct nls_table *t, const unsigned char *s1, 160static inline int ncp_strnicmp(const struct nls_table *t,
161 const unsigned char *s2, int len) 161 const unsigned char *s1, const unsigned char *s2, int len)
162{ 162{
163 while (len--) { 163 while (len--) {
164 if (tolower(*s1++) != tolower(*s2++)) 164 if (tolower(*s1++) != tolower(*s2++))
@@ -193,7 +193,7 @@ ncp_renew_dentries(struct dentry *parent)
193 struct list_head *next; 193 struct list_head *next;
194 struct dentry *dentry; 194 struct dentry *dentry;
195 195
196 spin_lock(&dcache_lock); 196 spin_lock(&parent->d_lock);
197 next = parent->d_subdirs.next; 197 next = parent->d_subdirs.next;
198 while (next != &parent->d_subdirs) { 198 while (next != &parent->d_subdirs) {
199 dentry = list_entry(next, struct dentry, d_u.d_child); 199 dentry = list_entry(next, struct dentry, d_u.d_child);
@@ -205,7 +205,7 @@ ncp_renew_dentries(struct dentry *parent)
205 205
206 next = next->next; 206 next = next->next;
207 } 207 }
208 spin_unlock(&dcache_lock); 208 spin_unlock(&parent->d_lock);
209} 209}
210 210
211static inline void 211static inline void
@@ -215,7 +215,7 @@ ncp_invalidate_dircache_entries(struct dentry *parent)
215 struct list_head *next; 215 struct list_head *next;
216 struct dentry *dentry; 216 struct dentry *dentry;
217 217
218 spin_lock(&dcache_lock); 218 spin_lock(&parent->d_lock);
219 next = parent->d_subdirs.next; 219 next = parent->d_subdirs.next;
220 while (next != &parent->d_subdirs) { 220 while (next != &parent->d_subdirs) {
221 dentry = list_entry(next, struct dentry, d_u.d_child); 221 dentry = list_entry(next, struct dentry, d_u.d_child);
@@ -223,7 +223,7 @@ ncp_invalidate_dircache_entries(struct dentry *parent)
223 ncp_age_dentry(server, dentry); 223 ncp_age_dentry(server, dentry);
224 next = next->next; 224 next = next->next;
225 } 225 }
226 spin_unlock(&dcache_lock); 226 spin_unlock(&parent->d_lock);
227} 227}
228 228
229struct ncp_cache_head { 229struct ncp_cache_head {
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 996dd8989a91..d33da530097a 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -438,7 +438,7 @@ void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry)
438 if (dentry == NULL) 438 if (dentry == NULL)
439 return; 439 return;
440 440
441 dentry->d_op = NFS_PROTO(dir)->dentry_ops; 441 d_set_d_op(dentry, NFS_PROTO(dir)->dentry_ops);
442 inode = nfs_fhget(dentry->d_sb, entry->fh, entry->fattr); 442 inode = nfs_fhget(dentry->d_sb, entry->fh, entry->fattr);
443 if (IS_ERR(inode)) 443 if (IS_ERR(inode))
444 goto out; 444 goto out;
@@ -938,7 +938,8 @@ static int nfs_check_verifier(struct inode *dir, struct dentry *dentry)
938 * component of the path. 938 * component of the path.
939 * We check for this using LOOKUP_CONTINUE and LOOKUP_PARENT. 939 * We check for this using LOOKUP_CONTINUE and LOOKUP_PARENT.
940 */ 940 */
941static inline unsigned int nfs_lookup_check_intent(struct nameidata *nd, unsigned int mask) 941static inline unsigned int nfs_lookup_check_intent(struct nameidata *nd,
942 unsigned int mask)
942{ 943{
943 if (nd->flags & (LOOKUP_CONTINUE|LOOKUP_PARENT)) 944 if (nd->flags & (LOOKUP_CONTINUE|LOOKUP_PARENT))
944 return 0; 945 return 0;
@@ -1018,7 +1019,7 @@ int nfs_neg_need_reval(struct inode *dir, struct dentry *dentry,
1018 * If the parent directory is seen to have changed, we throw out the 1019 * If the parent directory is seen to have changed, we throw out the
1019 * cached dentry and do a new lookup. 1020 * cached dentry and do a new lookup.
1020 */ 1021 */
1021static int nfs_lookup_revalidate(struct dentry * dentry, struct nameidata *nd) 1022static int nfs_lookup_revalidate(struct dentry *dentry, struct nameidata *nd)
1022{ 1023{
1023 struct inode *dir; 1024 struct inode *dir;
1024 struct inode *inode; 1025 struct inode *inode;
@@ -1027,6 +1028,9 @@ static int nfs_lookup_revalidate(struct dentry * dentry, struct nameidata *nd)
1027 struct nfs_fattr *fattr = NULL; 1028 struct nfs_fattr *fattr = NULL;
1028 int error; 1029 int error;
1029 1030
1031 if (nd->flags & LOOKUP_RCU)
1032 return -ECHILD;
1033
1030 parent = dget_parent(dentry); 1034 parent = dget_parent(dentry);
1031 dir = parent->d_inode; 1035 dir = parent->d_inode;
1032 nfs_inc_stats(dir, NFSIOS_DENTRYREVALIDATE); 1036 nfs_inc_stats(dir, NFSIOS_DENTRYREVALIDATE);
@@ -1117,7 +1121,7 @@ out_error:
1117/* 1121/*
1118 * This is called from dput() when d_count is going to 0. 1122 * This is called from dput() when d_count is going to 0.
1119 */ 1123 */
1120static int nfs_dentry_delete(struct dentry *dentry) 1124static int nfs_dentry_delete(const struct dentry *dentry)
1121{ 1125{
1122 dfprintk(VFS, "NFS: dentry_delete(%s/%s, %x)\n", 1126 dfprintk(VFS, "NFS: dentry_delete(%s/%s, %x)\n",
1123 dentry->d_parent->d_name.name, dentry->d_name.name, 1127 dentry->d_parent->d_name.name, dentry->d_name.name,
@@ -1188,7 +1192,7 @@ static struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, stru
1188 if (dentry->d_name.len > NFS_SERVER(dir)->namelen) 1192 if (dentry->d_name.len > NFS_SERVER(dir)->namelen)
1189 goto out; 1193 goto out;
1190 1194
1191 dentry->d_op = NFS_PROTO(dir)->dentry_ops; 1195 d_set_d_op(dentry, NFS_PROTO(dir)->dentry_ops);
1192 1196
1193 /* 1197 /*
1194 * If we're doing an exclusive create, optimize away the lookup 1198 * If we're doing an exclusive create, optimize away the lookup
@@ -1333,7 +1337,7 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry
1333 res = ERR_PTR(-ENAMETOOLONG); 1337 res = ERR_PTR(-ENAMETOOLONG);
1334 goto out; 1338 goto out;
1335 } 1339 }
1336 dentry->d_op = NFS_PROTO(dir)->dentry_ops; 1340 d_set_d_op(dentry, NFS_PROTO(dir)->dentry_ops);
1337 1341
1338 /* Let vfs_create() deal with O_EXCL. Instantiate, but don't hash 1342 /* Let vfs_create() deal with O_EXCL. Instantiate, but don't hash
1339 * the dentry. */ 1343 * the dentry. */
@@ -1718,11 +1722,9 @@ static int nfs_unlink(struct inode *dir, struct dentry *dentry)
1718 dfprintk(VFS, "NFS: unlink(%s/%ld, %s)\n", dir->i_sb->s_id, 1722 dfprintk(VFS, "NFS: unlink(%s/%ld, %s)\n", dir->i_sb->s_id,
1719 dir->i_ino, dentry->d_name.name); 1723 dir->i_ino, dentry->d_name.name);
1720 1724
1721 spin_lock(&dcache_lock);
1722 spin_lock(&dentry->d_lock); 1725 spin_lock(&dentry->d_lock);
1723 if (atomic_read(&dentry->d_count) > 1) { 1726 if (dentry->d_count > 1) {
1724 spin_unlock(&dentry->d_lock); 1727 spin_unlock(&dentry->d_lock);
1725 spin_unlock(&dcache_lock);
1726 /* Start asynchronous writeout of the inode */ 1728 /* Start asynchronous writeout of the inode */
1727 write_inode_now(dentry->d_inode, 0); 1729 write_inode_now(dentry->d_inode, 0);
1728 error = nfs_sillyrename(dir, dentry); 1730 error = nfs_sillyrename(dir, dentry);
@@ -1733,7 +1735,6 @@ static int nfs_unlink(struct inode *dir, struct dentry *dentry)
1733 need_rehash = 1; 1735 need_rehash = 1;
1734 } 1736 }
1735 spin_unlock(&dentry->d_lock); 1737 spin_unlock(&dentry->d_lock);
1736 spin_unlock(&dcache_lock);
1737 error = nfs_safe_remove(dentry); 1738 error = nfs_safe_remove(dentry);
1738 if (!error || error == -ENOENT) { 1739 if (!error || error == -ENOENT) {
1739 nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); 1740 nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
@@ -1868,7 +1869,7 @@ static int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1868 dfprintk(VFS, "NFS: rename(%s/%s -> %s/%s, ct=%d)\n", 1869 dfprintk(VFS, "NFS: rename(%s/%s -> %s/%s, ct=%d)\n",
1869 old_dentry->d_parent->d_name.name, old_dentry->d_name.name, 1870 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
1870 new_dentry->d_parent->d_name.name, new_dentry->d_name.name, 1871 new_dentry->d_parent->d_name.name, new_dentry->d_name.name,
1871 atomic_read(&new_dentry->d_count)); 1872 new_dentry->d_count);
1872 1873
1873 /* 1874 /*
1874 * For non-directories, check whether the target is busy and if so, 1875 * For non-directories, check whether the target is busy and if so,
@@ -1886,7 +1887,7 @@ static int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1886 rehash = new_dentry; 1887 rehash = new_dentry;
1887 } 1888 }
1888 1889
1889 if (atomic_read(&new_dentry->d_count) > 2) { 1890 if (new_dentry->d_count > 2) {
1890 int err; 1891 int err;
1891 1892
1892 /* copy the target dentry's name */ 1893 /* copy the target dentry's name */
@@ -2188,11 +2189,14 @@ int nfs_may_open(struct inode *inode, struct rpc_cred *cred, int openflags)
2188 return nfs_do_access(inode, cred, nfs_open_permission_mask(openflags)); 2189 return nfs_do_access(inode, cred, nfs_open_permission_mask(openflags));
2189} 2190}
2190 2191
2191int nfs_permission(struct inode *inode, int mask) 2192int nfs_permission(struct inode *inode, int mask, unsigned int flags)
2192{ 2193{
2193 struct rpc_cred *cred; 2194 struct rpc_cred *cred;
2194 int res = 0; 2195 int res = 0;
2195 2196
2197 if (flags & IPERM_FLAG_RCU)
2198 return -ECHILD;
2199
2196 nfs_inc_stats(inode, NFSIOS_VFSACCESS); 2200 nfs_inc_stats(inode, NFSIOS_VFSACCESS);
2197 2201
2198 if ((mask & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0) 2202 if ((mask & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0)
@@ -2240,7 +2244,7 @@ out:
2240out_notsup: 2244out_notsup:
2241 res = nfs_revalidate_inode(NFS_SERVER(inode), inode); 2245 res = nfs_revalidate_inode(NFS_SERVER(inode), inode);
2242 if (res == 0) 2246 if (res == 0)
2243 res = generic_permission(inode, mask, NULL); 2247 res = generic_permission(inode, mask, flags, NULL);
2244 goto out; 2248 goto out;
2245} 2249}
2246 2250
diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
index ac7b814ce162..5596c6a2881e 100644
--- a/fs/nfs/getroot.c
+++ b/fs/nfs/getroot.c
@@ -63,9 +63,11 @@ static int nfs_superblock_set_dummy_root(struct super_block *sb, struct inode *i
63 * This again causes shrink_dcache_for_umount_subtree() to 63 * This again causes shrink_dcache_for_umount_subtree() to
64 * Oops, since the test for IS_ROOT() will fail. 64 * Oops, since the test for IS_ROOT() will fail.
65 */ 65 */
66 spin_lock(&dcache_lock); 66 spin_lock(&sb->s_root->d_inode->i_lock);
67 spin_lock(&sb->s_root->d_lock);
67 list_del_init(&sb->s_root->d_alias); 68 list_del_init(&sb->s_root->d_alias);
68 spin_unlock(&dcache_lock); 69 spin_unlock(&sb->s_root->d_lock);
70 spin_unlock(&sb->s_root->d_inode->i_lock);
69 } 71 }
70 return 0; 72 return 0;
71} 73}
@@ -119,7 +121,7 @@ struct dentry *nfs_get_root(struct super_block *sb, struct nfs_fh *mntfh)
119 security_d_instantiate(ret, inode); 121 security_d_instantiate(ret, inode);
120 122
121 if (ret->d_op == NULL) 123 if (ret->d_op == NULL)
122 ret->d_op = server->nfs_client->rpc_ops->dentry_ops; 124 d_set_d_op(ret, server->nfs_client->rpc_ops->dentry_ops);
123out: 125out:
124 nfs_free_fattr(fsinfo.fattr); 126 nfs_free_fattr(fsinfo.fattr);
125 return ret; 127 return ret;
@@ -226,7 +228,7 @@ struct dentry *nfs4_get_root(struct super_block *sb, struct nfs_fh *mntfh)
226 security_d_instantiate(ret, inode); 228 security_d_instantiate(ret, inode);
227 229
228 if (ret->d_op == NULL) 230 if (ret->d_op == NULL)
229 ret->d_op = server->nfs_client->rpc_ops->dentry_ops; 231 d_set_d_op(ret, server->nfs_client->rpc_ops->dentry_ops);
230 232
231out: 233out:
232 nfs_free_fattr(fattr); 234 nfs_free_fattr(fattr);
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index e67e31c73416..017daa3bed38 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1438,11 +1438,18 @@ struct inode *nfs_alloc_inode(struct super_block *sb)
1438 return &nfsi->vfs_inode; 1438 return &nfsi->vfs_inode;
1439} 1439}
1440 1440
1441void nfs_destroy_inode(struct inode *inode) 1441static void nfs_i_callback(struct rcu_head *head)
1442{ 1442{
1443 struct inode *inode = container_of(head, struct inode, i_rcu);
1444 INIT_LIST_HEAD(&inode->i_dentry);
1443 kmem_cache_free(nfs_inode_cachep, NFS_I(inode)); 1445 kmem_cache_free(nfs_inode_cachep, NFS_I(inode));
1444} 1446}
1445 1447
1448void nfs_destroy_inode(struct inode *inode)
1449{
1450 call_rcu(&inode->i_rcu, nfs_i_callback);
1451}
1452
1446static inline void nfs4_init_once(struct nfs_inode *nfsi) 1453static inline void nfs4_init_once(struct nfs_inode *nfsi)
1447{ 1454{
1448#ifdef CONFIG_NFS_V4 1455#ifdef CONFIG_NFS_V4
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index db6aa3673cf3..74aaf3963c10 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -49,12 +49,17 @@ char *nfs_path(const char *base,
49 const struct dentry *dentry, 49 const struct dentry *dentry,
50 char *buffer, ssize_t buflen) 50 char *buffer, ssize_t buflen)
51{ 51{
52 char *end = buffer+buflen; 52 char *end;
53 int namelen; 53 int namelen;
54 unsigned seq;
54 55
56rename_retry:
57 end = buffer+buflen;
55 *--end = '\0'; 58 *--end = '\0';
56 buflen--; 59 buflen--;
57 spin_lock(&dcache_lock); 60
61 seq = read_seqbegin(&rename_lock);
62 rcu_read_lock();
58 while (!IS_ROOT(dentry) && dentry != droot) { 63 while (!IS_ROOT(dentry) && dentry != droot) {
59 namelen = dentry->d_name.len; 64 namelen = dentry->d_name.len;
60 buflen -= namelen + 1; 65 buflen -= namelen + 1;
@@ -65,7 +70,9 @@ char *nfs_path(const char *base,
65 *--end = '/'; 70 *--end = '/';
66 dentry = dentry->d_parent; 71 dentry = dentry->d_parent;
67 } 72 }
68 spin_unlock(&dcache_lock); 73 rcu_read_unlock();
74 if (read_seqretry(&rename_lock, seq))
75 goto rename_retry;
69 if (*end != '/') { 76 if (*end != '/') {
70 if (--buflen < 0) 77 if (--buflen < 0)
71 goto Elong; 78 goto Elong;
@@ -82,7 +89,9 @@ char *nfs_path(const char *base,
82 memcpy(end, base, namelen); 89 memcpy(end, base, namelen);
83 return end; 90 return end;
84Elong_unlock: 91Elong_unlock:
85 spin_unlock(&dcache_lock); 92 rcu_read_unlock();
93 if (read_seqretry(&rename_lock, seq))
94 goto rename_retry;
86Elong: 95Elong:
87 return ERR_PTR(-ENAMETOOLONG); 96 return ERR_PTR(-ENAMETOOLONG);
88} 97}
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
index 7bdec8531400..8fe9eb47a97f 100644
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -496,7 +496,7 @@ nfs_sillyrename(struct inode *dir, struct dentry *dentry)
496 496
497 dfprintk(VFS, "NFS: silly-rename(%s/%s, ct=%d)\n", 497 dfprintk(VFS, "NFS: silly-rename(%s/%s, ct=%d)\n",
498 dentry->d_parent->d_name.name, dentry->d_name.name, 498 dentry->d_parent->d_name.name, dentry->d_name.name,
499 atomic_read(&dentry->d_count)); 499 dentry->d_count);
500 nfs_inc_stats(dir, NFSIOS_SILLYRENAME); 500 nfs_inc_stats(dir, NFSIOS_SILLYRENAME);
501 501
502 /* 502 /*
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 184938fcff04..3a359023c9f7 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -1756,8 +1756,7 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
1756 goto out_dput_new; 1756 goto out_dput_new;
1757 1757
1758 if (svc_msnfs(ffhp) && 1758 if (svc_msnfs(ffhp) &&
1759 ((atomic_read(&odentry->d_count) > 1) 1759 ((odentry->d_count > 1) || (ndentry->d_count > 1))) {
1760 || (atomic_read(&ndentry->d_count) > 1))) {
1761 host_err = -EPERM; 1760 host_err = -EPERM;
1762 goto out_dput_new; 1761 goto out_dput_new;
1763 } 1762 }
@@ -1843,7 +1842,7 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
1843 if (type != S_IFDIR) { /* It's UNLINK */ 1842 if (type != S_IFDIR) { /* It's UNLINK */
1844#ifdef MSNFS 1843#ifdef MSNFS
1845 if ((fhp->fh_export->ex_flags & NFSEXP_MSNFS) && 1844 if ((fhp->fh_export->ex_flags & NFSEXP_MSNFS) &&
1846 (atomic_read(&rdentry->d_count) > 1)) { 1845 (rdentry->d_count > 1)) {
1847 host_err = -EPERM; 1846 host_err = -EPERM;
1848 } else 1847 } else
1849#endif 1848#endif
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 71d4bc8464e0..77b48c8fab17 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -785,15 +785,19 @@ out_err:
785 return err; 785 return err;
786} 786}
787 787
788int nilfs_permission(struct inode *inode, int mask) 788int nilfs_permission(struct inode *inode, int mask, unsigned int flags)
789{ 789{
790 struct nilfs_root *root = NILFS_I(inode)->i_root; 790 struct nilfs_root *root;
791
792 if (flags & IPERM_FLAG_RCU)
793 return -ECHILD;
791 794
795 root = NILFS_I(inode)->i_root;
792 if ((mask & MAY_WRITE) && root && 796 if ((mask & MAY_WRITE) && root &&
793 root->cno != NILFS_CPTREE_CURRENT_CNO) 797 root->cno != NILFS_CPTREE_CURRENT_CNO)
794 return -EROFS; /* snapshot is not writable */ 798 return -EROFS; /* snapshot is not writable */
795 799
796 return generic_permission(inode, mask, NULL); 800 return generic_permission(inode, mask, flags, NULL);
797} 801}
798 802
799int nilfs_load_inode_block(struct nilfs_sb_info *sbi, struct inode *inode, 803int nilfs_load_inode_block(struct nilfs_sb_info *sbi, struct inode *inode,
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
index f7560da5a567..0ca98823db59 100644
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -256,7 +256,7 @@ extern void nilfs_update_inode(struct inode *, struct buffer_head *);
256extern void nilfs_truncate(struct inode *); 256extern void nilfs_truncate(struct inode *);
257extern void nilfs_evict_inode(struct inode *); 257extern void nilfs_evict_inode(struct inode *);
258extern int nilfs_setattr(struct dentry *, struct iattr *); 258extern int nilfs_setattr(struct dentry *, struct iattr *);
259int nilfs_permission(struct inode *inode, int mask); 259int nilfs_permission(struct inode *inode, int mask, unsigned int flags);
260extern int nilfs_load_inode_block(struct nilfs_sb_info *, struct inode *, 260extern int nilfs_load_inode_block(struct nilfs_sb_info *, struct inode *,
261 struct buffer_head **); 261 struct buffer_head **);
262extern int nilfs_inode_dirty(struct inode *); 262extern int nilfs_inode_dirty(struct inode *);
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index f804d41ec9d3..e2dcc9c733f7 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -162,10 +162,13 @@ struct inode *nilfs_alloc_inode(struct super_block *sb)
162 return &ii->vfs_inode; 162 return &ii->vfs_inode;
163} 163}
164 164
165void nilfs_destroy_inode(struct inode *inode) 165static void nilfs_i_callback(struct rcu_head *head)
166{ 166{
167 struct inode *inode = container_of(head, struct inode, i_rcu);
167 struct nilfs_mdt_info *mdi = NILFS_MDT(inode); 168 struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
168 169
170 INIT_LIST_HEAD(&inode->i_dentry);
171
169 if (mdi) { 172 if (mdi) {
170 kfree(mdi->mi_bgl); /* kfree(NULL) is safe */ 173 kfree(mdi->mi_bgl); /* kfree(NULL) is safe */
171 kfree(mdi); 174 kfree(mdi);
@@ -173,6 +176,11 @@ void nilfs_destroy_inode(struct inode *inode)
173 kmem_cache_free(nilfs_inode_cachep, NILFS_I(inode)); 176 kmem_cache_free(nilfs_inode_cachep, NILFS_I(inode));
174} 177}
175 178
179void nilfs_destroy_inode(struct inode *inode)
180{
181 call_rcu(&inode->i_rcu, nilfs_i_callback);
182}
183
176static int nilfs_sync_super(struct nilfs_sb_info *sbi, int flag) 184static int nilfs_sync_super(struct nilfs_sb_info *sbi, int flag)
177{ 185{
178 struct the_nilfs *nilfs = sbi->s_nilfs; 186 struct the_nilfs *nilfs = sbi->s_nilfs;
@@ -838,7 +846,7 @@ static int nilfs_attach_snapshot(struct super_block *s, __u64 cno,
838 846
839static int nilfs_tree_was_touched(struct dentry *root_dentry) 847static int nilfs_tree_was_touched(struct dentry *root_dentry)
840{ 848{
841 return atomic_read(&root_dentry->d_count) > 1; 849 return root_dentry->d_count > 1;
842} 850}
843 851
844/** 852/**
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index 20dc218707ca..79b47cbb5cd8 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -59,7 +59,7 @@ void __fsnotify_update_child_dentry_flags(struct inode *inode)
59 /* determine if the children should tell inode about their events */ 59 /* determine if the children should tell inode about their events */
60 watched = fsnotify_inode_watches_children(inode); 60 watched = fsnotify_inode_watches_children(inode);
61 61
62 spin_lock(&dcache_lock); 62 spin_lock(&inode->i_lock);
63 /* run all of the dentries associated with this inode. Since this is a 63 /* run all of the dentries associated with this inode. Since this is a
64 * directory, there damn well better only be one item on this list */ 64 * directory, there damn well better only be one item on this list */
65 list_for_each_entry(alias, &inode->i_dentry, d_alias) { 65 list_for_each_entry(alias, &inode->i_dentry, d_alias) {
@@ -68,19 +68,21 @@ void __fsnotify_update_child_dentry_flags(struct inode *inode)
68 /* run all of the children of the original inode and fix their 68 /* run all of the children of the original inode and fix their
69 * d_flags to indicate parental interest (their parent is the 69 * d_flags to indicate parental interest (their parent is the
70 * original inode) */ 70 * original inode) */
71 spin_lock(&alias->d_lock);
71 list_for_each_entry(child, &alias->d_subdirs, d_u.d_child) { 72 list_for_each_entry(child, &alias->d_subdirs, d_u.d_child) {
72 if (!child->d_inode) 73 if (!child->d_inode)
73 continue; 74 continue;
74 75
75 spin_lock(&child->d_lock); 76 spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
76 if (watched) 77 if (watched)
77 child->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED; 78 child->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED;
78 else 79 else
79 child->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED; 80 child->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED;
80 spin_unlock(&child->d_lock); 81 spin_unlock(&child->d_lock);
81 } 82 }
83 spin_unlock(&alias->d_lock);
82 } 84 }
83 spin_unlock(&dcache_lock); 85 spin_unlock(&inode->i_lock);
84} 86}
85 87
86/* Notify this dentry's parent about a child's events. */ 88/* Notify this dentry's parent about a child's events. */
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index 93622b175fc7..a627ed82c0a3 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -332,6 +332,13 @@ struct inode *ntfs_alloc_big_inode(struct super_block *sb)
332 return NULL; 332 return NULL;
333} 333}
334 334
335static void ntfs_i_callback(struct rcu_head *head)
336{
337 struct inode *inode = container_of(head, struct inode, i_rcu);
338 INIT_LIST_HEAD(&inode->i_dentry);
339 kmem_cache_free(ntfs_big_inode_cache, NTFS_I(inode));
340}
341
335void ntfs_destroy_big_inode(struct inode *inode) 342void ntfs_destroy_big_inode(struct inode *inode)
336{ 343{
337 ntfs_inode *ni = NTFS_I(inode); 344 ntfs_inode *ni = NTFS_I(inode);
@@ -340,7 +347,7 @@ void ntfs_destroy_big_inode(struct inode *inode)
340 BUG_ON(ni->page); 347 BUG_ON(ni->page);
341 if (!atomic_dec_and_test(&ni->count)) 348 if (!atomic_dec_and_test(&ni->count))
342 BUG(); 349 BUG();
343 kmem_cache_free(ntfs_big_inode_cache, NTFS_I(inode)); 350 call_rcu(&inode->i_rcu, ntfs_i_callback);
344} 351}
345 352
346static inline ntfs_inode *ntfs_alloc_extent_inode(void) 353static inline ntfs_inode *ntfs_alloc_extent_inode(void)
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
index 391915093fe1..704f6b1742f3 100644
--- a/fs/ocfs2/acl.c
+++ b/fs/ocfs2/acl.c
@@ -291,13 +291,17 @@ static int ocfs2_set_acl(handle_t *handle,
291 return ret; 291 return ret;
292} 292}
293 293
294int ocfs2_check_acl(struct inode *inode, int mask) 294int ocfs2_check_acl(struct inode *inode, int mask, unsigned int flags)
295{ 295{
296 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 296 struct ocfs2_super *osb;
297 struct buffer_head *di_bh = NULL; 297 struct buffer_head *di_bh = NULL;
298 struct posix_acl *acl; 298 struct posix_acl *acl;
299 int ret = -EAGAIN; 299 int ret = -EAGAIN;
300 300
301 if (flags & IPERM_FLAG_RCU)
302 return -ECHILD;
303
304 osb = OCFS2_SB(inode->i_sb);
301 if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL)) 305 if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL))
302 return ret; 306 return ret;
303 307
diff --git a/fs/ocfs2/acl.h b/fs/ocfs2/acl.h
index 5c5d31f05853..4fe7c9cf4bfb 100644
--- a/fs/ocfs2/acl.h
+++ b/fs/ocfs2/acl.h
@@ -26,7 +26,7 @@ struct ocfs2_acl_entry {
26 __le32 e_id; 26 __le32 e_id;
27}; 27};
28 28
29extern int ocfs2_check_acl(struct inode *, int); 29extern int ocfs2_check_acl(struct inode *, int, unsigned int);
30extern int ocfs2_acl_chmod(struct inode *); 30extern int ocfs2_acl_chmod(struct inode *);
31extern int ocfs2_init_acl(handle_t *, struct inode *, struct inode *, 31extern int ocfs2_init_acl(handle_t *, struct inode *, struct inode *,
32 struct buffer_head *, struct buffer_head *, 32 struct buffer_head *, struct buffer_head *,
diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c
index 895532ac4d98..6d80ecc7834f 100644
--- a/fs/ocfs2/dcache.c
+++ b/fs/ocfs2/dcache.c
@@ -52,9 +52,15 @@ void ocfs2_dentry_attach_gen(struct dentry *dentry)
52static int ocfs2_dentry_revalidate(struct dentry *dentry, 52static int ocfs2_dentry_revalidate(struct dentry *dentry,
53 struct nameidata *nd) 53 struct nameidata *nd)
54{ 54{
55 struct inode *inode = dentry->d_inode; 55 struct inode *inode;
56 int ret = 0; /* if all else fails, just return false */ 56 int ret = 0; /* if all else fails, just return false */
57 struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb); 57 struct ocfs2_super *osb;
58
59 if (nd->flags & LOOKUP_RCU)
60 return -ECHILD;
61
62 inode = dentry->d_inode;
63 osb = OCFS2_SB(dentry->d_sb);
58 64
59 mlog_entry("(0x%p, '%.*s')\n", dentry, 65 mlog_entry("(0x%p, '%.*s')\n", dentry,
60 dentry->d_name.len, dentry->d_name.name); 66 dentry->d_name.len, dentry->d_name.name);
@@ -169,23 +175,25 @@ struct dentry *ocfs2_find_local_alias(struct inode *inode,
169 struct list_head *p; 175 struct list_head *p;
170 struct dentry *dentry = NULL; 176 struct dentry *dentry = NULL;
171 177
172 spin_lock(&dcache_lock); 178 spin_lock(&inode->i_lock);
173
174 list_for_each(p, &inode->i_dentry) { 179 list_for_each(p, &inode->i_dentry) {
175 dentry = list_entry(p, struct dentry, d_alias); 180 dentry = list_entry(p, struct dentry, d_alias);
176 181
182 spin_lock(&dentry->d_lock);
177 if (ocfs2_match_dentry(dentry, parent_blkno, skip_unhashed)) { 183 if (ocfs2_match_dentry(dentry, parent_blkno, skip_unhashed)) {
178 mlog(0, "dentry found: %.*s\n", 184 mlog(0, "dentry found: %.*s\n",
179 dentry->d_name.len, dentry->d_name.name); 185 dentry->d_name.len, dentry->d_name.name);
180 186
181 dget_locked(dentry); 187 dget_dlock(dentry);
188 spin_unlock(&dentry->d_lock);
182 break; 189 break;
183 } 190 }
191 spin_unlock(&dentry->d_lock);
184 192
185 dentry = NULL; 193 dentry = NULL;
186 } 194 }
187 195
188 spin_unlock(&dcache_lock); 196 spin_unlock(&inode->i_lock);
189 197
190 return dentry; 198 return dentry;
191} 199}
diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
index b2df490a19ed..8c5c0eddc365 100644
--- a/fs/ocfs2/dlmfs/dlmfs.c
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -351,11 +351,18 @@ static struct inode *dlmfs_alloc_inode(struct super_block *sb)
351 return &ip->ip_vfs_inode; 351 return &ip->ip_vfs_inode;
352} 352}
353 353
354static void dlmfs_destroy_inode(struct inode *inode) 354static void dlmfs_i_callback(struct rcu_head *head)
355{ 355{
356 struct inode *inode = container_of(head, struct inode, i_rcu);
357 INIT_LIST_HEAD(&inode->i_dentry);
356 kmem_cache_free(dlmfs_inode_cache, DLMFS_I(inode)); 358 kmem_cache_free(dlmfs_inode_cache, DLMFS_I(inode));
357} 359}
358 360
361static void dlmfs_destroy_inode(struct inode *inode)
362{
363 call_rcu(&inode->i_rcu, dlmfs_i_callback);
364}
365
359static void dlmfs_evict_inode(struct inode *inode) 366static void dlmfs_evict_inode(struct inode *inode)
360{ 367{
361 int status; 368 int status;
diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c
index 19ad145d2af3..6adafa576065 100644
--- a/fs/ocfs2/export.c
+++ b/fs/ocfs2/export.c
@@ -138,7 +138,7 @@ check_gen:
138 138
139 result = d_obtain_alias(inode); 139 result = d_obtain_alias(inode);
140 if (!IS_ERR(result)) 140 if (!IS_ERR(result))
141 result->d_op = &ocfs2_dentry_ops; 141 d_set_d_op(result, &ocfs2_dentry_ops);
142 else 142 else
143 mlog_errno(PTR_ERR(result)); 143 mlog_errno(PTR_ERR(result));
144 144
@@ -176,7 +176,7 @@ static struct dentry *ocfs2_get_parent(struct dentry *child)
176 176
177 parent = d_obtain_alias(ocfs2_iget(OCFS2_SB(dir->i_sb), blkno, 0, 0)); 177 parent = d_obtain_alias(ocfs2_iget(OCFS2_SB(dir->i_sb), blkno, 0, 0));
178 if (!IS_ERR(parent)) 178 if (!IS_ERR(parent))
179 parent->d_op = &ocfs2_dentry_ops; 179 d_set_d_op(parent, &ocfs2_dentry_ops);
180 180
181bail_unlock: 181bail_unlock:
182 ocfs2_inode_unlock(dir, 0); 182 ocfs2_inode_unlock(dir, 0);
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index f6cba566429d..bdadbae09094 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1307,10 +1307,13 @@ bail:
1307 return err; 1307 return err;
1308} 1308}
1309 1309
1310int ocfs2_permission(struct inode *inode, int mask) 1310int ocfs2_permission(struct inode *inode, int mask, unsigned int flags)
1311{ 1311{
1312 int ret; 1312 int ret;
1313 1313
1314 if (flags & IPERM_FLAG_RCU)
1315 return -ECHILD;
1316
1314 mlog_entry_void(); 1317 mlog_entry_void();
1315 1318
1316 ret = ocfs2_inode_lock(inode, NULL, 0); 1319 ret = ocfs2_inode_lock(inode, NULL, 0);
@@ -1320,7 +1323,7 @@ int ocfs2_permission(struct inode *inode, int mask)
1320 goto out; 1323 goto out;
1321 } 1324 }
1322 1325
1323 ret = generic_permission(inode, mask, ocfs2_check_acl); 1326 ret = generic_permission(inode, mask, flags, ocfs2_check_acl);
1324 1327
1325 ocfs2_inode_unlock(inode, 0); 1328 ocfs2_inode_unlock(inode, 0);
1326out: 1329out:
diff --git a/fs/ocfs2/file.h b/fs/ocfs2/file.h
index 97bf761c9e7c..f5afbbef6703 100644
--- a/fs/ocfs2/file.h
+++ b/fs/ocfs2/file.h
@@ -61,7 +61,7 @@ int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh,
61int ocfs2_setattr(struct dentry *dentry, struct iattr *attr); 61int ocfs2_setattr(struct dentry *dentry, struct iattr *attr);
62int ocfs2_getattr(struct vfsmount *mnt, struct dentry *dentry, 62int ocfs2_getattr(struct vfsmount *mnt, struct dentry *dentry,
63 struct kstat *stat); 63 struct kstat *stat);
64int ocfs2_permission(struct inode *inode, int mask); 64int ocfs2_permission(struct inode *inode, int mask, unsigned int flags);
65 65
66int ocfs2_should_update_atime(struct inode *inode, 66int ocfs2_should_update_atime(struct inode *inode,
67 struct vfsmount *vfsmnt); 67 struct vfsmount *vfsmnt);
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index ff5744e1e36f..d14cad6e2e41 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -147,7 +147,7 @@ static struct dentry *ocfs2_lookup(struct inode *dir, struct dentry *dentry,
147 spin_unlock(&oi->ip_lock); 147 spin_unlock(&oi->ip_lock);
148 148
149bail_add: 149bail_add:
150 dentry->d_op = &ocfs2_dentry_ops; 150 d_set_d_op(dentry, &ocfs2_dentry_ops);
151 ret = d_splice_alias(inode, dentry); 151 ret = d_splice_alias(inode, dentry);
152 152
153 if (inode) { 153 if (inode) {
@@ -415,7 +415,7 @@ static int ocfs2_mknod(struct inode *dir,
415 mlog_errno(status); 415 mlog_errno(status);
416 goto leave; 416 goto leave;
417 } 417 }
418 dentry->d_op = &ocfs2_dentry_ops; 418 d_set_d_op(dentry, &ocfs2_dentry_ops);
419 419
420 status = ocfs2_add_entry(handle, dentry, inode, 420 status = ocfs2_add_entry(handle, dentry, inode,
421 OCFS2_I(inode)->ip_blkno, parent_fe_bh, 421 OCFS2_I(inode)->ip_blkno, parent_fe_bh,
@@ -743,7 +743,7 @@ static int ocfs2_link(struct dentry *old_dentry,
743 } 743 }
744 744
745 ihold(inode); 745 ihold(inode);
746 dentry->d_op = &ocfs2_dentry_ops; 746 d_set_d_op(dentry, &ocfs2_dentry_ops);
747 d_instantiate(dentry, inode); 747 d_instantiate(dentry, inode);
748 748
749out_commit: 749out_commit:
@@ -1794,7 +1794,7 @@ static int ocfs2_symlink(struct inode *dir,
1794 mlog_errno(status); 1794 mlog_errno(status);
1795 goto bail; 1795 goto bail;
1796 } 1796 }
1797 dentry->d_op = &ocfs2_dentry_ops; 1797 d_set_d_op(dentry, &ocfs2_dentry_ops);
1798 1798
1799 status = ocfs2_add_entry(handle, dentry, inode, 1799 status = ocfs2_add_entry(handle, dentry, inode,
1800 le64_to_cpu(fe->i_blkno), parent_fe_bh, 1800 le64_to_cpu(fe->i_blkno), parent_fe_bh,
@@ -2459,7 +2459,7 @@ int ocfs2_mv_orphaned_inode_to_new(struct inode *dir,
2459 goto out_commit; 2459 goto out_commit;
2460 } 2460 }
2461 2461
2462 dentry->d_op = &ocfs2_dentry_ops; 2462 d_set_d_op(dentry, &ocfs2_dentry_ops);
2463 d_instantiate(dentry, inode); 2463 d_instantiate(dentry, inode);
2464 status = 0; 2464 status = 0;
2465out_commit: 2465out_commit:
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index cfeab7ce3697..17ff46fa8a10 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -569,11 +569,18 @@ static struct inode *ocfs2_alloc_inode(struct super_block *sb)
569 return &oi->vfs_inode; 569 return &oi->vfs_inode;
570} 570}
571 571
572static void ocfs2_destroy_inode(struct inode *inode) 572static void ocfs2_i_callback(struct rcu_head *head)
573{ 573{
574 struct inode *inode = container_of(head, struct inode, i_rcu);
575 INIT_LIST_HEAD(&inode->i_dentry);
574 kmem_cache_free(ocfs2_inode_cachep, OCFS2_I(inode)); 576 kmem_cache_free(ocfs2_inode_cachep, OCFS2_I(inode));
575} 577}
576 578
579static void ocfs2_destroy_inode(struct inode *inode)
580{
581 call_rcu(&inode->i_rcu, ocfs2_i_callback);
582}
583
577static unsigned long long ocfs2_max_file_offset(unsigned int bbits, 584static unsigned long long ocfs2_max_file_offset(unsigned int bbits,
578 unsigned int cbits) 585 unsigned int cbits)
579{ 586{
diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c
index 911e61f348fc..a2a5bff774e3 100644
--- a/fs/openpromfs/inode.c
+++ b/fs/openpromfs/inode.c
@@ -343,11 +343,18 @@ static struct inode *openprom_alloc_inode(struct super_block *sb)
343 return &oi->vfs_inode; 343 return &oi->vfs_inode;
344} 344}
345 345
346static void openprom_destroy_inode(struct inode *inode) 346static void openprom_i_callback(struct rcu_head *head)
347{ 347{
348 struct inode *inode = container_of(head, struct inode, i_rcu);
349 INIT_LIST_HEAD(&inode->i_dentry);
348 kmem_cache_free(op_inode_cachep, OP_I(inode)); 350 kmem_cache_free(op_inode_cachep, OP_I(inode));
349} 351}
350 352
353static void openprom_destroy_inode(struct inode *inode)
354{
355 call_rcu(&inode->i_rcu, openprom_i_callback);
356}
357
351static struct inode *openprom_iget(struct super_block *sb, ino_t ino) 358static struct inode *openprom_iget(struct super_block *sb, ino_t ino)
352{ 359{
353 struct inode *inode; 360 struct inode *inode;
diff --git a/fs/pipe.c b/fs/pipe.c
index 04629f36e397..68f1f8e4e23b 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -999,12 +999,12 @@ struct file *create_write_pipe(int flags)
999 goto err; 999 goto err;
1000 1000
1001 err = -ENOMEM; 1001 err = -ENOMEM;
1002 path.dentry = d_alloc(pipe_mnt->mnt_sb->s_root, &name); 1002 path.dentry = d_alloc_pseudo(pipe_mnt->mnt_sb, &name);
1003 if (!path.dentry) 1003 if (!path.dentry)
1004 goto err_inode; 1004 goto err_inode;
1005 path.mnt = mntget(pipe_mnt); 1005 path.mnt = mntget(pipe_mnt);
1006 1006
1007 path.dentry->d_op = &pipefs_dentry_operations; 1007 d_set_d_op(path.dentry, &pipefs_dentry_operations);
1008 d_instantiate(path.dentry, inode); 1008 d_instantiate(path.dentry, inode);
1009 1009
1010 err = -ENFILE; 1010 err = -ENFILE;
@@ -1253,6 +1253,10 @@ out:
1253 return ret; 1253 return ret;
1254} 1254}
1255 1255
1256static const struct super_operations pipefs_ops = {
1257 .destroy_inode = free_inode_nonrcu,
1258};
1259
1256/* 1260/*
1257 * pipefs should _never_ be mounted by userland - too much of security hassle, 1261 * pipefs should _never_ be mounted by userland - too much of security hassle,
1258 * no real gain from having the whole whorehouse mounted. So we don't need 1262 * no real gain from having the whole whorehouse mounted. So we don't need
@@ -1262,7 +1266,7 @@ out:
1262static struct dentry *pipefs_mount(struct file_system_type *fs_type, 1266static struct dentry *pipefs_mount(struct file_system_type *fs_type,
1263 int flags, const char *dev_name, void *data) 1267 int flags, const char *dev_name, void *data)
1264{ 1268{
1265 return mount_pseudo(fs_type, "pipe:", NULL, PIPEFS_MAGIC); 1269 return mount_pseudo(fs_type, "pipe:", &pipefs_ops, PIPEFS_MAGIC);
1266} 1270}
1267 1271
1268static struct file_system_type pipe_fs_type = { 1272static struct file_system_type pipe_fs_type = {
@@ -1288,7 +1292,7 @@ static int __init init_pipe_fs(void)
1288static void __exit exit_pipe_fs(void) 1292static void __exit exit_pipe_fs(void)
1289{ 1293{
1290 unregister_filesystem(&pipe_fs_type); 1294 unregister_filesystem(&pipe_fs_type);
1291 mntput(pipe_mnt); 1295 mntput_long(pipe_mnt);
1292} 1296}
1293 1297
1294fs_initcall(init_pipe_fs); 1298fs_initcall(init_pipe_fs);
diff --git a/fs/pnode.c b/fs/pnode.c
index 8066b8dd748f..d42514e32380 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -288,7 +288,7 @@ out:
288 */ 288 */
289static inline int do_refcount_check(struct vfsmount *mnt, int count) 289static inline int do_refcount_check(struct vfsmount *mnt, int count)
290{ 290{
291 int mycount = atomic_read(&mnt->mnt_count) - mnt->mnt_ghosts; 291 int mycount = mnt_get_count(mnt) - mnt->mnt_ghosts;
292 return (mycount > count); 292 return (mycount > count);
293} 293}
294 294
@@ -300,7 +300,7 @@ static inline int do_refcount_check(struct vfsmount *mnt, int count)
300 * Check if any of these mounts that **do not have submounts** 300 * Check if any of these mounts that **do not have submounts**
301 * have more references than 'refcnt'. If so return busy. 301 * have more references than 'refcnt'. If so return busy.
302 * 302 *
303 * vfsmount lock must be held for read or write 303 * vfsmount lock must be held for write
304 */ 304 */
305int propagate_mount_busy(struct vfsmount *mnt, int refcnt) 305int propagate_mount_busy(struct vfsmount *mnt, int refcnt)
306{ 306{
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 08cba2c3b612..b20962c71a52 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1795,10 +1795,16 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
1795 */ 1795 */
1796static int pid_revalidate(struct dentry *dentry, struct nameidata *nd) 1796static int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
1797{ 1797{
1798 struct inode *inode = dentry->d_inode; 1798 struct inode *inode;
1799 struct task_struct *task = get_proc_task(inode); 1799 struct task_struct *task;
1800 const struct cred *cred; 1800 const struct cred *cred;
1801 1801
1802 if (nd && nd->flags & LOOKUP_RCU)
1803 return -ECHILD;
1804
1805 inode = dentry->d_inode;
1806 task = get_proc_task(inode);
1807
1802 if (task) { 1808 if (task) {
1803 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) || 1809 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
1804 task_dumpable(task)) { 1810 task_dumpable(task)) {
@@ -1820,7 +1826,7 @@ static int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
1820 return 0; 1826 return 0;
1821} 1827}
1822 1828
1823static int pid_delete_dentry(struct dentry * dentry) 1829static int pid_delete_dentry(const struct dentry * dentry)
1824{ 1830{
1825 /* Is the task we represent dead? 1831 /* Is the task we represent dead?
1826 * If so, then don't put the dentry on the lru list, 1832 * If so, then don't put the dentry on the lru list,
@@ -1964,12 +1970,19 @@ static int proc_fd_link(struct inode *inode, struct path *path)
1964 1970
1965static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd) 1971static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd)
1966{ 1972{
1967 struct inode *inode = dentry->d_inode; 1973 struct inode *inode;
1968 struct task_struct *task = get_proc_task(inode); 1974 struct task_struct *task;
1969 int fd = proc_fd(inode); 1975 int fd;
1970 struct files_struct *files; 1976 struct files_struct *files;
1971 const struct cred *cred; 1977 const struct cred *cred;
1972 1978
1979 if (nd && nd->flags & LOOKUP_RCU)
1980 return -ECHILD;
1981
1982 inode = dentry->d_inode;
1983 task = get_proc_task(inode);
1984 fd = proc_fd(inode);
1985
1973 if (task) { 1986 if (task) {
1974 files = get_files_struct(task); 1987 files = get_files_struct(task);
1975 if (files) { 1988 if (files) {
@@ -2045,7 +2058,7 @@ static struct dentry *proc_fd_instantiate(struct inode *dir,
2045 inode->i_op = &proc_pid_link_inode_operations; 2058 inode->i_op = &proc_pid_link_inode_operations;
2046 inode->i_size = 64; 2059 inode->i_size = 64;
2047 ei->op.proc_get_link = proc_fd_link; 2060 ei->op.proc_get_link = proc_fd_link;
2048 dentry->d_op = &tid_fd_dentry_operations; 2061 d_set_d_op(dentry, &tid_fd_dentry_operations);
2049 d_add(dentry, inode); 2062 d_add(dentry, inode);
2050 /* Close the race of the process dying before we return the dentry */ 2063 /* Close the race of the process dying before we return the dentry */
2051 if (tid_fd_revalidate(dentry, NULL)) 2064 if (tid_fd_revalidate(dentry, NULL))
@@ -2177,11 +2190,13 @@ static const struct file_operations proc_fd_operations = {
2177 * /proc/pid/fd needs a special permission handler so that a process can still 2190 * /proc/pid/fd needs a special permission handler so that a process can still
2178 * access /proc/self/fd after it has executed a setuid(). 2191 * access /proc/self/fd after it has executed a setuid().
2179 */ 2192 */
2180static int proc_fd_permission(struct inode *inode, int mask) 2193static int proc_fd_permission(struct inode *inode, int mask, unsigned int flags)
2181{ 2194{
2182 int rv; 2195 int rv;
2183 2196
2184 rv = generic_permission(inode, mask, NULL); 2197 if (flags & IPERM_FLAG_RCU)
2198 return -ECHILD;
2199 rv = generic_permission(inode, mask, flags, NULL);
2185 if (rv == 0) 2200 if (rv == 0)
2186 return 0; 2201 return 0;
2187 if (task_pid(current) == proc_pid(inode)) 2202 if (task_pid(current) == proc_pid(inode))
@@ -2213,7 +2228,7 @@ static struct dentry *proc_fdinfo_instantiate(struct inode *dir,
2213 ei->fd = fd; 2228 ei->fd = fd;
2214 inode->i_mode = S_IFREG | S_IRUSR; 2229 inode->i_mode = S_IFREG | S_IRUSR;
2215 inode->i_fop = &proc_fdinfo_file_operations; 2230 inode->i_fop = &proc_fdinfo_file_operations;
2216 dentry->d_op = &tid_fd_dentry_operations; 2231 d_set_d_op(dentry, &tid_fd_dentry_operations);
2217 d_add(dentry, inode); 2232 d_add(dentry, inode);
2218 /* Close the race of the process dying before we return the dentry */ 2233 /* Close the race of the process dying before we return the dentry */
2219 if (tid_fd_revalidate(dentry, NULL)) 2234 if (tid_fd_revalidate(dentry, NULL))
@@ -2272,7 +2287,7 @@ static struct dentry *proc_pident_instantiate(struct inode *dir,
2272 if (p->fop) 2287 if (p->fop)
2273 inode->i_fop = p->fop; 2288 inode->i_fop = p->fop;
2274 ei->op = p->op; 2289 ei->op = p->op;
2275 dentry->d_op = &pid_dentry_operations; 2290 d_set_d_op(dentry, &pid_dentry_operations);
2276 d_add(dentry, inode); 2291 d_add(dentry, inode);
2277 /* Close the race of the process dying before we return the dentry */ 2292 /* Close the race of the process dying before we return the dentry */
2278 if (pid_revalidate(dentry, NULL)) 2293 if (pid_revalidate(dentry, NULL))
@@ -2639,8 +2654,14 @@ static const struct pid_entry proc_base_stuff[] = {
2639 */ 2654 */
2640static int proc_base_revalidate(struct dentry *dentry, struct nameidata *nd) 2655static int proc_base_revalidate(struct dentry *dentry, struct nameidata *nd)
2641{ 2656{
2642 struct inode *inode = dentry->d_inode; 2657 struct inode *inode;
2643 struct task_struct *task = get_proc_task(inode); 2658 struct task_struct *task;
2659
2660 if (nd->flags & LOOKUP_RCU)
2661 return -ECHILD;
2662
2663 inode = dentry->d_inode;
2664 task = get_proc_task(inode);
2644 if (task) { 2665 if (task) {
2645 put_task_struct(task); 2666 put_task_struct(task);
2646 return 1; 2667 return 1;
@@ -2691,7 +2712,7 @@ static struct dentry *proc_base_instantiate(struct inode *dir,
2691 if (p->fop) 2712 if (p->fop)
2692 inode->i_fop = p->fop; 2713 inode->i_fop = p->fop;
2693 ei->op = p->op; 2714 ei->op = p->op;
2694 dentry->d_op = &proc_base_dentry_operations; 2715 d_set_d_op(dentry, &proc_base_dentry_operations);
2695 d_add(dentry, inode); 2716 d_add(dentry, inode);
2696 error = NULL; 2717 error = NULL;
2697out: 2718out:
@@ -3005,7 +3026,7 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
3005 inode->i_nlink = 2 + pid_entry_count_dirs(tgid_base_stuff, 3026 inode->i_nlink = 2 + pid_entry_count_dirs(tgid_base_stuff,
3006 ARRAY_SIZE(tgid_base_stuff)); 3027 ARRAY_SIZE(tgid_base_stuff));
3007 3028
3008 dentry->d_op = &pid_dentry_operations; 3029 d_set_d_op(dentry, &pid_dentry_operations);
3009 3030
3010 d_add(dentry, inode); 3031 d_add(dentry, inode);
3011 /* Close the race of the process dying before we return the dentry */ 3032 /* Close the race of the process dying before we return the dentry */
@@ -3248,7 +3269,7 @@ static struct dentry *proc_task_instantiate(struct inode *dir,
3248 inode->i_nlink = 2 + pid_entry_count_dirs(tid_base_stuff, 3269 inode->i_nlink = 2 + pid_entry_count_dirs(tid_base_stuff,
3249 ARRAY_SIZE(tid_base_stuff)); 3270 ARRAY_SIZE(tid_base_stuff));
3250 3271
3251 dentry->d_op = &pid_dentry_operations; 3272 d_set_d_op(dentry, &pid_dentry_operations);
3252 3273
3253 d_add(dentry, inode); 3274 d_add(dentry, inode);
3254 /* Close the race of the process dying before we return the dentry */ 3275 /* Close the race of the process dying before we return the dentry */
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index dd29f0337661..f766be29d2c7 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -400,7 +400,7 @@ static const struct inode_operations proc_link_inode_operations = {
400 * smarter: we could keep a "volatile" flag in the 400 * smarter: we could keep a "volatile" flag in the
401 * inode to indicate which ones to keep. 401 * inode to indicate which ones to keep.
402 */ 402 */
403static int proc_delete_dentry(struct dentry * dentry) 403static int proc_delete_dentry(const struct dentry * dentry)
404{ 404{
405 return 1; 405 return 1;
406} 406}
@@ -439,7 +439,7 @@ struct dentry *proc_lookup_de(struct proc_dir_entry *de, struct inode *dir,
439out_unlock: 439out_unlock:
440 440
441 if (inode) { 441 if (inode) {
442 dentry->d_op = &proc_dentry_operations; 442 d_set_d_op(dentry, &proc_dentry_operations);
443 d_add(dentry, inode); 443 d_add(dentry, inode);
444 return NULL; 444 return NULL;
445 } 445 }
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 3ddb6068177c..6bcb926b101b 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -65,11 +65,18 @@ static struct inode *proc_alloc_inode(struct super_block *sb)
65 return inode; 65 return inode;
66} 66}
67 67
68static void proc_destroy_inode(struct inode *inode) 68static void proc_i_callback(struct rcu_head *head)
69{ 69{
70 struct inode *inode = container_of(head, struct inode, i_rcu);
71 INIT_LIST_HEAD(&inode->i_dentry);
70 kmem_cache_free(proc_inode_cachep, PROC_I(inode)); 72 kmem_cache_free(proc_inode_cachep, PROC_I(inode));
71} 73}
72 74
75static void proc_destroy_inode(struct inode *inode)
76{
77 call_rcu(&inode->i_rcu, proc_i_callback);
78}
79
73static void init_once(void *foo) 80static void init_once(void *foo)
74{ 81{
75 struct proc_inode *ei = (struct proc_inode *) foo; 82 struct proc_inode *ei = (struct proc_inode *) foo;
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index b652cb00906b..09a1f92a34ef 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -5,6 +5,7 @@
5#include <linux/sysctl.h> 5#include <linux/sysctl.h>
6#include <linux/proc_fs.h> 6#include <linux/proc_fs.h>
7#include <linux/security.h> 7#include <linux/security.h>
8#include <linux/namei.h>
8#include "internal.h" 9#include "internal.h"
9 10
10static const struct dentry_operations proc_sys_dentry_operations; 11static const struct dentry_operations proc_sys_dentry_operations;
@@ -120,7 +121,7 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
120 goto out; 121 goto out;
121 122
122 err = NULL; 123 err = NULL;
123 dentry->d_op = &proc_sys_dentry_operations; 124 d_set_d_op(dentry, &proc_sys_dentry_operations);
124 d_add(dentry, inode); 125 d_add(dentry, inode);
125 126
126out: 127out:
@@ -201,7 +202,7 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
201 dput(child); 202 dput(child);
202 return -ENOMEM; 203 return -ENOMEM;
203 } else { 204 } else {
204 child->d_op = &proc_sys_dentry_operations; 205 d_set_d_op(child, &proc_sys_dentry_operations);
205 d_add(child, inode); 206 d_add(child, inode);
206 } 207 }
207 } else { 208 } else {
@@ -294,7 +295,7 @@ out:
294 return ret; 295 return ret;
295} 296}
296 297
297static int proc_sys_permission(struct inode *inode, int mask) 298static int proc_sys_permission(struct inode *inode, int mask,unsigned int flags)
298{ 299{
299 /* 300 /*
300 * sysctl entries that are not writeable, 301 * sysctl entries that are not writeable,
@@ -304,6 +305,9 @@ static int proc_sys_permission(struct inode *inode, int mask)
304 struct ctl_table *table; 305 struct ctl_table *table;
305 int error; 306 int error;
306 307
308 if (flags & IPERM_FLAG_RCU)
309 return -ECHILD;
310
307 /* Executable files are not allowed under /proc/sys/ */ 311 /* Executable files are not allowed under /proc/sys/ */
308 if ((mask & MAY_EXEC) && S_ISREG(inode->i_mode)) 312 if ((mask & MAY_EXEC) && S_ISREG(inode->i_mode))
309 return -EACCES; 313 return -EACCES;
@@ -389,23 +393,30 @@ static const struct inode_operations proc_sys_dir_operations = {
389 393
390static int proc_sys_revalidate(struct dentry *dentry, struct nameidata *nd) 394static int proc_sys_revalidate(struct dentry *dentry, struct nameidata *nd)
391{ 395{
396 if (nd->flags & LOOKUP_RCU)
397 return -ECHILD;
392 return !PROC_I(dentry->d_inode)->sysctl->unregistering; 398 return !PROC_I(dentry->d_inode)->sysctl->unregistering;
393} 399}
394 400
395static int proc_sys_delete(struct dentry *dentry) 401static int proc_sys_delete(const struct dentry *dentry)
396{ 402{
397 return !!PROC_I(dentry->d_inode)->sysctl->unregistering; 403 return !!PROC_I(dentry->d_inode)->sysctl->unregistering;
398} 404}
399 405
400static int proc_sys_compare(struct dentry *dir, struct qstr *qstr, 406static int proc_sys_compare(const struct dentry *parent,
401 struct qstr *name) 407 const struct inode *pinode,
408 const struct dentry *dentry, const struct inode *inode,
409 unsigned int len, const char *str, const struct qstr *name)
402{ 410{
403 struct dentry *dentry = container_of(qstr, struct dentry, d_name); 411 /* Although proc doesn't have negative dentries, rcu-walk means
404 if (qstr->len != name->len) 412 * that inode here can be NULL */
413 if (!inode)
414 return 0;
415 if (name->len != len)
405 return 1; 416 return 1;
406 if (memcmp(qstr->name, name->name, name->len)) 417 if (memcmp(name->name, str, len))
407 return 1; 418 return 1;
408 return !sysctl_is_seen(PROC_I(dentry->d_inode)->sysctl); 419 return !sysctl_is_seen(PROC_I(inode)->sysctl);
409} 420}
410 421
411static const struct dentry_operations proc_sys_dentry_operations = { 422static const struct dentry_operations proc_sys_dentry_operations = {
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c
index fcada42f1aa3..e63b4171d583 100644
--- a/fs/qnx4/inode.c
+++ b/fs/qnx4/inode.c
@@ -425,11 +425,18 @@ static struct inode *qnx4_alloc_inode(struct super_block *sb)
425 return &ei->vfs_inode; 425 return &ei->vfs_inode;
426} 426}
427 427
428static void qnx4_destroy_inode(struct inode *inode) 428static void qnx4_i_callback(struct rcu_head *head)
429{ 429{
430 struct inode *inode = container_of(head, struct inode, i_rcu);
431 INIT_LIST_HEAD(&inode->i_dentry);
430 kmem_cache_free(qnx4_inode_cachep, qnx4_i(inode)); 432 kmem_cache_free(qnx4_inode_cachep, qnx4_i(inode));
431} 433}
432 434
435static void qnx4_destroy_inode(struct inode *inode)
436{
437 call_rcu(&inode->i_rcu, qnx4_i_callback);
438}
439
433static void init_once(void *foo) 440static void init_once(void *foo)
434{ 441{
435 struct qnx4_inode_info *ei = (struct qnx4_inode_info *) foo; 442 struct qnx4_inode_info *ei = (struct qnx4_inode_info *) foo;
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index b243117b8752..2575682a9ead 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -529,11 +529,18 @@ static struct inode *reiserfs_alloc_inode(struct super_block *sb)
529 return &ei->vfs_inode; 529 return &ei->vfs_inode;
530} 530}
531 531
532static void reiserfs_destroy_inode(struct inode *inode) 532static void reiserfs_i_callback(struct rcu_head *head)
533{ 533{
534 struct inode *inode = container_of(head, struct inode, i_rcu);
535 INIT_LIST_HEAD(&inode->i_dentry);
534 kmem_cache_free(reiserfs_inode_cachep, REISERFS_I(inode)); 536 kmem_cache_free(reiserfs_inode_cachep, REISERFS_I(inode));
535} 537}
536 538
539static void reiserfs_destroy_inode(struct inode *inode)
540{
541 call_rcu(&inode->i_rcu, reiserfs_i_callback);
542}
543
537static void init_once(void *foo) 544static void init_once(void *foo)
538{ 545{
539 struct reiserfs_inode_info *ei = (struct reiserfs_inode_info *)foo; 546 struct reiserfs_inode_info *ei = (struct reiserfs_inode_info *)foo;
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 5d04a7828e7a..3cfb2e933644 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -870,11 +870,14 @@ out:
870 return err; 870 return err;
871} 871}
872 872
873static int reiserfs_check_acl(struct inode *inode, int mask) 873static int reiserfs_check_acl(struct inode *inode, int mask, unsigned int flags)
874{ 874{
875 struct posix_acl *acl; 875 struct posix_acl *acl;
876 int error = -EAGAIN; /* do regular unix permission checks by default */ 876 int error = -EAGAIN; /* do regular unix permission checks by default */
877 877
878 if (flags & IPERM_FLAG_RCU)
879 return -ECHILD;
880
878 acl = reiserfs_get_acl(inode, ACL_TYPE_ACCESS); 881 acl = reiserfs_get_acl(inode, ACL_TYPE_ACCESS);
879 882
880 if (acl) { 883 if (acl) {
@@ -951,8 +954,10 @@ static int xattr_mount_check(struct super_block *s)
951 return 0; 954 return 0;
952} 955}
953 956
954int reiserfs_permission(struct inode *inode, int mask) 957int reiserfs_permission(struct inode *inode, int mask, unsigned int flags)
955{ 958{
959 if (flags & IPERM_FLAG_RCU)
960 return -ECHILD;
956 /* 961 /*
957 * We don't do permission checks on the internal objects. 962 * We don't do permission checks on the internal objects.
958 * Permissions are determined by the "owning" object. 963 * Permissions are determined by the "owning" object.
@@ -965,13 +970,16 @@ int reiserfs_permission(struct inode *inode, int mask)
965 * Stat data v1 doesn't support ACLs. 970 * Stat data v1 doesn't support ACLs.
966 */ 971 */
967 if (get_inode_sd_version(inode) != STAT_DATA_V1) 972 if (get_inode_sd_version(inode) != STAT_DATA_V1)
968 return generic_permission(inode, mask, reiserfs_check_acl); 973 return generic_permission(inode, mask, flags,
974 reiserfs_check_acl);
969#endif 975#endif
970 return generic_permission(inode, mask, NULL); 976 return generic_permission(inode, mask, flags, NULL);
971} 977}
972 978
973static int xattr_hide_revalidate(struct dentry *dentry, struct nameidata *nd) 979static int xattr_hide_revalidate(struct dentry *dentry, struct nameidata *nd)
974{ 980{
981 if (nd->flags & LOOKUP_RCU)
982 return -ECHILD;
975 return -EPERM; 983 return -EPERM;
976} 984}
977 985
@@ -990,7 +998,7 @@ int reiserfs_lookup_privroot(struct super_block *s)
990 strlen(PRIVROOT_NAME)); 998 strlen(PRIVROOT_NAME));
991 if (!IS_ERR(dentry)) { 999 if (!IS_ERR(dentry)) {
992 REISERFS_SB(s)->priv_root = dentry; 1000 REISERFS_SB(s)->priv_root = dentry;
993 dentry->d_op = &xattr_lookup_poison_ops; 1001 d_set_d_op(dentry, &xattr_lookup_poison_ops);
994 if (dentry->d_inode) 1002 if (dentry->d_inode)
995 dentry->d_inode->i_flags |= S_PRIVATE; 1003 dentry->d_inode->i_flags |= S_PRIVATE;
996 } else 1004 } else
diff --git a/fs/romfs/super.c b/fs/romfs/super.c
index 6647f90e55cd..2305e3121cb1 100644
--- a/fs/romfs/super.c
+++ b/fs/romfs/super.c
@@ -400,11 +400,18 @@ static struct inode *romfs_alloc_inode(struct super_block *sb)
400/* 400/*
401 * return a spent inode to the slab cache 401 * return a spent inode to the slab cache
402 */ 402 */
403static void romfs_destroy_inode(struct inode *inode) 403static void romfs_i_callback(struct rcu_head *head)
404{ 404{
405 struct inode *inode = container_of(head, struct inode, i_rcu);
406 INIT_LIST_HEAD(&inode->i_dentry);
405 kmem_cache_free(romfs_inode_cachep, ROMFS_I(inode)); 407 kmem_cache_free(romfs_inode_cachep, ROMFS_I(inode));
406} 408}
407 409
410static void romfs_destroy_inode(struct inode *inode)
411{
412 call_rcu(&inode->i_rcu, romfs_i_callback);
413}
414
408/* 415/*
409 * get filesystem statistics 416 * get filesystem statistics
410 */ 417 */
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index 24de30ba34c1..20700b9f2b4c 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -440,11 +440,18 @@ static struct inode *squashfs_alloc_inode(struct super_block *sb)
440} 440}
441 441
442 442
443static void squashfs_destroy_inode(struct inode *inode) 443static void squashfs_i_callback(struct rcu_head *head)
444{ 444{
445 struct inode *inode = container_of(head, struct inode, i_rcu);
446 INIT_LIST_HEAD(&inode->i_dentry);
445 kmem_cache_free(squashfs_inode_cachep, squashfs_i(inode)); 447 kmem_cache_free(squashfs_inode_cachep, squashfs_i(inode));
446} 448}
447 449
450static void squashfs_destroy_inode(struct inode *inode)
451{
452 call_rcu(&inode->i_rcu, squashfs_i_callback);
453}
454
448 455
449static struct file_system_type squashfs_fs_type = { 456static struct file_system_type squashfs_fs_type = {
450 .owner = THIS_MODULE, 457 .owner = THIS_MODULE,
diff --git a/fs/super.c b/fs/super.c
index ca696155cd9a..823e061faa87 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -30,6 +30,7 @@
30#include <linux/idr.h> 30#include <linux/idr.h>
31#include <linux/mutex.h> 31#include <linux/mutex.h>
32#include <linux/backing-dev.h> 32#include <linux/backing-dev.h>
33#include <linux/rculist_bl.h>
33#include "internal.h" 34#include "internal.h"
34 35
35 36
@@ -71,7 +72,7 @@ static struct super_block *alloc_super(struct file_system_type *type)
71 INIT_LIST_HEAD(&s->s_files); 72 INIT_LIST_HEAD(&s->s_files);
72#endif 73#endif
73 INIT_LIST_HEAD(&s->s_instances); 74 INIT_LIST_HEAD(&s->s_instances);
74 INIT_HLIST_HEAD(&s->s_anon); 75 INIT_HLIST_BL_HEAD(&s->s_anon);
75 INIT_LIST_HEAD(&s->s_inodes); 76 INIT_LIST_HEAD(&s->s_inodes);
76 INIT_LIST_HEAD(&s->s_dentry_lru); 77 INIT_LIST_HEAD(&s->s_dentry_lru);
77 init_rwsem(&s->s_umount); 78 init_rwsem(&s->s_umount);
@@ -1139,7 +1140,7 @@ static struct vfsmount *fs_set_subtype(struct vfsmount *mnt, const char *fstype)
1139 return mnt; 1140 return mnt;
1140 1141
1141 err: 1142 err:
1142 mntput(mnt); 1143 mntput_long(mnt);
1143 return ERR_PTR(err); 1144 return ERR_PTR(err);
1144} 1145}
1145 1146
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index 7e54bac8c4b0..ea9120a830d8 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -231,7 +231,7 @@ void release_sysfs_dirent(struct sysfs_dirent * sd)
231 goto repeat; 231 goto repeat;
232} 232}
233 233
234static int sysfs_dentry_delete(struct dentry *dentry) 234static int sysfs_dentry_delete(const struct dentry *dentry)
235{ 235{
236 struct sysfs_dirent *sd = dentry->d_fsdata; 236 struct sysfs_dirent *sd = dentry->d_fsdata;
237 return !!(sd->s_flags & SYSFS_FLAG_REMOVED); 237 return !!(sd->s_flags & SYSFS_FLAG_REMOVED);
@@ -239,9 +239,13 @@ static int sysfs_dentry_delete(struct dentry *dentry)
239 239
240static int sysfs_dentry_revalidate(struct dentry *dentry, struct nameidata *nd) 240static int sysfs_dentry_revalidate(struct dentry *dentry, struct nameidata *nd)
241{ 241{
242 struct sysfs_dirent *sd = dentry->d_fsdata; 242 struct sysfs_dirent *sd;
243 int is_dir; 243 int is_dir;
244 244
245 if (nd->flags & LOOKUP_RCU)
246 return -ECHILD;
247
248 sd = dentry->d_fsdata;
245 mutex_lock(&sysfs_mutex); 249 mutex_lock(&sysfs_mutex);
246 250
247 /* The sysfs dirent has been deleted */ 251 /* The sysfs dirent has been deleted */
@@ -701,7 +705,7 @@ static struct dentry * sysfs_lookup(struct inode *dir, struct dentry *dentry,
701 /* instantiate and hash dentry */ 705 /* instantiate and hash dentry */
702 ret = d_find_alias(inode); 706 ret = d_find_alias(inode);
703 if (!ret) { 707 if (!ret) {
704 dentry->d_op = &sysfs_dentry_ops; 708 d_set_d_op(dentry, &sysfs_dentry_ops);
705 dentry->d_fsdata = sysfs_get(sd); 709 dentry->d_fsdata = sysfs_get(sd);
706 d_add(dentry, inode); 710 d_add(dentry, inode);
707 } else { 711 } else {
diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
index cffb1fd8ba33..30ac27345586 100644
--- a/fs/sysfs/inode.c
+++ b/fs/sysfs/inode.c
@@ -348,13 +348,18 @@ int sysfs_hash_and_remove(struct sysfs_dirent *dir_sd, const void *ns, const cha
348 return -ENOENT; 348 return -ENOENT;
349} 349}
350 350
351int sysfs_permission(struct inode *inode, int mask) 351int sysfs_permission(struct inode *inode, int mask, unsigned int flags)
352{ 352{
353 struct sysfs_dirent *sd = inode->i_private; 353 struct sysfs_dirent *sd;
354
355 if (flags & IPERM_FLAG_RCU)
356 return -ECHILD;
357
358 sd = inode->i_private;
354 359
355 mutex_lock(&sysfs_mutex); 360 mutex_lock(&sysfs_mutex);
356 sysfs_refresh_inode(sd, inode); 361 sysfs_refresh_inode(sd, inode);
357 mutex_unlock(&sysfs_mutex); 362 mutex_unlock(&sysfs_mutex);
358 363
359 return generic_permission(inode, mask, NULL); 364 return generic_permission(inode, mask, flags, NULL);
360} 365}
diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h
index d9be60a2e956..ffaaa816bfba 100644
--- a/fs/sysfs/sysfs.h
+++ b/fs/sysfs/sysfs.h
@@ -200,7 +200,7 @@ static inline void __sysfs_put(struct sysfs_dirent *sd)
200struct inode *sysfs_get_inode(struct super_block *sb, struct sysfs_dirent *sd); 200struct inode *sysfs_get_inode(struct super_block *sb, struct sysfs_dirent *sd);
201void sysfs_evict_inode(struct inode *inode); 201void sysfs_evict_inode(struct inode *inode);
202int sysfs_sd_setattr(struct sysfs_dirent *sd, struct iattr *iattr); 202int sysfs_sd_setattr(struct sysfs_dirent *sd, struct iattr *iattr);
203int sysfs_permission(struct inode *inode, int mask); 203int sysfs_permission(struct inode *inode, int mask, unsigned int flags);
204int sysfs_setattr(struct dentry *dentry, struct iattr *iattr); 204int sysfs_setattr(struct dentry *dentry, struct iattr *iattr);
205int sysfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat); 205int sysfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
206int sysfs_setxattr(struct dentry *dentry, const char *name, const void *value, 206int sysfs_setxattr(struct dentry *dentry, const char *name, const void *value,
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c
index de44d067b9e6..0630eb969a28 100644
--- a/fs/sysv/inode.c
+++ b/fs/sysv/inode.c
@@ -333,11 +333,18 @@ static struct inode *sysv_alloc_inode(struct super_block *sb)
333 return &si->vfs_inode; 333 return &si->vfs_inode;
334} 334}
335 335
336static void sysv_destroy_inode(struct inode *inode) 336static void sysv_i_callback(struct rcu_head *head)
337{ 337{
338 struct inode *inode = container_of(head, struct inode, i_rcu);
339 INIT_LIST_HEAD(&inode->i_dentry);
338 kmem_cache_free(sysv_inode_cachep, SYSV_I(inode)); 340 kmem_cache_free(sysv_inode_cachep, SYSV_I(inode));
339} 341}
340 342
343static void sysv_destroy_inode(struct inode *inode)
344{
345 call_rcu(&inode->i_rcu, sysv_i_callback);
346}
347
341static void init_once(void *p) 348static void init_once(void *p)
342{ 349{
343 struct sysv_inode_info *si = (struct sysv_inode_info *)p; 350 struct sysv_inode_info *si = (struct sysv_inode_info *)p;
diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c
index 11e7f7d11cd0..b5e68da2db32 100644
--- a/fs/sysv/namei.c
+++ b/fs/sysv/namei.c
@@ -27,7 +27,8 @@ static int add_nondir(struct dentry *dentry, struct inode *inode)
27 return err; 27 return err;
28} 28}
29 29
30static int sysv_hash(struct dentry *dentry, struct qstr *qstr) 30static int sysv_hash(const struct dentry *dentry, const struct inode *inode,
31 struct qstr *qstr)
31{ 32{
32 /* Truncate the name in place, avoids having to define a compare 33 /* Truncate the name in place, avoids having to define a compare
33 function. */ 34 function. */
@@ -47,7 +48,7 @@ static struct dentry *sysv_lookup(struct inode * dir, struct dentry * dentry, st
47 struct inode * inode = NULL; 48 struct inode * inode = NULL;
48 ino_t ino; 49 ino_t ino;
49 50
50 dentry->d_op = dir->i_sb->s_root->d_op; 51 d_set_d_op(dentry, dir->i_sb->s_root->d_op);
51 if (dentry->d_name.len > SYSV_NAMELEN) 52 if (dentry->d_name.len > SYSV_NAMELEN)
52 return ERR_PTR(-ENAMETOOLONG); 53 return ERR_PTR(-ENAMETOOLONG);
53 ino = sysv_inode_by_name(dentry); 54 ino = sysv_inode_by_name(dentry);
diff --git a/fs/sysv/super.c b/fs/sysv/super.c
index 3d9c62be0c10..76712aefc4ab 100644
--- a/fs/sysv/super.c
+++ b/fs/sysv/super.c
@@ -346,7 +346,7 @@ static int complete_read_super(struct super_block *sb, int silent, int size)
346 if (sbi->s_forced_ro) 346 if (sbi->s_forced_ro)
347 sb->s_flags |= MS_RDONLY; 347 sb->s_flags |= MS_RDONLY;
348 if (sbi->s_truncate) 348 if (sbi->s_truncate)
349 sb->s_root->d_op = &sysv_dentry_operations; 349 d_set_d_op(sb->s_root, &sysv_dentry_operations);
350 return 1; 350 return 1;
351} 351}
352 352
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 91fac54c70e3..6e11c2975dcf 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -272,12 +272,20 @@ static struct inode *ubifs_alloc_inode(struct super_block *sb)
272 return &ui->vfs_inode; 272 return &ui->vfs_inode;
273}; 273};
274 274
275static void ubifs_i_callback(struct rcu_head *head)
276{
277 struct inode *inode = container_of(head, struct inode, i_rcu);
278 struct ubifs_inode *ui = ubifs_inode(inode);
279 INIT_LIST_HEAD(&inode->i_dentry);
280 kmem_cache_free(ubifs_inode_slab, ui);
281}
282
275static void ubifs_destroy_inode(struct inode *inode) 283static void ubifs_destroy_inode(struct inode *inode)
276{ 284{
277 struct ubifs_inode *ui = ubifs_inode(inode); 285 struct ubifs_inode *ui = ubifs_inode(inode);
278 286
279 kfree(ui->data); 287 kfree(ui->data);
280 kmem_cache_free(ubifs_inode_slab, inode); 288 call_rcu(&inode->i_rcu, ubifs_i_callback);
281} 289}
282 290
283/* 291/*
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 4a5c7c61836a..b539d53320fb 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -139,11 +139,18 @@ static struct inode *udf_alloc_inode(struct super_block *sb)
139 return &ei->vfs_inode; 139 return &ei->vfs_inode;
140} 140}
141 141
142static void udf_destroy_inode(struct inode *inode) 142static void udf_i_callback(struct rcu_head *head)
143{ 143{
144 struct inode *inode = container_of(head, struct inode, i_rcu);
145 INIT_LIST_HEAD(&inode->i_dentry);
144 kmem_cache_free(udf_inode_cachep, UDF_I(inode)); 146 kmem_cache_free(udf_inode_cachep, UDF_I(inode));
145} 147}
146 148
149static void udf_destroy_inode(struct inode *inode)
150{
151 call_rcu(&inode->i_rcu, udf_i_callback);
152}
153
147static void init_once(void *foo) 154static void init_once(void *foo)
148{ 155{
149 struct udf_inode_info *ei = (struct udf_inode_info *)foo; 156 struct udf_inode_info *ei = (struct udf_inode_info *)foo;
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 2c47daed56da..2c61ac5d4e48 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -1412,11 +1412,18 @@ static struct inode *ufs_alloc_inode(struct super_block *sb)
1412 return &ei->vfs_inode; 1412 return &ei->vfs_inode;
1413} 1413}
1414 1414
1415static void ufs_destroy_inode(struct inode *inode) 1415static void ufs_i_callback(struct rcu_head *head)
1416{ 1416{
1417 struct inode *inode = container_of(head, struct inode, i_rcu);
1418 INIT_LIST_HEAD(&inode->i_dentry);
1417 kmem_cache_free(ufs_inode_cachep, UFS_I(inode)); 1419 kmem_cache_free(ufs_inode_cachep, UFS_I(inode));
1418} 1420}
1419 1421
1422static void ufs_destroy_inode(struct inode *inode)
1423{
1424 call_rcu(&inode->i_rcu, ufs_i_callback);
1425}
1426
1420static void init_once(void *foo) 1427static void init_once(void *foo)
1421{ 1428{
1422 struct ufs_inode_info *ei = (struct ufs_inode_info *) foo; 1429 struct ufs_inode_info *ei = (struct ufs_inode_info *) foo;
diff --git a/fs/xfs/linux-2.6/xfs_acl.c b/fs/xfs/linux-2.6/xfs_acl.c
index b2771862fd3d..39f4f809bb68 100644
--- a/fs/xfs/linux-2.6/xfs_acl.c
+++ b/fs/xfs/linux-2.6/xfs_acl.c
@@ -219,12 +219,13 @@ xfs_set_acl(struct inode *inode, int type, struct posix_acl *acl)
219} 219}
220 220
221int 221int
222xfs_check_acl(struct inode *inode, int mask) 222xfs_check_acl(struct inode *inode, int mask, unsigned int flags)
223{ 223{
224 struct xfs_inode *ip = XFS_I(inode); 224 struct xfs_inode *ip;
225 struct posix_acl *acl; 225 struct posix_acl *acl;
226 int error = -EAGAIN; 226 int error = -EAGAIN;
227 227
228 ip = XFS_I(inode);
228 trace_xfs_check_acl(ip); 229 trace_xfs_check_acl(ip);
229 230
230 /* 231 /*
@@ -234,6 +235,12 @@ xfs_check_acl(struct inode *inode, int mask)
234 if (!XFS_IFORK_Q(ip)) 235 if (!XFS_IFORK_Q(ip))
235 return -EAGAIN; 236 return -EAGAIN;
236 237
238 if (flags & IPERM_FLAG_RCU) {
239 if (!negative_cached_acl(inode, ACL_TYPE_ACCESS))
240 return -ECHILD;
241 return -EAGAIN;
242 }
243
237 acl = xfs_get_acl(inode, ACL_TYPE_ACCESS); 244 acl = xfs_get_acl(inode, ACL_TYPE_ACCESS);
238 if (IS_ERR(acl)) 245 if (IS_ERR(acl))
239 return PTR_ERR(acl); 246 return PTR_ERR(acl);
diff --git a/fs/xfs/xfs_acl.h b/fs/xfs/xfs_acl.h
index 0135e2a669d7..11dd72070cbb 100644
--- a/fs/xfs/xfs_acl.h
+++ b/fs/xfs/xfs_acl.h
@@ -42,7 +42,7 @@ struct xfs_acl {
42#define SGI_ACL_DEFAULT_SIZE (sizeof(SGI_ACL_DEFAULT)-1) 42#define SGI_ACL_DEFAULT_SIZE (sizeof(SGI_ACL_DEFAULT)-1)
43 43
44#ifdef CONFIG_XFS_POSIX_ACL 44#ifdef CONFIG_XFS_POSIX_ACL
45extern int xfs_check_acl(struct inode *inode, int mask); 45extern int xfs_check_acl(struct inode *inode, int mask, unsigned int flags);
46extern struct posix_acl *xfs_get_acl(struct inode *inode, int type); 46extern struct posix_acl *xfs_get_acl(struct inode *inode, int type);
47extern int xfs_inherit_acl(struct inode *inode, struct posix_acl *default_acl); 47extern int xfs_inherit_acl(struct inode *inode, struct posix_acl *default_acl);
48extern int xfs_acl_chmod(struct inode *inode); 48extern int xfs_acl_chmod(struct inode *inode);
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index 0cdd26932d8e..d7de5a3f7867 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -91,6 +91,17 @@ xfs_inode_alloc(
91 return ip; 91 return ip;
92} 92}
93 93
94STATIC void
95xfs_inode_free_callback(
96 struct rcu_head *head)
97{
98 struct inode *inode = container_of(head, struct inode, i_rcu);
99 struct xfs_inode *ip = XFS_I(inode);
100
101 INIT_LIST_HEAD(&inode->i_dentry);
102 kmem_zone_free(xfs_inode_zone, ip);
103}
104
94void 105void
95xfs_inode_free( 106xfs_inode_free(
96 struct xfs_inode *ip) 107 struct xfs_inode *ip)
@@ -134,7 +145,7 @@ xfs_inode_free(
134 ASSERT(!spin_is_locked(&ip->i_flags_lock)); 145 ASSERT(!spin_is_locked(&ip->i_flags_lock));
135 ASSERT(completion_done(&ip->i_flush)); 146 ASSERT(completion_done(&ip->i_flush));
136 147
137 kmem_zone_free(xfs_inode_zone, ip); 148 call_rcu(&ip->i_vnode.i_rcu, xfs_inode_free_callback);
138} 149}
139 150
140/* 151/*
diff --git a/include/linux/bit_spinlock.h b/include/linux/bit_spinlock.h
index 7113a32a86ea..e612575a2596 100644
--- a/include/linux/bit_spinlock.h
+++ b/include/linux/bit_spinlock.h
@@ -1,6 +1,10 @@
1#ifndef __LINUX_BIT_SPINLOCK_H 1#ifndef __LINUX_BIT_SPINLOCK_H
2#define __LINUX_BIT_SPINLOCK_H 2#define __LINUX_BIT_SPINLOCK_H
3 3
4#include <linux/kernel.h>
5#include <linux/preempt.h>
6#include <asm/atomic.h>
7
4/* 8/*
5 * bit-based spin_lock() 9 * bit-based spin_lock()
6 * 10 *
diff --git a/include/linux/coda_linux.h b/include/linux/coda_linux.h
index 2e914d0771b9..4ccc59c1ea82 100644
--- a/include/linux/coda_linux.h
+++ b/include/linux/coda_linux.h
@@ -37,7 +37,7 @@ extern const struct file_operations coda_ioctl_operations;
37/* operations shared over more than one file */ 37/* operations shared over more than one file */
38int coda_open(struct inode *i, struct file *f); 38int coda_open(struct inode *i, struct file *f);
39int coda_release(struct inode *i, struct file *f); 39int coda_release(struct inode *i, struct file *f);
40int coda_permission(struct inode *inode, int mask); 40int coda_permission(struct inode *inode, int mask, unsigned int flags);
41int coda_revalidate_inode(struct dentry *); 41int coda_revalidate_inode(struct dentry *);
42int coda_getattr(struct vfsmount *, struct dentry *, struct kstat *); 42int coda_getattr(struct vfsmount *, struct dentry *, struct kstat *);
43int coda_setattr(struct dentry *, struct iattr *); 43int coda_setattr(struct dentry *, struct iattr *);
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 6a4aea30aa09..bd07758943e0 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -4,7 +4,9 @@
4#include <asm/atomic.h> 4#include <asm/atomic.h>
5#include <linux/list.h> 5#include <linux/list.h>
6#include <linux/rculist.h> 6#include <linux/rculist.h>
7#include <linux/rculist_bl.h>
7#include <linux/spinlock.h> 8#include <linux/spinlock.h>
9#include <linux/seqlock.h>
8#include <linux/cache.h> 10#include <linux/cache.h>
9#include <linux/rcupdate.h> 11#include <linux/rcupdate.h>
10 12
@@ -45,6 +47,27 @@ struct dentry_stat_t {
45}; 47};
46extern struct dentry_stat_t dentry_stat; 48extern struct dentry_stat_t dentry_stat;
47 49
50/*
51 * Compare 2 name strings, return 0 if they match, otherwise non-zero.
52 * The strings are both count bytes long, and count is non-zero.
53 */
54static inline int dentry_cmp(const unsigned char *cs, size_t scount,
55 const unsigned char *ct, size_t tcount)
56{
57 int ret;
58 if (scount != tcount)
59 return 1;
60 do {
61 ret = (*cs != *ct);
62 if (ret)
63 break;
64 cs++;
65 ct++;
66 tcount--;
67 } while (tcount);
68 return ret;
69}
70
48/* Name hashing routines. Initial hash value */ 71/* Name hashing routines. Initial hash value */
49/* Hash courtesy of the R5 hash in reiserfs modulo sign bits */ 72/* Hash courtesy of the R5 hash in reiserfs modulo sign bits */
50#define init_name_hash() 0 73#define init_name_hash() 0
@@ -81,25 +104,33 @@ full_name_hash(const unsigned char *name, unsigned int len)
81 * large memory footprint increase). 104 * large memory footprint increase).
82 */ 105 */
83#ifdef CONFIG_64BIT 106#ifdef CONFIG_64BIT
84#define DNAME_INLINE_LEN_MIN 32 /* 192 bytes */ 107# define DNAME_INLINE_LEN 32 /* 192 bytes */
85#else 108#else
86#define DNAME_INLINE_LEN_MIN 40 /* 128 bytes */ 109# ifdef CONFIG_SMP
110# define DNAME_INLINE_LEN 36 /* 128 bytes */
111# else
112# define DNAME_INLINE_LEN 40 /* 128 bytes */
113# endif
87#endif 114#endif
88 115
89struct dentry { 116struct dentry {
90 atomic_t d_count; 117 /* RCU lookup touched fields */
91 unsigned int d_flags; /* protected by d_lock */ 118 unsigned int d_flags; /* protected by d_lock */
92 spinlock_t d_lock; /* per dentry lock */ 119 seqcount_t d_seq; /* per dentry seqlock */
93 int d_mounted; 120 struct hlist_bl_node d_hash; /* lookup hash list */
94 struct inode *d_inode; /* Where the name belongs to - NULL is
95 * negative */
96 /*
97 * The next three fields are touched by __d_lookup. Place them here
98 * so they all fit in a cache line.
99 */
100 struct hlist_node d_hash; /* lookup hash list */
101 struct dentry *d_parent; /* parent directory */ 121 struct dentry *d_parent; /* parent directory */
102 struct qstr d_name; 122 struct qstr d_name;
123 struct inode *d_inode; /* Where the name belongs to - NULL is
124 * negative */
125 unsigned char d_iname[DNAME_INLINE_LEN]; /* small names */
126
127 /* Ref lookup also touches following */
128 unsigned int d_count; /* protected by d_lock */
129 spinlock_t d_lock; /* per dentry lock */
130 const struct dentry_operations *d_op;
131 struct super_block *d_sb; /* The root of the dentry tree */
132 unsigned long d_time; /* used by d_revalidate */
133 void *d_fsdata; /* fs-specific data */
103 134
104 struct list_head d_lru; /* LRU list */ 135 struct list_head d_lru; /* LRU list */
105 /* 136 /*
@@ -111,12 +142,6 @@ struct dentry {
111 } d_u; 142 } d_u;
112 struct list_head d_subdirs; /* our children */ 143 struct list_head d_subdirs; /* our children */
113 struct list_head d_alias; /* inode alias list */ 144 struct list_head d_alias; /* inode alias list */
114 unsigned long d_time; /* used by d_revalidate */
115 const struct dentry_operations *d_op;
116 struct super_block *d_sb; /* The root of the dentry tree */
117 void *d_fsdata; /* fs-specific data */
118
119 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
120}; 145};
121 146
122/* 147/*
@@ -133,96 +158,61 @@ enum dentry_d_lock_class
133 158
134struct dentry_operations { 159struct dentry_operations {
135 int (*d_revalidate)(struct dentry *, struct nameidata *); 160 int (*d_revalidate)(struct dentry *, struct nameidata *);
136 int (*d_hash) (struct dentry *, struct qstr *); 161 int (*d_hash)(const struct dentry *, const struct inode *,
137 int (*d_compare) (struct dentry *, struct qstr *, struct qstr *); 162 struct qstr *);
138 int (*d_delete)(struct dentry *); 163 int (*d_compare)(const struct dentry *, const struct inode *,
164 const struct dentry *, const struct inode *,
165 unsigned int, const char *, const struct qstr *);
166 int (*d_delete)(const struct dentry *);
139 void (*d_release)(struct dentry *); 167 void (*d_release)(struct dentry *);
140 void (*d_iput)(struct dentry *, struct inode *); 168 void (*d_iput)(struct dentry *, struct inode *);
141 char *(*d_dname)(struct dentry *, char *, int); 169 char *(*d_dname)(struct dentry *, char *, int);
142}; 170} ____cacheline_aligned;
143
144/* the dentry parameter passed to d_hash and d_compare is the parent
145 * directory of the entries to be compared. It is used in case these
146 * functions need any directory specific information for determining
147 * equivalency classes. Using the dentry itself might not work, as it
148 * might be a negative dentry which has no information associated with
149 * it */
150 171
151/* 172/*
152locking rules: 173 * Locking rules for dentry_operations callbacks are to be found in
153 big lock dcache_lock d_lock may block 174 * Documentation/filesystems/Locking. Keep it updated!
154d_revalidate: no no no yes 175 *
155d_hash no no no yes 176 * FUrther descriptions are found in Documentation/filesystems/vfs.txt.
156d_compare: no yes yes no 177 * Keep it updated too!
157d_delete: no yes no no
158d_release: no no no yes
159d_iput: no no no yes
160 */ 178 */
161 179
162/* d_flags entries */ 180/* d_flags entries */
163#define DCACHE_AUTOFS_PENDING 0x0001 /* autofs: "under construction" */ 181#define DCACHE_AUTOFS_PENDING 0x0001 /* autofs: "under construction" */
164#define DCACHE_NFSFS_RENAMED 0x0002 /* this dentry has been "silly 182#define DCACHE_NFSFS_RENAMED 0x0002
165 * renamed" and has to be 183 /* this dentry has been "silly renamed" and has to be deleted on the last
166 * deleted on the last dput() 184 * dput() */
167 */ 185
168#define DCACHE_DISCONNECTED 0x0004 186#define DCACHE_DISCONNECTED 0x0004
169 /* This dentry is possibly not currently connected to the dcache tree, 187 /* This dentry is possibly not currently connected to the dcache tree, in
170 * in which case its parent will either be itself, or will have this 188 * which case its parent will either be itself, or will have this flag as
171 * flag as well. nfsd will not use a dentry with this bit set, but will 189 * well. nfsd will not use a dentry with this bit set, but will first
172 * first endeavour to clear the bit either by discovering that it is 190 * endeavour to clear the bit either by discovering that it is connected,
173 * connected, or by performing lookup operations. Any filesystem which 191 * or by performing lookup operations. Any filesystem which supports
174 * supports nfsd_operations MUST have a lookup function which, if it finds 192 * nfsd_operations MUST have a lookup function which, if it finds a
175 * a directory inode with a DCACHE_DISCONNECTED dentry, will d_move 193 * directory inode with a DCACHE_DISCONNECTED dentry, will d_move that
176 * that dentry into place and return that dentry rather than the passed one, 194 * dentry into place and return that dentry rather than the passed one,
177 * typically using d_splice_alias. 195 * typically using d_splice_alias. */
178 */
179 196
180#define DCACHE_REFERENCED 0x0008 /* Recently used, don't discard. */ 197#define DCACHE_REFERENCED 0x0008 /* Recently used, don't discard. */
181#define DCACHE_UNHASHED 0x0010 198#define DCACHE_UNHASHED 0x0010
182 199#define DCACHE_INOTIFY_PARENT_WATCHED 0x0020
183#define DCACHE_INOTIFY_PARENT_WATCHED 0x0020 /* Parent inode is watched by inotify */ 200 /* Parent inode is watched by inotify */
184 201
185#define DCACHE_COOKIE 0x0040 /* For use by dcookie subsystem */ 202#define DCACHE_COOKIE 0x0040 /* For use by dcookie subsystem */
186 203#define DCACHE_FSNOTIFY_PARENT_WATCHED 0x0080
187#define DCACHE_FSNOTIFY_PARENT_WATCHED 0x0080 /* Parent inode is watched by some fsnotify listener */ 204 /* Parent inode is watched by some fsnotify listener */
188 205
189#define DCACHE_CANT_MOUNT 0x0100 206#define DCACHE_CANT_MOUNT 0x0100
207#define DCACHE_GENOCIDE 0x0200
208#define DCACHE_MOUNTED 0x0400 /* is a mountpoint */
190 209
191extern spinlock_t dcache_lock; 210#define DCACHE_OP_HASH 0x1000
192extern seqlock_t rename_lock; 211#define DCACHE_OP_COMPARE 0x2000
193 212#define DCACHE_OP_REVALIDATE 0x4000
194/** 213#define DCACHE_OP_DELETE 0x8000
195 * d_drop - drop a dentry
196 * @dentry: dentry to drop
197 *
198 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
199 * be found through a VFS lookup any more. Note that this is different from
200 * deleting the dentry - d_delete will try to mark the dentry negative if
201 * possible, giving a successful _negative_ lookup, while d_drop will
202 * just make the cache lookup fail.
203 *
204 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
205 * reason (NFS timeouts or autofs deletes).
206 *
207 * __d_drop requires dentry->d_lock.
208 */
209
210static inline void __d_drop(struct dentry *dentry)
211{
212 if (!(dentry->d_flags & DCACHE_UNHASHED)) {
213 dentry->d_flags |= DCACHE_UNHASHED;
214 hlist_del_rcu(&dentry->d_hash);
215 }
216}
217 214
218static inline void d_drop(struct dentry *dentry) 215extern seqlock_t rename_lock;
219{
220 spin_lock(&dcache_lock);
221 spin_lock(&dentry->d_lock);
222 __d_drop(dentry);
223 spin_unlock(&dentry->d_lock);
224 spin_unlock(&dcache_lock);
225}
226 216
227static inline int dname_external(struct dentry *dentry) 217static inline int dname_external(struct dentry *dentry)
228{ 218{
@@ -235,10 +225,14 @@ static inline int dname_external(struct dentry *dentry)
235extern void d_instantiate(struct dentry *, struct inode *); 225extern void d_instantiate(struct dentry *, struct inode *);
236extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *); 226extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *);
237extern struct dentry * d_materialise_unique(struct dentry *, struct inode *); 227extern struct dentry * d_materialise_unique(struct dentry *, struct inode *);
228extern void __d_drop(struct dentry *dentry);
229extern void d_drop(struct dentry *dentry);
238extern void d_delete(struct dentry *); 230extern void d_delete(struct dentry *);
231extern void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op);
239 232
240/* allocate/de-allocate */ 233/* allocate/de-allocate */
241extern struct dentry * d_alloc(struct dentry *, const struct qstr *); 234extern struct dentry * d_alloc(struct dentry *, const struct qstr *);
235extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *);
242extern struct dentry * d_splice_alias(struct inode *, struct dentry *); 236extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
243extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *); 237extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *);
244extern struct dentry * d_obtain_alias(struct inode *); 238extern struct dentry * d_obtain_alias(struct inode *);
@@ -296,14 +290,40 @@ static inline struct dentry *d_add_unique(struct dentry *entry, struct inode *in
296 return res; 290 return res;
297} 291}
298 292
293extern void dentry_update_name_case(struct dentry *, struct qstr *);
294
299/* used for rename() and baskets */ 295/* used for rename() and baskets */
300extern void d_move(struct dentry *, struct dentry *); 296extern void d_move(struct dentry *, struct dentry *);
301extern struct dentry *d_ancestor(struct dentry *, struct dentry *); 297extern struct dentry *d_ancestor(struct dentry *, struct dentry *);
302 298
303/* appendix may either be NULL or be used for transname suffixes */ 299/* appendix may either be NULL or be used for transname suffixes */
304extern struct dentry * d_lookup(struct dentry *, struct qstr *); 300extern struct dentry *d_lookup(struct dentry *, struct qstr *);
305extern struct dentry * __d_lookup(struct dentry *, struct qstr *); 301extern struct dentry *d_hash_and_lookup(struct dentry *, struct qstr *);
306extern struct dentry * d_hash_and_lookup(struct dentry *, struct qstr *); 302extern struct dentry *__d_lookup(struct dentry *, struct qstr *);
303extern struct dentry *__d_lookup_rcu(struct dentry *parent, struct qstr *name,
304 unsigned *seq, struct inode **inode);
305
306/**
307 * __d_rcu_to_refcount - take a refcount on dentry if sequence check is ok
308 * @dentry: dentry to take a ref on
309 * @seq: seqcount to verify against
310 * @Returns: 0 on failure, else 1.
311 *
312 * __d_rcu_to_refcount operates on a dentry,seq pair that was returned
313 * by __d_lookup_rcu, to get a reference on an rcu-walk dentry.
314 */
315static inline int __d_rcu_to_refcount(struct dentry *dentry, unsigned seq)
316{
317 int ret = 0;
318
319 assert_spin_locked(&dentry->d_lock);
320 if (!read_seqcount_retry(&dentry->d_seq, seq)) {
321 ret = 1;
322 dentry->d_count++;
323 }
324
325 return ret;
326}
307 327
308/* validate "insecure" dentry pointer */ 328/* validate "insecure" dentry pointer */
309extern int d_validate(struct dentry *, struct dentry *); 329extern int d_validate(struct dentry *, struct dentry *);
@@ -316,34 +336,37 @@ extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
316extern char *__d_path(const struct path *path, struct path *root, char *, int); 336extern char *__d_path(const struct path *path, struct path *root, char *, int);
317extern char *d_path(const struct path *, char *, int); 337extern char *d_path(const struct path *, char *, int);
318extern char *d_path_with_unreachable(const struct path *, char *, int); 338extern char *d_path_with_unreachable(const struct path *, char *, int);
319extern char *__dentry_path(struct dentry *, char *, int); 339extern char *dentry_path_raw(struct dentry *, char *, int);
320extern char *dentry_path(struct dentry *, char *, int); 340extern char *dentry_path(struct dentry *, char *, int);
321 341
322/* Allocation counts.. */ 342/* Allocation counts.. */
323 343
324/** 344/**
325 * dget, dget_locked - get a reference to a dentry 345 * dget, dget_dlock - get a reference to a dentry
326 * @dentry: dentry to get a reference to 346 * @dentry: dentry to get a reference to
327 * 347 *
328 * Given a dentry or %NULL pointer increment the reference count 348 * Given a dentry or %NULL pointer increment the reference count
329 * if appropriate and return the dentry. A dentry will not be 349 * if appropriate and return the dentry. A dentry will not be
330 * destroyed when it has references. dget() should never be 350 * destroyed when it has references.
331 * called for dentries with zero reference counter. For these cases
332 * (preferably none, functions in dcache.c are sufficient for normal
333 * needs and they take necessary precautions) you should hold dcache_lock
334 * and call dget_locked() instead of dget().
335 */ 351 */
336 352static inline struct dentry *dget_dlock(struct dentry *dentry)
353{
354 if (dentry)
355 dentry->d_count++;
356 return dentry;
357}
358
337static inline struct dentry *dget(struct dentry *dentry) 359static inline struct dentry *dget(struct dentry *dentry)
338{ 360{
339 if (dentry) { 361 if (dentry) {
340 BUG_ON(!atomic_read(&dentry->d_count)); 362 spin_lock(&dentry->d_lock);
341 atomic_inc(&dentry->d_count); 363 dget_dlock(dentry);
364 spin_unlock(&dentry->d_lock);
342 } 365 }
343 return dentry; 366 return dentry;
344} 367}
345 368
346extern struct dentry * dget_locked(struct dentry *); 369extern struct dentry *dget_parent(struct dentry *dentry);
347 370
348/** 371/**
349 * d_unhashed - is dentry hashed 372 * d_unhashed - is dentry hashed
@@ -374,21 +397,11 @@ static inline void dont_mount(struct dentry *dentry)
374 spin_unlock(&dentry->d_lock); 397 spin_unlock(&dentry->d_lock);
375} 398}
376 399
377static inline struct dentry *dget_parent(struct dentry *dentry)
378{
379 struct dentry *ret;
380
381 spin_lock(&dentry->d_lock);
382 ret = dget(dentry->d_parent);
383 spin_unlock(&dentry->d_lock);
384 return ret;
385}
386
387extern void dput(struct dentry *); 400extern void dput(struct dentry *);
388 401
389static inline int d_mountpoint(struct dentry *dentry) 402static inline int d_mountpoint(struct dentry *dentry)
390{ 403{
391 return dentry->d_mounted; 404 return dentry->d_flags & DCACHE_MOUNTED;
392} 405}
393 406
394extern struct vfsmount *lookup_mnt(struct path *); 407extern struct vfsmount *lookup_mnt(struct path *);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 090f0eacde29..baf3e556ff0e 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -392,6 +392,7 @@ struct inodes_stat_t {
392#include <linux/capability.h> 392#include <linux/capability.h>
393#include <linux/semaphore.h> 393#include <linux/semaphore.h>
394#include <linux/fiemap.h> 394#include <linux/fiemap.h>
395#include <linux/rculist_bl.h>
395 396
396#include <asm/atomic.h> 397#include <asm/atomic.h>
397#include <asm/byteorder.h> 398#include <asm/byteorder.h>
@@ -733,16 +734,31 @@ struct posix_acl;
733#define ACL_NOT_CACHED ((void *)(-1)) 734#define ACL_NOT_CACHED ((void *)(-1))
734 735
735struct inode { 736struct inode {
737 /* RCU path lookup touches following: */
738 umode_t i_mode;
739 uid_t i_uid;
740 gid_t i_gid;
741 const struct inode_operations *i_op;
742 struct super_block *i_sb;
743
744 spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
745 unsigned int i_flags;
746 struct mutex i_mutex;
747
748 unsigned long i_state;
749 unsigned long dirtied_when; /* jiffies of first dirtying */
750
736 struct hlist_node i_hash; 751 struct hlist_node i_hash;
737 struct list_head i_wb_list; /* backing dev IO list */ 752 struct list_head i_wb_list; /* backing dev IO list */
738 struct list_head i_lru; /* inode LRU list */ 753 struct list_head i_lru; /* inode LRU list */
739 struct list_head i_sb_list; 754 struct list_head i_sb_list;
740 struct list_head i_dentry; 755 union {
756 struct list_head i_dentry;
757 struct rcu_head i_rcu;
758 };
741 unsigned long i_ino; 759 unsigned long i_ino;
742 atomic_t i_count; 760 atomic_t i_count;
743 unsigned int i_nlink; 761 unsigned int i_nlink;
744 uid_t i_uid;
745 gid_t i_gid;
746 dev_t i_rdev; 762 dev_t i_rdev;
747 unsigned int i_blkbits; 763 unsigned int i_blkbits;
748 u64 i_version; 764 u64 i_version;
@@ -755,13 +771,8 @@ struct inode {
755 struct timespec i_ctime; 771 struct timespec i_ctime;
756 blkcnt_t i_blocks; 772 blkcnt_t i_blocks;
757 unsigned short i_bytes; 773 unsigned short i_bytes;
758 umode_t i_mode;
759 spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
760 struct mutex i_mutex;
761 struct rw_semaphore i_alloc_sem; 774 struct rw_semaphore i_alloc_sem;
762 const struct inode_operations *i_op;
763 const struct file_operations *i_fop; /* former ->i_op->default_file_ops */ 775 const struct file_operations *i_fop; /* former ->i_op->default_file_ops */
764 struct super_block *i_sb;
765 struct file_lock *i_flock; 776 struct file_lock *i_flock;
766 struct address_space *i_mapping; 777 struct address_space *i_mapping;
767 struct address_space i_data; 778 struct address_space i_data;
@@ -782,11 +793,6 @@ struct inode {
782 struct hlist_head i_fsnotify_marks; 793 struct hlist_head i_fsnotify_marks;
783#endif 794#endif
784 795
785 unsigned long i_state;
786 unsigned long dirtied_when; /* jiffies of first dirtying */
787
788 unsigned int i_flags;
789
790#ifdef CONFIG_IMA 796#ifdef CONFIG_IMA
791 /* protected by i_lock */ 797 /* protected by i_lock */
792 unsigned int i_readcount; /* struct files open RO */ 798 unsigned int i_readcount; /* struct files open RO */
@@ -1372,13 +1378,13 @@ struct super_block {
1372 const struct xattr_handler **s_xattr; 1378 const struct xattr_handler **s_xattr;
1373 1379
1374 struct list_head s_inodes; /* all inodes */ 1380 struct list_head s_inodes; /* all inodes */
1375 struct hlist_head s_anon; /* anonymous dentries for (nfs) exporting */ 1381 struct hlist_bl_head s_anon; /* anonymous dentries for (nfs) exporting */
1376#ifdef CONFIG_SMP 1382#ifdef CONFIG_SMP
1377 struct list_head __percpu *s_files; 1383 struct list_head __percpu *s_files;
1378#else 1384#else
1379 struct list_head s_files; 1385 struct list_head s_files;
1380#endif 1386#endif
1381 /* s_dentry_lru and s_nr_dentry_unused are protected by dcache_lock */ 1387 /* s_dentry_lru, s_nr_dentry_unused protected by dcache.c lru locks */
1382 struct list_head s_dentry_lru; /* unused dentry lru */ 1388 struct list_head s_dentry_lru; /* unused dentry lru */
1383 int s_nr_dentry_unused; /* # of dentry on lru */ 1389 int s_nr_dentry_unused; /* # of dentry on lru */
1384 1390
@@ -1545,9 +1551,18 @@ struct file_operations {
1545 int (*setlease)(struct file *, long, struct file_lock **); 1551 int (*setlease)(struct file *, long, struct file_lock **);
1546}; 1552};
1547 1553
1554#define IPERM_FLAG_RCU 0x0001
1555
1548struct inode_operations { 1556struct inode_operations {
1549 int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
1550 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *); 1557 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
1558 void * (*follow_link) (struct dentry *, struct nameidata *);
1559 int (*permission) (struct inode *, int, unsigned int);
1560 int (*check_acl)(struct inode *, int, unsigned int);
1561
1562 int (*readlink) (struct dentry *, char __user *,int);
1563 void (*put_link) (struct dentry *, struct nameidata *, void *);
1564
1565 int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
1551 int (*link) (struct dentry *,struct inode *,struct dentry *); 1566 int (*link) (struct dentry *,struct inode *,struct dentry *);
1552 int (*unlink) (struct inode *,struct dentry *); 1567 int (*unlink) (struct inode *,struct dentry *);
1553 int (*symlink) (struct inode *,struct dentry *,const char *); 1568 int (*symlink) (struct inode *,struct dentry *,const char *);
@@ -1556,12 +1571,7 @@ struct inode_operations {
1556 int (*mknod) (struct inode *,struct dentry *,int,dev_t); 1571 int (*mknod) (struct inode *,struct dentry *,int,dev_t);
1557 int (*rename) (struct inode *, struct dentry *, 1572 int (*rename) (struct inode *, struct dentry *,
1558 struct inode *, struct dentry *); 1573 struct inode *, struct dentry *);
1559 int (*readlink) (struct dentry *, char __user *,int);
1560 void * (*follow_link) (struct dentry *, struct nameidata *);
1561 void (*put_link) (struct dentry *, struct nameidata *, void *);
1562 void (*truncate) (struct inode *); 1574 void (*truncate) (struct inode *);
1563 int (*permission) (struct inode *, int);
1564 int (*check_acl)(struct inode *, int);
1565 int (*setattr) (struct dentry *, struct iattr *); 1575 int (*setattr) (struct dentry *, struct iattr *);
1566 int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *); 1576 int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *);
1567 int (*setxattr) (struct dentry *, const char *,const void *,size_t,int); 1577 int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
@@ -1573,7 +1583,7 @@ struct inode_operations {
1573 loff_t len); 1583 loff_t len);
1574 int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, 1584 int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
1575 u64 len); 1585 u64 len);
1576}; 1586} ____cacheline_aligned;
1577 1587
1578struct seq_file; 1588struct seq_file;
1579 1589
@@ -2158,8 +2168,8 @@ extern sector_t bmap(struct inode *, sector_t);
2158#endif 2168#endif
2159extern int notify_change(struct dentry *, struct iattr *); 2169extern int notify_change(struct dentry *, struct iattr *);
2160extern int inode_permission(struct inode *, int); 2170extern int inode_permission(struct inode *, int);
2161extern int generic_permission(struct inode *, int, 2171extern int generic_permission(struct inode *, int, unsigned int,
2162 int (*check_acl)(struct inode *, int)); 2172 int (*check_acl)(struct inode *, int, unsigned int));
2163 2173
2164static inline bool execute_ok(struct inode *inode) 2174static inline bool execute_ok(struct inode *inode)
2165{ 2175{
@@ -2230,6 +2240,7 @@ extern void iget_failed(struct inode *);
2230extern void end_writeback(struct inode *); 2240extern void end_writeback(struct inode *);
2231extern void __destroy_inode(struct inode *); 2241extern void __destroy_inode(struct inode *);
2232extern struct inode *new_inode(struct super_block *); 2242extern struct inode *new_inode(struct super_block *);
2243extern void free_inode_nonrcu(struct inode *inode);
2233extern int should_remove_suid(struct dentry *); 2244extern int should_remove_suid(struct dentry *);
2234extern int file_remove_suid(struct file *); 2245extern int file_remove_suid(struct file *);
2235 2246
@@ -2446,6 +2457,10 @@ static inline ino_t parent_ino(struct dentry *dentry)
2446{ 2457{
2447 ino_t res; 2458 ino_t res;
2448 2459
2460 /*
2461 * Don't strictly need d_lock here? If the parent ino could change
2462 * then surely we'd have a deeper race in the caller?
2463 */
2449 spin_lock(&dentry->d_lock); 2464 spin_lock(&dentry->d_lock);
2450 res = dentry->d_parent->d_inode->i_ino; 2465 res = dentry->d_parent->d_inode->i_ino;
2451 spin_unlock(&dentry->d_lock); 2466 spin_unlock(&dentry->d_lock);
diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
index a42b5bf02f8b..003dc0fd7347 100644
--- a/include/linux/fs_struct.h
+++ b/include/linux/fs_struct.h
@@ -2,10 +2,13 @@
2#define _LINUX_FS_STRUCT_H 2#define _LINUX_FS_STRUCT_H
3 3
4#include <linux/path.h> 4#include <linux/path.h>
5#include <linux/spinlock.h>
6#include <linux/seqlock.h>
5 7
6struct fs_struct { 8struct fs_struct {
7 int users; 9 int users;
8 spinlock_t lock; 10 spinlock_t lock;
11 seqcount_t seq;
9 int umask; 12 int umask;
10 int in_exec; 13 int in_exec;
11 struct path root, pwd; 14 struct path root, pwd;
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index b10bcdeaef76..2a53f10712b3 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -17,7 +17,6 @@
17 17
18/* 18/*
19 * fsnotify_d_instantiate - instantiate a dentry for inode 19 * fsnotify_d_instantiate - instantiate a dentry for inode
20 * Called with dcache_lock held.
21 */ 20 */
22static inline void fsnotify_d_instantiate(struct dentry *dentry, 21static inline void fsnotify_d_instantiate(struct dentry *dentry,
23 struct inode *inode) 22 struct inode *inode)
@@ -62,7 +61,6 @@ static inline int fsnotify_perm(struct file *file, int mask)
62 61
63/* 62/*
64 * fsnotify_d_move - dentry has been moved 63 * fsnotify_d_move - dentry has been moved
65 * Called with dcache_lock and dentry->d_lock held.
66 */ 64 */
67static inline void fsnotify_d_move(struct dentry *dentry) 65static inline void fsnotify_d_move(struct dentry *dentry)
68{ 66{
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 7380763595d3..69ad89b50489 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -329,9 +329,15 @@ static inline void __fsnotify_update_dcache_flags(struct dentry *dentry)
329{ 329{
330 struct dentry *parent; 330 struct dentry *parent;
331 331
332 assert_spin_locked(&dcache_lock);
333 assert_spin_locked(&dentry->d_lock); 332 assert_spin_locked(&dentry->d_lock);
334 333
334 /*
335 * Serialisation of setting PARENT_WATCHED on the dentries is provided
336 * by d_lock. If inotify_inode_watched changes after we have taken
337 * d_lock, the following __fsnotify_update_child_dentry_flags call will
338 * find our entry, so it will spin until we complete here, and update
339 * us with the new state.
340 */
335 parent = dentry->d_parent; 341 parent = dentry->d_parent;
336 if (parent->d_inode && fsnotify_inode_watches_children(parent->d_inode)) 342 if (parent->d_inode && fsnotify_inode_watches_children(parent->d_inode))
337 dentry->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED; 343 dentry->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED;
@@ -341,15 +347,12 @@ static inline void __fsnotify_update_dcache_flags(struct dentry *dentry)
341 347
342/* 348/*
343 * fsnotify_d_instantiate - instantiate a dentry for inode 349 * fsnotify_d_instantiate - instantiate a dentry for inode
344 * Called with dcache_lock held.
345 */ 350 */
346static inline void __fsnotify_d_instantiate(struct dentry *dentry, struct inode *inode) 351static inline void __fsnotify_d_instantiate(struct dentry *dentry, struct inode *inode)
347{ 352{
348 if (!inode) 353 if (!inode)
349 return; 354 return;
350 355
351 assert_spin_locked(&dcache_lock);
352
353 spin_lock(&dentry->d_lock); 356 spin_lock(&dentry->d_lock);
354 __fsnotify_update_dcache_flags(dentry); 357 __fsnotify_update_dcache_flags(dentry);
355 spin_unlock(&dentry->d_lock); 358 spin_unlock(&dentry->d_lock);
diff --git a/include/linux/generic_acl.h b/include/linux/generic_acl.h
index 574bea4013b6..0437e377b555 100644
--- a/include/linux/generic_acl.h
+++ b/include/linux/generic_acl.h
@@ -10,6 +10,6 @@ extern const struct xattr_handler generic_acl_default_handler;
10 10
11int generic_acl_init(struct inode *, struct inode *); 11int generic_acl_init(struct inode *, struct inode *);
12int generic_acl_chmod(struct inode *); 12int generic_acl_chmod(struct inode *);
13int generic_check_acl(struct inode *inode, int mask); 13int generic_check_acl(struct inode *inode, int mask, unsigned int flags);
14 14
15#endif /* LINUX_GENERIC_ACL_H */ 15#endif /* LINUX_GENERIC_ACL_H */
diff --git a/include/linux/i2c/twl.h b/include/linux/i2c/twl.h
index c760991b354a..61b9609e55f2 100644
--- a/include/linux/i2c/twl.h
+++ b/include/linux/i2c/twl.h
@@ -593,6 +593,13 @@ enum twl4030_usb_mode {
593 593
594struct twl4030_usb_data { 594struct twl4030_usb_data {
595 enum twl4030_usb_mode usb_mode; 595 enum twl4030_usb_mode usb_mode;
596
597 int (*phy_init)(struct device *dev);
598 int (*phy_exit)(struct device *dev);
599 /* Power on/off the PHY */
600 int (*phy_power)(struct device *dev, int iD, int on);
601 /* enable/disable phy clocks */
602 int (*phy_set_clock)(struct device *dev, int on);
596}; 603};
597 604
598struct twl4030_ins { 605struct twl4030_ins {
diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h
new file mode 100644
index 000000000000..9ee97e7f2be4
--- /dev/null
+++ b/include/linux/list_bl.h
@@ -0,0 +1,144 @@
1#ifndef _LINUX_LIST_BL_H
2#define _LINUX_LIST_BL_H
3
4#include <linux/list.h>
5
6/*
7 * Special version of lists, where head of the list has a lock in the lowest
8 * bit. This is useful for scalable hash tables without increasing memory
9 * footprint overhead.
10 *
11 * For modification operations, the 0 bit of hlist_bl_head->first
12 * pointer must be set.
13 *
14 * With some small modifications, this can easily be adapted to store several
15 * arbitrary bits (not just a single lock bit), if the need arises to store
16 * some fast and compact auxiliary data.
17 */
18
19#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
20#define LIST_BL_LOCKMASK 1UL
21#else
22#define LIST_BL_LOCKMASK 0UL
23#endif
24
25#ifdef CONFIG_DEBUG_LIST
26#define LIST_BL_BUG_ON(x) BUG_ON(x)
27#else
28#define LIST_BL_BUG_ON(x)
29#endif
30
31
32struct hlist_bl_head {
33 struct hlist_bl_node *first;
34};
35
36struct hlist_bl_node {
37 struct hlist_bl_node *next, **pprev;
38};
39#define INIT_HLIST_BL_HEAD(ptr) \
40 ((ptr)->first = NULL)
41
42static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h)
43{
44 h->next = NULL;
45 h->pprev = NULL;
46}
47
48#define hlist_bl_entry(ptr, type, member) container_of(ptr,type,member)
49
50static inline int hlist_bl_unhashed(const struct hlist_bl_node *h)
51{
52 return !h->pprev;
53}
54
55static inline struct hlist_bl_node *hlist_bl_first(struct hlist_bl_head *h)
56{
57 return (struct hlist_bl_node *)
58 ((unsigned long)h->first & ~LIST_BL_LOCKMASK);
59}
60
61static inline void hlist_bl_set_first(struct hlist_bl_head *h,
62 struct hlist_bl_node *n)
63{
64 LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK);
65 LIST_BL_BUG_ON(!((unsigned long)h->first & LIST_BL_LOCKMASK));
66 h->first = (struct hlist_bl_node *)((unsigned long)n | LIST_BL_LOCKMASK);
67}
68
69static inline int hlist_bl_empty(const struct hlist_bl_head *h)
70{
71 return !((unsigned long)h->first & ~LIST_BL_LOCKMASK);
72}
73
74static inline void hlist_bl_add_head(struct hlist_bl_node *n,
75 struct hlist_bl_head *h)
76{
77 struct hlist_bl_node *first = hlist_bl_first(h);
78
79 n->next = first;
80 if (first)
81 first->pprev = &n->next;
82 n->pprev = &h->first;
83 hlist_bl_set_first(h, n);
84}
85
86static inline void __hlist_bl_del(struct hlist_bl_node *n)
87{
88 struct hlist_bl_node *next = n->next;
89 struct hlist_bl_node **pprev = n->pprev;
90
91 LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK);
92
93 /* pprev may be `first`, so be careful not to lose the lock bit */
94 *pprev = (struct hlist_bl_node *)
95 ((unsigned long)next |
96 ((unsigned long)*pprev & LIST_BL_LOCKMASK));
97 if (next)
98 next->pprev = pprev;
99}
100
101static inline void hlist_bl_del(struct hlist_bl_node *n)
102{
103 __hlist_bl_del(n);
104 n->next = LIST_POISON1;
105 n->pprev = LIST_POISON2;
106}
107
108static inline void hlist_bl_del_init(struct hlist_bl_node *n)
109{
110 if (!hlist_bl_unhashed(n)) {
111 __hlist_bl_del(n);
112 INIT_HLIST_BL_NODE(n);
113 }
114}
115
116/**
117 * hlist_bl_for_each_entry - iterate over list of given type
118 * @tpos: the type * to use as a loop cursor.
119 * @pos: the &struct hlist_node to use as a loop cursor.
120 * @head: the head for your list.
121 * @member: the name of the hlist_node within the struct.
122 *
123 */
124#define hlist_bl_for_each_entry(tpos, pos, head, member) \
125 for (pos = hlist_bl_first(head); \
126 pos && \
127 ({ tpos = hlist_bl_entry(pos, typeof(*tpos), member); 1;}); \
128 pos = pos->next)
129
130/**
131 * hlist_bl_for_each_entry_safe - iterate over list of given type safe against removal of list entry
132 * @tpos: the type * to use as a loop cursor.
133 * @pos: the &struct hlist_node to use as a loop cursor.
134 * @n: another &struct hlist_node to use as temporary storage
135 * @head: the head for your list.
136 * @member: the name of the hlist_node within the struct.
137 */
138#define hlist_bl_for_each_entry_safe(tpos, pos, n, head, member) \
139 for (pos = hlist_bl_first(head); \
140 pos && ({ n = pos->next; 1; }) && \
141 ({ tpos = hlist_bl_entry(pos, typeof(*tpos), member); 1;}); \
142 pos = n)
143
144#endif
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 5e7a59408dd4..1869ea24a739 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -13,6 +13,7 @@
13#include <linux/list.h> 13#include <linux/list.h>
14#include <linux/nodemask.h> 14#include <linux/nodemask.h>
15#include <linux/spinlock.h> 15#include <linux/spinlock.h>
16#include <linux/seqlock.h>
16#include <asm/atomic.h> 17#include <asm/atomic.h>
17 18
18struct super_block; 19struct super_block;
@@ -46,12 +47,24 @@ struct mnt_namespace;
46 47
47#define MNT_INTERNAL 0x4000 48#define MNT_INTERNAL 0x4000
48 49
50struct mnt_pcp {
51 int mnt_count;
52 int mnt_writers;
53};
54
49struct vfsmount { 55struct vfsmount {
50 struct list_head mnt_hash; 56 struct list_head mnt_hash;
51 struct vfsmount *mnt_parent; /* fs we are mounted on */ 57 struct vfsmount *mnt_parent; /* fs we are mounted on */
52 struct dentry *mnt_mountpoint; /* dentry of mountpoint */ 58 struct dentry *mnt_mountpoint; /* dentry of mountpoint */
53 struct dentry *mnt_root; /* root of the mounted tree */ 59 struct dentry *mnt_root; /* root of the mounted tree */
54 struct super_block *mnt_sb; /* pointer to superblock */ 60 struct super_block *mnt_sb; /* pointer to superblock */
61#ifdef CONFIG_SMP
62 struct mnt_pcp __percpu *mnt_pcp;
63 atomic_t mnt_longrefs;
64#else
65 int mnt_count;
66 int mnt_writers;
67#endif
55 struct list_head mnt_mounts; /* list of children, anchored here */ 68 struct list_head mnt_mounts; /* list of children, anchored here */
56 struct list_head mnt_child; /* and going through their mnt_child */ 69 struct list_head mnt_child; /* and going through their mnt_child */
57 int mnt_flags; 70 int mnt_flags;
@@ -70,57 +83,25 @@ struct vfsmount {
70 struct mnt_namespace *mnt_ns; /* containing namespace */ 83 struct mnt_namespace *mnt_ns; /* containing namespace */
71 int mnt_id; /* mount identifier */ 84 int mnt_id; /* mount identifier */
72 int mnt_group_id; /* peer group identifier */ 85 int mnt_group_id; /* peer group identifier */
73 /*
74 * We put mnt_count & mnt_expiry_mark at the end of struct vfsmount
75 * to let these frequently modified fields in a separate cache line
76 * (so that reads of mnt_flags wont ping-pong on SMP machines)
77 */
78 atomic_t mnt_count;
79 int mnt_expiry_mark; /* true if marked for expiry */ 86 int mnt_expiry_mark; /* true if marked for expiry */
80 int mnt_pinned; 87 int mnt_pinned;
81 int mnt_ghosts; 88 int mnt_ghosts;
82#ifdef CONFIG_SMP
83 int __percpu *mnt_writers;
84#else
85 int mnt_writers;
86#endif
87}; 89};
88 90
89static inline int *get_mnt_writers_ptr(struct vfsmount *mnt)
90{
91#ifdef CONFIG_SMP
92 return mnt->mnt_writers;
93#else
94 return &mnt->mnt_writers;
95#endif
96}
97
98static inline struct vfsmount *mntget(struct vfsmount *mnt)
99{
100 if (mnt)
101 atomic_inc(&mnt->mnt_count);
102 return mnt;
103}
104
105struct file; /* forward dec */ 91struct file; /* forward dec */
106 92
107extern int mnt_want_write(struct vfsmount *mnt); 93extern int mnt_want_write(struct vfsmount *mnt);
108extern int mnt_want_write_file(struct file *file); 94extern int mnt_want_write_file(struct file *file);
109extern int mnt_clone_write(struct vfsmount *mnt); 95extern int mnt_clone_write(struct vfsmount *mnt);
110extern void mnt_drop_write(struct vfsmount *mnt); 96extern void mnt_drop_write(struct vfsmount *mnt);
111extern void mntput_no_expire(struct vfsmount *mnt); 97extern void mntput(struct vfsmount *mnt);
98extern struct vfsmount *mntget(struct vfsmount *mnt);
99extern void mntput_long(struct vfsmount *mnt);
100extern struct vfsmount *mntget_long(struct vfsmount *mnt);
112extern void mnt_pin(struct vfsmount *mnt); 101extern void mnt_pin(struct vfsmount *mnt);
113extern void mnt_unpin(struct vfsmount *mnt); 102extern void mnt_unpin(struct vfsmount *mnt);
114extern int __mnt_is_readonly(struct vfsmount *mnt); 103extern int __mnt_is_readonly(struct vfsmount *mnt);
115 104
116static inline void mntput(struct vfsmount *mnt)
117{
118 if (mnt) {
119 mnt->mnt_expiry_mark = 0;
120 mntput_no_expire(mnt);
121 }
122}
123
124extern struct vfsmount *do_kern_mount(const char *fstype, int flags, 105extern struct vfsmount *do_kern_mount(const char *fstype, int flags,
125 const char *name, void *data); 106 const char *name, void *data);
126 107
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 05b441d93642..18d06add0a40 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -19,7 +19,10 @@ struct nameidata {
19 struct path path; 19 struct path path;
20 struct qstr last; 20 struct qstr last;
21 struct path root; 21 struct path root;
22 struct file *file;
23 struct inode *inode; /* path.dentry.d_inode */
22 unsigned int flags; 24 unsigned int flags;
25 unsigned seq;
23 int last_type; 26 int last_type;
24 unsigned depth; 27 unsigned depth;
25 char *saved_names[MAX_NESTED_LINKS + 1]; 28 char *saved_names[MAX_NESTED_LINKS + 1];
@@ -41,14 +44,15 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
41 * - require a directory 44 * - require a directory
42 * - ending slashes ok even for nonexistent files 45 * - ending slashes ok even for nonexistent files
43 * - internal "there are more path components" flag 46 * - internal "there are more path components" flag
44 * - locked when lookup done with dcache_lock held
45 * - dentry cache is untrusted; force a real lookup 47 * - dentry cache is untrusted; force a real lookup
46 */ 48 */
47#define LOOKUP_FOLLOW 1 49#define LOOKUP_FOLLOW 0x0001
48#define LOOKUP_DIRECTORY 2 50#define LOOKUP_DIRECTORY 0x0002
49#define LOOKUP_CONTINUE 4 51#define LOOKUP_CONTINUE 0x0004
50#define LOOKUP_PARENT 16 52
51#define LOOKUP_REVAL 64 53#define LOOKUP_PARENT 0x0010
54#define LOOKUP_REVAL 0x0020
55#define LOOKUP_RCU 0x0040
52/* 56/*
53 * Intent data 57 * Intent data
54 */ 58 */
diff --git a/include/linux/ncp_fs.h b/include/linux/ncp_fs.h
index ef663061d5ac..1c27f201c856 100644
--- a/include/linux/ncp_fs.h
+++ b/include/linux/ncp_fs.h
@@ -184,13 +184,13 @@ struct ncp_entry_info {
184 __u8 file_handle[6]; 184 __u8 file_handle[6];
185}; 185};
186 186
187static inline struct ncp_server *NCP_SBP(struct super_block *sb) 187static inline struct ncp_server *NCP_SBP(const struct super_block *sb)
188{ 188{
189 return sb->s_fs_info; 189 return sb->s_fs_info;
190} 190}
191 191
192#define NCP_SERVER(inode) NCP_SBP((inode)->i_sb) 192#define NCP_SERVER(inode) NCP_SBP((inode)->i_sb)
193static inline struct ncp_inode_info *NCP_FINFO(struct inode *inode) 193static inline struct ncp_inode_info *NCP_FINFO(const struct inode *inode)
194{ 194{
195 return container_of(inode, struct ncp_inode_info, vfs_inode); 195 return container_of(inode, struct ncp_inode_info, vfs_inode);
196} 196}
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 29d504d5d1c3..0779bb8f95be 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -351,7 +351,7 @@ extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *);
351extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr); 351extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr);
352extern int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr); 352extern int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr);
353extern int nfs_getattr(struct vfsmount *, struct dentry *, struct kstat *); 353extern int nfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
354extern int nfs_permission(struct inode *, int); 354extern int nfs_permission(struct inode *, int, unsigned int);
355extern int nfs_open(struct inode *, struct file *); 355extern int nfs_open(struct inode *, struct file *);
356extern int nfs_release(struct inode *, struct file *); 356extern int nfs_release(struct inode *, struct file *);
357extern int nfs_attribute_timeout(struct inode *inode); 357extern int nfs_attribute_timeout(struct inode *inode);
diff --git a/include/linux/path.h b/include/linux/path.h
index edc98dec6266..a581e8c06533 100644
--- a/include/linux/path.h
+++ b/include/linux/path.h
@@ -10,7 +10,9 @@ struct path {
10}; 10};
11 11
12extern void path_get(struct path *); 12extern void path_get(struct path *);
13extern void path_get_long(struct path *);
13extern void path_put(struct path *); 14extern void path_put(struct path *);
15extern void path_put_long(struct path *);
14 16
15static inline int path_equal(const struct path *path1, const struct path *path2) 17static inline int path_equal(const struct path *path1, const struct path *path2)
16{ 18{
diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h
index 67608161df6b..d68283a898bb 100644
--- a/include/linux/posix_acl.h
+++ b/include/linux/posix_acl.h
@@ -108,6 +108,25 @@ static inline struct posix_acl *get_cached_acl(struct inode *inode, int type)
108 return acl; 108 return acl;
109} 109}
110 110
111static inline int negative_cached_acl(struct inode *inode, int type)
112{
113 struct posix_acl **p, *acl;
114 switch (type) {
115 case ACL_TYPE_ACCESS:
116 p = &inode->i_acl;
117 break;
118 case ACL_TYPE_DEFAULT:
119 p = &inode->i_default_acl;
120 break;
121 default:
122 BUG();
123 }
124 acl = ACCESS_ONCE(*p);
125 if (acl)
126 return 0;
127 return 1;
128}
129
111static inline void set_cached_acl(struct inode *inode, 130static inline void set_cached_acl(struct inode *inode,
112 int type, 131 int type,
113 struct posix_acl *acl) 132 struct posix_acl *acl)
diff --git a/include/linux/rculist_bl.h b/include/linux/rculist_bl.h
new file mode 100644
index 000000000000..b872b493724d
--- /dev/null
+++ b/include/linux/rculist_bl.h
@@ -0,0 +1,127 @@
1#ifndef _LINUX_RCULIST_BL_H
2#define _LINUX_RCULIST_BL_H
3
4/*
5 * RCU-protected bl list version. See include/linux/list_bl.h.
6 */
7#include <linux/list_bl.h>
8#include <linux/rcupdate.h>
9
10static inline void hlist_bl_set_first_rcu(struct hlist_bl_head *h,
11 struct hlist_bl_node *n)
12{
13 LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK);
14 LIST_BL_BUG_ON(!((unsigned long)h->first & LIST_BL_LOCKMASK));
15 rcu_assign_pointer(h->first,
16 (struct hlist_bl_node *)((unsigned long)n | LIST_BL_LOCKMASK));
17}
18
19static inline struct hlist_bl_node *hlist_bl_first_rcu(struct hlist_bl_head *h)
20{
21 return (struct hlist_bl_node *)
22 ((unsigned long)rcu_dereference(h->first) & ~LIST_BL_LOCKMASK);
23}
24
25/**
26 * hlist_bl_del_init_rcu - deletes entry from hash list with re-initialization
27 * @n: the element to delete from the hash list.
28 *
29 * Note: hlist_bl_unhashed() on the node returns true after this. It is
30 * useful for RCU based read lockfree traversal if the writer side
31 * must know if the list entry is still hashed or already unhashed.
32 *
33 * In particular, it means that we can not poison the forward pointers
34 * that may still be used for walking the hash list and we can only
35 * zero the pprev pointer so list_unhashed() will return true after
36 * this.
37 *
38 * The caller must take whatever precautions are necessary (such as
39 * holding appropriate locks) to avoid racing with another
40 * list-mutation primitive, such as hlist_bl_add_head_rcu() or
41 * hlist_bl_del_rcu(), running on this same list. However, it is
42 * perfectly legal to run concurrently with the _rcu list-traversal
43 * primitives, such as hlist_bl_for_each_entry_rcu().
44 */
45static inline void hlist_bl_del_init_rcu(struct hlist_bl_node *n)
46{
47 if (!hlist_bl_unhashed(n)) {
48 __hlist_bl_del(n);
49 n->pprev = NULL;
50 }
51}
52
53/**
54 * hlist_bl_del_rcu - deletes entry from hash list without re-initialization
55 * @n: the element to delete from the hash list.
56 *
57 * Note: hlist_bl_unhashed() on entry does not return true after this,
58 * the entry is in an undefined state. It is useful for RCU based
59 * lockfree traversal.
60 *
61 * In particular, it means that we can not poison the forward
62 * pointers that may still be used for walking the hash list.
63 *
64 * The caller must take whatever precautions are necessary
65 * (such as holding appropriate locks) to avoid racing
66 * with another list-mutation primitive, such as hlist_bl_add_head_rcu()
67 * or hlist_bl_del_rcu(), running on this same list.
68 * However, it is perfectly legal to run concurrently with
69 * the _rcu list-traversal primitives, such as
70 * hlist_bl_for_each_entry().
71 */
72static inline void hlist_bl_del_rcu(struct hlist_bl_node *n)
73{
74 __hlist_bl_del(n);
75 n->pprev = LIST_POISON2;
76}
77
78/**
79 * hlist_bl_add_head_rcu
80 * @n: the element to add to the hash list.
81 * @h: the list to add to.
82 *
83 * Description:
84 * Adds the specified element to the specified hlist_bl,
85 * while permitting racing traversals.
86 *
87 * The caller must take whatever precautions are necessary
88 * (such as holding appropriate locks) to avoid racing
89 * with another list-mutation primitive, such as hlist_bl_add_head_rcu()
90 * or hlist_bl_del_rcu(), running on this same list.
91 * However, it is perfectly legal to run concurrently with
92 * the _rcu list-traversal primitives, such as
93 * hlist_bl_for_each_entry_rcu(), used to prevent memory-consistency
94 * problems on Alpha CPUs. Regardless of the type of CPU, the
95 * list-traversal primitive must be guarded by rcu_read_lock().
96 */
97static inline void hlist_bl_add_head_rcu(struct hlist_bl_node *n,
98 struct hlist_bl_head *h)
99{
100 struct hlist_bl_node *first;
101
102 /* don't need hlist_bl_first_rcu because we're under lock */
103 first = hlist_bl_first(h);
104
105 n->next = first;
106 if (first)
107 first->pprev = &n->next;
108 n->pprev = &h->first;
109
110 /* need _rcu because we can have concurrent lock free readers */
111 hlist_bl_set_first_rcu(h, n);
112}
113/**
114 * hlist_bl_for_each_entry_rcu - iterate over rcu list of given type
115 * @tpos: the type * to use as a loop cursor.
116 * @pos: the &struct hlist_bl_node to use as a loop cursor.
117 * @head: the head for your list.
118 * @member: the name of the hlist_bl_node within the struct.
119 *
120 */
121#define hlist_bl_for_each_entry_rcu(tpos, pos, head, member) \
122 for (pos = hlist_bl_first_rcu(head); \
123 pos && \
124 ({ tpos = hlist_bl_entry(pos, typeof(*tpos), member); 1; }); \
125 pos = rcu_dereference_raw(pos->next))
126
127#endif
diff --git a/include/linux/reiserfs_xattr.h b/include/linux/reiserfs_xattr.h
index b2cf2089769b..3b94c91f20a6 100644
--- a/include/linux/reiserfs_xattr.h
+++ b/include/linux/reiserfs_xattr.h
@@ -41,7 +41,7 @@ int reiserfs_xattr_init(struct super_block *sb, int mount_flags);
41int reiserfs_lookup_privroot(struct super_block *sb); 41int reiserfs_lookup_privroot(struct super_block *sb);
42int reiserfs_delete_xattrs(struct inode *inode); 42int reiserfs_delete_xattrs(struct inode *inode);
43int reiserfs_chown_xattrs(struct inode *inode, struct iattr *attrs); 43int reiserfs_chown_xattrs(struct inode *inode, struct iattr *attrs);
44int reiserfs_permission(struct inode *inode, int mask); 44int reiserfs_permission(struct inode *inode, int mask, unsigned int flags);
45 45
46#ifdef CONFIG_REISERFS_FS_XATTR 46#ifdef CONFIG_REISERFS_FS_XATTR
47#define has_xattr_dir(inode) (REISERFS_I(inode)->i_flags & i_has_xattr_dir) 47#define has_xattr_dir(inode) (REISERFS_I(inode)->i_flags & i_has_xattr_dir)
diff --git a/include/linux/security.h b/include/linux/security.h
index d47a4c24b3e4..1ac42475ea08 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -457,7 +457,6 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
457 * called when the actual read/write operations are performed. 457 * called when the actual read/write operations are performed.
458 * @inode contains the inode structure to check. 458 * @inode contains the inode structure to check.
459 * @mask contains the permission mask. 459 * @mask contains the permission mask.
460 * @nd contains the nameidata (may be NULL).
461 * Return 0 if permission is granted. 460 * Return 0 if permission is granted.
462 * @inode_setattr: 461 * @inode_setattr:
463 * Check permission before setting file attributes. Note that the kernel 462 * Check permission before setting file attributes. Note that the kernel
@@ -1713,6 +1712,7 @@ int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry,
1713int security_inode_readlink(struct dentry *dentry); 1712int security_inode_readlink(struct dentry *dentry);
1714int security_inode_follow_link(struct dentry *dentry, struct nameidata *nd); 1713int security_inode_follow_link(struct dentry *dentry, struct nameidata *nd);
1715int security_inode_permission(struct inode *inode, int mask); 1714int security_inode_permission(struct inode *inode, int mask);
1715int security_inode_exec_permission(struct inode *inode, unsigned int flags);
1716int security_inode_setattr(struct dentry *dentry, struct iattr *attr); 1716int security_inode_setattr(struct dentry *dentry, struct iattr *attr);
1717int security_inode_getattr(struct vfsmount *mnt, struct dentry *dentry); 1717int security_inode_getattr(struct vfsmount *mnt, struct dentry *dentry);
1718int security_inode_setxattr(struct dentry *dentry, const char *name, 1718int security_inode_setxattr(struct dentry *dentry, const char *name,
@@ -2102,6 +2102,12 @@ static inline int security_inode_permission(struct inode *inode, int mask)
2102 return 0; 2102 return 0;
2103} 2103}
2104 2104
2105static inline int security_inode_exec_permission(struct inode *inode,
2106 unsigned int flags)
2107{
2108 return 0;
2109}
2110
2105static inline int security_inode_setattr(struct dentry *dentry, 2111static inline int security_inode_setattr(struct dentry *dentry,
2106 struct iattr *attr) 2112 struct iattr *attr)
2107{ 2113{
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 632205ccc25d..e98cd2e57194 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -107,7 +107,7 @@ static __always_inline int read_seqretry(const seqlock_t *sl, unsigned start)
107{ 107{
108 smp_rmb(); 108 smp_rmb();
109 109
110 return (sl->sequence != start); 110 return unlikely(sl->sequence != start);
111} 111}
112 112
113 113
@@ -125,14 +125,25 @@ typedef struct seqcount {
125#define SEQCNT_ZERO { 0 } 125#define SEQCNT_ZERO { 0 }
126#define seqcount_init(x) do { *(x) = (seqcount_t) SEQCNT_ZERO; } while (0) 126#define seqcount_init(x) do { *(x) = (seqcount_t) SEQCNT_ZERO; } while (0)
127 127
128/* Start of read using pointer to a sequence counter only. */ 128/**
129static inline unsigned read_seqcount_begin(const seqcount_t *s) 129 * __read_seqcount_begin - begin a seq-read critical section (without barrier)
130 * @s: pointer to seqcount_t
131 * Returns: count to be passed to read_seqcount_retry
132 *
133 * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
134 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
135 * provided before actually loading any of the variables that are to be
136 * protected in this critical section.
137 *
138 * Use carefully, only in critical code, and comment how the barrier is
139 * provided.
140 */
141static inline unsigned __read_seqcount_begin(const seqcount_t *s)
130{ 142{
131 unsigned ret; 143 unsigned ret;
132 144
133repeat: 145repeat:
134 ret = s->sequence; 146 ret = s->sequence;
135 smp_rmb();
136 if (unlikely(ret & 1)) { 147 if (unlikely(ret & 1)) {
137 cpu_relax(); 148 cpu_relax();
138 goto repeat; 149 goto repeat;
@@ -140,14 +151,56 @@ repeat:
140 return ret; 151 return ret;
141} 152}
142 153
143/* 154/**
144 * Test if reader processed invalid data because sequence number has changed. 155 * read_seqcount_begin - begin a seq-read critical section
156 * @s: pointer to seqcount_t
157 * Returns: count to be passed to read_seqcount_retry
158 *
159 * read_seqcount_begin opens a read critical section of the given seqcount.
160 * Validity of the critical section is tested by checking read_seqcount_retry
161 * function.
162 */
163static inline unsigned read_seqcount_begin(const seqcount_t *s)
164{
165 unsigned ret = __read_seqcount_begin(s);
166 smp_rmb();
167 return ret;
168}
169
170/**
171 * __read_seqcount_retry - end a seq-read critical section (without barrier)
172 * @s: pointer to seqcount_t
173 * @start: count, from read_seqcount_begin
174 * Returns: 1 if retry is required, else 0
175 *
176 * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
177 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
178 * provided before actually loading any of the variables that are to be
179 * protected in this critical section.
180 *
181 * Use carefully, only in critical code, and comment how the barrier is
182 * provided.
183 */
184static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
185{
186 return unlikely(s->sequence != start);
187}
188
189/**
190 * read_seqcount_retry - end a seq-read critical section
191 * @s: pointer to seqcount_t
192 * @start: count, from read_seqcount_begin
193 * Returns: 1 if retry is required, else 0
194 *
195 * read_seqcount_retry closes a read critical section of the given seqcount.
196 * If the critical section was invalid, it must be ignored (and typically
197 * retried).
145 */ 198 */
146static inline int read_seqcount_retry(const seqcount_t *s, unsigned start) 199static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
147{ 200{
148 smp_rmb(); 201 smp_rmb();
149 202
150 return s->sequence != start; 203 return __read_seqcount_retry(s, start);
151} 204}
152 205
153 206
@@ -167,6 +220,19 @@ static inline void write_seqcount_end(seqcount_t *s)
167 s->sequence++; 220 s->sequence++;
168} 221}
169 222
223/**
224 * write_seqcount_barrier - invalidate in-progress read-side seq operations
225 * @s: pointer to seqcount_t
226 *
227 * After write_seqcount_barrier, no read-side seq operations will complete
228 * successfully and see data older than this.
229 */
230static inline void write_seqcount_barrier(seqcount_t *s)
231{
232 smp_wmb();
233 s->sequence+=2;
234}
235
170/* 236/*
171 * Possible sw/hw IRQ protected versions of the interfaces. 237 * Possible sw/hw IRQ protected versions of the interfaces.
172 */ 238 */
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 59260e21bdf5..fa9086647eb7 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -106,8 +106,6 @@ int kmem_cache_shrink(struct kmem_cache *);
106void kmem_cache_free(struct kmem_cache *, void *); 106void kmem_cache_free(struct kmem_cache *, void *);
107unsigned int kmem_cache_size(struct kmem_cache *); 107unsigned int kmem_cache_size(struct kmem_cache *);
108const char *kmem_cache_name(struct kmem_cache *); 108const char *kmem_cache_name(struct kmem_cache *);
109int kern_ptr_validate(const void *ptr, unsigned long size);
110int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr);
111 109
112/* 110/*
113 * Please use this macro to create slab caches. Simply specify the 111 * Please use this macro to create slab caches. Simply specify the
diff --git a/include/linux/usb.h b/include/linux/usb.h
index a28eb2592577..bd69b65f3356 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -20,6 +20,7 @@
20#include <linux/completion.h> /* for struct completion */ 20#include <linux/completion.h> /* for struct completion */
21#include <linux/sched.h> /* for current && schedule_timeout */ 21#include <linux/sched.h> /* for current && schedule_timeout */
22#include <linux/mutex.h> /* for struct mutex */ 22#include <linux/mutex.h> /* for struct mutex */
23#include <linux/pm_runtime.h> /* for runtime PM */
23 24
24struct usb_device; 25struct usb_device;
25struct usb_driver; 26struct usb_driver;
@@ -411,8 +412,6 @@ struct usb_tt;
411 * @quirks: quirks of the whole device 412 * @quirks: quirks of the whole device
412 * @urbnum: number of URBs submitted for the whole device 413 * @urbnum: number of URBs submitted for the whole device
413 * @active_duration: total time device is not suspended 414 * @active_duration: total time device is not suspended
414 * @last_busy: time of last use
415 * @autosuspend_delay: in jiffies
416 * @connect_time: time device was first connected 415 * @connect_time: time device was first connected
417 * @do_remote_wakeup: remote wakeup should be enabled 416 * @do_remote_wakeup: remote wakeup should be enabled
418 * @reset_resume: needs reset instead of resume 417 * @reset_resume: needs reset instead of resume
@@ -485,8 +484,6 @@ struct usb_device {
485 unsigned long active_duration; 484 unsigned long active_duration;
486 485
487#ifdef CONFIG_PM 486#ifdef CONFIG_PM
488 unsigned long last_busy;
489 int autosuspend_delay;
490 unsigned long connect_time; 487 unsigned long connect_time;
491 488
492 unsigned do_remote_wakeup:1; 489 unsigned do_remote_wakeup:1;
@@ -531,7 +528,7 @@ extern void usb_autopm_put_interface_no_suspend(struct usb_interface *intf);
531 528
532static inline void usb_mark_last_busy(struct usb_device *udev) 529static inline void usb_mark_last_busy(struct usb_device *udev)
533{ 530{
534 udev->last_busy = jiffies; 531 pm_runtime_mark_last_busy(&udev->dev);
535} 532}
536 533
537#else 534#else
diff --git a/include/linux/usb/ch11.h b/include/linux/usb/ch11.h
index 119194c85d10..10ec0699bea4 100644
--- a/include/linux/usb/ch11.h
+++ b/include/linux/usb/ch11.h
@@ -28,6 +28,13 @@
28#define HUB_STOP_TT 11 28#define HUB_STOP_TT 11
29 29
30/* 30/*
31 * Hub class additional requests defined by USB 3.0 spec
32 * See USB 3.0 spec Table 10-6
33 */
34#define HUB_SET_DEPTH 12
35#define HUB_GET_PORT_ERR_COUNT 13
36
37/*
31 * Hub Class feature numbers 38 * Hub Class feature numbers
32 * See USB 2.0 spec Table 11-17 39 * See USB 2.0 spec Table 11-17
33 */ 40 */
@@ -56,6 +63,20 @@
56#define USB_PORT_FEAT_C_PORT_L1 23 63#define USB_PORT_FEAT_C_PORT_L1 23
57 64
58/* 65/*
66 * Port feature selectors added by USB 3.0 spec.
67 * See USB 3.0 spec Table 10-7
68 */
69#define USB_PORT_FEAT_LINK_STATE 5
70#define USB_PORT_FEAT_U1_TIMEOUT 23
71#define USB_PORT_FEAT_U2_TIMEOUT 24
72#define USB_PORT_FEAT_C_LINK_STATE 25
73#define USB_PORT_FEAT_C_CONFIG_ERR 26
74#define USB_PORT_FEAT_REMOTE_WAKE_MASK 27
75#define USB_PORT_FEAT_BH_PORT_RESET 28
76#define USB_PORT_FEAT_C_BH_PORT_RESET 29
77#define USB_PORT_FEAT_FORCE_LINKPM_ACCEPT 30
78
79/*
59 * Hub Status and Hub Change results 80 * Hub Status and Hub Change results
60 * See USB 2.0 spec Table 11-19 and Table 11-20 81 * See USB 2.0 spec Table 11-19 and Table 11-20
61 */ 82 */
@@ -84,6 +105,32 @@ struct usb_port_status {
84#define USB_PORT_STAT_SUPER_SPEED 0x8000 /* Linux-internal */ 105#define USB_PORT_STAT_SUPER_SPEED 0x8000 /* Linux-internal */
85 106
86/* 107/*
108 * Additions to wPortStatus bit field from USB 3.0
109 * See USB 3.0 spec Table 10-10
110 */
111#define USB_PORT_STAT_LINK_STATE 0x01e0
112#define USB_SS_PORT_STAT_POWER 0x0200
113#define USB_PORT_STAT_SPEED_5GBPS 0x0000
114/* Valid only if port is enabled */
115
116/*
117 * Definitions for PORT_LINK_STATE values
118 * (bits 5-8) in wPortStatus
119 */
120#define USB_SS_PORT_LS_U0 0x0000
121#define USB_SS_PORT_LS_U1 0x0020
122#define USB_SS_PORT_LS_U2 0x0040
123#define USB_SS_PORT_LS_U3 0x0060
124#define USB_SS_PORT_LS_SS_DISABLED 0x0080
125#define USB_SS_PORT_LS_RX_DETECT 0x00a0
126#define USB_SS_PORT_LS_SS_INACTIVE 0x00c0
127#define USB_SS_PORT_LS_POLLING 0x00e0
128#define USB_SS_PORT_LS_RECOVERY 0x0100
129#define USB_SS_PORT_LS_HOT_RESET 0x0120
130#define USB_SS_PORT_LS_COMP_MOD 0x0140
131#define USB_SS_PORT_LS_LOOPBACK 0x0160
132
133/*
87 * wPortChange bit field 134 * wPortChange bit field
88 * See USB 2.0 spec Table 11-22 135 * See USB 2.0 spec Table 11-22
89 * Bits 0 to 4 shown, bits 5 to 15 are reserved 136 * Bits 0 to 4 shown, bits 5 to 15 are reserved
diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h
index f917bbbc8901..ab461948b579 100644
--- a/include/linux/usb/ch9.h
+++ b/include/linux/usb/ch9.h
@@ -124,6 +124,16 @@
124#define USB_DEVICE_DEBUG_MODE 6 /* (special devices only) */ 124#define USB_DEVICE_DEBUG_MODE 6 /* (special devices only) */
125 125
126/* 126/*
127 * Test Mode Selectors
128 * See USB 2.0 spec Table 9-7
129 */
130#define TEST_J 1
131#define TEST_K 2
132#define TEST_SE0_NAK 3
133#define TEST_PACKET 4
134#define TEST_FORCE_EN 5
135
136/*
127 * New Feature Selectors as added by USB 3.0 137 * New Feature Selectors as added by USB 3.0
128 * See USB 3.0 spec Table 9-6 138 * See USB 3.0 spec Table 9-6
129 */ 139 */
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 0b6e751ea0b1..dd6ee49a0844 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -471,6 +471,10 @@ extern void usb_ep0_reinit(struct usb_device *);
471 471
472/*-------------------------------------------------------------------------*/ 472/*-------------------------------------------------------------------------*/
473 473
474/* class requests from USB 3.0 hub spec, table 10-5 */
475#define SetHubDepth (0x3000 | HUB_SET_DEPTH)
476#define GetPortErrorCount (0x8000 | HUB_GET_PORT_ERR_COUNT)
477
474/* 478/*
475 * Generic bandwidth allocation constants/support 479 * Generic bandwidth allocation constants/support
476 */ 480 */
diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h
new file mode 100644
index 000000000000..3675e03b1539
--- /dev/null
+++ b/include/linux/usb/msm_hsusb.h
@@ -0,0 +1,112 @@
1/* linux/include/asm-arm/arch-msm/hsusb.h
2 *
3 * Copyright (C) 2008 Google, Inc.
4 * Author: Brian Swetland <swetland@google.com>
5 * Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#ifndef __ASM_ARCH_MSM_HSUSB_H
19#define __ASM_ARCH_MSM_HSUSB_H
20
21#include <linux/types.h>
22#include <linux/usb/otg.h>
23
24/**
25 * Supported USB modes
26 *
27 * USB_PERIPHERAL Only peripheral mode is supported.
28 * USB_HOST Only host mode is supported.
29 * USB_OTG OTG mode is supported.
30 *
31 */
32enum usb_mode_type {
33 USB_NONE = 0,
34 USB_PERIPHERAL,
35 USB_HOST,
36 USB_OTG,
37};
38
39/**
40 * OTG control
41 *
42 * OTG_NO_CONTROL Id/VBUS notifications not required. Useful in host
43 * only configuration.
44 * OTG_PHY_CONTROL Id/VBUS notifications comes form USB PHY.
45 * OTG_PMIC_CONTROL Id/VBUS notifications comes from PMIC hardware.
46 * OTG_USER_CONTROL Id/VBUS notifcations comes from User via sysfs.
47 *
48 */
49enum otg_control_type {
50 OTG_NO_CONTROL = 0,
51 OTG_PHY_CONTROL,
52 OTG_PMIC_CONTROL,
53 OTG_USER_CONTROL,
54};
55
56/**
57 * struct msm_otg_platform_data - platform device data
58 * for msm72k_otg driver.
59 * @phy_init_seq: PHY configuration sequence. val, reg pairs
60 * terminated by -1.
61 * @vbus_power: VBUS power on/off routine.
62 * @power_budget: VBUS power budget in mA (0 will be treated as 500mA).
63 * @mode: Supported mode (OTG/peripheral/host).
64 * @otg_control: OTG switch controlled by user/Id pin
65 * @default_mode: Default operational mode. Applicable only if
66 * OTG switch is controller by user.
67 *
68 */
69struct msm_otg_platform_data {
70 int *phy_init_seq;
71 void (*vbus_power)(bool on);
72 unsigned power_budget;
73 enum usb_mode_type mode;
74 enum otg_control_type otg_control;
75 enum usb_mode_type default_mode;
76 void (*setup_gpio)(enum usb_otg_state state);
77};
78
79/**
80 * struct msm_otg: OTG driver data. Shared by HCD and DCD.
81 * @otg: USB OTG Transceiver structure.
82 * @pdata: otg device platform data.
83 * @irq: IRQ number assigned for HSUSB controller.
84 * @clk: clock struct of usb_hs_clk.
85 * @pclk: clock struct of usb_hs_pclk.
86 * @phy_reset_clk: clock struct of usb_phy_clk.
87 * @core_clk: clock struct of usb_hs_core_clk.
88 * @regs: ioremapped register base address.
89 * @inputs: OTG state machine inputs(Id, SessValid etc).
90 * @sm_work: OTG state machine work.
91 * @in_lpm: indicates low power mode (LPM) state.
92 * @async_int: Async interrupt arrived.
93 *
94 */
95struct msm_otg {
96 struct otg_transceiver otg;
97 struct msm_otg_platform_data *pdata;
98 int irq;
99 struct clk *clk;
100 struct clk *pclk;
101 struct clk *phy_reset_clk;
102 struct clk *core_clk;
103 void __iomem *regs;
104#define ID 0
105#define B_SESS_VLD 1
106 unsigned long inputs;
107 struct work_struct sm_work;
108 atomic_t in_lpm;
109 int async_int;
110};
111
112#endif
diff --git a/include/linux/usb/msm_hsusb_hw.h b/include/linux/usb/msm_hsusb_hw.h
new file mode 100644
index 000000000000..b92e17349c7b
--- /dev/null
+++ b/include/linux/usb/msm_hsusb_hw.h
@@ -0,0 +1,59 @@
1/*
2 * Copyright (C) 2007 Google, Inc.
3 * Author: Brian Swetland <swetland@google.com>
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 */
15
16#ifndef __LINUX_USB_GADGET_MSM72K_UDC_H__
17#define __LINUX_USB_GADGET_MSM72K_UDC_H__
18
19#ifdef CONFIG_ARCH_MSM7X00A
20#define USB_SBUSCFG (MSM_USB_BASE + 0x0090)
21#else
22#define USB_AHBBURST (MSM_USB_BASE + 0x0090)
23#define USB_AHBMODE (MSM_USB_BASE + 0x0098)
24#endif
25#define USB_CAPLENGTH (MSM_USB_BASE + 0x0100) /* 8 bit */
26
27#define USB_USBCMD (MSM_USB_BASE + 0x0140)
28#define USB_PORTSC (MSM_USB_BASE + 0x0184)
29#define USB_OTGSC (MSM_USB_BASE + 0x01A4)
30#define USB_USBMODE (MSM_USB_BASE + 0x01A8)
31
32#define USBCMD_RESET 2
33#define USB_USBINTR (MSM_USB_BASE + 0x0148)
34
35#define PORTSC_PHCD (1 << 23) /* phy suspend mode */
36#define PORTSC_PTS_MASK (3 << 30)
37#define PORTSC_PTS_ULPI (3 << 30)
38
39#define USB_ULPI_VIEWPORT (MSM_USB_BASE + 0x0170)
40#define ULPI_RUN (1 << 30)
41#define ULPI_WRITE (1 << 29)
42#define ULPI_READ (0 << 29)
43#define ULPI_ADDR(n) (((n) & 255) << 16)
44#define ULPI_DATA(n) ((n) & 255)
45#define ULPI_DATA_READ(n) (((n) >> 8) & 255)
46
47#define ASYNC_INTR_CTRL (1 << 29) /* Enable async interrupt */
48#define ULPI_STP_CTRL (1 << 30) /* Block communication with PHY */
49
50/* OTG definitions */
51#define OTGSC_INTSTS_MASK (0x7f << 16)
52#define OTGSC_ID (1 << 8)
53#define OTGSC_BSV (1 << 11)
54#define OTGSC_IDIS (1 << 16)
55#define OTGSC_BSVIS (1 << 19)
56#define OTGSC_IDIE (1 << 24)
57#define OTGSC_BSVIE (1 << 27)
58
59#endif /* __LINUX_USB_GADGET_MSM72K_UDC_H__ */
diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h
index 2387f9fc8138..eb505250940a 100644
--- a/include/linux/usb/musb.h
+++ b/include/linux/usb/musb.h
@@ -3,7 +3,7 @@
3 * Inventra (Multidrop) Highspeed Dual-Role Controllers: (M)HDRC. 3 * Inventra (Multidrop) Highspeed Dual-Role Controllers: (M)HDRC.
4 * 4 *
5 * Board initialization should put one of these into dev->platform_data, 5 * Board initialization should put one of these into dev->platform_data,
6 * probably on some platform_device named "musb_hdrc". It encapsulates 6 * probably on some platform_device named "musb-hdrc". It encapsulates
7 * key configuration differences between boards. 7 * key configuration differences between boards.
8 */ 8 */
9 9
@@ -120,14 +120,14 @@ struct musb_hdrc_platform_data {
120 /* Power the device on or off */ 120 /* Power the device on or off */
121 int (*set_power)(int state); 121 int (*set_power)(int state);
122 122
123 /* Turn device clock on or off */
124 int (*set_clock)(struct clk *clock, int is_on);
125
126 /* MUSB configuration-specific details */ 123 /* MUSB configuration-specific details */
127 struct musb_hdrc_config *config; 124 struct musb_hdrc_config *config;
128 125
129 /* Architecture specific board data */ 126 /* Architecture specific board data */
130 void *board_data; 127 void *board_data;
128
129 /* Platform specific struct musb_ops pointer */
130 const void *platform_ops;
131}; 131};
132 132
133 133
diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h
index 0a5b3711e502..a1a1e7a73ec9 100644
--- a/include/linux/usb/otg.h
+++ b/include/linux/usb/otg.h
@@ -116,7 +116,7 @@ struct otg_transceiver {
116/* for board-specific init logic */ 116/* for board-specific init logic */
117extern int otg_set_transceiver(struct otg_transceiver *); 117extern int otg_set_transceiver(struct otg_transceiver *);
118 118
119#if defined(CONFIG_NOP_USB_XCEIV) || defined(CONFIG_NOP_USB_XCEIV_MODULE) 119#if defined(CONFIG_NOP_USB_XCEIV) || (defined(CONFIG_NOP_USB_XCEIV_MODULE) && defined(MODULE))
120/* sometimes transceivers are accessed only through e.g. ULPI */ 120/* sometimes transceivers are accessed only through e.g. ULPI */
121extern void usb_nop_xceiv_register(void); 121extern void usb_nop_xceiv_register(void);
122extern void usb_nop_xceiv_unregister(void); 122extern void usb_nop_xceiv_unregister(void);
diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h
index a8631acd37c3..c3e1cbcc2ad2 100644
--- a/include/scsi/iscsi_if.h
+++ b/include/scsi/iscsi_if.h
@@ -263,6 +263,7 @@ enum iscsi_err {
263 ISCSI_ERR_INVALID_HOST = ISCSI_ERR_BASE + 18, 263 ISCSI_ERR_INVALID_HOST = ISCSI_ERR_BASE + 18,
264 ISCSI_ERR_XMIT_FAILED = ISCSI_ERR_BASE + 19, 264 ISCSI_ERR_XMIT_FAILED = ISCSI_ERR_BASE + 19,
265 ISCSI_ERR_TCP_CONN_CLOSE = ISCSI_ERR_BASE + 20, 265 ISCSI_ERR_TCP_CONN_CLOSE = ISCSI_ERR_BASE + 20,
266 ISCSI_ERR_SCSI_EH_SESSION_RST = ISCSI_ERR_BASE + 21,
266}; 267};
267 268
268/* 269/*
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index 5c4c1678f7be..f53c8e31d5fb 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -221,8 +221,8 @@ struct fc_rport_priv {
221 * @InputRequests: Number of input requests 221 * @InputRequests: Number of input requests
222 * @OutputRequests: Number of output requests 222 * @OutputRequests: Number of output requests
223 * @ControlRequests: Number of control requests 223 * @ControlRequests: Number of control requests
224 * @InputMegabytes: Number of received megabytes 224 * @InputBytes: Number of received bytes
225 * @OutputMegabytes: Number of transmitted megabytes 225 * @OutputBytes: Number of transmitted bytes
226 * @VLinkFailureCount: Number of virtual link failures 226 * @VLinkFailureCount: Number of virtual link failures
227 * @MissDiscAdvCount: Number of missing FIP discovery advertisement 227 * @MissDiscAdvCount: Number of missing FIP discovery advertisement
228 */ 228 */
@@ -241,8 +241,8 @@ struct fcoe_dev_stats {
241 u64 InputRequests; 241 u64 InputRequests;
242 u64 OutputRequests; 242 u64 OutputRequests;
243 u64 ControlRequests; 243 u64 ControlRequests;
244 u64 InputMegabytes; 244 u64 InputBytes;
245 u64 OutputMegabytes; 245 u64 OutputBytes;
246 u64 VLinkFailureCount; 246 u64 VLinkFailureCount;
247 u64 MissDiscAdvCount; 247 u64 MissDiscAdvCount;
248}; 248};
@@ -263,7 +263,6 @@ struct fc_seq_els_data {
263 * struct fc_fcp_pkt - FCP request structure (one for each scsi_cmnd request) 263 * struct fc_fcp_pkt - FCP request structure (one for each scsi_cmnd request)
264 * @lp: The associated local port 264 * @lp: The associated local port
265 * @state: The state of the I/O 265 * @state: The state of the I/O
266 * @tgt_flags: Target's flags
267 * @ref_cnt: Reference count 266 * @ref_cnt: Reference count
268 * @scsi_pkt_lock: Lock to protect the SCSI packet (must be taken before the 267 * @scsi_pkt_lock: Lock to protect the SCSI packet (must be taken before the
269 * host_lock if both are to be held at the same time) 268 * host_lock if both are to be held at the same time)
@@ -298,7 +297,6 @@ struct fc_fcp_pkt {
298 /* Housekeeping information */ 297 /* Housekeeping information */
299 struct fc_lport *lp; 298 struct fc_lport *lp;
300 u16 state; 299 u16 state;
301 u16 tgt_flags;
302 atomic_t ref_cnt; 300 atomic_t ref_cnt;
303 spinlock_t scsi_pkt_lock; 301 spinlock_t scsi_pkt_lock;
304 302
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
index 06f1b5a8ed19..feb6a94c90ea 100644
--- a/include/scsi/libfcoe.h
+++ b/include/scsi/libfcoe.h
@@ -92,10 +92,12 @@ enum fip_state {
92 * @timer_work: &work_struct for doing keep-alives and resets. 92 * @timer_work: &work_struct for doing keep-alives and resets.
93 * @recv_work: &work_struct for receiving FIP frames. 93 * @recv_work: &work_struct for receiving FIP frames.
94 * @fip_recv_list: list of received FIP frames. 94 * @fip_recv_list: list of received FIP frames.
95 * @flogi_req: clone of FLOGI request sent
95 * @rnd_state: state for pseudo-random number generator. 96 * @rnd_state: state for pseudo-random number generator.
96 * @port_id: proposed or selected local-port ID. 97 * @port_id: proposed or selected local-port ID.
97 * @user_mfs: configured maximum FC frame size, including FC header. 98 * @user_mfs: configured maximum FC frame size, including FC header.
98 * @flogi_oxid: exchange ID of most recent fabric login. 99 * @flogi_oxid: exchange ID of most recent fabric login.
100 * @flogi_req_send: send of FLOGI requested
99 * @flogi_count: number of FLOGI attempts in AUTO mode. 101 * @flogi_count: number of FLOGI attempts in AUTO mode.
100 * @map_dest: use the FC_MAP mode for destination MAC addresses. 102 * @map_dest: use the FC_MAP mode for destination MAC addresses.
101 * @spma: supports SPMA server-provided MACs mode 103 * @spma: supports SPMA server-provided MACs mode
@@ -106,6 +108,7 @@ enum fip_state {
106 * @update_mac: LLD-supplied function to handle changes to MAC addresses. 108 * @update_mac: LLD-supplied function to handle changes to MAC addresses.
107 * @get_src_addr: LLD-supplied function to supply a source MAC address. 109 * @get_src_addr: LLD-supplied function to supply a source MAC address.
108 * @ctlr_mutex: lock protecting this structure. 110 * @ctlr_mutex: lock protecting this structure.
111 * @ctlr_lock: spinlock covering flogi_req
109 * 112 *
110 * This structure is used by all FCoE drivers. It contains information 113 * This structure is used by all FCoE drivers. It contains information
111 * needed by all FCoE low-level drivers (LLDs) as well as internal state 114 * needed by all FCoE low-level drivers (LLDs) as well as internal state
@@ -126,12 +129,14 @@ struct fcoe_ctlr {
126 struct work_struct timer_work; 129 struct work_struct timer_work;
127 struct work_struct recv_work; 130 struct work_struct recv_work;
128 struct sk_buff_head fip_recv_list; 131 struct sk_buff_head fip_recv_list;
132 struct sk_buff *flogi_req;
129 133
130 struct rnd_state rnd_state; 134 struct rnd_state rnd_state;
131 u32 port_id; 135 u32 port_id;
132 136
133 u16 user_mfs; 137 u16 user_mfs;
134 u16 flogi_oxid; 138 u16 flogi_oxid;
139 u8 flogi_req_send;
135 u8 flogi_count; 140 u8 flogi_count;
136 u8 map_dest; 141 u8 map_dest;
137 u8 spma; 142 u8 spma;
@@ -143,6 +148,7 @@ struct fcoe_ctlr {
143 void (*update_mac)(struct fc_lport *, u8 *addr); 148 void (*update_mac)(struct fc_lport *, u8 *addr);
144 u8 * (*get_src_addr)(struct fc_lport *); 149 u8 * (*get_src_addr)(struct fc_lport *);
145 struct mutex ctlr_mutex; 150 struct mutex ctlr_mutex;
151 spinlock_t ctlr_lock;
146}; 152};
147 153
148/** 154/**
@@ -155,6 +161,7 @@ struct fcoe_ctlr {
155 * @fcf_mac: Ethernet address of the FCF 161 * @fcf_mac: Ethernet address of the FCF
156 * @vfid: virtual fabric ID 162 * @vfid: virtual fabric ID
157 * @pri: selection priority, smaller values are better 163 * @pri: selection priority, smaller values are better
164 * @flogi_sent: current FLOGI sent to this FCF
158 * @flags: flags received from advertisement 165 * @flags: flags received from advertisement
159 * @fka_period: keep-alive period, in jiffies 166 * @fka_period: keep-alive period, in jiffies
160 * 167 *
@@ -176,6 +183,7 @@ struct fcoe_fcf {
176 u8 fcf_mac[ETH_ALEN]; 183 u8 fcf_mac[ETH_ALEN];
177 184
178 u8 pri; 185 u8 pri;
186 u8 flogi_sent;
179 u16 flags; 187 u16 flags;
180 u32 fka_period; 188 u32 fka_period;
181 u8 fd_flags:1; 189 u8 fd_flags:1;
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index b81d969ddc67..748382b32b52 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -89,6 +89,7 @@ enum {
89 ISCSI_TASK_RUNNING, 89 ISCSI_TASK_RUNNING,
90 ISCSI_TASK_ABRT_TMF, /* aborted due to TMF */ 90 ISCSI_TASK_ABRT_TMF, /* aborted due to TMF */
91 ISCSI_TASK_ABRT_SESS_RECOV, /* aborted due to session recovery */ 91 ISCSI_TASK_ABRT_SESS_RECOV, /* aborted due to session recovery */
92 ISCSI_TASK_REQUEUE_SCSIQ, /* qcmd requeueing to scsi-ml */
92}; 93};
93 94
94struct iscsi_r2t_info { 95struct iscsi_r2t_info {
@@ -341,7 +342,7 @@ extern int iscsi_eh_abort(struct scsi_cmnd *sc);
341extern int iscsi_eh_recover_target(struct scsi_cmnd *sc); 342extern int iscsi_eh_recover_target(struct scsi_cmnd *sc);
342extern int iscsi_eh_session_reset(struct scsi_cmnd *sc); 343extern int iscsi_eh_session_reset(struct scsi_cmnd *sc);
343extern int iscsi_eh_device_reset(struct scsi_cmnd *sc); 344extern int iscsi_eh_device_reset(struct scsi_cmnd *sc);
344extern int iscsi_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *sc); 345extern int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc);
345 346
346/* 347/*
347 * iSCSI host helpers. 348 * iSCSI host helpers.
@@ -419,6 +420,7 @@ extern struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *, itt_t);
419extern struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *, itt_t); 420extern struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *, itt_t);
420extern void iscsi_requeue_task(struct iscsi_task *task); 421extern void iscsi_requeue_task(struct iscsi_task *task);
421extern void iscsi_put_task(struct iscsi_task *task); 422extern void iscsi_put_task(struct iscsi_task *task);
423extern void __iscsi_put_task(struct iscsi_task *task);
422extern void __iscsi_get_task(struct iscsi_task *task); 424extern void __iscsi_get_task(struct iscsi_task *task);
423extern void iscsi_complete_scsi_task(struct iscsi_task *task, 425extern void iscsi_complete_scsi_task(struct iscsi_task *task,
424 uint32_t exp_cmdsn, uint32_t max_cmdsn); 426 uint32_t exp_cmdsn, uint32_t max_cmdsn);
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 90ce527ecf3d..8f6bb9c7f3eb 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -361,6 +361,8 @@ struct sas_ha_struct {
361 /* The class calls this to send a task for execution. */ 361 /* The class calls this to send a task for execution. */
362 int lldd_max_execute_num; 362 int lldd_max_execute_num;
363 int lldd_queue_size; 363 int lldd_queue_size;
364 int strict_wide_ports; /* both sas_addr and attached_sas_addr must match
365 * their siblings when forming wide ports */
364 366
365 /* LLDD calls these to notify the class of an event. */ 367 /* LLDD calls these to notify the class of an event. */
366 void (*notify_ha_event)(struct sas_ha_struct *, enum ha_event); 368 void (*notify_ha_event)(struct sas_ha_struct *, enum ha_event);
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index 216af8538cc9..1651fef18831 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -115,33 +115,61 @@ struct scsi_cmnd;
115#define PERSISTENT_RESERVE_OUT 0x5f 115#define PERSISTENT_RESERVE_OUT 0x5f
116#define VARIABLE_LENGTH_CMD 0x7f 116#define VARIABLE_LENGTH_CMD 0x7f
117#define REPORT_LUNS 0xa0 117#define REPORT_LUNS 0xa0
118#define SECURITY_PROTOCOL_IN 0xa2
118#define MAINTENANCE_IN 0xa3 119#define MAINTENANCE_IN 0xa3
119#define MAINTENANCE_OUT 0xa4 120#define MAINTENANCE_OUT 0xa4
120#define MOVE_MEDIUM 0xa5 121#define MOVE_MEDIUM 0xa5
121#define EXCHANGE_MEDIUM 0xa6 122#define EXCHANGE_MEDIUM 0xa6
122#define READ_12 0xa8 123#define READ_12 0xa8
123#define WRITE_12 0xaa 124#define WRITE_12 0xaa
125#define READ_MEDIA_SERIAL_NUMBER 0xab
124#define WRITE_VERIFY_12 0xae 126#define WRITE_VERIFY_12 0xae
125#define VERIFY_12 0xaf 127#define VERIFY_12 0xaf
126#define SEARCH_HIGH_12 0xb0 128#define SEARCH_HIGH_12 0xb0
127#define SEARCH_EQUAL_12 0xb1 129#define SEARCH_EQUAL_12 0xb1
128#define SEARCH_LOW_12 0xb2 130#define SEARCH_LOW_12 0xb2
131#define SECURITY_PROTOCOL_OUT 0xb5
129#define READ_ELEMENT_STATUS 0xb8 132#define READ_ELEMENT_STATUS 0xb8
130#define SEND_VOLUME_TAG 0xb6 133#define SEND_VOLUME_TAG 0xb6
131#define WRITE_LONG_2 0xea 134#define WRITE_LONG_2 0xea
135#define EXTENDED_COPY 0x83
136#define RECEIVE_COPY_RESULTS 0x84
137#define ACCESS_CONTROL_IN 0x86
138#define ACCESS_CONTROL_OUT 0x87
132#define READ_16 0x88 139#define READ_16 0x88
133#define WRITE_16 0x8a 140#define WRITE_16 0x8a
141#define READ_ATTRIBUTE 0x8c
142#define WRITE_ATTRIBUTE 0x8d
134#define VERIFY_16 0x8f 143#define VERIFY_16 0x8f
135#define WRITE_SAME_16 0x93 144#define WRITE_SAME_16 0x93
136#define SERVICE_ACTION_IN 0x9e 145#define SERVICE_ACTION_IN 0x9e
137/* values for service action in */ 146/* values for service action in */
138#define SAI_READ_CAPACITY_16 0x10 147#define SAI_READ_CAPACITY_16 0x10
139#define SAI_GET_LBA_STATUS 0x12 148#define SAI_GET_LBA_STATUS 0x12
149/* values for VARIABLE_LENGTH_CMD service action codes
150 * see spc4r17 Section D.3.5, table D.7 and D.8 */
151#define VLC_SA_RECEIVE_CREDENTIAL 0x1800
140/* values for maintenance in */ 152/* values for maintenance in */
153#define MI_REPORT_IDENTIFYING_INFORMATION 0x05
141#define MI_REPORT_TARGET_PGS 0x0a 154#define MI_REPORT_TARGET_PGS 0x0a
155#define MI_REPORT_ALIASES 0x0b
156#define MI_REPORT_SUPPORTED_OPERATION_CODES 0x0c
157#define MI_REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS 0x0d
158#define MI_REPORT_PRIORITY 0x0e
159#define MI_REPORT_TIMESTAMP 0x0f
160#define MI_MANAGEMENT_PROTOCOL_IN 0x10
142/* values for maintenance out */ 161/* values for maintenance out */
162#define MO_SET_IDENTIFYING_INFORMATION 0x06
143#define MO_SET_TARGET_PGS 0x0a 163#define MO_SET_TARGET_PGS 0x0a
164#define MO_CHANGE_ALIASES 0x0b
165#define MO_SET_PRIORITY 0x0e
166#define MO_SET_TIMESTAMP 0x0f
167#define MO_MANAGEMENT_PROTOCOL_OUT 0x10
144/* values for variable length command */ 168/* values for variable length command */
169#define XDREAD_32 0x03
170#define XDWRITE_32 0x04
171#define XPWRITE_32 0x06
172#define XDWRITEREAD_32 0x07
145#define READ_32 0x09 173#define READ_32 0x09
146#define VERIFY_32 0x0a 174#define VERIFY_32 0x0a
147#define WRITE_32 0x0b 175#define WRITE_32 0x0b
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 035f4399edbc..14fb6d67e6a3 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -237,11 +237,18 @@ static struct inode *mqueue_alloc_inode(struct super_block *sb)
237 return &ei->vfs_inode; 237 return &ei->vfs_inode;
238} 238}
239 239
240static void mqueue_destroy_inode(struct inode *inode) 240static void mqueue_i_callback(struct rcu_head *head)
241{ 241{
242 struct inode *inode = container_of(head, struct inode, i_rcu);
243 INIT_LIST_HEAD(&inode->i_dentry);
242 kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode)); 244 kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
243} 245}
244 246
247static void mqueue_destroy_inode(struct inode *inode)
248{
249 call_rcu(&inode->i_rcu, mqueue_i_callback);
250}
251
245static void mqueue_evict_inode(struct inode *inode) 252static void mqueue_evict_inode(struct inode *inode)
246{ 253{
247 struct mqueue_inode_info *info; 254 struct mqueue_inode_info *info;
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 66a416b42c18..51cddc11cd85 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -763,6 +763,8 @@ EXPORT_SYMBOL_GPL(cgroup_unlock);
763 * -> cgroup_mkdir. 763 * -> cgroup_mkdir.
764 */ 764 */
765 765
766static struct dentry *cgroup_lookup(struct inode *dir,
767 struct dentry *dentry, struct nameidata *nd);
766static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode); 768static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode);
767static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry); 769static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry);
768static int cgroup_populate_dir(struct cgroup *cgrp); 770static int cgroup_populate_dir(struct cgroup *cgrp);
@@ -874,25 +876,29 @@ static void cgroup_clear_directory(struct dentry *dentry)
874 struct list_head *node; 876 struct list_head *node;
875 877
876 BUG_ON(!mutex_is_locked(&dentry->d_inode->i_mutex)); 878 BUG_ON(!mutex_is_locked(&dentry->d_inode->i_mutex));
877 spin_lock(&dcache_lock); 879 spin_lock(&dentry->d_lock);
878 node = dentry->d_subdirs.next; 880 node = dentry->d_subdirs.next;
879 while (node != &dentry->d_subdirs) { 881 while (node != &dentry->d_subdirs) {
880 struct dentry *d = list_entry(node, struct dentry, d_u.d_child); 882 struct dentry *d = list_entry(node, struct dentry, d_u.d_child);
883
884 spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
881 list_del_init(node); 885 list_del_init(node);
882 if (d->d_inode) { 886 if (d->d_inode) {
883 /* This should never be called on a cgroup 887 /* This should never be called on a cgroup
884 * directory with child cgroups */ 888 * directory with child cgroups */
885 BUG_ON(d->d_inode->i_mode & S_IFDIR); 889 BUG_ON(d->d_inode->i_mode & S_IFDIR);
886 d = dget_locked(d); 890 dget_dlock(d);
887 spin_unlock(&dcache_lock); 891 spin_unlock(&d->d_lock);
892 spin_unlock(&dentry->d_lock);
888 d_delete(d); 893 d_delete(d);
889 simple_unlink(dentry->d_inode, d); 894 simple_unlink(dentry->d_inode, d);
890 dput(d); 895 dput(d);
891 spin_lock(&dcache_lock); 896 spin_lock(&dentry->d_lock);
892 } 897 } else
898 spin_unlock(&d->d_lock);
893 node = dentry->d_subdirs.next; 899 node = dentry->d_subdirs.next;
894 } 900 }
895 spin_unlock(&dcache_lock); 901 spin_unlock(&dentry->d_lock);
896} 902}
897 903
898/* 904/*
@@ -900,11 +906,16 @@ static void cgroup_clear_directory(struct dentry *dentry)
900 */ 906 */
901static void cgroup_d_remove_dir(struct dentry *dentry) 907static void cgroup_d_remove_dir(struct dentry *dentry)
902{ 908{
909 struct dentry *parent;
910
903 cgroup_clear_directory(dentry); 911 cgroup_clear_directory(dentry);
904 912
905 spin_lock(&dcache_lock); 913 parent = dentry->d_parent;
914 spin_lock(&parent->d_lock);
915 spin_lock(&dentry->d_lock);
906 list_del_init(&dentry->d_u.d_child); 916 list_del_init(&dentry->d_u.d_child);
907 spin_unlock(&dcache_lock); 917 spin_unlock(&dentry->d_lock);
918 spin_unlock(&parent->d_lock);
908 remove_dir(dentry); 919 remove_dir(dentry);
909} 920}
910 921
@@ -2180,7 +2191,7 @@ static const struct file_operations cgroup_file_operations = {
2180}; 2191};
2181 2192
2182static const struct inode_operations cgroup_dir_inode_operations = { 2193static const struct inode_operations cgroup_dir_inode_operations = {
2183 .lookup = simple_lookup, 2194 .lookup = cgroup_lookup,
2184 .mkdir = cgroup_mkdir, 2195 .mkdir = cgroup_mkdir,
2185 .rmdir = cgroup_rmdir, 2196 .rmdir = cgroup_rmdir,
2186 .rename = cgroup_rename, 2197 .rename = cgroup_rename,
@@ -2196,13 +2207,29 @@ static inline struct cftype *__file_cft(struct file *file)
2196 return __d_cft(file->f_dentry); 2207 return __d_cft(file->f_dentry);
2197} 2208}
2198 2209
2199static int cgroup_create_file(struct dentry *dentry, mode_t mode, 2210static int cgroup_delete_dentry(const struct dentry *dentry)
2200 struct super_block *sb) 2211{
2212 return 1;
2213}
2214
2215static struct dentry *cgroup_lookup(struct inode *dir,
2216 struct dentry *dentry, struct nameidata *nd)
2201{ 2217{
2202 static const struct dentry_operations cgroup_dops = { 2218 static const struct dentry_operations cgroup_dentry_operations = {
2219 .d_delete = cgroup_delete_dentry,
2203 .d_iput = cgroup_diput, 2220 .d_iput = cgroup_diput,
2204 }; 2221 };
2205 2222
2223 if (dentry->d_name.len > NAME_MAX)
2224 return ERR_PTR(-ENAMETOOLONG);
2225 d_set_d_op(dentry, &cgroup_dentry_operations);
2226 d_add(dentry, NULL);
2227 return NULL;
2228}
2229
2230static int cgroup_create_file(struct dentry *dentry, mode_t mode,
2231 struct super_block *sb)
2232{
2206 struct inode *inode; 2233 struct inode *inode;
2207 2234
2208 if (!dentry) 2235 if (!dentry)
@@ -2228,7 +2255,6 @@ static int cgroup_create_file(struct dentry *dentry, mode_t mode,
2228 inode->i_size = 0; 2255 inode->i_size = 0;
2229 inode->i_fop = &cgroup_file_operations; 2256 inode->i_fop = &cgroup_file_operations;
2230 } 2257 }
2231 dentry->d_op = &cgroup_dops;
2232 d_instantiate(dentry, inode); 2258 d_instantiate(dentry, inode);
2233 dget(dentry); /* Extra count - pin the dentry in core */ 2259 dget(dentry); /* Extra count - pin the dentry in core */
2234 return 0; 2260 return 0;
@@ -3638,9 +3664,7 @@ again:
3638 list_del(&cgrp->sibling); 3664 list_del(&cgrp->sibling);
3639 cgroup_unlock_hierarchy(cgrp->root); 3665 cgroup_unlock_hierarchy(cgrp->root);
3640 3666
3641 spin_lock(&cgrp->dentry->d_lock);
3642 d = dget(cgrp->dentry); 3667 d = dget(cgrp->dentry);
3643 spin_unlock(&d->d_lock);
3644 3668
3645 cgroup_d_remove_dir(d); 3669 cgroup_d_remove_dir(d);
3646 dput(d); 3670 dput(d);
diff --git a/mm/filemap.c b/mm/filemap.c
index 6b9aee20f242..ca389394fa2a 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -102,9 +102,6 @@
102 * ->inode_lock (zap_pte_range->set_page_dirty) 102 * ->inode_lock (zap_pte_range->set_page_dirty)
103 * ->private_lock (zap_pte_range->__set_page_dirty_buffers) 103 * ->private_lock (zap_pte_range->__set_page_dirty_buffers)
104 * 104 *
105 * ->task->proc_lock
106 * ->dcache_lock (proc_pid_lookup)
107 *
108 * (code doesn't rely on that order, so you could switch it around) 105 * (code doesn't rely on that order, so you could switch it around)
109 * ->tasklist_lock (memory_failure, collect_procs_ao) 106 * ->tasklist_lock (memory_failure, collect_procs_ao)
110 * ->i_mmap_lock 107 * ->i_mmap_lock
diff --git a/mm/shmem.c b/mm/shmem.c
index 47fdeeb9d636..5ee67c990602 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2415,13 +2415,20 @@ static struct inode *shmem_alloc_inode(struct super_block *sb)
2415 return &p->vfs_inode; 2415 return &p->vfs_inode;
2416} 2416}
2417 2417
2418static void shmem_i_callback(struct rcu_head *head)
2419{
2420 struct inode *inode = container_of(head, struct inode, i_rcu);
2421 INIT_LIST_HEAD(&inode->i_dentry);
2422 kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
2423}
2424
2418static void shmem_destroy_inode(struct inode *inode) 2425static void shmem_destroy_inode(struct inode *inode)
2419{ 2426{
2420 if ((inode->i_mode & S_IFMT) == S_IFREG) { 2427 if ((inode->i_mode & S_IFMT) == S_IFREG) {
2421 /* only struct inode is valid if it's an inline symlink */ 2428 /* only struct inode is valid if it's an inline symlink */
2422 mpol_free_shared_policy(&SHMEM_I(inode)->policy); 2429 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
2423 } 2430 }
2424 kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode)); 2431 call_rcu(&inode->i_rcu, shmem_i_callback);
2425} 2432}
2426 2433
2427static void init_once(void *foo) 2434static void init_once(void *foo)
diff --git a/mm/slab.c b/mm/slab.c
index b1e40dafbab3..6107f2380e08 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2781,7 +2781,7 @@ static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
2781/* 2781/*
2782 * Map pages beginning at addr to the given cache and slab. This is required 2782 * Map pages beginning at addr to the given cache and slab. This is required
2783 * for the slab allocator to be able to lookup the cache and slab of a 2783 * for the slab allocator to be able to lookup the cache and slab of a
2784 * virtual address for kfree, ksize, kmem_ptr_validate, and slab debugging. 2784 * virtual address for kfree, ksize, and slab debugging.
2785 */ 2785 */
2786static void slab_map_pages(struct kmem_cache *cache, struct slab *slab, 2786static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
2787 void *addr) 2787 void *addr)
@@ -3660,36 +3660,6 @@ void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
3660EXPORT_SYMBOL(kmem_cache_alloc_notrace); 3660EXPORT_SYMBOL(kmem_cache_alloc_notrace);
3661#endif 3661#endif
3662 3662
3663/**
3664 * kmem_ptr_validate - check if an untrusted pointer might be a slab entry.
3665 * @cachep: the cache we're checking against
3666 * @ptr: pointer to validate
3667 *
3668 * This verifies that the untrusted pointer looks sane;
3669 * it is _not_ a guarantee that the pointer is actually
3670 * part of the slab cache in question, but it at least
3671 * validates that the pointer can be dereferenced and
3672 * looks half-way sane.
3673 *
3674 * Currently only used for dentry validation.
3675 */
3676int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr)
3677{
3678 unsigned long size = cachep->buffer_size;
3679 struct page *page;
3680
3681 if (unlikely(!kern_ptr_validate(ptr, size)))
3682 goto out;
3683 page = virt_to_page(ptr);
3684 if (unlikely(!PageSlab(page)))
3685 goto out;
3686 if (unlikely(page_get_cache(page) != cachep))
3687 goto out;
3688 return 1;
3689out:
3690 return 0;
3691}
3692
3693#ifdef CONFIG_NUMA 3663#ifdef CONFIG_NUMA
3694void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) 3664void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
3695{ 3665{
diff --git a/mm/slob.c b/mm/slob.c
index 617b6d6c42c7..3588eaaef726 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -678,11 +678,6 @@ int kmem_cache_shrink(struct kmem_cache *d)
678} 678}
679EXPORT_SYMBOL(kmem_cache_shrink); 679EXPORT_SYMBOL(kmem_cache_shrink);
680 680
681int kmem_ptr_validate(struct kmem_cache *a, const void *b)
682{
683 return 0;
684}
685
686static unsigned int slob_ready __read_mostly; 681static unsigned int slob_ready __read_mostly;
687 682
688int slab_is_available(void) 683int slab_is_available(void)
diff --git a/mm/slub.c b/mm/slub.c
index bec0e355fbad..a2fe1727ed85 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1917,17 +1917,6 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
1917} 1917}
1918EXPORT_SYMBOL(kmem_cache_free); 1918EXPORT_SYMBOL(kmem_cache_free);
1919 1919
1920/* Figure out on which slab page the object resides */
1921static struct page *get_object_page(const void *x)
1922{
1923 struct page *page = virt_to_head_page(x);
1924
1925 if (!PageSlab(page))
1926 return NULL;
1927
1928 return page;
1929}
1930
1931/* 1920/*
1932 * Object placement in a slab is made very easy because we always start at 1921 * Object placement in a slab is made very easy because we always start at
1933 * offset 0. If we tune the size of the object to the alignment then we can 1922 * offset 0. If we tune the size of the object to the alignment then we can
@@ -2386,35 +2375,6 @@ error:
2386} 2375}
2387 2376
2388/* 2377/*
2389 * Check if a given pointer is valid
2390 */
2391int kmem_ptr_validate(struct kmem_cache *s, const void *object)
2392{
2393 struct page *page;
2394
2395 if (!kern_ptr_validate(object, s->size))
2396 return 0;
2397
2398 page = get_object_page(object);
2399
2400 if (!page || s != page->slab)
2401 /* No slab or wrong slab */
2402 return 0;
2403
2404 if (!check_valid_pointer(s, page, object))
2405 return 0;
2406
2407 /*
2408 * We could also check if the object is on the slabs freelist.
2409 * But this would be too expensive and it seems that the main
2410 * purpose of kmem_ptr_valid() is to check if the object belongs
2411 * to a certain slab.
2412 */
2413 return 1;
2414}
2415EXPORT_SYMBOL(kmem_ptr_validate);
2416
2417/*
2418 * Determine the size of a slab object 2378 * Determine the size of a slab object
2419 */ 2379 */
2420unsigned int kmem_cache_size(struct kmem_cache *s) 2380unsigned int kmem_cache_size(struct kmem_cache *s)
diff --git a/mm/util.c b/mm/util.c
index 73dac81e9f78..f126975ef23e 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -186,27 +186,6 @@ void kzfree(const void *p)
186} 186}
187EXPORT_SYMBOL(kzfree); 187EXPORT_SYMBOL(kzfree);
188 188
189int kern_ptr_validate(const void *ptr, unsigned long size)
190{
191 unsigned long addr = (unsigned long)ptr;
192 unsigned long min_addr = PAGE_OFFSET;
193 unsigned long align_mask = sizeof(void *) - 1;
194
195 if (unlikely(addr < min_addr))
196 goto out;
197 if (unlikely(addr > (unsigned long)high_memory - size))
198 goto out;
199 if (unlikely(addr & align_mask))
200 goto out;
201 if (unlikely(!kern_addr_valid(addr)))
202 goto out;
203 if (unlikely(!kern_addr_valid(addr + size - 1)))
204 goto out;
205 return 1;
206out:
207 return 0;
208}
209
210/* 189/*
211 * strndup_user - duplicate an existing string from user space 190 * strndup_user - duplicate an existing string from user space
212 * @s: The string to duplicate 191 * @s: The string to duplicate
diff --git a/net/socket.c b/net/socket.c
index c1663c0ff3d3..ccc576a6a508 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -262,6 +262,7 @@ static struct inode *sock_alloc_inode(struct super_block *sb)
262} 262}
263 263
264 264
265
265static void wq_free_rcu(struct rcu_head *head) 266static void wq_free_rcu(struct rcu_head *head)
266{ 267{
267 struct socket_wq *wq = container_of(head, struct socket_wq, rcu); 268 struct socket_wq *wq = container_of(head, struct socket_wq, rcu);
@@ -360,14 +361,14 @@ static int sock_alloc_file(struct socket *sock, struct file **f, int flags)
360 if (unlikely(fd < 0)) 361 if (unlikely(fd < 0))
361 return fd; 362 return fd;
362 363
363 path.dentry = d_alloc(sock_mnt->mnt_sb->s_root, &name); 364 path.dentry = d_alloc_pseudo(sock_mnt->mnt_sb, &name);
364 if (unlikely(!path.dentry)) { 365 if (unlikely(!path.dentry)) {
365 put_unused_fd(fd); 366 put_unused_fd(fd);
366 return -ENOMEM; 367 return -ENOMEM;
367 } 368 }
368 path.mnt = mntget(sock_mnt); 369 path.mnt = mntget(sock_mnt);
369 370
370 path.dentry->d_op = &sockfs_dentry_operations; 371 d_set_d_op(path.dentry, &sockfs_dentry_operations);
371 d_instantiate(path.dentry, SOCK_INODE(sock)); 372 d_instantiate(path.dentry, SOCK_INODE(sock));
372 SOCK_INODE(sock)->i_fop = &socket_file_ops; 373 SOCK_INODE(sock)->i_fop = &socket_file_ops;
373 374
@@ -2390,6 +2391,8 @@ EXPORT_SYMBOL(sock_unregister);
2390 2391
2391static int __init sock_init(void) 2392static int __init sock_init(void)
2392{ 2393{
2394 int err;
2395
2393 /* 2396 /*
2394 * Initialize sock SLAB cache. 2397 * Initialize sock SLAB cache.
2395 */ 2398 */
@@ -2406,8 +2409,15 @@ static int __init sock_init(void)
2406 */ 2409 */
2407 2410
2408 init_inodecache(); 2411 init_inodecache();
2409 register_filesystem(&sock_fs_type); 2412
2413 err = register_filesystem(&sock_fs_type);
2414 if (err)
2415 goto out_fs;
2410 sock_mnt = kern_mount(&sock_fs_type); 2416 sock_mnt = kern_mount(&sock_fs_type);
2417 if (IS_ERR(sock_mnt)) {
2418 err = PTR_ERR(sock_mnt);
2419 goto out_mount;
2420 }
2411 2421
2412 /* The real protocol initialization is performed in later initcalls. 2422 /* The real protocol initialization is performed in later initcalls.
2413 */ 2423 */
@@ -2420,7 +2430,13 @@ static int __init sock_init(void)
2420 skb_timestamping_init(); 2430 skb_timestamping_init();
2421#endif 2431#endif
2422 2432
2423 return 0; 2433out:
2434 return err;
2435
2436out_mount:
2437 unregister_filesystem(&sock_fs_type);
2438out_fs:
2439 goto out;
2424} 2440}
2425 2441
2426core_initcall(sock_init); /* early initcall */ 2442core_initcall(sock_init); /* early initcall */
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 10a17a37ec4e..09f01f41e55a 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -162,11 +162,19 @@ rpc_alloc_inode(struct super_block *sb)
162} 162}
163 163
164static void 164static void
165rpc_destroy_inode(struct inode *inode) 165rpc_i_callback(struct rcu_head *head)
166{ 166{
167 struct inode *inode = container_of(head, struct inode, i_rcu);
168 INIT_LIST_HEAD(&inode->i_dentry);
167 kmem_cache_free(rpc_inode_cachep, RPC_I(inode)); 169 kmem_cache_free(rpc_inode_cachep, RPC_I(inode));
168} 170}
169 171
172static void
173rpc_destroy_inode(struct inode *inode)
174{
175 call_rcu(&inode->i_rcu, rpc_i_callback);
176}
177
170static int 178static int
171rpc_pipe_open(struct inode *inode, struct file *filp) 179rpc_pipe_open(struct inode *inode, struct file *filp)
172{ 180{
@@ -430,7 +438,7 @@ void rpc_put_mount(void)
430} 438}
431EXPORT_SYMBOL_GPL(rpc_put_mount); 439EXPORT_SYMBOL_GPL(rpc_put_mount);
432 440
433static int rpc_delete_dentry(struct dentry *dentry) 441static int rpc_delete_dentry(const struct dentry *dentry)
434{ 442{
435 return 1; 443 return 1;
436} 444}
@@ -583,7 +591,7 @@ static struct dentry *__rpc_lookup_create(struct dentry *parent,
583 } 591 }
584 } 592 }
585 if (!dentry->d_inode) 593 if (!dentry->d_inode)
586 dentry->d_op = &rpc_dentry_operations; 594 d_set_d_op(dentry, &rpc_dentry_operations);
587out_err: 595out_err:
588 return dentry; 596 return dentry;
589} 597}
diff --git a/security/security.c b/security/security.c
index e5fb07a3052d..739e40362f44 100644
--- a/security/security.c
+++ b/security/security.c
@@ -513,6 +513,15 @@ int security_inode_permission(struct inode *inode, int mask)
513 return security_ops->inode_permission(inode, mask); 513 return security_ops->inode_permission(inode, mask);
514} 514}
515 515
516int security_inode_exec_permission(struct inode *inode, unsigned int flags)
517{
518 if (unlikely(IS_PRIVATE(inode)))
519 return 0;
520 if (flags)
521 return -ECHILD;
522 return security_ops->inode_permission(inode, MAY_EXEC);
523}
524
516int security_inode_setattr(struct dentry *dentry, struct iattr *attr) 525int security_inode_setattr(struct dentry *dentry, struct iattr *attr)
517{ 526{
518 if (unlikely(IS_PRIVATE(dentry->d_inode))) 527 if (unlikely(IS_PRIVATE(dentry->d_inode)))
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 073fd5b0a53a..43deac219491 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -1145,24 +1145,28 @@ static void sel_remove_entries(struct dentry *de)
1145{ 1145{
1146 struct list_head *node; 1146 struct list_head *node;
1147 1147
1148 spin_lock(&dcache_lock); 1148 spin_lock(&de->d_lock);
1149 node = de->d_subdirs.next; 1149 node = de->d_subdirs.next;
1150 while (node != &de->d_subdirs) { 1150 while (node != &de->d_subdirs) {
1151 struct dentry *d = list_entry(node, struct dentry, d_u.d_child); 1151 struct dentry *d = list_entry(node, struct dentry, d_u.d_child);
1152
1153 spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
1152 list_del_init(node); 1154 list_del_init(node);
1153 1155
1154 if (d->d_inode) { 1156 if (d->d_inode) {
1155 d = dget_locked(d); 1157 dget_dlock(d);
1156 spin_unlock(&dcache_lock); 1158 spin_unlock(&de->d_lock);
1159 spin_unlock(&d->d_lock);
1157 d_delete(d); 1160 d_delete(d);
1158 simple_unlink(de->d_inode, d); 1161 simple_unlink(de->d_inode, d);
1159 dput(d); 1162 dput(d);
1160 spin_lock(&dcache_lock); 1163 spin_lock(&de->d_lock);
1161 } 1164 } else
1165 spin_unlock(&d->d_lock);
1162 node = de->d_subdirs.next; 1166 node = de->d_subdirs.next;
1163 } 1167 }
1164 1168
1165 spin_unlock(&dcache_lock); 1169 spin_unlock(&de->d_lock);
1166} 1170}
1167 1171
1168#define BOOL_DIR_NAME "booleans" 1172#define BOOL_DIR_NAME "booleans"
diff --git a/security/tomoyo/realpath.c b/security/tomoyo/realpath.c
index 1d0bf8fa1922..d1e05b047715 100644
--- a/security/tomoyo/realpath.c
+++ b/security/tomoyo/realpath.c
@@ -14,6 +14,7 @@
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <net/sock.h> 15#include <net/sock.h>
16#include "common.h" 16#include "common.h"
17#include "../../fs/internal.h"
17 18
18/** 19/**
19 * tomoyo_encode: Convert binary string to ascii string. 20 * tomoyo_encode: Convert binary string to ascii string.